1 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/elf.h linux-2.6.32.42/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/pgtable.h linux-2.6.32.42/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.42/arch/alpha/kernel/module.c linux-2.6.32.42/arch/alpha/kernel/module.c
40 --- linux-2.6.32.42/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.42/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.42/arch/alpha/kernel/osf_sys.c linux-2.6.32.42/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58 - if (namelen > 32)
59 + if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63 @@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67 - if (len > count)
68 + if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72 @@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76 - if (nbytes < sizeof(*hwrpb))
77 + if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81 @@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85 + unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89 @@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93 - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94 + ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95 + (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102 + err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106 @@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110 - if (!vma || addr + len <= vma->vm_start)
111 + if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115 @@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119 +#ifdef CONFIG_PAX_RANDMMAP
120 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121 +#endif
122 +
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126 @@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131 - len, limit);
132 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133 +
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
137 diff -urNp linux-2.6.32.42/arch/alpha/mm/fault.c linux-2.6.32.42/arch/alpha/mm/fault.c
138 --- linux-2.6.32.42/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139 +++ linux-2.6.32.42/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144 +#ifdef CONFIG_PAX_PAGEEXEC
145 +/*
146 + * PaX: decide what to do with offenders (regs->pc = fault address)
147 + *
148 + * returns 1 when task should be killed
149 + * 2 when patched PLT trampoline was detected
150 + * 3 when unpatched PLT trampoline was detected
151 + */
152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
153 +{
154 +
155 +#ifdef CONFIG_PAX_EMUPLT
156 + int err;
157 +
158 + do { /* PaX: patched PLT emulation #1 */
159 + unsigned int ldah, ldq, jmp;
160 +
161 + err = get_user(ldah, (unsigned int *)regs->pc);
162 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164 +
165 + if (err)
166 + break;
167 +
168 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170 + jmp == 0x6BFB0000U)
171 + {
172 + unsigned long r27, addr;
173 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175 +
176 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177 + err = get_user(r27, (unsigned long *)addr);
178 + if (err)
179 + break;
180 +
181 + regs->r27 = r27;
182 + regs->pc = r27;
183 + return 2;
184 + }
185 + } while (0);
186 +
187 + do { /* PaX: patched PLT emulation #2 */
188 + unsigned int ldah, lda, br;
189 +
190 + err = get_user(ldah, (unsigned int *)regs->pc);
191 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
192 + err |= get_user(br, (unsigned int *)(regs->pc+8));
193 +
194 + if (err)
195 + break;
196 +
197 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
199 + (br & 0xFFE00000U) == 0xC3E00000U)
200 + {
201 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204 +
205 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207 + return 2;
208 + }
209 + } while (0);
210 +
211 + do { /* PaX: unpatched PLT emulation */
212 + unsigned int br;
213 +
214 + err = get_user(br, (unsigned int *)regs->pc);
215 +
216 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217 + unsigned int br2, ldq, nop, jmp;
218 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219 +
220 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221 + err = get_user(br2, (unsigned int *)addr);
222 + err |= get_user(ldq, (unsigned int *)(addr+4));
223 + err |= get_user(nop, (unsigned int *)(addr+8));
224 + err |= get_user(jmp, (unsigned int *)(addr+12));
225 + err |= get_user(resolver, (unsigned long *)(addr+16));
226 +
227 + if (err)
228 + break;
229 +
230 + if (br2 == 0xC3600000U &&
231 + ldq == 0xA77B000CU &&
232 + nop == 0x47FF041FU &&
233 + jmp == 0x6B7B0000U)
234 + {
235 + regs->r28 = regs->pc+4;
236 + regs->r27 = addr+16;
237 + regs->pc = resolver;
238 + return 3;
239 + }
240 + }
241 + } while (0);
242 +#endif
243 +
244 + return 1;
245 +}
246 +
247 +void pax_report_insns(void *pc, void *sp)
248 +{
249 + unsigned long i;
250 +
251 + printk(KERN_ERR "PAX: bytes at PC: ");
252 + for (i = 0; i < 5; i++) {
253 + unsigned int c;
254 + if (get_user(c, (unsigned int *)pc+i))
255 + printk(KERN_CONT "???????? ");
256 + else
257 + printk(KERN_CONT "%08x ", c);
258 + }
259 + printk("\n");
260 +}
261 +#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269 - if (!(vma->vm_flags & VM_EXEC))
270 + if (!(vma->vm_flags & VM_EXEC)) {
271 +
272 +#ifdef CONFIG_PAX_PAGEEXEC
273 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274 + goto bad_area;
275 +
276 + up_read(&mm->mmap_sem);
277 + switch (pax_handle_fetch_fault(regs)) {
278 +
279 +#ifdef CONFIG_PAX_EMUPLT
280 + case 2:
281 + case 3:
282 + return;
283 +#endif
284 +
285 + }
286 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287 + do_group_exit(SIGKILL);
288 +#else
289 goto bad_area;
290 +#endif
291 +
292 + }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
296 diff -urNp linux-2.6.32.42/arch/arm/include/asm/elf.h linux-2.6.32.42/arch/arm/include/asm/elf.h
297 --- linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298 +++ linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305 +
306 +#ifdef CONFIG_PAX_ASLR
307 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308 +
309 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311 +#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315 diff -urNp linux-2.6.32.42/arch/arm/include/asm/kmap_types.h linux-2.6.32.42/arch/arm/include/asm/kmap_types.h
316 --- linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317 +++ linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318 @@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322 + KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
326 diff -urNp linux-2.6.32.42/arch/arm/include/asm/uaccess.h linux-2.6.32.42/arch/arm/include/asm/uaccess.h
327 --- linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328 +++ linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
329 @@ -22,6 +22,8 @@
330 #define VERIFY_READ 0
331 #define VERIFY_WRITE 1
332
333 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
334 +
335 /*
336 * The exception table consists of pairs of addresses: the first is the
337 * address of an instruction that is allowed to fault, and the second is
338 @@ -387,8 +389,23 @@ do { \
339
340
341 #ifdef CONFIG_MMU
342 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
343 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
344 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
345 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
346 +
347 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
348 +{
349 + if (!__builtin_constant_p(n))
350 + check_object_size(to, n, false);
351 + return ___copy_from_user(to, from, n);
352 +}
353 +
354 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
355 +{
356 + if (!__builtin_constant_p(n))
357 + check_object_size(from, n, true);
358 + return ___copy_to_user(to, from, n);
359 +}
360 +
361 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
362 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
363 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
364 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
365
366 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
367 {
368 + if ((long)n < 0)
369 + return n;
370 +
371 if (access_ok(VERIFY_READ, from, n))
372 n = __copy_from_user(to, from, n);
373 else /* security hole - plug it */
374 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
375
376 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
377 {
378 + if ((long)n < 0)
379 + return n;
380 +
381 if (access_ok(VERIFY_WRITE, to, n))
382 n = __copy_to_user(to, from, n);
383 return n;
384 diff -urNp linux-2.6.32.42/arch/arm/kernel/kgdb.c linux-2.6.32.42/arch/arm/kernel/kgdb.c
385 --- linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
386 +++ linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
387 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
388 * and we handle the normal undef case within the do_undefinstr
389 * handler.
390 */
391 -struct kgdb_arch arch_kgdb_ops = {
392 +const struct kgdb_arch arch_kgdb_ops = {
393 #ifndef __ARMEB__
394 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
395 #else /* ! __ARMEB__ */
396 diff -urNp linux-2.6.32.42/arch/arm/kernel/traps.c linux-2.6.32.42/arch/arm/kernel/traps.c
397 --- linux-2.6.32.42/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
398 +++ linux-2.6.32.42/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
399 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
400
401 DEFINE_SPINLOCK(die_lock);
402
403 +extern void gr_handle_kernel_exploit(void);
404 +
405 /*
406 * This function is protected against re-entrancy.
407 */
408 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
409 if (panic_on_oops)
410 panic("Fatal exception");
411
412 + gr_handle_kernel_exploit();
413 +
414 do_exit(SIGSEGV);
415 }
416
417 diff -urNp linux-2.6.32.42/arch/arm/lib/copy_from_user.S linux-2.6.32.42/arch/arm/lib/copy_from_user.S
418 --- linux-2.6.32.42/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
419 +++ linux-2.6.32.42/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
420 @@ -16,7 +16,7 @@
421 /*
422 * Prototype:
423 *
424 - * size_t __copy_from_user(void *to, const void *from, size_t n)
425 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
426 *
427 * Purpose:
428 *
429 @@ -84,11 +84,11 @@
430
431 .text
432
433 -ENTRY(__copy_from_user)
434 +ENTRY(___copy_from_user)
435
436 #include "copy_template.S"
437
438 -ENDPROC(__copy_from_user)
439 +ENDPROC(___copy_from_user)
440
441 .section .fixup,"ax"
442 .align 0
443 diff -urNp linux-2.6.32.42/arch/arm/lib/copy_to_user.S linux-2.6.32.42/arch/arm/lib/copy_to_user.S
444 --- linux-2.6.32.42/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
445 +++ linux-2.6.32.42/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
446 @@ -16,7 +16,7 @@
447 /*
448 * Prototype:
449 *
450 - * size_t __copy_to_user(void *to, const void *from, size_t n)
451 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
452 *
453 * Purpose:
454 *
455 @@ -88,11 +88,11 @@
456 .text
457
458 ENTRY(__copy_to_user_std)
459 -WEAK(__copy_to_user)
460 +WEAK(___copy_to_user)
461
462 #include "copy_template.S"
463
464 -ENDPROC(__copy_to_user)
465 +ENDPROC(___copy_to_user)
466
467 .section .fixup,"ax"
468 .align 0
469 diff -urNp linux-2.6.32.42/arch/arm/lib/uaccess.S linux-2.6.32.42/arch/arm/lib/uaccess.S
470 --- linux-2.6.32.42/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
471 +++ linux-2.6.32.42/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
472 @@ -19,7 +19,7 @@
473
474 #define PAGE_SHIFT 12
475
476 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
477 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
478 * Purpose : copy a block to user memory from kernel memory
479 * Params : to - user memory
480 * : from - kernel memory
481 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
482 sub r2, r2, ip
483 b .Lc2u_dest_aligned
484
485 -ENTRY(__copy_to_user)
486 +ENTRY(___copy_to_user)
487 stmfd sp!, {r2, r4 - r7, lr}
488 cmp r2, #4
489 blt .Lc2u_not_enough
490 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
491 ldrgtb r3, [r1], #0
492 USER( strgtbt r3, [r0], #1) @ May fault
493 b .Lc2u_finished
494 -ENDPROC(__copy_to_user)
495 +ENDPROC(___copy_to_user)
496
497 .section .fixup,"ax"
498 .align 0
499 9001: ldmfd sp!, {r0, r4 - r7, pc}
500 .previous
501
502 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
503 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
504 * Purpose : copy a block from user memory to kernel memory
505 * Params : to - kernel memory
506 * : from - user memory
507 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
508 sub r2, r2, ip
509 b .Lcfu_dest_aligned
510
511 -ENTRY(__copy_from_user)
512 +ENTRY(___copy_from_user)
513 stmfd sp!, {r0, r2, r4 - r7, lr}
514 cmp r2, #4
515 blt .Lcfu_not_enough
516 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
517 USER( ldrgtbt r3, [r1], #1) @ May fault
518 strgtb r3, [r0], #1
519 b .Lcfu_finished
520 -ENDPROC(__copy_from_user)
521 +ENDPROC(___copy_from_user)
522
523 .section .fixup,"ax"
524 .align 0
525 diff -urNp linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c
526 --- linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
527 +++ linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
528 @@ -97,7 +97,7 @@ out:
529 }
530
531 unsigned long
532 -__copy_to_user(void __user *to, const void *from, unsigned long n)
533 +___copy_to_user(void __user *to, const void *from, unsigned long n)
534 {
535 /*
536 * This test is stubbed out of the main function above to keep
537 diff -urNp linux-2.6.32.42/arch/arm/mach-at91/pm.c linux-2.6.32.42/arch/arm/mach-at91/pm.c
538 --- linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
539 +++ linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
540 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
541 }
542
543
544 -static struct platform_suspend_ops at91_pm_ops ={
545 +static const struct platform_suspend_ops at91_pm_ops ={
546 .valid = at91_pm_valid_state,
547 .begin = at91_pm_begin,
548 .enter = at91_pm_enter,
549 diff -urNp linux-2.6.32.42/arch/arm/mach-omap1/pm.c linux-2.6.32.42/arch/arm/mach-omap1/pm.c
550 --- linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
551 +++ linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
552 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
553
554
555
556 -static struct platform_suspend_ops omap_pm_ops ={
557 +static const struct platform_suspend_ops omap_pm_ops ={
558 .prepare = omap_pm_prepare,
559 .enter = omap_pm_enter,
560 .finish = omap_pm_finish,
561 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c
562 --- linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
563 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
564 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
565 enable_hlt();
566 }
567
568 -static struct platform_suspend_ops omap_pm_ops = {
569 +static const struct platform_suspend_ops omap_pm_ops = {
570 .prepare = omap2_pm_prepare,
571 .enter = omap2_pm_enter,
572 .finish = omap2_pm_finish,
573 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c
574 --- linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
575 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
576 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
577 return;
578 }
579
580 -static struct platform_suspend_ops omap_pm_ops = {
581 +static const struct platform_suspend_ops omap_pm_ops = {
582 .begin = omap3_pm_begin,
583 .end = omap3_pm_end,
584 .prepare = omap3_pm_prepare,
585 diff -urNp linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c
586 --- linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
587 +++ linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
588 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
589 (state == PM_SUSPEND_MEM);
590 }
591
592 -static struct platform_suspend_ops pnx4008_pm_ops = {
593 +static const struct platform_suspend_ops pnx4008_pm_ops = {
594 .enter = pnx4008_pm_enter,
595 .valid = pnx4008_pm_valid,
596 };
597 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/pm.c linux-2.6.32.42/arch/arm/mach-pxa/pm.c
598 --- linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
599 +++ linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
600 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
601 pxa_cpu_pm_fns->finish();
602 }
603
604 -static struct platform_suspend_ops pxa_pm_ops = {
605 +static const struct platform_suspend_ops pxa_pm_ops = {
606 .valid = pxa_pm_valid,
607 .enter = pxa_pm_enter,
608 .prepare = pxa_pm_prepare,
609 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c
610 --- linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
611 +++ linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
612 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
613 }
614
615 #ifdef CONFIG_PM
616 -static struct platform_suspend_ops sharpsl_pm_ops = {
617 +static const struct platform_suspend_ops sharpsl_pm_ops = {
618 .prepare = pxa_pm_prepare,
619 .finish = pxa_pm_finish,
620 .enter = corgi_pxa_pm_enter,
621 diff -urNp linux-2.6.32.42/arch/arm/mach-sa1100/pm.c linux-2.6.32.42/arch/arm/mach-sa1100/pm.c
622 --- linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
623 +++ linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
624 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
625 return virt_to_phys(sp);
626 }
627
628 -static struct platform_suspend_ops sa11x0_pm_ops = {
629 +static const struct platform_suspend_ops sa11x0_pm_ops = {
630 .enter = sa11x0_pm_enter,
631 .valid = suspend_valid_only_mem,
632 };
633 diff -urNp linux-2.6.32.42/arch/arm/mm/fault.c linux-2.6.32.42/arch/arm/mm/fault.c
634 --- linux-2.6.32.42/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
635 +++ linux-2.6.32.42/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
636 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
637 }
638 #endif
639
640 +#ifdef CONFIG_PAX_PAGEEXEC
641 + if (fsr & FSR_LNX_PF) {
642 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
643 + do_group_exit(SIGKILL);
644 + }
645 +#endif
646 +
647 tsk->thread.address = addr;
648 tsk->thread.error_code = fsr;
649 tsk->thread.trap_no = 14;
650 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
651 }
652 #endif /* CONFIG_MMU */
653
654 +#ifdef CONFIG_PAX_PAGEEXEC
655 +void pax_report_insns(void *pc, void *sp)
656 +{
657 + long i;
658 +
659 + printk(KERN_ERR "PAX: bytes at PC: ");
660 + for (i = 0; i < 20; i++) {
661 + unsigned char c;
662 + if (get_user(c, (__force unsigned char __user *)pc+i))
663 + printk(KERN_CONT "?? ");
664 + else
665 + printk(KERN_CONT "%02x ", c);
666 + }
667 + printk("\n");
668 +
669 + printk(KERN_ERR "PAX: bytes at SP-4: ");
670 + for (i = -1; i < 20; i++) {
671 + unsigned long c;
672 + if (get_user(c, (__force unsigned long __user *)sp+i))
673 + printk(KERN_CONT "???????? ");
674 + else
675 + printk(KERN_CONT "%08lx ", c);
676 + }
677 + printk("\n");
678 +}
679 +#endif
680 +
681 /*
682 * First Level Translation Fault Handler
683 *
684 diff -urNp linux-2.6.32.42/arch/arm/mm/mmap.c linux-2.6.32.42/arch/arm/mm/mmap.c
685 --- linux-2.6.32.42/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
686 +++ linux-2.6.32.42/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
687 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
688 if (len > TASK_SIZE)
689 return -ENOMEM;
690
691 +#ifdef CONFIG_PAX_RANDMMAP
692 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
693 +#endif
694 +
695 if (addr) {
696 if (do_align)
697 addr = COLOUR_ALIGN(addr, pgoff);
698 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
699 addr = PAGE_ALIGN(addr);
700
701 vma = find_vma(mm, addr);
702 - if (TASK_SIZE - len >= addr &&
703 - (!vma || addr + len <= vma->vm_start))
704 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
705 return addr;
706 }
707 if (len > mm->cached_hole_size) {
708 - start_addr = addr = mm->free_area_cache;
709 + start_addr = addr = mm->free_area_cache;
710 } else {
711 - start_addr = addr = TASK_UNMAPPED_BASE;
712 - mm->cached_hole_size = 0;
713 + start_addr = addr = mm->mmap_base;
714 + mm->cached_hole_size = 0;
715 }
716
717 full_search:
718 @@ -94,14 +97,14 @@ full_search:
719 * Start a new search - just in case we missed
720 * some holes.
721 */
722 - if (start_addr != TASK_UNMAPPED_BASE) {
723 - start_addr = addr = TASK_UNMAPPED_BASE;
724 + if (start_addr != mm->mmap_base) {
725 + start_addr = addr = mm->mmap_base;
726 mm->cached_hole_size = 0;
727 goto full_search;
728 }
729 return -ENOMEM;
730 }
731 - if (!vma || addr + len <= vma->vm_start) {
732 + if (check_heap_stack_gap(vma, addr, len)) {
733 /*
734 * Remember the place where we stopped the search:
735 */
736 diff -urNp linux-2.6.32.42/arch/arm/plat-s3c/pm.c linux-2.6.32.42/arch/arm/plat-s3c/pm.c
737 --- linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
738 +++ linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
739 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
740 s3c_pm_check_cleanup();
741 }
742
743 -static struct platform_suspend_ops s3c_pm_ops = {
744 +static const struct platform_suspend_ops s3c_pm_ops = {
745 .enter = s3c_pm_enter,
746 .prepare = s3c_pm_prepare,
747 .finish = s3c_pm_finish,
748 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/elf.h linux-2.6.32.42/arch/avr32/include/asm/elf.h
749 --- linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
750 +++ linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
751 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
752 the loader. We need to make sure that it is out of the way of the program
753 that it will "exec", and that there is sufficient room for the brk. */
754
755 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
756 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
757
758 +#ifdef CONFIG_PAX_ASLR
759 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
760 +
761 +#define PAX_DELTA_MMAP_LEN 15
762 +#define PAX_DELTA_STACK_LEN 15
763 +#endif
764
765 /* This yields a mask that user programs can use to figure out what
766 instruction set this CPU supports. This could be done in user space,
767 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h
768 --- linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
769 +++ linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
770 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
771 D(11) KM_IRQ1,
772 D(12) KM_SOFTIRQ0,
773 D(13) KM_SOFTIRQ1,
774 -D(14) KM_TYPE_NR
775 +D(14) KM_CLEARPAGE,
776 +D(15) KM_TYPE_NR
777 };
778
779 #undef D
780 diff -urNp linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c
781 --- linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
782 +++ linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
783 @@ -176,7 +176,7 @@ out:
784 return 0;
785 }
786
787 -static struct platform_suspend_ops avr32_pm_ops = {
788 +static const struct platform_suspend_ops avr32_pm_ops = {
789 .valid = avr32_pm_valid_state,
790 .enter = avr32_pm_enter,
791 };
792 diff -urNp linux-2.6.32.42/arch/avr32/mm/fault.c linux-2.6.32.42/arch/avr32/mm/fault.c
793 --- linux-2.6.32.42/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
794 +++ linux-2.6.32.42/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
795 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
796
797 int exception_trace = 1;
798
799 +#ifdef CONFIG_PAX_PAGEEXEC
800 +void pax_report_insns(void *pc, void *sp)
801 +{
802 + unsigned long i;
803 +
804 + printk(KERN_ERR "PAX: bytes at PC: ");
805 + for (i = 0; i < 20; i++) {
806 + unsigned char c;
807 + if (get_user(c, (unsigned char *)pc+i))
808 + printk(KERN_CONT "???????? ");
809 + else
810 + printk(KERN_CONT "%02x ", c);
811 + }
812 + printk("\n");
813 +}
814 +#endif
815 +
816 /*
817 * This routine handles page faults. It determines the address and the
818 * problem, and then passes it off to one of the appropriate routines.
819 @@ -157,6 +174,16 @@ bad_area:
820 up_read(&mm->mmap_sem);
821
822 if (user_mode(regs)) {
823 +
824 +#ifdef CONFIG_PAX_PAGEEXEC
825 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
826 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
827 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
828 + do_group_exit(SIGKILL);
829 + }
830 + }
831 +#endif
832 +
833 if (exception_trace && printk_ratelimit())
834 printk("%s%s[%d]: segfault at %08lx pc %08lx "
835 "sp %08lx ecr %lu\n",
836 diff -urNp linux-2.6.32.42/arch/blackfin/kernel/kgdb.c linux-2.6.32.42/arch/blackfin/kernel/kgdb.c
837 --- linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
838 +++ linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
839 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
840 return -1; /* this means that we do not want to exit from the handler */
841 }
842
843 -struct kgdb_arch arch_kgdb_ops = {
844 +const struct kgdb_arch arch_kgdb_ops = {
845 .gdb_bpt_instr = {0xa1},
846 #ifdef CONFIG_SMP
847 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
848 diff -urNp linux-2.6.32.42/arch/blackfin/mach-common/pm.c linux-2.6.32.42/arch/blackfin/mach-common/pm.c
849 --- linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
850 +++ linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
851 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
852 return 0;
853 }
854
855 -struct platform_suspend_ops bfin_pm_ops = {
856 +const struct platform_suspend_ops bfin_pm_ops = {
857 .enter = bfin_pm_enter,
858 .valid = bfin_pm_valid,
859 };
860 diff -urNp linux-2.6.32.42/arch/frv/include/asm/kmap_types.h linux-2.6.32.42/arch/frv/include/asm/kmap_types.h
861 --- linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
862 +++ linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
863 @@ -23,6 +23,7 @@ enum km_type {
864 KM_IRQ1,
865 KM_SOFTIRQ0,
866 KM_SOFTIRQ1,
867 + KM_CLEARPAGE,
868 KM_TYPE_NR
869 };
870
871 diff -urNp linux-2.6.32.42/arch/frv/mm/elf-fdpic.c linux-2.6.32.42/arch/frv/mm/elf-fdpic.c
872 --- linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
873 +++ linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
874 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
875 if (addr) {
876 addr = PAGE_ALIGN(addr);
877 vma = find_vma(current->mm, addr);
878 - if (TASK_SIZE - len >= addr &&
879 - (!vma || addr + len <= vma->vm_start))
880 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
881 goto success;
882 }
883
884 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
885 for (; vma; vma = vma->vm_next) {
886 if (addr > limit)
887 break;
888 - if (addr + len <= vma->vm_start)
889 + if (check_heap_stack_gap(vma, addr, len))
890 goto success;
891 addr = vma->vm_end;
892 }
893 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
894 for (; vma; vma = vma->vm_next) {
895 if (addr > limit)
896 break;
897 - if (addr + len <= vma->vm_start)
898 + if (check_heap_stack_gap(vma, addr, len))
899 goto success;
900 addr = vma->vm_end;
901 }
902 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c
903 --- linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
904 +++ linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
905 @@ -17,7 +17,7 @@
906 #include <linux/swiotlb.h>
907 #include <asm/machvec.h>
908
909 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
910 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
911
912 /* swiotlb declarations & definitions: */
913 extern int swiotlb_late_init_with_default_size (size_t size);
914 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
915 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
916 }
917
918 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
919 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
920 {
921 if (use_swiotlb(dev))
922 return &swiotlb_dma_ops;
923 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c
924 --- linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
926 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
927 },
928 };
929
930 -extern struct dma_map_ops swiotlb_dma_ops;
931 +extern const struct dma_map_ops swiotlb_dma_ops;
932
933 static int __init
934 sba_init(void)
935 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
936
937 __setup("sbapagesize=",sba_page_override);
938
939 -struct dma_map_ops sba_dma_ops = {
940 +const struct dma_map_ops sba_dma_ops = {
941 .alloc_coherent = sba_alloc_coherent,
942 .free_coherent = sba_free_coherent,
943 .map_page = sba_map_page,
944 diff -urNp linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c
945 --- linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
946 +++ linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
947 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
948
949 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
950
951 +#ifdef CONFIG_PAX_ASLR
952 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
953 +
954 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
955 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
956 +#endif
957 +
958 /* Ugly but avoids duplication */
959 #include "../../../fs/binfmt_elf.c"
960
961 diff -urNp linux-2.6.32.42/arch/ia64/ia32/ia32priv.h linux-2.6.32.42/arch/ia64/ia32/ia32priv.h
962 --- linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
963 +++ linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
964 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
965 #define ELF_DATA ELFDATA2LSB
966 #define ELF_ARCH EM_386
967
968 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
969 +#ifdef CONFIG_PAX_RANDUSTACK
970 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
971 +#else
972 +#define __IA32_DELTA_STACK 0UL
973 +#endif
974 +
975 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
976 +
977 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
978 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
979
980 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h
981 --- linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
982 +++ linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
983 @@ -12,7 +12,7 @@
984
985 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
986
987 -extern struct dma_map_ops *dma_ops;
988 +extern const struct dma_map_ops *dma_ops;
989 extern struct ia64_machine_vector ia64_mv;
990 extern void set_iommu_machvec(void);
991
992 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
993 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
994 dma_addr_t *daddr, gfp_t gfp)
995 {
996 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
997 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
998 void *caddr;
999
1000 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1001 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
1002 static inline void dma_free_coherent(struct device *dev, size_t size,
1003 void *caddr, dma_addr_t daddr)
1004 {
1005 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1006 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1007 debug_dma_free_coherent(dev, size, caddr, daddr);
1008 ops->free_coherent(dev, size, caddr, daddr);
1009 }
1010 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
1011
1012 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1013 {
1014 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1015 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1016 return ops->mapping_error(dev, daddr);
1017 }
1018
1019 static inline int dma_supported(struct device *dev, u64 mask)
1020 {
1021 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1022 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1023 return ops->dma_supported(dev, mask);
1024 }
1025
1026 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/elf.h linux-2.6.32.42/arch/ia64/include/asm/elf.h
1027 --- linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -43,6 +43,13 @@
1030 */
1031 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1032
1033 +#ifdef CONFIG_PAX_ASLR
1034 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1035 +
1036 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1037 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1038 +#endif
1039 +
1040 #define PT_IA_64_UNWIND 0x70000001
1041
1042 /* IA-64 relocations: */
1043 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/machvec.h linux-2.6.32.42/arch/ia64/include/asm/machvec.h
1044 --- linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1045 +++ linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1046 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1047 /* DMA-mapping interface: */
1048 typedef void ia64_mv_dma_init (void);
1049 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1050 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1051 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1052
1053 /*
1054 * WARNING: The legacy I/O space is _architected_. Platforms are
1055 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1056 # endif /* CONFIG_IA64_GENERIC */
1057
1058 extern void swiotlb_dma_init(void);
1059 -extern struct dma_map_ops *dma_get_ops(struct device *);
1060 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1061
1062 /*
1063 * Define default versions so we can extend machvec for new platforms without having
1064 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/pgtable.h linux-2.6.32.42/arch/ia64/include/asm/pgtable.h
1065 --- linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1066 +++ linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1067 @@ -12,7 +12,7 @@
1068 * David Mosberger-Tang <davidm@hpl.hp.com>
1069 */
1070
1071 -
1072 +#include <linux/const.h>
1073 #include <asm/mman.h>
1074 #include <asm/page.h>
1075 #include <asm/processor.h>
1076 @@ -143,6 +143,17 @@
1077 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1078 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1079 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1080 +
1081 +#ifdef CONFIG_PAX_PAGEEXEC
1082 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1083 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1084 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1085 +#else
1086 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1087 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1088 +# define PAGE_COPY_NOEXEC PAGE_COPY
1089 +#endif
1090 +
1091 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1092 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1093 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1094 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/spinlock.h linux-2.6.32.42/arch/ia64/include/asm/spinlock.h
1095 --- linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1096 +++ linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1097 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1098 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1099
1100 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1101 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1102 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1103 }
1104
1105 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1106 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/uaccess.h linux-2.6.32.42/arch/ia64/include/asm/uaccess.h
1107 --- linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1108 +++ linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1109 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1110 const void *__cu_from = (from); \
1111 long __cu_len = (n); \
1112 \
1113 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1114 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1115 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1116 __cu_len; \
1117 })
1118 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1119 long __cu_len = (n); \
1120 \
1121 __chk_user_ptr(__cu_from); \
1122 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1123 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1124 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1125 __cu_len; \
1126 })
1127 diff -urNp linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c
1128 --- linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1129 +++ linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1130 @@ -3,7 +3,7 @@
1131 /* Set this to 1 if there is a HW IOMMU in the system */
1132 int iommu_detected __read_mostly;
1133
1134 -struct dma_map_ops *dma_ops;
1135 +const struct dma_map_ops *dma_ops;
1136 EXPORT_SYMBOL(dma_ops);
1137
1138 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1139 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1140 }
1141 fs_initcall(dma_init);
1142
1143 -struct dma_map_ops *dma_get_ops(struct device *dev)
1144 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1145 {
1146 return dma_ops;
1147 }
1148 diff -urNp linux-2.6.32.42/arch/ia64/kernel/module.c linux-2.6.32.42/arch/ia64/kernel/module.c
1149 --- linux-2.6.32.42/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1150 +++ linux-2.6.32.42/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1151 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1152 void
1153 module_free (struct module *mod, void *module_region)
1154 {
1155 - if (mod && mod->arch.init_unw_table &&
1156 - module_region == mod->module_init) {
1157 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1158 unw_remove_unwind_table(mod->arch.init_unw_table);
1159 mod->arch.init_unw_table = NULL;
1160 }
1161 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1162 }
1163
1164 static inline int
1165 +in_init_rx (const struct module *mod, uint64_t addr)
1166 +{
1167 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1168 +}
1169 +
1170 +static inline int
1171 +in_init_rw (const struct module *mod, uint64_t addr)
1172 +{
1173 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1174 +}
1175 +
1176 +static inline int
1177 in_init (const struct module *mod, uint64_t addr)
1178 {
1179 - return addr - (uint64_t) mod->module_init < mod->init_size;
1180 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1181 +}
1182 +
1183 +static inline int
1184 +in_core_rx (const struct module *mod, uint64_t addr)
1185 +{
1186 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1187 +}
1188 +
1189 +static inline int
1190 +in_core_rw (const struct module *mod, uint64_t addr)
1191 +{
1192 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1193 }
1194
1195 static inline int
1196 in_core (const struct module *mod, uint64_t addr)
1197 {
1198 - return addr - (uint64_t) mod->module_core < mod->core_size;
1199 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1200 }
1201
1202 static inline int
1203 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1204 break;
1205
1206 case RV_BDREL:
1207 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1208 + if (in_init_rx(mod, val))
1209 + val -= (uint64_t) mod->module_init_rx;
1210 + else if (in_init_rw(mod, val))
1211 + val -= (uint64_t) mod->module_init_rw;
1212 + else if (in_core_rx(mod, val))
1213 + val -= (uint64_t) mod->module_core_rx;
1214 + else if (in_core_rw(mod, val))
1215 + val -= (uint64_t) mod->module_core_rw;
1216 break;
1217
1218 case RV_LTV:
1219 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1220 * addresses have been selected...
1221 */
1222 uint64_t gp;
1223 - if (mod->core_size > MAX_LTOFF)
1224 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1225 /*
1226 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1227 * at the end of the module.
1228 */
1229 - gp = mod->core_size - MAX_LTOFF / 2;
1230 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1231 else
1232 - gp = mod->core_size / 2;
1233 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1234 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1235 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1236 mod->arch.gp = gp;
1237 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1238 }
1239 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-dma.c linux-2.6.32.42/arch/ia64/kernel/pci-dma.c
1240 --- linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1241 +++ linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1242 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1243 .dma_mask = &fallback_dev.coherent_dma_mask,
1244 };
1245
1246 -extern struct dma_map_ops intel_dma_ops;
1247 +extern const struct dma_map_ops intel_dma_ops;
1248
1249 static int __init pci_iommu_init(void)
1250 {
1251 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1252 }
1253 EXPORT_SYMBOL(iommu_dma_supported);
1254
1255 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1256 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1257 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1258 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1259 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1260 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1261 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1262 +
1263 +static const struct dma_map_ops intel_iommu_dma_ops = {
1264 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1265 + .alloc_coherent = intel_alloc_coherent,
1266 + .free_coherent = intel_free_coherent,
1267 + .map_sg = intel_map_sg,
1268 + .unmap_sg = intel_unmap_sg,
1269 + .map_page = intel_map_page,
1270 + .unmap_page = intel_unmap_page,
1271 + .mapping_error = intel_mapping_error,
1272 +
1273 + .sync_single_for_cpu = machvec_dma_sync_single,
1274 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1275 + .sync_single_for_device = machvec_dma_sync_single,
1276 + .sync_sg_for_device = machvec_dma_sync_sg,
1277 + .dma_supported = iommu_dma_supported,
1278 +};
1279 +
1280 void __init pci_iommu_alloc(void)
1281 {
1282 - dma_ops = &intel_dma_ops;
1283 -
1284 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1285 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1286 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1287 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1288 - dma_ops->dma_supported = iommu_dma_supported;
1289 + dma_ops = &intel_iommu_dma_ops;
1290
1291 /*
1292 * The order of these functions is important for
1293 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c
1294 --- linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1295 +++ linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1296 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1297 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1298 }
1299
1300 -struct dma_map_ops swiotlb_dma_ops = {
1301 +const struct dma_map_ops swiotlb_dma_ops = {
1302 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1303 .free_coherent = swiotlb_free_coherent,
1304 .map_page = swiotlb_map_page,
1305 diff -urNp linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c
1306 --- linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1307 +++ linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1308 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1309 if (REGION_NUMBER(addr) == RGN_HPAGE)
1310 addr = 0;
1311 #endif
1312 +
1313 +#ifdef CONFIG_PAX_RANDMMAP
1314 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1315 + addr = mm->free_area_cache;
1316 + else
1317 +#endif
1318 +
1319 if (!addr)
1320 addr = mm->free_area_cache;
1321
1322 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1323 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1324 /* At this point: (!vma || addr < vma->vm_end). */
1325 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1326 - if (start_addr != TASK_UNMAPPED_BASE) {
1327 + if (start_addr != mm->mmap_base) {
1328 /* Start a new search --- just in case we missed some holes. */
1329 - addr = TASK_UNMAPPED_BASE;
1330 + addr = mm->mmap_base;
1331 goto full_search;
1332 }
1333 return -ENOMEM;
1334 }
1335 - if (!vma || addr + len <= vma->vm_start) {
1336 + if (check_heap_stack_gap(vma, addr, len)) {
1337 /* Remember the address where we stopped this search: */
1338 mm->free_area_cache = addr + len;
1339 return addr;
1340 diff -urNp linux-2.6.32.42/arch/ia64/kernel/topology.c linux-2.6.32.42/arch/ia64/kernel/topology.c
1341 --- linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1342 +++ linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1343 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1344 return ret;
1345 }
1346
1347 -static struct sysfs_ops cache_sysfs_ops = {
1348 +static const struct sysfs_ops cache_sysfs_ops = {
1349 .show = cache_show
1350 };
1351
1352 diff -urNp linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S
1353 --- linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1354 +++ linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1355 @@ -190,7 +190,7 @@ SECTIONS
1356 /* Per-cpu data: */
1357 . = ALIGN(PERCPU_PAGE_SIZE);
1358 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1359 - __phys_per_cpu_start = __per_cpu_load;
1360 + __phys_per_cpu_start = per_cpu_load;
1361 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1362 * into percpu page size
1363 */
1364 diff -urNp linux-2.6.32.42/arch/ia64/mm/fault.c linux-2.6.32.42/arch/ia64/mm/fault.c
1365 --- linux-2.6.32.42/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1366 +++ linux-2.6.32.42/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1367 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1368 return pte_present(pte);
1369 }
1370
1371 +#ifdef CONFIG_PAX_PAGEEXEC
1372 +void pax_report_insns(void *pc, void *sp)
1373 +{
1374 + unsigned long i;
1375 +
1376 + printk(KERN_ERR "PAX: bytes at PC: ");
1377 + for (i = 0; i < 8; i++) {
1378 + unsigned int c;
1379 + if (get_user(c, (unsigned int *)pc+i))
1380 + printk(KERN_CONT "???????? ");
1381 + else
1382 + printk(KERN_CONT "%08x ", c);
1383 + }
1384 + printk("\n");
1385 +}
1386 +#endif
1387 +
1388 void __kprobes
1389 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1390 {
1391 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1392 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1393 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1394
1395 - if ((vma->vm_flags & mask) != mask)
1396 + if ((vma->vm_flags & mask) != mask) {
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1400 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1401 + goto bad_area;
1402 +
1403 + up_read(&mm->mmap_sem);
1404 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1405 + do_group_exit(SIGKILL);
1406 + }
1407 +#endif
1408 +
1409 goto bad_area;
1410
1411 + }
1412 +
1413 survive:
1414 /*
1415 * If for any reason at all we couldn't handle the fault, make
1416 diff -urNp linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c
1417 --- linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1418 +++ linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1419 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1420 /* At this point: (!vmm || addr < vmm->vm_end). */
1421 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1422 return -ENOMEM;
1423 - if (!vmm || (addr + len) <= vmm->vm_start)
1424 + if (check_heap_stack_gap(vmm, addr, len))
1425 return addr;
1426 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1427 }
1428 diff -urNp linux-2.6.32.42/arch/ia64/mm/init.c linux-2.6.32.42/arch/ia64/mm/init.c
1429 --- linux-2.6.32.42/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1430 +++ linux-2.6.32.42/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1431 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1432 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1433 vma->vm_end = vma->vm_start + PAGE_SIZE;
1434 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1435 +
1436 +#ifdef CONFIG_PAX_PAGEEXEC
1437 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1438 + vma->vm_flags &= ~VM_EXEC;
1439 +
1440 +#ifdef CONFIG_PAX_MPROTECT
1441 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1442 + vma->vm_flags &= ~VM_MAYEXEC;
1443 +#endif
1444 +
1445 + }
1446 +#endif
1447 +
1448 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1449 down_write(&current->mm->mmap_sem);
1450 if (insert_vm_struct(current->mm, vma)) {
1451 diff -urNp linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c
1452 --- linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1453 +++ linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1454 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1455 return ret;
1456 }
1457
1458 -static struct dma_map_ops sn_dma_ops = {
1459 +static const struct dma_map_ops sn_dma_ops = {
1460 .alloc_coherent = sn_dma_alloc_coherent,
1461 .free_coherent = sn_dma_free_coherent,
1462 .map_page = sn_dma_map_page,
1463 diff -urNp linux-2.6.32.42/arch/m32r/lib/usercopy.c linux-2.6.32.42/arch/m32r/lib/usercopy.c
1464 --- linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1465 +++ linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1466 @@ -14,6 +14,9 @@
1467 unsigned long
1468 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1469 {
1470 + if ((long)n < 0)
1471 + return n;
1472 +
1473 prefetch(from);
1474 if (access_ok(VERIFY_WRITE, to, n))
1475 __copy_user(to,from,n);
1476 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1477 unsigned long
1478 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1479 {
1480 + if ((long)n < 0)
1481 + return n;
1482 +
1483 prefetchw(to);
1484 if (access_ok(VERIFY_READ, from, n))
1485 __copy_user_zeroing(to,from,n);
1486 diff -urNp linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c
1487 --- linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1488 +++ linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1489 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1490
1491 }
1492
1493 -static struct platform_suspend_ops db1x_pm_ops = {
1494 +static const struct platform_suspend_ops db1x_pm_ops = {
1495 .valid = suspend_valid_only_mem,
1496 .begin = db1x_pm_begin,
1497 .enter = db1x_pm_enter,
1498 diff -urNp linux-2.6.32.42/arch/mips/include/asm/elf.h linux-2.6.32.42/arch/mips/include/asm/elf.h
1499 --- linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1500 +++ linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1501 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1502 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1503 #endif
1504
1505 +#ifdef CONFIG_PAX_ASLR
1506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1507 +
1508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1510 +#endif
1511 +
1512 #endif /* _ASM_ELF_H */
1513 diff -urNp linux-2.6.32.42/arch/mips/include/asm/page.h linux-2.6.32.42/arch/mips/include/asm/page.h
1514 --- linux-2.6.32.42/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1515 +++ linux-2.6.32.42/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1516 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1517 #ifdef CONFIG_CPU_MIPS32
1518 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1519 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1520 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1521 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1522 #else
1523 typedef struct { unsigned long long pte; } pte_t;
1524 #define pte_val(x) ((x).pte)
1525 diff -urNp linux-2.6.32.42/arch/mips/include/asm/system.h linux-2.6.32.42/arch/mips/include/asm/system.h
1526 --- linux-2.6.32.42/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1527 +++ linux-2.6.32.42/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1528 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1529 */
1530 #define __ARCH_WANT_UNLOCKED_CTXSW
1531
1532 -extern unsigned long arch_align_stack(unsigned long sp);
1533 +#define arch_align_stack(x) ((x) & ~0xfUL)
1534
1535 #endif /* _ASM_SYSTEM_H */
1536 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c
1537 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1538 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1539 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1540 #undef ELF_ET_DYN_BASE
1541 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1542
1543 +#ifdef CONFIG_PAX_ASLR
1544 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1545 +
1546 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1547 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1548 +#endif
1549 +
1550 #include <asm/processor.h>
1551 #include <linux/module.h>
1552 #include <linux/elfcore.h>
1553 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c
1554 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1555 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1556 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1557 #undef ELF_ET_DYN_BASE
1558 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1559
1560 +#ifdef CONFIG_PAX_ASLR
1561 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1562 +
1563 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1564 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1565 +#endif
1566 +
1567 #include <asm/processor.h>
1568
1569 /*
1570 diff -urNp linux-2.6.32.42/arch/mips/kernel/kgdb.c linux-2.6.32.42/arch/mips/kernel/kgdb.c
1571 --- linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1572 +++ linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1573 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1574 return -1;
1575 }
1576
1577 +/* cannot be const */
1578 struct kgdb_arch arch_kgdb_ops;
1579
1580 /*
1581 diff -urNp linux-2.6.32.42/arch/mips/kernel/process.c linux-2.6.32.42/arch/mips/kernel/process.c
1582 --- linux-2.6.32.42/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1583 +++ linux-2.6.32.42/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1584 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1585 out:
1586 return pc;
1587 }
1588 -
1589 -/*
1590 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1591 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1592 - */
1593 -unsigned long arch_align_stack(unsigned long sp)
1594 -{
1595 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1596 - sp -= get_random_int() & ~PAGE_MASK;
1597 -
1598 - return sp & ALMASK;
1599 -}
1600 diff -urNp linux-2.6.32.42/arch/mips/kernel/syscall.c linux-2.6.32.42/arch/mips/kernel/syscall.c
1601 --- linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1602 +++ linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1603 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1604 do_color_align = 0;
1605 if (filp || (flags & MAP_SHARED))
1606 do_color_align = 1;
1607 +
1608 +#ifdef CONFIG_PAX_RANDMMAP
1609 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1610 +#endif
1611 +
1612 if (addr) {
1613 if (do_color_align)
1614 addr = COLOUR_ALIGN(addr, pgoff);
1615 else
1616 addr = PAGE_ALIGN(addr);
1617 vmm = find_vma(current->mm, addr);
1618 - if (task_size - len >= addr &&
1619 - (!vmm || addr + len <= vmm->vm_start))
1620 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1621 return addr;
1622 }
1623 - addr = TASK_UNMAPPED_BASE;
1624 + addr = current->mm->mmap_base;
1625 if (do_color_align)
1626 addr = COLOUR_ALIGN(addr, pgoff);
1627 else
1628 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1629 /* At this point: (!vmm || addr < vmm->vm_end). */
1630 if (task_size - len < addr)
1631 return -ENOMEM;
1632 - if (!vmm || addr + len <= vmm->vm_start)
1633 + if (check_heap_stack_gap(vmm, addr, len))
1634 return addr;
1635 addr = vmm->vm_end;
1636 if (do_color_align)
1637 diff -urNp linux-2.6.32.42/arch/mips/mm/fault.c linux-2.6.32.42/arch/mips/mm/fault.c
1638 --- linux-2.6.32.42/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1639 +++ linux-2.6.32.42/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1640 @@ -26,6 +26,23 @@
1641 #include <asm/ptrace.h>
1642 #include <asm/highmem.h> /* For VMALLOC_END */
1643
1644 +#ifdef CONFIG_PAX_PAGEEXEC
1645 +void pax_report_insns(void *pc, void *sp)
1646 +{
1647 + unsigned long i;
1648 +
1649 + printk(KERN_ERR "PAX: bytes at PC: ");
1650 + for (i = 0; i < 5; i++) {
1651 + unsigned int c;
1652 + if (get_user(c, (unsigned int *)pc+i))
1653 + printk(KERN_CONT "???????? ");
1654 + else
1655 + printk(KERN_CONT "%08x ", c);
1656 + }
1657 + printk("\n");
1658 +}
1659 +#endif
1660 +
1661 /*
1662 * This routine handles page faults. It determines the address,
1663 * and the problem, and then passes it off to one of the appropriate
1664 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/elf.h linux-2.6.32.42/arch/parisc/include/asm/elf.h
1665 --- linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1666 +++ linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1667 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1668
1669 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1670
1671 +#ifdef CONFIG_PAX_ASLR
1672 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1673 +
1674 +#define PAX_DELTA_MMAP_LEN 16
1675 +#define PAX_DELTA_STACK_LEN 16
1676 +#endif
1677 +
1678 /* This yields a mask that user programs can use to figure out what
1679 instruction set this CPU supports. This could be done in user space,
1680 but it's not easy, and we've already done it here. */
1681 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/pgtable.h linux-2.6.32.42/arch/parisc/include/asm/pgtable.h
1682 --- linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1683 +++ linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1684 @@ -207,6 +207,17 @@
1685 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1686 #define PAGE_COPY PAGE_EXECREAD
1687 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1688 +
1689 +#ifdef CONFIG_PAX_PAGEEXEC
1690 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1691 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1692 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1693 +#else
1694 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1695 +# define PAGE_COPY_NOEXEC PAGE_COPY
1696 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1697 +#endif
1698 +
1699 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1700 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1701 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1702 diff -urNp linux-2.6.32.42/arch/parisc/kernel/module.c linux-2.6.32.42/arch/parisc/kernel/module.c
1703 --- linux-2.6.32.42/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1704 +++ linux-2.6.32.42/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1705 @@ -95,16 +95,38 @@
1706
1707 /* three functions to determine where in the module core
1708 * or init pieces the location is */
1709 +static inline int in_init_rx(struct module *me, void *loc)
1710 +{
1711 + return (loc >= me->module_init_rx &&
1712 + loc < (me->module_init_rx + me->init_size_rx));
1713 +}
1714 +
1715 +static inline int in_init_rw(struct module *me, void *loc)
1716 +{
1717 + return (loc >= me->module_init_rw &&
1718 + loc < (me->module_init_rw + me->init_size_rw));
1719 +}
1720 +
1721 static inline int in_init(struct module *me, void *loc)
1722 {
1723 - return (loc >= me->module_init &&
1724 - loc <= (me->module_init + me->init_size));
1725 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1726 +}
1727 +
1728 +static inline int in_core_rx(struct module *me, void *loc)
1729 +{
1730 + return (loc >= me->module_core_rx &&
1731 + loc < (me->module_core_rx + me->core_size_rx));
1732 +}
1733 +
1734 +static inline int in_core_rw(struct module *me, void *loc)
1735 +{
1736 + return (loc >= me->module_core_rw &&
1737 + loc < (me->module_core_rw + me->core_size_rw));
1738 }
1739
1740 static inline int in_core(struct module *me, void *loc)
1741 {
1742 - return (loc >= me->module_core &&
1743 - loc <= (me->module_core + me->core_size));
1744 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1745 }
1746
1747 static inline int in_local(struct module *me, void *loc)
1748 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1749 }
1750
1751 /* align things a bit */
1752 - me->core_size = ALIGN(me->core_size, 16);
1753 - me->arch.got_offset = me->core_size;
1754 - me->core_size += gots * sizeof(struct got_entry);
1755 -
1756 - me->core_size = ALIGN(me->core_size, 16);
1757 - me->arch.fdesc_offset = me->core_size;
1758 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1759 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1760 + me->arch.got_offset = me->core_size_rw;
1761 + me->core_size_rw += gots * sizeof(struct got_entry);
1762 +
1763 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1764 + me->arch.fdesc_offset = me->core_size_rw;
1765 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1766
1767 me->arch.got_max = gots;
1768 me->arch.fdesc_max = fdescs;
1769 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1770
1771 BUG_ON(value == 0);
1772
1773 - got = me->module_core + me->arch.got_offset;
1774 + got = me->module_core_rw + me->arch.got_offset;
1775 for (i = 0; got[i].addr; i++)
1776 if (got[i].addr == value)
1777 goto out;
1778 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1779 #ifdef CONFIG_64BIT
1780 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1781 {
1782 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1783 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1784
1785 if (!value) {
1786 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1787 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1788
1789 /* Create new one */
1790 fdesc->addr = value;
1791 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1792 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1793 return (Elf_Addr)fdesc;
1794 }
1795 #endif /* CONFIG_64BIT */
1796 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1797
1798 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1799 end = table + sechdrs[me->arch.unwind_section].sh_size;
1800 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1801 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1802
1803 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1804 me->arch.unwind_section, table, end, gp);
1805 diff -urNp linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c
1806 --- linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1807 +++ linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1808 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1809 /* At this point: (!vma || addr < vma->vm_end). */
1810 if (TASK_SIZE - len < addr)
1811 return -ENOMEM;
1812 - if (!vma || addr + len <= vma->vm_start)
1813 + if (check_heap_stack_gap(vma, addr, len))
1814 return addr;
1815 addr = vma->vm_end;
1816 }
1817 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1818 /* At this point: (!vma || addr < vma->vm_end). */
1819 if (TASK_SIZE - len < addr)
1820 return -ENOMEM;
1821 - if (!vma || addr + len <= vma->vm_start)
1822 + if (check_heap_stack_gap(vma, addr, len))
1823 return addr;
1824 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1825 if (addr < vma->vm_end) /* handle wraparound */
1826 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1827 if (flags & MAP_FIXED)
1828 return addr;
1829 if (!addr)
1830 - addr = TASK_UNMAPPED_BASE;
1831 + addr = current->mm->mmap_base;
1832
1833 if (filp) {
1834 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1835 diff -urNp linux-2.6.32.42/arch/parisc/kernel/traps.c linux-2.6.32.42/arch/parisc/kernel/traps.c
1836 --- linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1837 +++ linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1838 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1839
1840 down_read(&current->mm->mmap_sem);
1841 vma = find_vma(current->mm,regs->iaoq[0]);
1842 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1843 - && (vma->vm_flags & VM_EXEC)) {
1844 -
1845 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1846 fault_address = regs->iaoq[0];
1847 fault_space = regs->iasq[0];
1848
1849 diff -urNp linux-2.6.32.42/arch/parisc/mm/fault.c linux-2.6.32.42/arch/parisc/mm/fault.c
1850 --- linux-2.6.32.42/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1851 +++ linux-2.6.32.42/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1852 @@ -15,6 +15,7 @@
1853 #include <linux/sched.h>
1854 #include <linux/interrupt.h>
1855 #include <linux/module.h>
1856 +#include <linux/unistd.h>
1857
1858 #include <asm/uaccess.h>
1859 #include <asm/traps.h>
1860 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1861 static unsigned long
1862 parisc_acctyp(unsigned long code, unsigned int inst)
1863 {
1864 - if (code == 6 || code == 16)
1865 + if (code == 6 || code == 7 || code == 16)
1866 return VM_EXEC;
1867
1868 switch (inst & 0xf0000000) {
1869 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1870 }
1871 #endif
1872
1873 +#ifdef CONFIG_PAX_PAGEEXEC
1874 +/*
1875 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1876 + *
1877 + * returns 1 when task should be killed
1878 + * 2 when rt_sigreturn trampoline was detected
1879 + * 3 when unpatched PLT trampoline was detected
1880 + */
1881 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1882 +{
1883 +
1884 +#ifdef CONFIG_PAX_EMUPLT
1885 + int err;
1886 +
1887 + do { /* PaX: unpatched PLT emulation */
1888 + unsigned int bl, depwi;
1889 +
1890 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1891 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1892 +
1893 + if (err)
1894 + break;
1895 +
1896 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1897 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1898 +
1899 + err = get_user(ldw, (unsigned int *)addr);
1900 + err |= get_user(bv, (unsigned int *)(addr+4));
1901 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1902 +
1903 + if (err)
1904 + break;
1905 +
1906 + if (ldw == 0x0E801096U &&
1907 + bv == 0xEAC0C000U &&
1908 + ldw2 == 0x0E881095U)
1909 + {
1910 + unsigned int resolver, map;
1911 +
1912 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1913 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1914 + if (err)
1915 + break;
1916 +
1917 + regs->gr[20] = instruction_pointer(regs)+8;
1918 + regs->gr[21] = map;
1919 + regs->gr[22] = resolver;
1920 + regs->iaoq[0] = resolver | 3UL;
1921 + regs->iaoq[1] = regs->iaoq[0] + 4;
1922 + return 3;
1923 + }
1924 + }
1925 + } while (0);
1926 +#endif
1927 +
1928 +#ifdef CONFIG_PAX_EMUTRAMP
1929 +
1930 +#ifndef CONFIG_PAX_EMUSIGRT
1931 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1932 + return 1;
1933 +#endif
1934 +
1935 + do { /* PaX: rt_sigreturn emulation */
1936 + unsigned int ldi1, ldi2, bel, nop;
1937 +
1938 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1939 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1940 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1941 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1942 +
1943 + if (err)
1944 + break;
1945 +
1946 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1947 + ldi2 == 0x3414015AU &&
1948 + bel == 0xE4008200U &&
1949 + nop == 0x08000240U)
1950 + {
1951 + regs->gr[25] = (ldi1 & 2) >> 1;
1952 + regs->gr[20] = __NR_rt_sigreturn;
1953 + regs->gr[31] = regs->iaoq[1] + 16;
1954 + regs->sr[0] = regs->iasq[1];
1955 + regs->iaoq[0] = 0x100UL;
1956 + regs->iaoq[1] = regs->iaoq[0] + 4;
1957 + regs->iasq[0] = regs->sr[2];
1958 + regs->iasq[1] = regs->sr[2];
1959 + return 2;
1960 + }
1961 + } while (0);
1962 +#endif
1963 +
1964 + return 1;
1965 +}
1966 +
1967 +void pax_report_insns(void *pc, void *sp)
1968 +{
1969 + unsigned long i;
1970 +
1971 + printk(KERN_ERR "PAX: bytes at PC: ");
1972 + for (i = 0; i < 5; i++) {
1973 + unsigned int c;
1974 + if (get_user(c, (unsigned int *)pc+i))
1975 + printk(KERN_CONT "???????? ");
1976 + else
1977 + printk(KERN_CONT "%08x ", c);
1978 + }
1979 + printk("\n");
1980 +}
1981 +#endif
1982 +
1983 int fixup_exception(struct pt_regs *regs)
1984 {
1985 const struct exception_table_entry *fix;
1986 @@ -192,8 +303,33 @@ good_area:
1987
1988 acc_type = parisc_acctyp(code,regs->iir);
1989
1990 - if ((vma->vm_flags & acc_type) != acc_type)
1991 + if ((vma->vm_flags & acc_type) != acc_type) {
1992 +
1993 +#ifdef CONFIG_PAX_PAGEEXEC
1994 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1995 + (address & ~3UL) == instruction_pointer(regs))
1996 + {
1997 + up_read(&mm->mmap_sem);
1998 + switch (pax_handle_fetch_fault(regs)) {
1999 +
2000 +#ifdef CONFIG_PAX_EMUPLT
2001 + case 3:
2002 + return;
2003 +#endif
2004 +
2005 +#ifdef CONFIG_PAX_EMUTRAMP
2006 + case 2:
2007 + return;
2008 +#endif
2009 +
2010 + }
2011 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2012 + do_group_exit(SIGKILL);
2013 + }
2014 +#endif
2015 +
2016 goto bad_area;
2017 + }
2018
2019 /*
2020 * If for any reason at all we couldn't handle the fault, make
2021 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/device.h linux-2.6.32.42/arch/powerpc/include/asm/device.h
2022 --- linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2023 +++ linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2024 @@ -14,7 +14,7 @@ struct dev_archdata {
2025 struct device_node *of_node;
2026
2027 /* DMA operations on that device */
2028 - struct dma_map_ops *dma_ops;
2029 + const struct dma_map_ops *dma_ops;
2030
2031 /*
2032 * When an iommu is in use, dma_data is used as a ptr to the base of the
2033 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h
2034 --- linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2035 +++ linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2036 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2037 #ifdef CONFIG_PPC64
2038 extern struct dma_map_ops dma_iommu_ops;
2039 #endif
2040 -extern struct dma_map_ops dma_direct_ops;
2041 +extern const struct dma_map_ops dma_direct_ops;
2042
2043 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2044 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2045 {
2046 /* We don't handle the NULL dev case for ISA for now. We could
2047 * do it via an out of line call but it is not needed for now. The
2048 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2049 return dev->archdata.dma_ops;
2050 }
2051
2052 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2053 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2054 {
2055 dev->archdata.dma_ops = ops;
2056 }
2057 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2058
2059 static inline int dma_supported(struct device *dev, u64 mask)
2060 {
2061 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2062 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2063
2064 if (unlikely(dma_ops == NULL))
2065 return 0;
2066 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2067
2068 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2069 {
2070 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2071 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2072
2073 if (unlikely(dma_ops == NULL))
2074 return -EIO;
2075 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2076 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2077 dma_addr_t *dma_handle, gfp_t flag)
2078 {
2079 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2080 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2081 void *cpu_addr;
2082
2083 BUG_ON(!dma_ops);
2084 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2085 static inline void dma_free_coherent(struct device *dev, size_t size,
2086 void *cpu_addr, dma_addr_t dma_handle)
2087 {
2088 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2089 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2090
2091 BUG_ON(!dma_ops);
2092
2093 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2094
2095 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2096 {
2097 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2098 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2099
2100 if (dma_ops->mapping_error)
2101 return dma_ops->mapping_error(dev, dma_addr);
2102 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/elf.h linux-2.6.32.42/arch/powerpc/include/asm/elf.h
2103 --- linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2104 +++ linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2105 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2106 the loader. We need to make sure that it is out of the way of the program
2107 that it will "exec", and that there is sufficient room for the brk. */
2108
2109 -extern unsigned long randomize_et_dyn(unsigned long base);
2110 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2111 +#define ELF_ET_DYN_BASE (0x20000000)
2112 +
2113 +#ifdef CONFIG_PAX_ASLR
2114 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2115 +
2116 +#ifdef __powerpc64__
2117 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2118 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2119 +#else
2120 +#define PAX_DELTA_MMAP_LEN 15
2121 +#define PAX_DELTA_STACK_LEN 15
2122 +#endif
2123 +#endif
2124
2125 /*
2126 * Our registers are always unsigned longs, whether we're a 32 bit
2127 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2128 (0x7ff >> (PAGE_SHIFT - 12)) : \
2129 (0x3ffff >> (PAGE_SHIFT - 12)))
2130
2131 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2132 -#define arch_randomize_brk arch_randomize_brk
2133 -
2134 #endif /* __KERNEL__ */
2135
2136 /*
2137 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/iommu.h linux-2.6.32.42/arch/powerpc/include/asm/iommu.h
2138 --- linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2139 +++ linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2140 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2141 extern void iommu_init_early_dart(void);
2142 extern void iommu_init_early_pasemi(void);
2143
2144 +/* dma-iommu.c */
2145 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2146 +
2147 #ifdef CONFIG_PCI
2148 extern void pci_iommu_init(void);
2149 extern void pci_direct_iommu_init(void);
2150 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h
2151 --- linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2152 +++ linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2153 @@ -26,6 +26,7 @@ enum km_type {
2154 KM_SOFTIRQ1,
2155 KM_PPC_SYNC_PAGE,
2156 KM_PPC_SYNC_ICACHE,
2157 + KM_CLEARPAGE,
2158 KM_TYPE_NR
2159 };
2160
2161 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page_64.h linux-2.6.32.42/arch/powerpc/include/asm/page_64.h
2162 --- linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2163 +++ linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2164 @@ -180,15 +180,18 @@ do { \
2165 * stack by default, so in the absense of a PT_GNU_STACK program header
2166 * we turn execute permission off.
2167 */
2168 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2169 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2170 +#define VM_STACK_DEFAULT_FLAGS32 \
2171 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2172 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2173
2174 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2175 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2176
2177 +#ifndef CONFIG_PAX_PAGEEXEC
2178 #define VM_STACK_DEFAULT_FLAGS \
2179 (test_thread_flag(TIF_32BIT) ? \
2180 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2181 +#endif
2182
2183 #include <asm-generic/getorder.h>
2184
2185 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page.h linux-2.6.32.42/arch/powerpc/include/asm/page.h
2186 --- linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2187 +++ linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2188 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2189 * and needs to be executable. This means the whole heap ends
2190 * up being executable.
2191 */
2192 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2193 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194 +#define VM_DATA_DEFAULT_FLAGS32 \
2195 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2196 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2199 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2200 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2201 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2202 #endif
2203
2204 +#define ktla_ktva(addr) (addr)
2205 +#define ktva_ktla(addr) (addr)
2206 +
2207 #ifndef __ASSEMBLY__
2208
2209 #undef STRICT_MM_TYPECHECKS
2210 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pci.h linux-2.6.32.42/arch/powerpc/include/asm/pci.h
2211 --- linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2212 +++ linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2213 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2214 }
2215
2216 #ifdef CONFIG_PCI
2217 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2218 -extern struct dma_map_ops *get_pci_dma_ops(void);
2219 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2220 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2221 #else /* CONFIG_PCI */
2222 #define set_pci_dma_ops(d)
2223 #define get_pci_dma_ops() NULL
2224 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h
2225 --- linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2226 +++ linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2227 @@ -2,6 +2,7 @@
2228 #define _ASM_POWERPC_PGTABLE_H
2229 #ifdef __KERNEL__
2230
2231 +#include <linux/const.h>
2232 #ifndef __ASSEMBLY__
2233 #include <asm/processor.h> /* For TASK_SIZE */
2234 #include <asm/mmu.h>
2235 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h
2236 --- linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2237 +++ linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2238 @@ -21,6 +21,7 @@
2239 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2240 #define _PAGE_USER 0x004 /* usermode access allowed */
2241 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2242 +#define _PAGE_EXEC _PAGE_GUARDED
2243 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2244 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2245 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2246 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/reg.h linux-2.6.32.42/arch/powerpc/include/asm/reg.h
2247 --- linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2248 +++ linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2249 @@ -191,6 +191,7 @@
2250 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2251 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2252 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2253 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2254 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2255 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2256 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2257 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h
2258 --- linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2259 +++ linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2260 @@ -13,7 +13,7 @@
2261
2262 #include <linux/swiotlb.h>
2263
2264 -extern struct dma_map_ops swiotlb_dma_ops;
2265 +extern const struct dma_map_ops swiotlb_dma_ops;
2266
2267 static inline void dma_mark_clean(void *addr, size_t size) {}
2268
2269 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/system.h linux-2.6.32.42/arch/powerpc/include/asm/system.h
2270 --- linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2271 +++ linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2272 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2273 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2274 #endif
2275
2276 -extern unsigned long arch_align_stack(unsigned long sp);
2277 +#define arch_align_stack(x) ((x) & ~0xfUL)
2278
2279 /* Used in very early kernel initialization. */
2280 extern unsigned long reloc_offset(void);
2281 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h
2282 --- linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2283 +++ linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2284 @@ -13,6 +13,8 @@
2285 #define VERIFY_READ 0
2286 #define VERIFY_WRITE 1
2287
2288 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2289 +
2290 /*
2291 * The fs value determines whether argument validity checking should be
2292 * performed or not. If get_fs() == USER_DS, checking is performed, with
2293 @@ -327,52 +329,6 @@ do { \
2294 extern unsigned long __copy_tofrom_user(void __user *to,
2295 const void __user *from, unsigned long size);
2296
2297 -#ifndef __powerpc64__
2298 -
2299 -static inline unsigned long copy_from_user(void *to,
2300 - const void __user *from, unsigned long n)
2301 -{
2302 - unsigned long over;
2303 -
2304 - if (access_ok(VERIFY_READ, from, n))
2305 - return __copy_tofrom_user((__force void __user *)to, from, n);
2306 - if ((unsigned long)from < TASK_SIZE) {
2307 - over = (unsigned long)from + n - TASK_SIZE;
2308 - return __copy_tofrom_user((__force void __user *)to, from,
2309 - n - over) + over;
2310 - }
2311 - return n;
2312 -}
2313 -
2314 -static inline unsigned long copy_to_user(void __user *to,
2315 - const void *from, unsigned long n)
2316 -{
2317 - unsigned long over;
2318 -
2319 - if (access_ok(VERIFY_WRITE, to, n))
2320 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2321 - if ((unsigned long)to < TASK_SIZE) {
2322 - over = (unsigned long)to + n - TASK_SIZE;
2323 - return __copy_tofrom_user(to, (__force void __user *)from,
2324 - n - over) + over;
2325 - }
2326 - return n;
2327 -}
2328 -
2329 -#else /* __powerpc64__ */
2330 -
2331 -#define __copy_in_user(to, from, size) \
2332 - __copy_tofrom_user((to), (from), (size))
2333 -
2334 -extern unsigned long copy_from_user(void *to, const void __user *from,
2335 - unsigned long n);
2336 -extern unsigned long copy_to_user(void __user *to, const void *from,
2337 - unsigned long n);
2338 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2339 - unsigned long n);
2340 -
2341 -#endif /* __powerpc64__ */
2342 -
2343 static inline unsigned long __copy_from_user_inatomic(void *to,
2344 const void __user *from, unsigned long n)
2345 {
2346 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2347 if (ret == 0)
2348 return 0;
2349 }
2350 +
2351 + if (!__builtin_constant_p(n))
2352 + check_object_size(to, n, false);
2353 +
2354 return __copy_tofrom_user((__force void __user *)to, from, n);
2355 }
2356
2357 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2358 if (ret == 0)
2359 return 0;
2360 }
2361 +
2362 + if (!__builtin_constant_p(n))
2363 + check_object_size(from, n, true);
2364 +
2365 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2366 }
2367
2368 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2369 return __copy_to_user_inatomic(to, from, size);
2370 }
2371
2372 +#ifndef __powerpc64__
2373 +
2374 +static inline unsigned long __must_check copy_from_user(void *to,
2375 + const void __user *from, unsigned long n)
2376 +{
2377 + unsigned long over;
2378 +
2379 + if ((long)n < 0)
2380 + return n;
2381 +
2382 + if (access_ok(VERIFY_READ, from, n)) {
2383 + if (!__builtin_constant_p(n))
2384 + check_object_size(to, n, false);
2385 + return __copy_tofrom_user((__force void __user *)to, from, n);
2386 + }
2387 + if ((unsigned long)from < TASK_SIZE) {
2388 + over = (unsigned long)from + n - TASK_SIZE;
2389 + if (!__builtin_constant_p(n - over))
2390 + check_object_size(to, n - over, false);
2391 + return __copy_tofrom_user((__force void __user *)to, from,
2392 + n - over) + over;
2393 + }
2394 + return n;
2395 +}
2396 +
2397 +static inline unsigned long __must_check copy_to_user(void __user *to,
2398 + const void *from, unsigned long n)
2399 +{
2400 + unsigned long over;
2401 +
2402 + if ((long)n < 0)
2403 + return n;
2404 +
2405 + if (access_ok(VERIFY_WRITE, to, n)) {
2406 + if (!__builtin_constant_p(n))
2407 + check_object_size(from, n, true);
2408 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2409 + }
2410 + if ((unsigned long)to < TASK_SIZE) {
2411 + over = (unsigned long)to + n - TASK_SIZE;
2412 + if (!__builtin_constant_p(n))
2413 + check_object_size(from, n - over, true);
2414 + return __copy_tofrom_user(to, (__force void __user *)from,
2415 + n - over) + over;
2416 + }
2417 + return n;
2418 +}
2419 +
2420 +#else /* __powerpc64__ */
2421 +
2422 +#define __copy_in_user(to, from, size) \
2423 + __copy_tofrom_user((to), (from), (size))
2424 +
2425 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2426 +{
2427 + if ((long)n < 0 || n > INT_MAX)
2428 + return n;
2429 +
2430 + if (!__builtin_constant_p(n))
2431 + check_object_size(to, n, false);
2432 +
2433 + if (likely(access_ok(VERIFY_READ, from, n)))
2434 + n = __copy_from_user(to, from, n);
2435 + else
2436 + memset(to, 0, n);
2437 + return n;
2438 +}
2439 +
2440 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2441 +{
2442 + if ((long)n < 0 || n > INT_MAX)
2443 + return n;
2444 +
2445 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2446 + if (!__builtin_constant_p(n))
2447 + check_object_size(from, n, true);
2448 + n = __copy_to_user(to, from, n);
2449 + }
2450 + return n;
2451 +}
2452 +
2453 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2454 + unsigned long n);
2455 +
2456 +#endif /* __powerpc64__ */
2457 +
2458 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2459
2460 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2461 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c
2462 --- linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2463 +++ linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2464 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2465 &cache_assoc_attr,
2466 };
2467
2468 -static struct sysfs_ops cache_index_ops = {
2469 +static const struct sysfs_ops cache_index_ops = {
2470 .show = cache_index_show,
2471 };
2472
2473 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma.c linux-2.6.32.42/arch/powerpc/kernel/dma.c
2474 --- linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2475 +++ linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2476 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2477 }
2478 #endif
2479
2480 -struct dma_map_ops dma_direct_ops = {
2481 +const struct dma_map_ops dma_direct_ops = {
2482 .alloc_coherent = dma_direct_alloc_coherent,
2483 .free_coherent = dma_direct_free_coherent,
2484 .map_sg = dma_direct_map_sg,
2485 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c
2486 --- linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2487 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2488 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2489 }
2490
2491 /* We support DMA to/from any memory page via the iommu */
2492 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2493 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2494 {
2495 struct iommu_table *tbl = get_iommu_table_base(dev);
2496
2497 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c
2498 --- linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2499 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2500 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2501 * map_page, and unmap_page on highmem, use normal dma_ops
2502 * for everything else.
2503 */
2504 -struct dma_map_ops swiotlb_dma_ops = {
2505 +const struct dma_map_ops swiotlb_dma_ops = {
2506 .alloc_coherent = dma_direct_alloc_coherent,
2507 .free_coherent = dma_direct_free_coherent,
2508 .map_sg = swiotlb_map_sg_attrs,
2509 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S
2510 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2511 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2512 @@ -455,6 +455,7 @@ storage_fault_common:
2513 std r14,_DAR(r1)
2514 std r15,_DSISR(r1)
2515 addi r3,r1,STACK_FRAME_OVERHEAD
2516 + bl .save_nvgprs
2517 mr r4,r14
2518 mr r5,r15
2519 ld r14,PACA_EXGEN+EX_R14(r13)
2520 @@ -464,8 +465,7 @@ storage_fault_common:
2521 cmpdi r3,0
2522 bne- 1f
2523 b .ret_from_except_lite
2524 -1: bl .save_nvgprs
2525 - mr r5,r3
2526 +1: mr r5,r3
2527 addi r3,r1,STACK_FRAME_OVERHEAD
2528 ld r4,_DAR(r1)
2529 bl .bad_page_fault
2530 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S
2531 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2532 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2533 @@ -818,10 +818,10 @@ handle_page_fault:
2534 11: ld r4,_DAR(r1)
2535 ld r5,_DSISR(r1)
2536 addi r3,r1,STACK_FRAME_OVERHEAD
2537 + bl .save_nvgprs
2538 bl .do_page_fault
2539 cmpdi r3,0
2540 beq+ 13f
2541 - bl .save_nvgprs
2542 mr r5,r3
2543 addi r3,r1,STACK_FRAME_OVERHEAD
2544 lwz r4,_DAR(r1)
2545 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c
2546 --- linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2547 +++ linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2548 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2549 return 1;
2550 }
2551
2552 -static struct dma_map_ops ibmebus_dma_ops = {
2553 +static const struct dma_map_ops ibmebus_dma_ops = {
2554 .alloc_coherent = ibmebus_alloc_coherent,
2555 .free_coherent = ibmebus_free_coherent,
2556 .map_sg = ibmebus_map_sg,
2557 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/kgdb.c linux-2.6.32.42/arch/powerpc/kernel/kgdb.c
2558 --- linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2559 +++ linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2560 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2561 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2562 return 0;
2563
2564 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2565 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2566 regs->nip += 4;
2567
2568 return 1;
2569 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2570 /*
2571 * Global data
2572 */
2573 -struct kgdb_arch arch_kgdb_ops = {
2574 +const struct kgdb_arch arch_kgdb_ops = {
2575 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2576 };
2577
2578 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module_32.c linux-2.6.32.42/arch/powerpc/kernel/module_32.c
2579 --- linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2580 +++ linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2581 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2582 me->arch.core_plt_section = i;
2583 }
2584 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2585 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2586 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2587 return -ENOEXEC;
2588 }
2589
2590 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2591
2592 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2593 /* Init, or core PLT? */
2594 - if (location >= mod->module_core
2595 - && location < mod->module_core + mod->core_size)
2596 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2597 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2598 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2599 - else
2600 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2601 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2602 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2603 + else {
2604 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2605 + return ~0UL;
2606 + }
2607
2608 /* Find this entry, or if that fails, the next avail. entry */
2609 while (entry->jump[0]) {
2610 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module.c linux-2.6.32.42/arch/powerpc/kernel/module.c
2611 --- linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2612 +++ linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2613 @@ -31,11 +31,24 @@
2614
2615 LIST_HEAD(module_bug_list);
2616
2617 +#ifdef CONFIG_PAX_KERNEXEC
2618 void *module_alloc(unsigned long size)
2619 {
2620 if (size == 0)
2621 return NULL;
2622
2623 + return vmalloc(size);
2624 +}
2625 +
2626 +void *module_alloc_exec(unsigned long size)
2627 +#else
2628 +void *module_alloc(unsigned long size)
2629 +#endif
2630 +
2631 +{
2632 + if (size == 0)
2633 + return NULL;
2634 +
2635 return vmalloc_exec(size);
2636 }
2637
2638 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2639 vfree(module_region);
2640 }
2641
2642 +#ifdef CONFIG_PAX_KERNEXEC
2643 +void module_free_exec(struct module *mod, void *module_region)
2644 +{
2645 + module_free(mod, module_region);
2646 +}
2647 +#endif
2648 +
2649 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2650 const Elf_Shdr *sechdrs,
2651 const char *name)
2652 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/pci-common.c linux-2.6.32.42/arch/powerpc/kernel/pci-common.c
2653 --- linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2654 +++ linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2655 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2656 unsigned int ppc_pci_flags = 0;
2657
2658
2659 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2660 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2661
2662 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2663 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2664 {
2665 pci_dma_ops = dma_ops;
2666 }
2667
2668 -struct dma_map_ops *get_pci_dma_ops(void)
2669 +const struct dma_map_ops *get_pci_dma_ops(void)
2670 {
2671 return pci_dma_ops;
2672 }
2673 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/process.c linux-2.6.32.42/arch/powerpc/kernel/process.c
2674 --- linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2675 +++ linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2676 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2677 * Lookup NIP late so we have the best change of getting the
2678 * above info out without failing
2679 */
2680 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2681 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2682 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2683 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2684 #endif
2685 show_stack(current, (unsigned long *) regs->gpr[1]);
2686 if (!user_mode(regs))
2687 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2688 newsp = stack[0];
2689 ip = stack[STACK_FRAME_LR_SAVE];
2690 if (!firstframe || ip != lr) {
2691 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2692 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2693 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2694 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2695 - printk(" (%pS)",
2696 + printk(" (%pA)",
2697 (void *)current->ret_stack[curr_frame].ret);
2698 curr_frame--;
2699 }
2700 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2701 struct pt_regs *regs = (struct pt_regs *)
2702 (sp + STACK_FRAME_OVERHEAD);
2703 lr = regs->link;
2704 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2705 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2706 regs->trap, (void *)regs->nip, (void *)lr);
2707 firstframe = 1;
2708 }
2709 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2710 }
2711
2712 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2713 -
2714 -unsigned long arch_align_stack(unsigned long sp)
2715 -{
2716 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2717 - sp -= get_random_int() & ~PAGE_MASK;
2718 - return sp & ~0xf;
2719 -}
2720 -
2721 -static inline unsigned long brk_rnd(void)
2722 -{
2723 - unsigned long rnd = 0;
2724 -
2725 - /* 8MB for 32bit, 1GB for 64bit */
2726 - if (is_32bit_task())
2727 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2728 - else
2729 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2730 -
2731 - return rnd << PAGE_SHIFT;
2732 -}
2733 -
2734 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2735 -{
2736 - unsigned long base = mm->brk;
2737 - unsigned long ret;
2738 -
2739 -#ifdef CONFIG_PPC_STD_MMU_64
2740 - /*
2741 - * If we are using 1TB segments and we are allowed to randomise
2742 - * the heap, we can put it above 1TB so it is backed by a 1TB
2743 - * segment. Otherwise the heap will be in the bottom 1TB
2744 - * which always uses 256MB segments and this may result in a
2745 - * performance penalty.
2746 - */
2747 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2748 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2749 -#endif
2750 -
2751 - ret = PAGE_ALIGN(base + brk_rnd());
2752 -
2753 - if (ret < mm->brk)
2754 - return mm->brk;
2755 -
2756 - return ret;
2757 -}
2758 -
2759 -unsigned long randomize_et_dyn(unsigned long base)
2760 -{
2761 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2762 -
2763 - if (ret < base)
2764 - return base;
2765 -
2766 - return ret;
2767 -}
2768 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_32.c linux-2.6.32.42/arch/powerpc/kernel/signal_32.c
2769 --- linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2770 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2771 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2772 /* Save user registers on the stack */
2773 frame = &rt_sf->uc.uc_mcontext;
2774 addr = frame;
2775 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2776 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2777 if (save_user_regs(regs, frame, 0, 1))
2778 goto badframe;
2779 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2780 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_64.c linux-2.6.32.42/arch/powerpc/kernel/signal_64.c
2781 --- linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2782 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2783 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2784 current->thread.fpscr.val = 0;
2785
2786 /* Set up to return from userspace. */
2787 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2788 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2789 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2790 } else {
2791 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2792 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c
2793 --- linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2794 +++ linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2795 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2796 if (oldlenp) {
2797 if (!error) {
2798 if (get_user(oldlen, oldlenp) ||
2799 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2800 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2801 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2802 error = -EFAULT;
2803 }
2804 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2805 }
2806 return error;
2807 }
2808 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/traps.c linux-2.6.32.42/arch/powerpc/kernel/traps.c
2809 --- linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2810 +++ linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2811 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2812 static inline void pmac_backlight_unblank(void) { }
2813 #endif
2814
2815 +extern void gr_handle_kernel_exploit(void);
2816 +
2817 int die(const char *str, struct pt_regs *regs, long err)
2818 {
2819 static struct {
2820 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2821 if (panic_on_oops)
2822 panic("Fatal exception");
2823
2824 + gr_handle_kernel_exploit();
2825 +
2826 oops_exit();
2827 do_exit(err);
2828
2829 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vdso.c linux-2.6.32.42/arch/powerpc/kernel/vdso.c
2830 --- linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2831 +++ linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2832 @@ -36,6 +36,7 @@
2833 #include <asm/firmware.h>
2834 #include <asm/vdso.h>
2835 #include <asm/vdso_datapage.h>
2836 +#include <asm/mman.h>
2837
2838 #include "setup.h"
2839
2840 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2841 vdso_base = VDSO32_MBASE;
2842 #endif
2843
2844 - current->mm->context.vdso_base = 0;
2845 + current->mm->context.vdso_base = ~0UL;
2846
2847 /* vDSO has a problem and was disabled, just don't "enable" it for the
2848 * process
2849 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2850 vdso_base = get_unmapped_area(NULL, vdso_base,
2851 (vdso_pages << PAGE_SHIFT) +
2852 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2853 - 0, 0);
2854 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2855 if (IS_ERR_VALUE(vdso_base)) {
2856 rc = vdso_base;
2857 goto fail_mmapsem;
2858 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vio.c linux-2.6.32.42/arch/powerpc/kernel/vio.c
2859 --- linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2860 +++ linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2861 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2862 vio_cmo_dealloc(viodev, alloc_size);
2863 }
2864
2865 -struct dma_map_ops vio_dma_mapping_ops = {
2866 +static const struct dma_map_ops vio_dma_mapping_ops = {
2867 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2868 .free_coherent = vio_dma_iommu_free_coherent,
2869 .map_sg = vio_dma_iommu_map_sg,
2870 .unmap_sg = vio_dma_iommu_unmap_sg,
2871 + .dma_supported = dma_iommu_dma_supported,
2872 .map_page = vio_dma_iommu_map_page,
2873 .unmap_page = vio_dma_iommu_unmap_page,
2874
2875 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2876
2877 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2878 {
2879 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2880 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2881 }
2882
2883 diff -urNp linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c
2884 --- linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2885 +++ linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2886 @@ -9,22 +9,6 @@
2887 #include <linux/module.h>
2888 #include <asm/uaccess.h>
2889
2890 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2891 -{
2892 - if (likely(access_ok(VERIFY_READ, from, n)))
2893 - n = __copy_from_user(to, from, n);
2894 - else
2895 - memset(to, 0, n);
2896 - return n;
2897 -}
2898 -
2899 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2900 -{
2901 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2902 - n = __copy_to_user(to, from, n);
2903 - return n;
2904 -}
2905 -
2906 unsigned long copy_in_user(void __user *to, const void __user *from,
2907 unsigned long n)
2908 {
2909 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2910 return n;
2911 }
2912
2913 -EXPORT_SYMBOL(copy_from_user);
2914 -EXPORT_SYMBOL(copy_to_user);
2915 EXPORT_SYMBOL(copy_in_user);
2916
2917 diff -urNp linux-2.6.32.42/arch/powerpc/mm/fault.c linux-2.6.32.42/arch/powerpc/mm/fault.c
2918 --- linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2919 +++ linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2920 @@ -30,6 +30,10 @@
2921 #include <linux/kprobes.h>
2922 #include <linux/kdebug.h>
2923 #include <linux/perf_event.h>
2924 +#include <linux/slab.h>
2925 +#include <linux/pagemap.h>
2926 +#include <linux/compiler.h>
2927 +#include <linux/unistd.h>
2928
2929 #include <asm/firmware.h>
2930 #include <asm/page.h>
2931 @@ -40,6 +44,7 @@
2932 #include <asm/uaccess.h>
2933 #include <asm/tlbflush.h>
2934 #include <asm/siginfo.h>
2935 +#include <asm/ptrace.h>
2936
2937
2938 #ifdef CONFIG_KPROBES
2939 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2940 }
2941 #endif
2942
2943 +#ifdef CONFIG_PAX_PAGEEXEC
2944 +/*
2945 + * PaX: decide what to do with offenders (regs->nip = fault address)
2946 + *
2947 + * returns 1 when task should be killed
2948 + */
2949 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2950 +{
2951 + return 1;
2952 +}
2953 +
2954 +void pax_report_insns(void *pc, void *sp)
2955 +{
2956 + unsigned long i;
2957 +
2958 + printk(KERN_ERR "PAX: bytes at PC: ");
2959 + for (i = 0; i < 5; i++) {
2960 + unsigned int c;
2961 + if (get_user(c, (unsigned int __user *)pc+i))
2962 + printk(KERN_CONT "???????? ");
2963 + else
2964 + printk(KERN_CONT "%08x ", c);
2965 + }
2966 + printk("\n");
2967 +}
2968 +#endif
2969 +
2970 /*
2971 * Check whether the instruction at regs->nip is a store using
2972 * an update addressing form which will update r1.
2973 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2974 * indicate errors in DSISR but can validly be set in SRR1.
2975 */
2976 if (trap == 0x400)
2977 - error_code &= 0x48200000;
2978 + error_code &= 0x58200000;
2979 else
2980 is_write = error_code & DSISR_ISSTORE;
2981 #else
2982 @@ -250,7 +282,7 @@ good_area:
2983 * "undefined". Of those that can be set, this is the only
2984 * one which seems bad.
2985 */
2986 - if (error_code & 0x10000000)
2987 + if (error_code & DSISR_GUARDED)
2988 /* Guarded storage error. */
2989 goto bad_area;
2990 #endif /* CONFIG_8xx */
2991 @@ -265,7 +297,7 @@ good_area:
2992 * processors use the same I/D cache coherency mechanism
2993 * as embedded.
2994 */
2995 - if (error_code & DSISR_PROTFAULT)
2996 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2997 goto bad_area;
2998 #endif /* CONFIG_PPC_STD_MMU */
2999
3000 @@ -335,6 +367,23 @@ bad_area:
3001 bad_area_nosemaphore:
3002 /* User mode accesses cause a SIGSEGV */
3003 if (user_mode(regs)) {
3004 +
3005 +#ifdef CONFIG_PAX_PAGEEXEC
3006 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3007 +#ifdef CONFIG_PPC_STD_MMU
3008 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3009 +#else
3010 + if (is_exec && regs->nip == address) {
3011 +#endif
3012 + switch (pax_handle_fetch_fault(regs)) {
3013 + }
3014 +
3015 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3016 + do_group_exit(SIGKILL);
3017 + }
3018 + }
3019 +#endif
3020 +
3021 _exception(SIGSEGV, regs, code, address);
3022 return 0;
3023 }
3024 diff -urNp linux-2.6.32.42/arch/powerpc/mm/mmap_64.c linux-2.6.32.42/arch/powerpc/mm/mmap_64.c
3025 --- linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3026 +++ linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3027 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3028 */
3029 if (mmap_is_legacy()) {
3030 mm->mmap_base = TASK_UNMAPPED_BASE;
3031 +
3032 +#ifdef CONFIG_PAX_RANDMMAP
3033 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3034 + mm->mmap_base += mm->delta_mmap;
3035 +#endif
3036 +
3037 mm->get_unmapped_area = arch_get_unmapped_area;
3038 mm->unmap_area = arch_unmap_area;
3039 } else {
3040 mm->mmap_base = mmap_base();
3041 +
3042 +#ifdef CONFIG_PAX_RANDMMAP
3043 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3044 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3045 +#endif
3046 +
3047 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3048 mm->unmap_area = arch_unmap_area_topdown;
3049 }
3050 diff -urNp linux-2.6.32.42/arch/powerpc/mm/slice.c linux-2.6.32.42/arch/powerpc/mm/slice.c
3051 --- linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3052 +++ linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3053 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3054 if ((mm->task_size - len) < addr)
3055 return 0;
3056 vma = find_vma(mm, addr);
3057 - return (!vma || (addr + len) <= vma->vm_start);
3058 + return check_heap_stack_gap(vma, addr, len);
3059 }
3060
3061 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3062 @@ -256,7 +256,7 @@ full_search:
3063 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3064 continue;
3065 }
3066 - if (!vma || addr + len <= vma->vm_start) {
3067 + if (check_heap_stack_gap(vma, addr, len)) {
3068 /*
3069 * Remember the place where we stopped the search:
3070 */
3071 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3072 }
3073 }
3074
3075 - addr = mm->mmap_base;
3076 - while (addr > len) {
3077 + if (mm->mmap_base < len)
3078 + addr = -ENOMEM;
3079 + else
3080 + addr = mm->mmap_base - len;
3081 +
3082 + while (!IS_ERR_VALUE(addr)) {
3083 /* Go down by chunk size */
3084 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3085 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3086
3087 /* Check for hit with different page size */
3088 mask = slice_range_to_mask(addr, len);
3089 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3090 * return with success:
3091 */
3092 vma = find_vma(mm, addr);
3093 - if (!vma || (addr + len) <= vma->vm_start) {
3094 + if (check_heap_stack_gap(vma, addr, len)) {
3095 /* remember the address as a hint for next time */
3096 if (use_cache)
3097 mm->free_area_cache = addr;
3098 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3099 mm->cached_hole_size = vma->vm_start - addr;
3100
3101 /* try just below the current vma->vm_start */
3102 - addr = vma->vm_start;
3103 + addr = skip_heap_stack_gap(vma, len);
3104 }
3105
3106 /*
3107 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3108 if (fixed && addr > (mm->task_size - len))
3109 return -EINVAL;
3110
3111 +#ifdef CONFIG_PAX_RANDMMAP
3112 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3113 + addr = 0;
3114 +#endif
3115 +
3116 /* If hint, make sure it matches our alignment restrictions */
3117 if (!fixed && addr) {
3118 addr = _ALIGN_UP(addr, 1ul << pshift);
3119 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c
3120 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3121 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3122 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3123 lite5200_pm_target_state = PM_SUSPEND_ON;
3124 }
3125
3126 -static struct platform_suspend_ops lite5200_pm_ops = {
3127 +static const struct platform_suspend_ops lite5200_pm_ops = {
3128 .valid = lite5200_pm_valid,
3129 .begin = lite5200_pm_begin,
3130 .prepare = lite5200_pm_prepare,
3131 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3132 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3133 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3134 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3135 iounmap(mbar);
3136 }
3137
3138 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3139 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3140 .valid = mpc52xx_pm_valid,
3141 .prepare = mpc52xx_pm_prepare,
3142 .enter = mpc52xx_pm_enter,
3143 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c
3144 --- linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3145 +++ linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3146 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3147 return ret;
3148 }
3149
3150 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3151 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3152 .valid = mpc83xx_suspend_valid,
3153 .begin = mpc83xx_suspend_begin,
3154 .enter = mpc83xx_suspend_enter,
3155 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c
3156 --- linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3157 +++ linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3158 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3159
3160 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3161
3162 -struct dma_map_ops dma_iommu_fixed_ops = {
3163 +const struct dma_map_ops dma_iommu_fixed_ops = {
3164 .alloc_coherent = dma_fixed_alloc_coherent,
3165 .free_coherent = dma_fixed_free_coherent,
3166 .map_sg = dma_fixed_map_sg,
3167 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c
3168 --- linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3169 +++ linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3170 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3171 return mask >= DMA_BIT_MASK(32);
3172 }
3173
3174 -static struct dma_map_ops ps3_sb_dma_ops = {
3175 +static const struct dma_map_ops ps3_sb_dma_ops = {
3176 .alloc_coherent = ps3_alloc_coherent,
3177 .free_coherent = ps3_free_coherent,
3178 .map_sg = ps3_sb_map_sg,
3179 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3180 .unmap_page = ps3_unmap_page,
3181 };
3182
3183 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3184 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3185 .alloc_coherent = ps3_alloc_coherent,
3186 .free_coherent = ps3_free_coherent,
3187 .map_sg = ps3_ioc0_map_sg,
3188 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig
3189 --- linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3190 +++ linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3191 @@ -2,6 +2,8 @@ config PPC_PSERIES
3192 depends on PPC64 && PPC_BOOK3S
3193 bool "IBM pSeries & new (POWER5-based) iSeries"
3194 select MPIC
3195 + select PCI_MSI
3196 + select XICS
3197 select PPC_I8259
3198 select PPC_RTAS
3199 select RTAS_ERROR_LOGGING
3200 diff -urNp linux-2.6.32.42/arch/s390/include/asm/elf.h linux-2.6.32.42/arch/s390/include/asm/elf.h
3201 --- linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3202 +++ linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3203 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3204 that it will "exec", and that there is sufficient room for the brk. */
3205 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3206
3207 +#ifdef CONFIG_PAX_ASLR
3208 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3209 +
3210 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3211 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3212 +#endif
3213 +
3214 /* This yields a mask that user programs can use to figure out what
3215 instruction set this CPU supports. */
3216
3217 diff -urNp linux-2.6.32.42/arch/s390/include/asm/setup.h linux-2.6.32.42/arch/s390/include/asm/setup.h
3218 --- linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3219 +++ linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3220 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3221 void detect_memory_layout(struct mem_chunk chunk[]);
3222
3223 #ifdef CONFIG_S390_SWITCH_AMODE
3224 -extern unsigned int switch_amode;
3225 +#define switch_amode (1)
3226 #else
3227 #define switch_amode (0)
3228 #endif
3229
3230 #ifdef CONFIG_S390_EXEC_PROTECT
3231 -extern unsigned int s390_noexec;
3232 +#define s390_noexec (1)
3233 #else
3234 #define s390_noexec (0)
3235 #endif
3236 diff -urNp linux-2.6.32.42/arch/s390/include/asm/uaccess.h linux-2.6.32.42/arch/s390/include/asm/uaccess.h
3237 --- linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3238 +++ linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3239 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3240 copy_to_user(void __user *to, const void *from, unsigned long n)
3241 {
3242 might_fault();
3243 +
3244 + if ((long)n < 0)
3245 + return n;
3246 +
3247 if (access_ok(VERIFY_WRITE, to, n))
3248 n = __copy_to_user(to, from, n);
3249 return n;
3250 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3251 static inline unsigned long __must_check
3252 __copy_from_user(void *to, const void __user *from, unsigned long n)
3253 {
3254 + if ((long)n < 0)
3255 + return n;
3256 +
3257 if (__builtin_constant_p(n) && (n <= 256))
3258 return uaccess.copy_from_user_small(n, from, to);
3259 else
3260 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3261 copy_from_user(void *to, const void __user *from, unsigned long n)
3262 {
3263 might_fault();
3264 +
3265 + if ((long)n < 0)
3266 + return n;
3267 +
3268 if (access_ok(VERIFY_READ, from, n))
3269 n = __copy_from_user(to, from, n);
3270 else
3271 diff -urNp linux-2.6.32.42/arch/s390/Kconfig linux-2.6.32.42/arch/s390/Kconfig
3272 --- linux-2.6.32.42/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3273 +++ linux-2.6.32.42/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3274 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3275
3276 config S390_SWITCH_AMODE
3277 bool "Switch kernel/user addressing modes"
3278 + default y
3279 help
3280 This option allows to switch the addressing modes of kernel and user
3281 - space. The kernel parameter switch_amode=on will enable this feature,
3282 - default is disabled. Enabling this (via kernel parameter) on machines
3283 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3284 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3285 + will reduce system performance.
3286
3287 Note that this option will also be selected by selecting the execute
3288 - protection option below. Enabling the execute protection via the
3289 - noexec kernel parameter will also switch the addressing modes,
3290 - independent of the switch_amode kernel parameter.
3291 + protection option below. Enabling the execute protection will also
3292 + switch the addressing modes, independent of this option.
3293
3294
3295 config S390_EXEC_PROTECT
3296 bool "Data execute protection"
3297 + default y
3298 select S390_SWITCH_AMODE
3299 help
3300 This option allows to enable a buffer overflow protection for user
3301 space programs and it also selects the addressing mode option above.
3302 - The kernel parameter noexec=on will enable this feature and also
3303 - switch the addressing modes, default is disabled. Enabling this (via
3304 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3305 - will reduce system performance.
3306 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3307 + reduce system performance.
3308
3309 comment "Code generation options"
3310
3311 diff -urNp linux-2.6.32.42/arch/s390/kernel/module.c linux-2.6.32.42/arch/s390/kernel/module.c
3312 --- linux-2.6.32.42/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3313 +++ linux-2.6.32.42/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3314 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3315
3316 /* Increase core size by size of got & plt and set start
3317 offsets for got and plt. */
3318 - me->core_size = ALIGN(me->core_size, 4);
3319 - me->arch.got_offset = me->core_size;
3320 - me->core_size += me->arch.got_size;
3321 - me->arch.plt_offset = me->core_size;
3322 - me->core_size += me->arch.plt_size;
3323 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3324 + me->arch.got_offset = me->core_size_rw;
3325 + me->core_size_rw += me->arch.got_size;
3326 + me->arch.plt_offset = me->core_size_rx;
3327 + me->core_size_rx += me->arch.plt_size;
3328 return 0;
3329 }
3330
3331 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3332 if (info->got_initialized == 0) {
3333 Elf_Addr *gotent;
3334
3335 - gotent = me->module_core + me->arch.got_offset +
3336 + gotent = me->module_core_rw + me->arch.got_offset +
3337 info->got_offset;
3338 *gotent = val;
3339 info->got_initialized = 1;
3340 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3341 else if (r_type == R_390_GOTENT ||
3342 r_type == R_390_GOTPLTENT)
3343 *(unsigned int *) loc =
3344 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3345 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3346 else if (r_type == R_390_GOT64 ||
3347 r_type == R_390_GOTPLT64)
3348 *(unsigned long *) loc = val;
3349 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3350 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3351 if (info->plt_initialized == 0) {
3352 unsigned int *ip;
3353 - ip = me->module_core + me->arch.plt_offset +
3354 + ip = me->module_core_rx + me->arch.plt_offset +
3355 info->plt_offset;
3356 #ifndef CONFIG_64BIT
3357 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3358 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3359 val - loc + 0xffffUL < 0x1ffffeUL) ||
3360 (r_type == R_390_PLT32DBL &&
3361 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3362 - val = (Elf_Addr) me->module_core +
3363 + val = (Elf_Addr) me->module_core_rx +
3364 me->arch.plt_offset +
3365 info->plt_offset;
3366 val += rela->r_addend - loc;
3367 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3368 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3369 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3370 val = val + rela->r_addend -
3371 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3372 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3373 if (r_type == R_390_GOTOFF16)
3374 *(unsigned short *) loc = val;
3375 else if (r_type == R_390_GOTOFF32)
3376 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3377 break;
3378 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3379 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3380 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3381 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3382 rela->r_addend - loc;
3383 if (r_type == R_390_GOTPC)
3384 *(unsigned int *) loc = val;
3385 diff -urNp linux-2.6.32.42/arch/s390/kernel/setup.c linux-2.6.32.42/arch/s390/kernel/setup.c
3386 --- linux-2.6.32.42/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3387 +++ linux-2.6.32.42/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3388 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3389 early_param("mem", early_parse_mem);
3390
3391 #ifdef CONFIG_S390_SWITCH_AMODE
3392 -unsigned int switch_amode = 0;
3393 -EXPORT_SYMBOL_GPL(switch_amode);
3394 -
3395 static int set_amode_and_uaccess(unsigned long user_amode,
3396 unsigned long user32_amode)
3397 {
3398 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3399 return 0;
3400 }
3401 }
3402 -
3403 -/*
3404 - * Switch kernel/user addressing modes?
3405 - */
3406 -static int __init early_parse_switch_amode(char *p)
3407 -{
3408 - switch_amode = 1;
3409 - return 0;
3410 -}
3411 -early_param("switch_amode", early_parse_switch_amode);
3412 -
3413 #else /* CONFIG_S390_SWITCH_AMODE */
3414 static inline int set_amode_and_uaccess(unsigned long user_amode,
3415 unsigned long user32_amode)
3416 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3417 }
3418 #endif /* CONFIG_S390_SWITCH_AMODE */
3419
3420 -#ifdef CONFIG_S390_EXEC_PROTECT
3421 -unsigned int s390_noexec = 0;
3422 -EXPORT_SYMBOL_GPL(s390_noexec);
3423 -
3424 -/*
3425 - * Enable execute protection?
3426 - */
3427 -static int __init early_parse_noexec(char *p)
3428 -{
3429 - if (!strncmp(p, "off", 3))
3430 - return 0;
3431 - switch_amode = 1;
3432 - s390_noexec = 1;
3433 - return 0;
3434 -}
3435 -early_param("noexec", early_parse_noexec);
3436 -#endif /* CONFIG_S390_EXEC_PROTECT */
3437 -
3438 static void setup_addressing_mode(void)
3439 {
3440 if (s390_noexec) {
3441 diff -urNp linux-2.6.32.42/arch/s390/mm/mmap.c linux-2.6.32.42/arch/s390/mm/mmap.c
3442 --- linux-2.6.32.42/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3443 +++ linux-2.6.32.42/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3444 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3445 */
3446 if (mmap_is_legacy()) {
3447 mm->mmap_base = TASK_UNMAPPED_BASE;
3448 +
3449 +#ifdef CONFIG_PAX_RANDMMAP
3450 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3451 + mm->mmap_base += mm->delta_mmap;
3452 +#endif
3453 +
3454 mm->get_unmapped_area = arch_get_unmapped_area;
3455 mm->unmap_area = arch_unmap_area;
3456 } else {
3457 mm->mmap_base = mmap_base();
3458 +
3459 +#ifdef CONFIG_PAX_RANDMMAP
3460 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3461 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3462 +#endif
3463 +
3464 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3465 mm->unmap_area = arch_unmap_area_topdown;
3466 }
3467 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3468 */
3469 if (mmap_is_legacy()) {
3470 mm->mmap_base = TASK_UNMAPPED_BASE;
3471 +
3472 +#ifdef CONFIG_PAX_RANDMMAP
3473 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3474 + mm->mmap_base += mm->delta_mmap;
3475 +#endif
3476 +
3477 mm->get_unmapped_area = s390_get_unmapped_area;
3478 mm->unmap_area = arch_unmap_area;
3479 } else {
3480 mm->mmap_base = mmap_base();
3481 +
3482 +#ifdef CONFIG_PAX_RANDMMAP
3483 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3484 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3485 +#endif
3486 +
3487 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3488 mm->unmap_area = arch_unmap_area_topdown;
3489 }
3490 diff -urNp linux-2.6.32.42/arch/score/include/asm/system.h linux-2.6.32.42/arch/score/include/asm/system.h
3491 --- linux-2.6.32.42/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3492 +++ linux-2.6.32.42/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3493 @@ -17,7 +17,7 @@ do { \
3494 #define finish_arch_switch(prev) do {} while (0)
3495
3496 typedef void (*vi_handler_t)(void);
3497 -extern unsigned long arch_align_stack(unsigned long sp);
3498 +#define arch_align_stack(x) (x)
3499
3500 #define mb() barrier()
3501 #define rmb() barrier()
3502 diff -urNp linux-2.6.32.42/arch/score/kernel/process.c linux-2.6.32.42/arch/score/kernel/process.c
3503 --- linux-2.6.32.42/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3504 +++ linux-2.6.32.42/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3505 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3506
3507 return task_pt_regs(task)->cp0_epc;
3508 }
3509 -
3510 -unsigned long arch_align_stack(unsigned long sp)
3511 -{
3512 - return sp;
3513 -}
3514 diff -urNp linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c
3515 --- linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3516 +++ linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3517 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3518 return 0;
3519 }
3520
3521 -static struct platform_suspend_ops hp6x0_pm_ops = {
3522 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3523 .enter = hp6x0_pm_enter,
3524 .valid = suspend_valid_only_mem,
3525 };
3526 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c
3527 --- linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3528 +++ linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3529 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3530 NULL,
3531 };
3532
3533 -static struct sysfs_ops sq_sysfs_ops = {
3534 +static const struct sysfs_ops sq_sysfs_ops = {
3535 .show = sq_sysfs_show,
3536 .store = sq_sysfs_store,
3537 };
3538 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c
3539 --- linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3540 +++ linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3541 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3542 return 0;
3543 }
3544
3545 -static struct platform_suspend_ops sh_pm_ops = {
3546 +static const struct platform_suspend_ops sh_pm_ops = {
3547 .enter = sh_pm_enter,
3548 .valid = suspend_valid_only_mem,
3549 };
3550 diff -urNp linux-2.6.32.42/arch/sh/kernel/kgdb.c linux-2.6.32.42/arch/sh/kernel/kgdb.c
3551 --- linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3552 +++ linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3553 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3554 {
3555 }
3556
3557 -struct kgdb_arch arch_kgdb_ops = {
3558 +const struct kgdb_arch arch_kgdb_ops = {
3559 /* Breakpoint instruction: trapa #0x3c */
3560 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3561 .gdb_bpt_instr = { 0x3c, 0xc3 },
3562 diff -urNp linux-2.6.32.42/arch/sh/mm/mmap.c linux-2.6.32.42/arch/sh/mm/mmap.c
3563 --- linux-2.6.32.42/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3564 +++ linux-2.6.32.42/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3565 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3566 addr = PAGE_ALIGN(addr);
3567
3568 vma = find_vma(mm, addr);
3569 - if (TASK_SIZE - len >= addr &&
3570 - (!vma || addr + len <= vma->vm_start))
3571 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3572 return addr;
3573 }
3574
3575 @@ -106,7 +105,7 @@ full_search:
3576 }
3577 return -ENOMEM;
3578 }
3579 - if (likely(!vma || addr + len <= vma->vm_start)) {
3580 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3581 /*
3582 * Remember the place where we stopped the search:
3583 */
3584 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3585 addr = PAGE_ALIGN(addr);
3586
3587 vma = find_vma(mm, addr);
3588 - if (TASK_SIZE - len >= addr &&
3589 - (!vma || addr + len <= vma->vm_start))
3590 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3591 return addr;
3592 }
3593
3594 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3595 /* make sure it can fit in the remaining address space */
3596 if (likely(addr > len)) {
3597 vma = find_vma(mm, addr-len);
3598 - if (!vma || addr <= vma->vm_start) {
3599 + if (check_heap_stack_gap(vma, addr - len, len)) {
3600 /* remember the address as a hint for next time */
3601 return (mm->free_area_cache = addr-len);
3602 }
3603 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3604 if (unlikely(mm->mmap_base < len))
3605 goto bottomup;
3606
3607 - addr = mm->mmap_base-len;
3608 - if (do_colour_align)
3609 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3610 + addr = mm->mmap_base - len;
3611
3612 do {
3613 + if (do_colour_align)
3614 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3615 /*
3616 * Lookup failure means no vma is above this address,
3617 * else if new region fits below vma->vm_start,
3618 * return with success:
3619 */
3620 vma = find_vma(mm, addr);
3621 - if (likely(!vma || addr+len <= vma->vm_start)) {
3622 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3623 /* remember the address as a hint for next time */
3624 return (mm->free_area_cache = addr);
3625 }
3626 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3627 mm->cached_hole_size = vma->vm_start - addr;
3628
3629 /* try just below the current vma->vm_start */
3630 - addr = vma->vm_start-len;
3631 - if (do_colour_align)
3632 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3633 - } while (likely(len < vma->vm_start));
3634 + addr = skip_heap_stack_gap(vma, len);
3635 + } while (!IS_ERR_VALUE(addr));
3636
3637 bottomup:
3638 /*
3639 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h
3640 --- linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3641 +++ linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3642 @@ -14,18 +14,40 @@
3643 #define ATOMIC64_INIT(i) { (i) }
3644
3645 #define atomic_read(v) ((v)->counter)
3646 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3647 +{
3648 + return v->counter;
3649 +}
3650 #define atomic64_read(v) ((v)->counter)
3651 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3652 +{
3653 + return v->counter;
3654 +}
3655
3656 #define atomic_set(v, i) (((v)->counter) = i)
3657 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3658 +{
3659 + v->counter = i;
3660 +}
3661 #define atomic64_set(v, i) (((v)->counter) = i)
3662 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3663 +{
3664 + v->counter = i;
3665 +}
3666
3667 extern void atomic_add(int, atomic_t *);
3668 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3669 extern void atomic64_add(long, atomic64_t *);
3670 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3671 extern void atomic_sub(int, atomic_t *);
3672 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3673 extern void atomic64_sub(long, atomic64_t *);
3674 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3675
3676 extern int atomic_add_ret(int, atomic_t *);
3677 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3678 extern long atomic64_add_ret(long, atomic64_t *);
3679 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3680 extern int atomic_sub_ret(int, atomic_t *);
3681 extern long atomic64_sub_ret(long, atomic64_t *);
3682
3683 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3684 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3685
3686 #define atomic_inc_return(v) atomic_add_ret(1, v)
3687 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3688 +{
3689 + return atomic_add_ret_unchecked(1, v);
3690 +}
3691 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3692 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3693 +{
3694 + return atomic64_add_ret_unchecked(1, v);
3695 +}
3696
3697 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3698 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3699 @@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3700 * other cases.
3701 */
3702 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3703 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3704 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3705
3706 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3707 @@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3708 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3709
3710 #define atomic_inc(v) atomic_add(1, v)
3711 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3712 +{
3713 + atomic_add_unchecked(1, v);
3714 +}
3715 #define atomic64_inc(v) atomic64_add(1, v)
3716 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3717 +{
3718 + atomic64_add_unchecked(1, v);
3719 +}
3720
3721 #define atomic_dec(v) atomic_sub(1, v)
3722 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3723 +{
3724 + atomic_sub_unchecked(1, v);
3725 +}
3726 #define atomic64_dec(v) atomic64_sub(1, v)
3727 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3728 +{
3729 + atomic64_sub_unchecked(1, v);
3730 +}
3731
3732 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3733 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3734
3735 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3736 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3737 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3738 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3739
3740 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3741 {
3742 - int c, old;
3743 + int c, old, new;
3744 c = atomic_read(v);
3745 for (;;) {
3746 - if (unlikely(c == (u)))
3747 + if (unlikely(c == u))
3748 break;
3749 - old = atomic_cmpxchg((v), c, c + (a));
3750 +
3751 + asm volatile("addcc %2, %0, %0\n"
3752 +
3753 +#ifdef CONFIG_PAX_REFCOUNT
3754 + "tvs %%icc, 6\n"
3755 +#endif
3756 +
3757 + : "=r" (new)
3758 + : "0" (c), "ir" (a)
3759 + : "cc");
3760 +
3761 + old = atomic_cmpxchg(v, c, new);
3762 if (likely(old == c))
3763 break;
3764 c = old;
3765 }
3766 - return c != (u);
3767 + return c != u;
3768 }
3769
3770 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3771 @@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3772
3773 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3774 {
3775 - long c, old;
3776 + long c, old, new;
3777 c = atomic64_read(v);
3778 for (;;) {
3779 - if (unlikely(c == (u)))
3780 + if (unlikely(c == u))
3781 break;
3782 - old = atomic64_cmpxchg((v), c, c + (a));
3783 +
3784 + asm volatile("addcc %2, %0, %0\n"
3785 +
3786 +#ifdef CONFIG_PAX_REFCOUNT
3787 + "tvs %%xcc, 6\n"
3788 +#endif
3789 +
3790 + : "=r" (new)
3791 + : "0" (c), "ir" (a)
3792 + : "cc");
3793 +
3794 + old = atomic64_cmpxchg(v, c, new);
3795 if (likely(old == c))
3796 break;
3797 c = old;
3798 }
3799 - return c != (u);
3800 + return c != u;
3801 }
3802
3803 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3804 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/cache.h linux-2.6.32.42/arch/sparc/include/asm/cache.h
3805 --- linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3806 +++ linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3807 @@ -8,7 +8,7 @@
3808 #define _SPARC_CACHE_H
3809
3810 #define L1_CACHE_SHIFT 5
3811 -#define L1_CACHE_BYTES 32
3812 +#define L1_CACHE_BYTES 32U
3813 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3814
3815 #ifdef CONFIG_SPARC32
3816 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h
3817 --- linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3818 +++ linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3819 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3820 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3821 #define dma_is_consistent(d, h) (1)
3822
3823 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3824 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3825 extern struct bus_type pci_bus_type;
3826
3827 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3828 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3829 {
3830 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3831 if (dev->bus == &pci_bus_type)
3832 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3833 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3834 dma_addr_t *dma_handle, gfp_t flag)
3835 {
3836 - struct dma_map_ops *ops = get_dma_ops(dev);
3837 + const struct dma_map_ops *ops = get_dma_ops(dev);
3838 void *cpu_addr;
3839
3840 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3841 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3842 static inline void dma_free_coherent(struct device *dev, size_t size,
3843 void *cpu_addr, dma_addr_t dma_handle)
3844 {
3845 - struct dma_map_ops *ops = get_dma_ops(dev);
3846 + const struct dma_map_ops *ops = get_dma_ops(dev);
3847
3848 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3849 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3850 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_32.h linux-2.6.32.42/arch/sparc/include/asm/elf_32.h
3851 --- linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3852 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3853 @@ -116,6 +116,13 @@ typedef struct {
3854
3855 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3856
3857 +#ifdef CONFIG_PAX_ASLR
3858 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3859 +
3860 +#define PAX_DELTA_MMAP_LEN 16
3861 +#define PAX_DELTA_STACK_LEN 16
3862 +#endif
3863 +
3864 /* This yields a mask that user programs can use to figure out what
3865 instruction set this cpu supports. This can NOT be done in userspace
3866 on Sparc. */
3867 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_64.h linux-2.6.32.42/arch/sparc/include/asm/elf_64.h
3868 --- linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3869 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3870 @@ -163,6 +163,12 @@ typedef struct {
3871 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3872 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3873
3874 +#ifdef CONFIG_PAX_ASLR
3875 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3876 +
3877 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3878 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3879 +#endif
3880
3881 /* This yields a mask that user programs can use to figure out what
3882 instruction set this cpu supports. */
3883 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h
3884 --- linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3885 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3886 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3887 BTFIXUPDEF_INT(page_none)
3888 BTFIXUPDEF_INT(page_copy)
3889 BTFIXUPDEF_INT(page_readonly)
3890 +
3891 +#ifdef CONFIG_PAX_PAGEEXEC
3892 +BTFIXUPDEF_INT(page_shared_noexec)
3893 +BTFIXUPDEF_INT(page_copy_noexec)
3894 +BTFIXUPDEF_INT(page_readonly_noexec)
3895 +#endif
3896 +
3897 BTFIXUPDEF_INT(page_kernel)
3898
3899 #define PMD_SHIFT SUN4C_PMD_SHIFT
3900 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3901 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3902 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3903
3904 +#ifdef CONFIG_PAX_PAGEEXEC
3905 +extern pgprot_t PAGE_SHARED_NOEXEC;
3906 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3907 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3908 +#else
3909 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3910 +# define PAGE_COPY_NOEXEC PAGE_COPY
3911 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3912 +#endif
3913 +
3914 extern unsigned long page_kernel;
3915
3916 #ifdef MODULE
3917 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h
3918 --- linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3919 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3920 @@ -115,6 +115,13 @@
3921 SRMMU_EXEC | SRMMU_REF)
3922 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3923 SRMMU_EXEC | SRMMU_REF)
3924 +
3925 +#ifdef CONFIG_PAX_PAGEEXEC
3926 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3927 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3928 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3929 +#endif
3930 +
3931 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3932 SRMMU_DIRTY | SRMMU_REF)
3933
3934 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h
3935 --- linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3936 +++ linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3937 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3938
3939 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3940
3941 -static void inline arch_read_lock(raw_rwlock_t *lock)
3942 +static inline void arch_read_lock(raw_rwlock_t *lock)
3943 {
3944 unsigned long tmp1, tmp2;
3945
3946 __asm__ __volatile__ (
3947 "1: ldsw [%2], %0\n"
3948 " brlz,pn %0, 2f\n"
3949 -"4: add %0, 1, %1\n"
3950 +"4: addcc %0, 1, %1\n"
3951 +
3952 +#ifdef CONFIG_PAX_REFCOUNT
3953 +" tvs %%icc, 6\n"
3954 +#endif
3955 +
3956 " cas [%2], %0, %1\n"
3957 " cmp %0, %1\n"
3958 " bne,pn %%icc, 1b\n"
3959 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3960 " .previous"
3961 : "=&r" (tmp1), "=&r" (tmp2)
3962 : "r" (lock)
3963 - : "memory");
3964 + : "memory", "cc");
3965 }
3966
3967 static int inline arch_read_trylock(raw_rwlock_t *lock)
3968 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3969 "1: ldsw [%2], %0\n"
3970 " brlz,a,pn %0, 2f\n"
3971 " mov 0, %0\n"
3972 -" add %0, 1, %1\n"
3973 +" addcc %0, 1, %1\n"
3974 +
3975 +#ifdef CONFIG_PAX_REFCOUNT
3976 +" tvs %%icc, 6\n"
3977 +#endif
3978 +
3979 " cas [%2], %0, %1\n"
3980 " cmp %0, %1\n"
3981 " bne,pn %%icc, 1b\n"
3982 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3983 return tmp1;
3984 }
3985
3986 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3987 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3988 {
3989 unsigned long tmp1, tmp2;
3990
3991 __asm__ __volatile__(
3992 "1: lduw [%2], %0\n"
3993 -" sub %0, 1, %1\n"
3994 +" subcc %0, 1, %1\n"
3995 +
3996 +#ifdef CONFIG_PAX_REFCOUNT
3997 +" tvs %%icc, 6\n"
3998 +#endif
3999 +
4000 " cas [%2], %0, %1\n"
4001 " cmp %0, %1\n"
4002 " bne,pn %%xcc, 1b\n"
4003 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4004 : "memory");
4005 }
4006
4007 -static void inline arch_write_lock(raw_rwlock_t *lock)
4008 +static inline void arch_write_lock(raw_rwlock_t *lock)
4009 {
4010 unsigned long mask, tmp1, tmp2;
4011
4012 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4013 : "memory");
4014 }
4015
4016 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4017 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4018 {
4019 __asm__ __volatile__(
4020 " stw %%g0, [%0]"
4021 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h
4022 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4023 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4024 @@ -50,6 +50,8 @@ struct thread_info {
4025 unsigned long w_saved;
4026
4027 struct restart_block restart_block;
4028 +
4029 + unsigned long lowest_stack;
4030 };
4031
4032 /*
4033 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h
4034 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4035 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4036 @@ -68,6 +68,8 @@ struct thread_info {
4037 struct pt_regs *kern_una_regs;
4038 unsigned int kern_una_insn;
4039
4040 + unsigned long lowest_stack;
4041 +
4042 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4043 };
4044
4045 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h
4046 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4047 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4048 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4049
4050 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4051 {
4052 - if (n && __access_ok((unsigned long) to, n))
4053 + if ((long)n < 0)
4054 + return n;
4055 +
4056 + if (n && __access_ok((unsigned long) to, n)) {
4057 + if (!__builtin_constant_p(n))
4058 + check_object_size(from, n, true);
4059 return __copy_user(to, (__force void __user *) from, n);
4060 - else
4061 + } else
4062 return n;
4063 }
4064
4065 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4066 {
4067 + if ((long)n < 0)
4068 + return n;
4069 +
4070 + if (!__builtin_constant_p(n))
4071 + check_object_size(from, n, true);
4072 +
4073 return __copy_user(to, (__force void __user *) from, n);
4074 }
4075
4076 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4077 {
4078 - if (n && __access_ok((unsigned long) from, n))
4079 + if ((long)n < 0)
4080 + return n;
4081 +
4082 + if (n && __access_ok((unsigned long) from, n)) {
4083 + if (!__builtin_constant_p(n))
4084 + check_object_size(to, n, false);
4085 return __copy_user((__force void __user *) to, from, n);
4086 - else
4087 + } else
4088 return n;
4089 }
4090
4091 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4092 {
4093 + if ((long)n < 0)
4094 + return n;
4095 +
4096 return __copy_user((__force void __user *) to, from, n);
4097 }
4098
4099 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h
4100 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4101 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4102 @@ -9,6 +9,7 @@
4103 #include <linux/compiler.h>
4104 #include <linux/string.h>
4105 #include <linux/thread_info.h>
4106 +#include <linux/kernel.h>
4107 #include <asm/asi.h>
4108 #include <asm/system.h>
4109 #include <asm/spitfire.h>
4110 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4111 static inline unsigned long __must_check
4112 copy_from_user(void *to, const void __user *from, unsigned long size)
4113 {
4114 - unsigned long ret = ___copy_from_user(to, from, size);
4115 + unsigned long ret;
4116
4117 + if ((long)size < 0 || size > INT_MAX)
4118 + return size;
4119 +
4120 + if (!__builtin_constant_p(size))
4121 + check_object_size(to, size, false);
4122 +
4123 + ret = ___copy_from_user(to, from, size);
4124 if (unlikely(ret))
4125 ret = copy_from_user_fixup(to, from, size);
4126 return ret;
4127 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4128 static inline unsigned long __must_check
4129 copy_to_user(void __user *to, const void *from, unsigned long size)
4130 {
4131 - unsigned long ret = ___copy_to_user(to, from, size);
4132 + unsigned long ret;
4133 +
4134 + if ((long)size < 0 || size > INT_MAX)
4135 + return size;
4136 +
4137 + if (!__builtin_constant_p(size))
4138 + check_object_size(from, size, true);
4139
4140 + ret = ___copy_to_user(to, from, size);
4141 if (unlikely(ret))
4142 ret = copy_to_user_fixup(to, from, size);
4143 return ret;
4144 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess.h linux-2.6.32.42/arch/sparc/include/asm/uaccess.h
4145 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4146 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4147 @@ -1,5 +1,13 @@
4148 #ifndef ___ASM_SPARC_UACCESS_H
4149 #define ___ASM_SPARC_UACCESS_H
4150 +
4151 +#ifdef __KERNEL__
4152 +#ifndef __ASSEMBLY__
4153 +#include <linux/types.h>
4154 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4155 +#endif
4156 +#endif
4157 +
4158 #if defined(__sparc__) && defined(__arch64__)
4159 #include <asm/uaccess_64.h>
4160 #else
4161 diff -urNp linux-2.6.32.42/arch/sparc/kernel/iommu.c linux-2.6.32.42/arch/sparc/kernel/iommu.c
4162 --- linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4163 +++ linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4164 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4165 spin_unlock_irqrestore(&iommu->lock, flags);
4166 }
4167
4168 -static struct dma_map_ops sun4u_dma_ops = {
4169 +static const struct dma_map_ops sun4u_dma_ops = {
4170 .alloc_coherent = dma_4u_alloc_coherent,
4171 .free_coherent = dma_4u_free_coherent,
4172 .map_page = dma_4u_map_page,
4173 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4174 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4175 };
4176
4177 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4178 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4179 EXPORT_SYMBOL(dma_ops);
4180
4181 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4182 diff -urNp linux-2.6.32.42/arch/sparc/kernel/ioport.c linux-2.6.32.42/arch/sparc/kernel/ioport.c
4183 --- linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4184 +++ linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4185 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4186 BUG();
4187 }
4188
4189 -struct dma_map_ops sbus_dma_ops = {
4190 +const struct dma_map_ops sbus_dma_ops = {
4191 .alloc_coherent = sbus_alloc_coherent,
4192 .free_coherent = sbus_free_coherent,
4193 .map_page = sbus_map_page,
4194 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4195 .sync_sg_for_device = sbus_sync_sg_for_device,
4196 };
4197
4198 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4199 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4200 EXPORT_SYMBOL(dma_ops);
4201
4202 static int __init sparc_register_ioport(void)
4203 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4204 }
4205 }
4206
4207 -struct dma_map_ops pci32_dma_ops = {
4208 +const struct dma_map_ops pci32_dma_ops = {
4209 .alloc_coherent = pci32_alloc_coherent,
4210 .free_coherent = pci32_free_coherent,
4211 .map_page = pci32_map_page,
4212 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c
4213 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4214 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4215 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4216 {
4217 }
4218
4219 -struct kgdb_arch arch_kgdb_ops = {
4220 +const struct kgdb_arch arch_kgdb_ops = {
4221 /* Breakpoint instruction: ta 0x7d */
4222 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4223 };
4224 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c
4225 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4226 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4227 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4228 {
4229 }
4230
4231 -struct kgdb_arch arch_kgdb_ops = {
4232 +const struct kgdb_arch arch_kgdb_ops = {
4233 /* Breakpoint instruction: ta 0x72 */
4234 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4235 };
4236 diff -urNp linux-2.6.32.42/arch/sparc/kernel/Makefile linux-2.6.32.42/arch/sparc/kernel/Makefile
4237 --- linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4238 +++ linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4239 @@ -3,7 +3,7 @@
4240 #
4241
4242 asflags-y := -ansi
4243 -ccflags-y := -Werror
4244 +#ccflags-y := -Werror
4245
4246 extra-y := head_$(BITS).o
4247 extra-y += init_task.o
4248 diff -urNp linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c
4249 --- linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4250 +++ linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4251 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4252 spin_unlock_irqrestore(&iommu->lock, flags);
4253 }
4254
4255 -static struct dma_map_ops sun4v_dma_ops = {
4256 +static const struct dma_map_ops sun4v_dma_ops = {
4257 .alloc_coherent = dma_4v_alloc_coherent,
4258 .free_coherent = dma_4v_free_coherent,
4259 .map_page = dma_4v_map_page,
4260 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_32.c linux-2.6.32.42/arch/sparc/kernel/process_32.c
4261 --- linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4262 +++ linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4263 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4264 rw->ins[4], rw->ins[5],
4265 rw->ins[6],
4266 rw->ins[7]);
4267 - printk("%pS\n", (void *) rw->ins[7]);
4268 + printk("%pA\n", (void *) rw->ins[7]);
4269 rw = (struct reg_window32 *) rw->ins[6];
4270 }
4271 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4272 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4273
4274 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4275 r->psr, r->pc, r->npc, r->y, print_tainted());
4276 - printk("PC: <%pS>\n", (void *) r->pc);
4277 + printk("PC: <%pA>\n", (void *) r->pc);
4278 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4279 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4280 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4281 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4282 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4283 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4284 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4285 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4286
4287 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4288 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4289 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4290 rw = (struct reg_window32 *) fp;
4291 pc = rw->ins[7];
4292 printk("[%08lx : ", pc);
4293 - printk("%pS ] ", (void *) pc);
4294 + printk("%pA ] ", (void *) pc);
4295 fp = rw->ins[6];
4296 } while (++count < 16);
4297 printk("\n");
4298 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_64.c linux-2.6.32.42/arch/sparc/kernel/process_64.c
4299 --- linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4300 +++ linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4301 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4302 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4303 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4304 if (regs->tstate & TSTATE_PRIV)
4305 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4306 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4307 }
4308
4309 void show_regs(struct pt_regs *regs)
4310 {
4311 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4312 regs->tpc, regs->tnpc, regs->y, print_tainted());
4313 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4314 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4315 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4316 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4317 regs->u_regs[3]);
4318 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4319 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4320 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4321 regs->u_regs[15]);
4322 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4323 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4324 show_regwindow(regs);
4325 }
4326
4327 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4328 ((tp && tp->task) ? tp->task->pid : -1));
4329
4330 if (gp->tstate & TSTATE_PRIV) {
4331 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4332 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4333 (void *) gp->tpc,
4334 (void *) gp->o7,
4335 (void *) gp->i7,
4336 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c
4337 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4338 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4339 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4340 if (ARCH_SUN4C && len > 0x20000000)
4341 return -ENOMEM;
4342 if (!addr)
4343 - addr = TASK_UNMAPPED_BASE;
4344 + addr = current->mm->mmap_base;
4345
4346 if (flags & MAP_SHARED)
4347 addr = COLOUR_ALIGN(addr);
4348 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4349 }
4350 if (TASK_SIZE - PAGE_SIZE - len < addr)
4351 return -ENOMEM;
4352 - if (!vmm || addr + len <= vmm->vm_start)
4353 + if (check_heap_stack_gap(vmm, addr, len))
4354 return addr;
4355 addr = vmm->vm_end;
4356 if (flags & MAP_SHARED)
4357 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c
4358 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4359 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4360 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4361 /* We do not accept a shared mapping if it would violate
4362 * cache aliasing constraints.
4363 */
4364 - if ((flags & MAP_SHARED) &&
4365 + if ((filp || (flags & MAP_SHARED)) &&
4366 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4367 return -EINVAL;
4368 return addr;
4369 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4370 if (filp || (flags & MAP_SHARED))
4371 do_color_align = 1;
4372
4373 +#ifdef CONFIG_PAX_RANDMMAP
4374 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4375 +#endif
4376 +
4377 if (addr) {
4378 if (do_color_align)
4379 addr = COLOUR_ALIGN(addr, pgoff);
4380 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4381 addr = PAGE_ALIGN(addr);
4382
4383 vma = find_vma(mm, addr);
4384 - if (task_size - len >= addr &&
4385 - (!vma || addr + len <= vma->vm_start))
4386 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4387 return addr;
4388 }
4389
4390 if (len > mm->cached_hole_size) {
4391 - start_addr = addr = mm->free_area_cache;
4392 + start_addr = addr = mm->free_area_cache;
4393 } else {
4394 - start_addr = addr = TASK_UNMAPPED_BASE;
4395 + start_addr = addr = mm->mmap_base;
4396 mm->cached_hole_size = 0;
4397 }
4398
4399 @@ -175,14 +178,14 @@ full_search:
4400 vma = find_vma(mm, VA_EXCLUDE_END);
4401 }
4402 if (unlikely(task_size < addr)) {
4403 - if (start_addr != TASK_UNMAPPED_BASE) {
4404 - start_addr = addr = TASK_UNMAPPED_BASE;
4405 + if (start_addr != mm->mmap_base) {
4406 + start_addr = addr = mm->mmap_base;
4407 mm->cached_hole_size = 0;
4408 goto full_search;
4409 }
4410 return -ENOMEM;
4411 }
4412 - if (likely(!vma || addr + len <= vma->vm_start)) {
4413 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4414 /*
4415 * Remember the place where we stopped the search:
4416 */
4417 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4418 /* We do not accept a shared mapping if it would violate
4419 * cache aliasing constraints.
4420 */
4421 - if ((flags & MAP_SHARED) &&
4422 + if ((filp || (flags & MAP_SHARED)) &&
4423 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4424 return -EINVAL;
4425 return addr;
4426 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4427 addr = PAGE_ALIGN(addr);
4428
4429 vma = find_vma(mm, addr);
4430 - if (task_size - len >= addr &&
4431 - (!vma || addr + len <= vma->vm_start))
4432 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4433 return addr;
4434 }
4435
4436 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4437 /* make sure it can fit in the remaining address space */
4438 if (likely(addr > len)) {
4439 vma = find_vma(mm, addr-len);
4440 - if (!vma || addr <= vma->vm_start) {
4441 + if (check_heap_stack_gap(vma, addr - len, len)) {
4442 /* remember the address as a hint for next time */
4443 return (mm->free_area_cache = addr-len);
4444 }
4445 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4446 if (unlikely(mm->mmap_base < len))
4447 goto bottomup;
4448
4449 - addr = mm->mmap_base-len;
4450 - if (do_color_align)
4451 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4452 + addr = mm->mmap_base - len;
4453
4454 do {
4455 + if (do_color_align)
4456 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4457 /*
4458 * Lookup failure means no vma is above this address,
4459 * else if new region fits below vma->vm_start,
4460 * return with success:
4461 */
4462 vma = find_vma(mm, addr);
4463 - if (likely(!vma || addr+len <= vma->vm_start)) {
4464 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4465 /* remember the address as a hint for next time */
4466 return (mm->free_area_cache = addr);
4467 }
4468 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4469 mm->cached_hole_size = vma->vm_start - addr;
4470
4471 /* try just below the current vma->vm_start */
4472 - addr = vma->vm_start-len;
4473 - if (do_color_align)
4474 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4475 - } while (likely(len < vma->vm_start));
4476 + addr = skip_heap_stack_gap(vma, len);
4477 + } while (!IS_ERR_VALUE(addr));
4478
4479 bottomup:
4480 /*
4481 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4482 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4483 sysctl_legacy_va_layout) {
4484 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4485 +
4486 +#ifdef CONFIG_PAX_RANDMMAP
4487 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4488 + mm->mmap_base += mm->delta_mmap;
4489 +#endif
4490 +
4491 mm->get_unmapped_area = arch_get_unmapped_area;
4492 mm->unmap_area = arch_unmap_area;
4493 } else {
4494 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4495 gap = (task_size / 6 * 5);
4496
4497 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4498 +
4499 +#ifdef CONFIG_PAX_RANDMMAP
4500 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4501 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4502 +#endif
4503 +
4504 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4505 mm->unmap_area = arch_unmap_area_topdown;
4506 }
4507 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_32.c linux-2.6.32.42/arch/sparc/kernel/traps_32.c
4508 --- linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4509 +++ linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4510 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4511 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4512 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4513
4514 +extern void gr_handle_kernel_exploit(void);
4515 +
4516 void die_if_kernel(char *str, struct pt_regs *regs)
4517 {
4518 static int die_counter;
4519 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4520 count++ < 30 &&
4521 (((unsigned long) rw) >= PAGE_OFFSET) &&
4522 !(((unsigned long) rw) & 0x7)) {
4523 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4524 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4525 (void *) rw->ins[7]);
4526 rw = (struct reg_window32 *)rw->ins[6];
4527 }
4528 }
4529 printk("Instruction DUMP:");
4530 instruction_dump ((unsigned long *) regs->pc);
4531 - if(regs->psr & PSR_PS)
4532 + if(regs->psr & PSR_PS) {
4533 + gr_handle_kernel_exploit();
4534 do_exit(SIGKILL);
4535 + }
4536 do_exit(SIGSEGV);
4537 }
4538
4539 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_64.c linux-2.6.32.42/arch/sparc/kernel/traps_64.c
4540 --- linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4541 +++ linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4542 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4543 i + 1,
4544 p->trapstack[i].tstate, p->trapstack[i].tpc,
4545 p->trapstack[i].tnpc, p->trapstack[i].tt);
4546 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4547 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4548 }
4549 }
4550
4551 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4552
4553 lvl -= 0x100;
4554 if (regs->tstate & TSTATE_PRIV) {
4555 +
4556 +#ifdef CONFIG_PAX_REFCOUNT
4557 + if (lvl == 6)
4558 + pax_report_refcount_overflow(regs);
4559 +#endif
4560 +
4561 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4562 die_if_kernel(buffer, regs);
4563 }
4564 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4565 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4566 {
4567 char buffer[32];
4568 -
4569 +
4570 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4571 0, lvl, SIGTRAP) == NOTIFY_STOP)
4572 return;
4573
4574 +#ifdef CONFIG_PAX_REFCOUNT
4575 + if (lvl == 6)
4576 + pax_report_refcount_overflow(regs);
4577 +#endif
4578 +
4579 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4580
4581 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4582 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4583 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4584 printk("%s" "ERROR(%d): ",
4585 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4586 - printk("TPC<%pS>\n", (void *) regs->tpc);
4587 + printk("TPC<%pA>\n", (void *) regs->tpc);
4588 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4589 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4590 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4591 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4592 smp_processor_id(),
4593 (type & 0x1) ? 'I' : 'D',
4594 regs->tpc);
4595 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4596 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4597 panic("Irrecoverable Cheetah+ parity error.");
4598 }
4599
4600 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4601 smp_processor_id(),
4602 (type & 0x1) ? 'I' : 'D',
4603 regs->tpc);
4604 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4605 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4606 }
4607
4608 struct sun4v_error_entry {
4609 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4610
4611 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4612 regs->tpc, tl);
4613 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4614 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4615 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4616 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4617 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4618 (void *) regs->u_regs[UREG_I7]);
4619 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4620 "pte[%lx] error[%lx]\n",
4621 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4622
4623 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4624 regs->tpc, tl);
4625 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4626 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4627 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4628 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4629 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4630 (void *) regs->u_regs[UREG_I7]);
4631 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4632 "pte[%lx] error[%lx]\n",
4633 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4634 fp = (unsigned long)sf->fp + STACK_BIAS;
4635 }
4636
4637 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4638 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4639 } while (++count < 16);
4640 }
4641
4642 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4643 return (struct reg_window *) (fp + STACK_BIAS);
4644 }
4645
4646 +extern void gr_handle_kernel_exploit(void);
4647 +
4648 void die_if_kernel(char *str, struct pt_regs *regs)
4649 {
4650 static int die_counter;
4651 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4652 while (rw &&
4653 count++ < 30&&
4654 is_kernel_stack(current, rw)) {
4655 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4656 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4657 (void *) rw->ins[7]);
4658
4659 rw = kernel_stack_up(rw);
4660 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4661 }
4662 user_instruction_dump ((unsigned int __user *) regs->tpc);
4663 }
4664 - if (regs->tstate & TSTATE_PRIV)
4665 + if (regs->tstate & TSTATE_PRIV) {
4666 + gr_handle_kernel_exploit();
4667 do_exit(SIGKILL);
4668 + }
4669 +
4670 do_exit(SIGSEGV);
4671 }
4672 EXPORT_SYMBOL(die_if_kernel);
4673 diff -urNp linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c
4674 --- linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4675 +++ linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4676 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4677 if (count < 5) {
4678 last_time = jiffies;
4679 count++;
4680 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4681 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4682 regs->tpc, (void *) regs->tpc);
4683 }
4684 }
4685 diff -urNp linux-2.6.32.42/arch/sparc/lib/atomic_64.S linux-2.6.32.42/arch/sparc/lib/atomic_64.S
4686 --- linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4687 +++ linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4688 @@ -18,7 +18,12 @@
4689 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4690 BACKOFF_SETUP(%o2)
4691 1: lduw [%o1], %g1
4692 - add %g1, %o0, %g7
4693 + addcc %g1, %o0, %g7
4694 +
4695 +#ifdef CONFIG_PAX_REFCOUNT
4696 + tvs %icc, 6
4697 +#endif
4698 +
4699 cas [%o1], %g1, %g7
4700 cmp %g1, %g7
4701 bne,pn %icc, 2f
4702 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4703 2: BACKOFF_SPIN(%o2, %o3, 1b)
4704 .size atomic_add, .-atomic_add
4705
4706 + .globl atomic_add_unchecked
4707 + .type atomic_add_unchecked,#function
4708 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4709 + BACKOFF_SETUP(%o2)
4710 +1: lduw [%o1], %g1
4711 + add %g1, %o0, %g7
4712 + cas [%o1], %g1, %g7
4713 + cmp %g1, %g7
4714 + bne,pn %icc, 2f
4715 + nop
4716 + retl
4717 + nop
4718 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4719 + .size atomic_add_unchecked, .-atomic_add_unchecked
4720 +
4721 .globl atomic_sub
4722 .type atomic_sub,#function
4723 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4724 BACKOFF_SETUP(%o2)
4725 1: lduw [%o1], %g1
4726 - sub %g1, %o0, %g7
4727 + subcc %g1, %o0, %g7
4728 +
4729 +#ifdef CONFIG_PAX_REFCOUNT
4730 + tvs %icc, 6
4731 +#endif
4732 +
4733 cas [%o1], %g1, %g7
4734 cmp %g1, %g7
4735 bne,pn %icc, 2f
4736 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4737 2: BACKOFF_SPIN(%o2, %o3, 1b)
4738 .size atomic_sub, .-atomic_sub
4739
4740 + .globl atomic_sub_unchecked
4741 + .type atomic_sub_unchecked,#function
4742 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4743 + BACKOFF_SETUP(%o2)
4744 +1: lduw [%o1], %g1
4745 + sub %g1, %o0, %g7
4746 + cas [%o1], %g1, %g7
4747 + cmp %g1, %g7
4748 + bne,pn %icc, 2f
4749 + nop
4750 + retl
4751 + nop
4752 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4753 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4754 +
4755 .globl atomic_add_ret
4756 .type atomic_add_ret,#function
4757 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4758 BACKOFF_SETUP(%o2)
4759 1: lduw [%o1], %g1
4760 - add %g1, %o0, %g7
4761 + addcc %g1, %o0, %g7
4762 +
4763 +#ifdef CONFIG_PAX_REFCOUNT
4764 + tvs %icc, 6
4765 +#endif
4766 +
4767 cas [%o1], %g1, %g7
4768 cmp %g1, %g7
4769 bne,pn %icc, 2f
4770 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4771 2: BACKOFF_SPIN(%o2, %o3, 1b)
4772 .size atomic_add_ret, .-atomic_add_ret
4773
4774 + .globl atomic_add_ret_unchecked
4775 + .type atomic_add_ret_unchecked,#function
4776 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4777 + BACKOFF_SETUP(%o2)
4778 +1: lduw [%o1], %g1
4779 + addcc %g1, %o0, %g7
4780 + cas [%o1], %g1, %g7
4781 + cmp %g1, %g7
4782 + bne,pn %icc, 2f
4783 + add %g7, %o0, %g7
4784 + sra %g7, 0, %o0
4785 + retl
4786 + nop
4787 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4788 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4789 +
4790 .globl atomic_sub_ret
4791 .type atomic_sub_ret,#function
4792 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4793 BACKOFF_SETUP(%o2)
4794 1: lduw [%o1], %g1
4795 - sub %g1, %o0, %g7
4796 + subcc %g1, %o0, %g7
4797 +
4798 +#ifdef CONFIG_PAX_REFCOUNT
4799 + tvs %icc, 6
4800 +#endif
4801 +
4802 cas [%o1], %g1, %g7
4803 cmp %g1, %g7
4804 bne,pn %icc, 2f
4805 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4806 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4807 BACKOFF_SETUP(%o2)
4808 1: ldx [%o1], %g1
4809 - add %g1, %o0, %g7
4810 + addcc %g1, %o0, %g7
4811 +
4812 +#ifdef CONFIG_PAX_REFCOUNT
4813 + tvs %xcc, 6
4814 +#endif
4815 +
4816 casx [%o1], %g1, %g7
4817 cmp %g1, %g7
4818 bne,pn %xcc, 2f
4819 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4820 2: BACKOFF_SPIN(%o2, %o3, 1b)
4821 .size atomic64_add, .-atomic64_add
4822
4823 + .globl atomic64_add_unchecked
4824 + .type atomic64_add_unchecked,#function
4825 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4826 + BACKOFF_SETUP(%o2)
4827 +1: ldx [%o1], %g1
4828 + addcc %g1, %o0, %g7
4829 + casx [%o1], %g1, %g7
4830 + cmp %g1, %g7
4831 + bne,pn %xcc, 2f
4832 + nop
4833 + retl
4834 + nop
4835 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4836 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4837 +
4838 .globl atomic64_sub
4839 .type atomic64_sub,#function
4840 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4841 BACKOFF_SETUP(%o2)
4842 1: ldx [%o1], %g1
4843 - sub %g1, %o0, %g7
4844 + subcc %g1, %o0, %g7
4845 +
4846 +#ifdef CONFIG_PAX_REFCOUNT
4847 + tvs %xcc, 6
4848 +#endif
4849 +
4850 casx [%o1], %g1, %g7
4851 cmp %g1, %g7
4852 bne,pn %xcc, 2f
4853 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4854 2: BACKOFF_SPIN(%o2, %o3, 1b)
4855 .size atomic64_sub, .-atomic64_sub
4856
4857 + .globl atomic64_sub_unchecked
4858 + .type atomic64_sub_unchecked,#function
4859 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4860 + BACKOFF_SETUP(%o2)
4861 +1: ldx [%o1], %g1
4862 + subcc %g1, %o0, %g7
4863 + casx [%o1], %g1, %g7
4864 + cmp %g1, %g7
4865 + bne,pn %xcc, 2f
4866 + nop
4867 + retl
4868 + nop
4869 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4870 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4871 +
4872 .globl atomic64_add_ret
4873 .type atomic64_add_ret,#function
4874 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4875 BACKOFF_SETUP(%o2)
4876 1: ldx [%o1], %g1
4877 - add %g1, %o0, %g7
4878 + addcc %g1, %o0, %g7
4879 +
4880 +#ifdef CONFIG_PAX_REFCOUNT
4881 + tvs %xcc, 6
4882 +#endif
4883 +
4884 casx [%o1], %g1, %g7
4885 cmp %g1, %g7
4886 bne,pn %xcc, 2f
4887 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4888 2: BACKOFF_SPIN(%o2, %o3, 1b)
4889 .size atomic64_add_ret, .-atomic64_add_ret
4890
4891 + .globl atomic64_add_ret_unchecked
4892 + .type atomic64_add_ret_unchecked,#function
4893 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4894 + BACKOFF_SETUP(%o2)
4895 +1: ldx [%o1], %g1
4896 + addcc %g1, %o0, %g7
4897 + casx [%o1], %g1, %g7
4898 + cmp %g1, %g7
4899 + bne,pn %xcc, 2f
4900 + add %g7, %o0, %g7
4901 + mov %g7, %o0
4902 + retl
4903 + nop
4904 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4906 +
4907 .globl atomic64_sub_ret
4908 .type atomic64_sub_ret,#function
4909 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4910 BACKOFF_SETUP(%o2)
4911 1: ldx [%o1], %g1
4912 - sub %g1, %o0, %g7
4913 + subcc %g1, %o0, %g7
4914 +
4915 +#ifdef CONFIG_PAX_REFCOUNT
4916 + tvs %xcc, 6
4917 +#endif
4918 +
4919 casx [%o1], %g1, %g7
4920 cmp %g1, %g7
4921 bne,pn %xcc, 2f
4922 diff -urNp linux-2.6.32.42/arch/sparc/lib/ksyms.c linux-2.6.32.42/arch/sparc/lib/ksyms.c
4923 --- linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4924 +++ linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4925 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4926
4927 /* Atomic counter implementation. */
4928 EXPORT_SYMBOL(atomic_add);
4929 +EXPORT_SYMBOL(atomic_add_unchecked);
4930 EXPORT_SYMBOL(atomic_add_ret);
4931 EXPORT_SYMBOL(atomic_sub);
4932 +EXPORT_SYMBOL(atomic_sub_unchecked);
4933 EXPORT_SYMBOL(atomic_sub_ret);
4934 EXPORT_SYMBOL(atomic64_add);
4935 +EXPORT_SYMBOL(atomic64_add_unchecked);
4936 EXPORT_SYMBOL(atomic64_add_ret);
4937 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4938 EXPORT_SYMBOL(atomic64_sub);
4939 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4940 EXPORT_SYMBOL(atomic64_sub_ret);
4941
4942 /* Atomic bit operations. */
4943 diff -urNp linux-2.6.32.42/arch/sparc/lib/Makefile linux-2.6.32.42/arch/sparc/lib/Makefile
4944 --- linux-2.6.32.42/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4945 +++ linux-2.6.32.42/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4946 @@ -2,7 +2,7 @@
4947 #
4948
4949 asflags-y := -ansi -DST_DIV0=0x02
4950 -ccflags-y := -Werror
4951 +#ccflags-y := -Werror
4952
4953 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4954 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4955 diff -urNp linux-2.6.32.42/arch/sparc/lib/rwsem_64.S linux-2.6.32.42/arch/sparc/lib/rwsem_64.S
4956 --- linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4957 +++ linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4958 @@ -11,7 +11,12 @@
4959 .globl __down_read
4960 __down_read:
4961 1: lduw [%o0], %g1
4962 - add %g1, 1, %g7
4963 + addcc %g1, 1, %g7
4964 +
4965 +#ifdef CONFIG_PAX_REFCOUNT
4966 + tvs %icc, 6
4967 +#endif
4968 +
4969 cas [%o0], %g1, %g7
4970 cmp %g1, %g7
4971 bne,pn %icc, 1b
4972 @@ -33,7 +38,12 @@ __down_read:
4973 .globl __down_read_trylock
4974 __down_read_trylock:
4975 1: lduw [%o0], %g1
4976 - add %g1, 1, %g7
4977 + addcc %g1, 1, %g7
4978 +
4979 +#ifdef CONFIG_PAX_REFCOUNT
4980 + tvs %icc, 6
4981 +#endif
4982 +
4983 cmp %g7, 0
4984 bl,pn %icc, 2f
4985 mov 0, %o1
4986 @@ -51,7 +61,12 @@ __down_write:
4987 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4988 1:
4989 lduw [%o0], %g3
4990 - add %g3, %g1, %g7
4991 + addcc %g3, %g1, %g7
4992 +
4993 +#ifdef CONFIG_PAX_REFCOUNT
4994 + tvs %icc, 6
4995 +#endif
4996 +
4997 cas [%o0], %g3, %g7
4998 cmp %g3, %g7
4999 bne,pn %icc, 1b
5000 @@ -77,7 +92,12 @@ __down_write_trylock:
5001 cmp %g3, 0
5002 bne,pn %icc, 2f
5003 mov 0, %o1
5004 - add %g3, %g1, %g7
5005 + addcc %g3, %g1, %g7
5006 +
5007 +#ifdef CONFIG_PAX_REFCOUNT
5008 + tvs %icc, 6
5009 +#endif
5010 +
5011 cas [%o0], %g3, %g7
5012 cmp %g3, %g7
5013 bne,pn %icc, 1b
5014 @@ -90,7 +110,12 @@ __down_write_trylock:
5015 __up_read:
5016 1:
5017 lduw [%o0], %g1
5018 - sub %g1, 1, %g7
5019 + subcc %g1, 1, %g7
5020 +
5021 +#ifdef CONFIG_PAX_REFCOUNT
5022 + tvs %icc, 6
5023 +#endif
5024 +
5025 cas [%o0], %g1, %g7
5026 cmp %g1, %g7
5027 bne,pn %icc, 1b
5028 @@ -118,7 +143,12 @@ __up_write:
5029 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5030 1:
5031 lduw [%o0], %g3
5032 - sub %g3, %g1, %g7
5033 + subcc %g3, %g1, %g7
5034 +
5035 +#ifdef CONFIG_PAX_REFCOUNT
5036 + tvs %icc, 6
5037 +#endif
5038 +
5039 cas [%o0], %g3, %g7
5040 cmp %g3, %g7
5041 bne,pn %icc, 1b
5042 @@ -143,7 +173,12 @@ __downgrade_write:
5043 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5044 1:
5045 lduw [%o0], %g3
5046 - sub %g3, %g1, %g7
5047 + subcc %g3, %g1, %g7
5048 +
5049 +#ifdef CONFIG_PAX_REFCOUNT
5050 + tvs %icc, 6
5051 +#endif
5052 +
5053 cas [%o0], %g3, %g7
5054 cmp %g3, %g7
5055 bne,pn %icc, 1b
5056 diff -urNp linux-2.6.32.42/arch/sparc/Makefile linux-2.6.32.42/arch/sparc/Makefile
5057 --- linux-2.6.32.42/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5058 +++ linux-2.6.32.42/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5059 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5060 # Export what is needed by arch/sparc/boot/Makefile
5061 export VMLINUX_INIT VMLINUX_MAIN
5062 VMLINUX_INIT := $(head-y) $(init-y)
5063 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5064 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5065 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5066 VMLINUX_MAIN += $(drivers-y) $(net-y)
5067
5068 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_32.c linux-2.6.32.42/arch/sparc/mm/fault_32.c
5069 --- linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5070 +++ linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5071 @@ -21,6 +21,9 @@
5072 #include <linux/interrupt.h>
5073 #include <linux/module.h>
5074 #include <linux/kdebug.h>
5075 +#include <linux/slab.h>
5076 +#include <linux/pagemap.h>
5077 +#include <linux/compiler.h>
5078
5079 #include <asm/system.h>
5080 #include <asm/page.h>
5081 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5082 return safe_compute_effective_address(regs, insn);
5083 }
5084
5085 +#ifdef CONFIG_PAX_PAGEEXEC
5086 +#ifdef CONFIG_PAX_DLRESOLVE
5087 +static void pax_emuplt_close(struct vm_area_struct *vma)
5088 +{
5089 + vma->vm_mm->call_dl_resolve = 0UL;
5090 +}
5091 +
5092 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5093 +{
5094 + unsigned int *kaddr;
5095 +
5096 + vmf->page = alloc_page(GFP_HIGHUSER);
5097 + if (!vmf->page)
5098 + return VM_FAULT_OOM;
5099 +
5100 + kaddr = kmap(vmf->page);
5101 + memset(kaddr, 0, PAGE_SIZE);
5102 + kaddr[0] = 0x9DE3BFA8U; /* save */
5103 + flush_dcache_page(vmf->page);
5104 + kunmap(vmf->page);
5105 + return VM_FAULT_MAJOR;
5106 +}
5107 +
5108 +static const struct vm_operations_struct pax_vm_ops = {
5109 + .close = pax_emuplt_close,
5110 + .fault = pax_emuplt_fault
5111 +};
5112 +
5113 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5114 +{
5115 + int ret;
5116 +
5117 + vma->vm_mm = current->mm;
5118 + vma->vm_start = addr;
5119 + vma->vm_end = addr + PAGE_SIZE;
5120 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5121 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5122 + vma->vm_ops = &pax_vm_ops;
5123 +
5124 + ret = insert_vm_struct(current->mm, vma);
5125 + if (ret)
5126 + return ret;
5127 +
5128 + ++current->mm->total_vm;
5129 + return 0;
5130 +}
5131 +#endif
5132 +
5133 +/*
5134 + * PaX: decide what to do with offenders (regs->pc = fault address)
5135 + *
5136 + * returns 1 when task should be killed
5137 + * 2 when patched PLT trampoline was detected
5138 + * 3 when unpatched PLT trampoline was detected
5139 + */
5140 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5141 +{
5142 +
5143 +#ifdef CONFIG_PAX_EMUPLT
5144 + int err;
5145 +
5146 + do { /* PaX: patched PLT emulation #1 */
5147 + unsigned int sethi1, sethi2, jmpl;
5148 +
5149 + err = get_user(sethi1, (unsigned int *)regs->pc);
5150 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5151 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5152 +
5153 + if (err)
5154 + break;
5155 +
5156 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5157 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5158 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5159 + {
5160 + unsigned int addr;
5161 +
5162 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5163 + addr = regs->u_regs[UREG_G1];
5164 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5165 + regs->pc = addr;
5166 + regs->npc = addr+4;
5167 + return 2;
5168 + }
5169 + } while (0);
5170 +
5171 + { /* PaX: patched PLT emulation #2 */
5172 + unsigned int ba;
5173 +
5174 + err = get_user(ba, (unsigned int *)regs->pc);
5175 +
5176 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5177 + unsigned int addr;
5178 +
5179 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5180 + regs->pc = addr;
5181 + regs->npc = addr+4;
5182 + return 2;
5183 + }
5184 + }
5185 +
5186 + do { /* PaX: patched PLT emulation #3 */
5187 + unsigned int sethi, jmpl, nop;
5188 +
5189 + err = get_user(sethi, (unsigned int *)regs->pc);
5190 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5191 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5192 +
5193 + if (err)
5194 + break;
5195 +
5196 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5197 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5198 + nop == 0x01000000U)
5199 + {
5200 + unsigned int addr;
5201 +
5202 + addr = (sethi & 0x003FFFFFU) << 10;
5203 + regs->u_regs[UREG_G1] = addr;
5204 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5205 + regs->pc = addr;
5206 + regs->npc = addr+4;
5207 + return 2;
5208 + }
5209 + } while (0);
5210 +
5211 + do { /* PaX: unpatched PLT emulation step 1 */
5212 + unsigned int sethi, ba, nop;
5213 +
5214 + err = get_user(sethi, (unsigned int *)regs->pc);
5215 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5216 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5217 +
5218 + if (err)
5219 + break;
5220 +
5221 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5222 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5223 + nop == 0x01000000U)
5224 + {
5225 + unsigned int addr, save, call;
5226 +
5227 + if ((ba & 0xFFC00000U) == 0x30800000U)
5228 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5229 + else
5230 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5231 +
5232 + err = get_user(save, (unsigned int *)addr);
5233 + err |= get_user(call, (unsigned int *)(addr+4));
5234 + err |= get_user(nop, (unsigned int *)(addr+8));
5235 + if (err)
5236 + break;
5237 +
5238 +#ifdef CONFIG_PAX_DLRESOLVE
5239 + if (save == 0x9DE3BFA8U &&
5240 + (call & 0xC0000000U) == 0x40000000U &&
5241 + nop == 0x01000000U)
5242 + {
5243 + struct vm_area_struct *vma;
5244 + unsigned long call_dl_resolve;
5245 +
5246 + down_read(&current->mm->mmap_sem);
5247 + call_dl_resolve = current->mm->call_dl_resolve;
5248 + up_read(&current->mm->mmap_sem);
5249 + if (likely(call_dl_resolve))
5250 + goto emulate;
5251 +
5252 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5253 +
5254 + down_write(&current->mm->mmap_sem);
5255 + if (current->mm->call_dl_resolve) {
5256 + call_dl_resolve = current->mm->call_dl_resolve;
5257 + up_write(&current->mm->mmap_sem);
5258 + if (vma)
5259 + kmem_cache_free(vm_area_cachep, vma);
5260 + goto emulate;
5261 + }
5262 +
5263 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5264 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5265 + up_write(&current->mm->mmap_sem);
5266 + if (vma)
5267 + kmem_cache_free(vm_area_cachep, vma);
5268 + return 1;
5269 + }
5270 +
5271 + if (pax_insert_vma(vma, call_dl_resolve)) {
5272 + up_write(&current->mm->mmap_sem);
5273 + kmem_cache_free(vm_area_cachep, vma);
5274 + return 1;
5275 + }
5276 +
5277 + current->mm->call_dl_resolve = call_dl_resolve;
5278 + up_write(&current->mm->mmap_sem);
5279 +
5280 +emulate:
5281 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5282 + regs->pc = call_dl_resolve;
5283 + regs->npc = addr+4;
5284 + return 3;
5285 + }
5286 +#endif
5287 +
5288 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5289 + if ((save & 0xFFC00000U) == 0x05000000U &&
5290 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5291 + nop == 0x01000000U)
5292 + {
5293 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5294 + regs->u_regs[UREG_G2] = addr + 4;
5295 + addr = (save & 0x003FFFFFU) << 10;
5296 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5297 + regs->pc = addr;
5298 + regs->npc = addr+4;
5299 + return 3;
5300 + }
5301 + }
5302 + } while (0);
5303 +
5304 + do { /* PaX: unpatched PLT emulation step 2 */
5305 + unsigned int save, call, nop;
5306 +
5307 + err = get_user(save, (unsigned int *)(regs->pc-4));
5308 + err |= get_user(call, (unsigned int *)regs->pc);
5309 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5310 + if (err)
5311 + break;
5312 +
5313 + if (save == 0x9DE3BFA8U &&
5314 + (call & 0xC0000000U) == 0x40000000U &&
5315 + nop == 0x01000000U)
5316 + {
5317 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5318 +
5319 + regs->u_regs[UREG_RETPC] = regs->pc;
5320 + regs->pc = dl_resolve;
5321 + regs->npc = dl_resolve+4;
5322 + return 3;
5323 + }
5324 + } while (0);
5325 +#endif
5326 +
5327 + return 1;
5328 +}
5329 +
5330 +void pax_report_insns(void *pc, void *sp)
5331 +{
5332 + unsigned long i;
5333 +
5334 + printk(KERN_ERR "PAX: bytes at PC: ");
5335 + for (i = 0; i < 8; i++) {
5336 + unsigned int c;
5337 + if (get_user(c, (unsigned int *)pc+i))
5338 + printk(KERN_CONT "???????? ");
5339 + else
5340 + printk(KERN_CONT "%08x ", c);
5341 + }
5342 + printk("\n");
5343 +}
5344 +#endif
5345 +
5346 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5347 unsigned long address)
5348 {
5349 @@ -231,6 +495,24 @@ good_area:
5350 if(!(vma->vm_flags & VM_WRITE))
5351 goto bad_area;
5352 } else {
5353 +
5354 +#ifdef CONFIG_PAX_PAGEEXEC
5355 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5356 + up_read(&mm->mmap_sem);
5357 + switch (pax_handle_fetch_fault(regs)) {
5358 +
5359 +#ifdef CONFIG_PAX_EMUPLT
5360 + case 2:
5361 + case 3:
5362 + return;
5363 +#endif
5364 +
5365 + }
5366 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5367 + do_group_exit(SIGKILL);
5368 + }
5369 +#endif
5370 +
5371 /* Allow reads even for write-only mappings */
5372 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5373 goto bad_area;
5374 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_64.c linux-2.6.32.42/arch/sparc/mm/fault_64.c
5375 --- linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5376 +++ linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5377 @@ -20,6 +20,9 @@
5378 #include <linux/kprobes.h>
5379 #include <linux/kdebug.h>
5380 #include <linux/percpu.h>
5381 +#include <linux/slab.h>
5382 +#include <linux/pagemap.h>
5383 +#include <linux/compiler.h>
5384
5385 #include <asm/page.h>
5386 #include <asm/pgtable.h>
5387 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5388 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5389 regs->tpc);
5390 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5391 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5392 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5393 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5394 dump_stack();
5395 unhandled_fault(regs->tpc, current, regs);
5396 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5397 show_regs(regs);
5398 }
5399
5400 +#ifdef CONFIG_PAX_PAGEEXEC
5401 +#ifdef CONFIG_PAX_DLRESOLVE
5402 +static void pax_emuplt_close(struct vm_area_struct *vma)
5403 +{
5404 + vma->vm_mm->call_dl_resolve = 0UL;
5405 +}
5406 +
5407 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5408 +{
5409 + unsigned int *kaddr;
5410 +
5411 + vmf->page = alloc_page(GFP_HIGHUSER);
5412 + if (!vmf->page)
5413 + return VM_FAULT_OOM;
5414 +
5415 + kaddr = kmap(vmf->page);
5416 + memset(kaddr, 0, PAGE_SIZE);
5417 + kaddr[0] = 0x9DE3BFA8U; /* save */
5418 + flush_dcache_page(vmf->page);
5419 + kunmap(vmf->page);
5420 + return VM_FAULT_MAJOR;
5421 +}
5422 +
5423 +static const struct vm_operations_struct pax_vm_ops = {
5424 + .close = pax_emuplt_close,
5425 + .fault = pax_emuplt_fault
5426 +};
5427 +
5428 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5429 +{
5430 + int ret;
5431 +
5432 + vma->vm_mm = current->mm;
5433 + vma->vm_start = addr;
5434 + vma->vm_end = addr + PAGE_SIZE;
5435 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5436 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5437 + vma->vm_ops = &pax_vm_ops;
5438 +
5439 + ret = insert_vm_struct(current->mm, vma);
5440 + if (ret)
5441 + return ret;
5442 +
5443 + ++current->mm->total_vm;
5444 + return 0;
5445 +}
5446 +#endif
5447 +
5448 +/*
5449 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5450 + *
5451 + * returns 1 when task should be killed
5452 + * 2 when patched PLT trampoline was detected
5453 + * 3 when unpatched PLT trampoline was detected
5454 + */
5455 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5456 +{
5457 +
5458 +#ifdef CONFIG_PAX_EMUPLT
5459 + int err;
5460 +
5461 + do { /* PaX: patched PLT emulation #1 */
5462 + unsigned int sethi1, sethi2, jmpl;
5463 +
5464 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5465 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5466 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5467 +
5468 + if (err)
5469 + break;
5470 +
5471 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5472 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5473 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5474 + {
5475 + unsigned long addr;
5476 +
5477 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5478 + addr = regs->u_regs[UREG_G1];
5479 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5480 +
5481 + if (test_thread_flag(TIF_32BIT))
5482 + addr &= 0xFFFFFFFFUL;
5483 +
5484 + regs->tpc = addr;
5485 + regs->tnpc = addr+4;
5486 + return 2;
5487 + }
5488 + } while (0);
5489 +
5490 + { /* PaX: patched PLT emulation #2 */
5491 + unsigned int ba;
5492 +
5493 + err = get_user(ba, (unsigned int *)regs->tpc);
5494 +
5495 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5496 + unsigned long addr;
5497 +
5498 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5499 +
5500 + if (test_thread_flag(TIF_32BIT))
5501 + addr &= 0xFFFFFFFFUL;
5502 +
5503 + regs->tpc = addr;
5504 + regs->tnpc = addr+4;
5505 + return 2;
5506 + }
5507 + }
5508 +
5509 + do { /* PaX: patched PLT emulation #3 */
5510 + unsigned int sethi, jmpl, nop;
5511 +
5512 + err = get_user(sethi, (unsigned int *)regs->tpc);
5513 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5514 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5515 +
5516 + if (err)
5517 + break;
5518 +
5519 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5520 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5521 + nop == 0x01000000U)
5522 + {
5523 + unsigned long addr;
5524 +
5525 + addr = (sethi & 0x003FFFFFU) << 10;
5526 + regs->u_regs[UREG_G1] = addr;
5527 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5528 +
5529 + if (test_thread_flag(TIF_32BIT))
5530 + addr &= 0xFFFFFFFFUL;
5531 +
5532 + regs->tpc = addr;
5533 + regs->tnpc = addr+4;
5534 + return 2;
5535 + }
5536 + } while (0);
5537 +
5538 + do { /* PaX: patched PLT emulation #4 */
5539 + unsigned int sethi, mov1, call, mov2;
5540 +
5541 + err = get_user(sethi, (unsigned int *)regs->tpc);
5542 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5543 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5544 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5545 +
5546 + if (err)
5547 + break;
5548 +
5549 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5550 + mov1 == 0x8210000FU &&
5551 + (call & 0xC0000000U) == 0x40000000U &&
5552 + mov2 == 0x9E100001U)
5553 + {
5554 + unsigned long addr;
5555 +
5556 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5557 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5558 +
5559 + if (test_thread_flag(TIF_32BIT))
5560 + addr &= 0xFFFFFFFFUL;
5561 +
5562 + regs->tpc = addr;
5563 + regs->tnpc = addr+4;
5564 + return 2;
5565 + }
5566 + } while (0);
5567 +
5568 + do { /* PaX: patched PLT emulation #5 */
5569 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5570 +
5571 + err = get_user(sethi, (unsigned int *)regs->tpc);
5572 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5573 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5574 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5575 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5576 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5577 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5578 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5579 +
5580 + if (err)
5581 + break;
5582 +
5583 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5584 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5585 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5586 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5587 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5588 + sllx == 0x83287020U &&
5589 + jmpl == 0x81C04005U &&
5590 + nop == 0x01000000U)
5591 + {
5592 + unsigned long addr;
5593 +
5594 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5595 + regs->u_regs[UREG_G1] <<= 32;
5596 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5597 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5598 + regs->tpc = addr;
5599 + regs->tnpc = addr+4;
5600 + return 2;
5601 + }
5602 + } while (0);
5603 +
5604 + do { /* PaX: patched PLT emulation #6 */
5605 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5606 +
5607 + err = get_user(sethi, (unsigned int *)regs->tpc);
5608 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5609 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5610 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5611 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5612 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5613 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5614 +
5615 + if (err)
5616 + break;
5617 +
5618 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5619 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5620 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5621 + sllx == 0x83287020U &&
5622 + (or & 0xFFFFE000U) == 0x8A116000U &&
5623 + jmpl == 0x81C04005U &&
5624 + nop == 0x01000000U)
5625 + {
5626 + unsigned long addr;
5627 +
5628 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5629 + regs->u_regs[UREG_G1] <<= 32;
5630 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5631 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5632 + regs->tpc = addr;
5633 + regs->tnpc = addr+4;
5634 + return 2;
5635 + }
5636 + } while (0);
5637 +
5638 + do { /* PaX: unpatched PLT emulation step 1 */
5639 + unsigned int sethi, ba, nop;
5640 +
5641 + err = get_user(sethi, (unsigned int *)regs->tpc);
5642 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5643 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5644 +
5645 + if (err)
5646 + break;
5647 +
5648 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5649 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5650 + nop == 0x01000000U)
5651 + {
5652 + unsigned long addr;
5653 + unsigned int save, call;
5654 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5655 +
5656 + if ((ba & 0xFFC00000U) == 0x30800000U)
5657 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5658 + else
5659 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5660 +
5661 + if (test_thread_flag(TIF_32BIT))
5662 + addr &= 0xFFFFFFFFUL;
5663 +
5664 + err = get_user(save, (unsigned int *)addr);
5665 + err |= get_user(call, (unsigned int *)(addr+4));
5666 + err |= get_user(nop, (unsigned int *)(addr+8));
5667 + if (err)
5668 + break;
5669 +
5670 +#ifdef CONFIG_PAX_DLRESOLVE
5671 + if (save == 0x9DE3BFA8U &&
5672 + (call & 0xC0000000U) == 0x40000000U &&
5673 + nop == 0x01000000U)
5674 + {
5675 + struct vm_area_struct *vma;
5676 + unsigned long call_dl_resolve;
5677 +
5678 + down_read(&current->mm->mmap_sem);
5679 + call_dl_resolve = current->mm->call_dl_resolve;
5680 + up_read(&current->mm->mmap_sem);
5681 + if (likely(call_dl_resolve))
5682 + goto emulate;
5683 +
5684 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5685 +
5686 + down_write(&current->mm->mmap_sem);
5687 + if (current->mm->call_dl_resolve) {
5688 + call_dl_resolve = current->mm->call_dl_resolve;
5689 + up_write(&current->mm->mmap_sem);
5690 + if (vma)
5691 + kmem_cache_free(vm_area_cachep, vma);
5692 + goto emulate;
5693 + }
5694 +
5695 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5696 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5697 + up_write(&current->mm->mmap_sem);
5698 + if (vma)
5699 + kmem_cache_free(vm_area_cachep, vma);
5700 + return 1;
5701 + }
5702 +
5703 + if (pax_insert_vma(vma, call_dl_resolve)) {
5704 + up_write(&current->mm->mmap_sem);
5705 + kmem_cache_free(vm_area_cachep, vma);
5706 + return 1;
5707 + }
5708 +
5709 + current->mm->call_dl_resolve = call_dl_resolve;
5710 + up_write(&current->mm->mmap_sem);
5711 +
5712 +emulate:
5713 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5714 + regs->tpc = call_dl_resolve;
5715 + regs->tnpc = addr+4;
5716 + return 3;
5717 + }
5718 +#endif
5719 +
5720 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5721 + if ((save & 0xFFC00000U) == 0x05000000U &&
5722 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5723 + nop == 0x01000000U)
5724 + {
5725 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5726 + regs->u_regs[UREG_G2] = addr + 4;
5727 + addr = (save & 0x003FFFFFU) << 10;
5728 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5729 +
5730 + if (test_thread_flag(TIF_32BIT))
5731 + addr &= 0xFFFFFFFFUL;
5732 +
5733 + regs->tpc = addr;
5734 + regs->tnpc = addr+4;
5735 + return 3;
5736 + }
5737 +
5738 + /* PaX: 64-bit PLT stub */
5739 + err = get_user(sethi1, (unsigned int *)addr);
5740 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5741 + err |= get_user(or1, (unsigned int *)(addr+8));
5742 + err |= get_user(or2, (unsigned int *)(addr+12));
5743 + err |= get_user(sllx, (unsigned int *)(addr+16));
5744 + err |= get_user(add, (unsigned int *)(addr+20));
5745 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5746 + err |= get_user(nop, (unsigned int *)(addr+28));
5747 + if (err)
5748 + break;
5749 +
5750 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5751 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5752 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5753 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5754 + sllx == 0x89293020U &&
5755 + add == 0x8A010005U &&
5756 + jmpl == 0x89C14000U &&
5757 + nop == 0x01000000U)
5758 + {
5759 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5760 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5761 + regs->u_regs[UREG_G4] <<= 32;
5762 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5763 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5764 + regs->u_regs[UREG_G4] = addr + 24;
5765 + addr = regs->u_regs[UREG_G5];
5766 + regs->tpc = addr;
5767 + regs->tnpc = addr+4;
5768 + return 3;
5769 + }
5770 + }
5771 + } while (0);
5772 +
5773 +#ifdef CONFIG_PAX_DLRESOLVE
5774 + do { /* PaX: unpatched PLT emulation step 2 */
5775 + unsigned int save, call, nop;
5776 +
5777 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5778 + err |= get_user(call, (unsigned int *)regs->tpc);
5779 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5780 + if (err)
5781 + break;
5782 +
5783 + if (save == 0x9DE3BFA8U &&
5784 + (call & 0xC0000000U) == 0x40000000U &&
5785 + nop == 0x01000000U)
5786 + {
5787 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5788 +
5789 + if (test_thread_flag(TIF_32BIT))
5790 + dl_resolve &= 0xFFFFFFFFUL;
5791 +
5792 + regs->u_regs[UREG_RETPC] = regs->tpc;
5793 + regs->tpc = dl_resolve;
5794 + regs->tnpc = dl_resolve+4;
5795 + return 3;
5796 + }
5797 + } while (0);
5798 +#endif
5799 +
5800 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5801 + unsigned int sethi, ba, nop;
5802 +
5803 + err = get_user(sethi, (unsigned int *)regs->tpc);
5804 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5805 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5806 +
5807 + if (err)
5808 + break;
5809 +
5810 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5811 + (ba & 0xFFF00000U) == 0x30600000U &&
5812 + nop == 0x01000000U)
5813 + {
5814 + unsigned long addr;
5815 +
5816 + addr = (sethi & 0x003FFFFFU) << 10;
5817 + regs->u_regs[UREG_G1] = addr;
5818 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5819 +
5820 + if (test_thread_flag(TIF_32BIT))
5821 + addr &= 0xFFFFFFFFUL;
5822 +
5823 + regs->tpc = addr;
5824 + regs->tnpc = addr+4;
5825 + return 2;
5826 + }
5827 + } while (0);
5828 +
5829 +#endif
5830 +
5831 + return 1;
5832 +}
5833 +
5834 +void pax_report_insns(void *pc, void *sp)
5835 +{
5836 + unsigned long i;
5837 +
5838 + printk(KERN_ERR "PAX: bytes at PC: ");
5839 + for (i = 0; i < 8; i++) {
5840 + unsigned int c;
5841 + if (get_user(c, (unsigned int *)pc+i))
5842 + printk(KERN_CONT "???????? ");
5843 + else
5844 + printk(KERN_CONT "%08x ", c);
5845 + }
5846 + printk("\n");
5847 +}
5848 +#endif
5849 +
5850 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5851 {
5852 struct mm_struct *mm = current->mm;
5853 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5854 if (!vma)
5855 goto bad_area;
5856
5857 +#ifdef CONFIG_PAX_PAGEEXEC
5858 + /* PaX: detect ITLB misses on non-exec pages */
5859 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5860 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5861 + {
5862 + if (address != regs->tpc)
5863 + goto good_area;
5864 +
5865 + up_read(&mm->mmap_sem);
5866 + switch (pax_handle_fetch_fault(regs)) {
5867 +
5868 +#ifdef CONFIG_PAX_EMUPLT
5869 + case 2:
5870 + case 3:
5871 + return;
5872 +#endif
5873 +
5874 + }
5875 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5876 + do_group_exit(SIGKILL);
5877 + }
5878 +#endif
5879 +
5880 /* Pure DTLB misses do not tell us whether the fault causing
5881 * load/store/atomic was a write or not, it only says that there
5882 * was no match. So in such a case we (carefully) read the
5883 diff -urNp linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c
5884 --- linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5885 +++ linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5886 @@ -69,7 +69,7 @@ full_search:
5887 }
5888 return -ENOMEM;
5889 }
5890 - if (likely(!vma || addr + len <= vma->vm_start)) {
5891 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5892 /*
5893 * Remember the place where we stopped the search:
5894 */
5895 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5896 /* make sure it can fit in the remaining address space */
5897 if (likely(addr > len)) {
5898 vma = find_vma(mm, addr-len);
5899 - if (!vma || addr <= vma->vm_start) {
5900 + if (check_heap_stack_gap(vma, addr - len, len)) {
5901 /* remember the address as a hint for next time */
5902 return (mm->free_area_cache = addr-len);
5903 }
5904 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5905 if (unlikely(mm->mmap_base < len))
5906 goto bottomup;
5907
5908 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5909 + addr = mm->mmap_base - len;
5910
5911 do {
5912 + addr &= HPAGE_MASK;
5913 /*
5914 * Lookup failure means no vma is above this address,
5915 * else if new region fits below vma->vm_start,
5916 * return with success:
5917 */
5918 vma = find_vma(mm, addr);
5919 - if (likely(!vma || addr+len <= vma->vm_start)) {
5920 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5921 /* remember the address as a hint for next time */
5922 return (mm->free_area_cache = addr);
5923 }
5924 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5925 mm->cached_hole_size = vma->vm_start - addr;
5926
5927 /* try just below the current vma->vm_start */
5928 - addr = (vma->vm_start-len) & HPAGE_MASK;
5929 - } while (likely(len < vma->vm_start));
5930 + addr = skip_heap_stack_gap(vma, len);
5931 + } while (!IS_ERR_VALUE(addr));
5932
5933 bottomup:
5934 /*
5935 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5936 if (addr) {
5937 addr = ALIGN(addr, HPAGE_SIZE);
5938 vma = find_vma(mm, addr);
5939 - if (task_size - len >= addr &&
5940 - (!vma || addr + len <= vma->vm_start))
5941 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5942 return addr;
5943 }
5944 if (mm->get_unmapped_area == arch_get_unmapped_area)
5945 diff -urNp linux-2.6.32.42/arch/sparc/mm/init_32.c linux-2.6.32.42/arch/sparc/mm/init_32.c
5946 --- linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5947 +++ linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5948 @@ -317,6 +317,9 @@ extern void device_scan(void);
5949 pgprot_t PAGE_SHARED __read_mostly;
5950 EXPORT_SYMBOL(PAGE_SHARED);
5951
5952 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5953 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5954 +
5955 void __init paging_init(void)
5956 {
5957 switch(sparc_cpu_model) {
5958 @@ -345,17 +348,17 @@ void __init paging_init(void)
5959
5960 /* Initialize the protection map with non-constant, MMU dependent values. */
5961 protection_map[0] = PAGE_NONE;
5962 - protection_map[1] = PAGE_READONLY;
5963 - protection_map[2] = PAGE_COPY;
5964 - protection_map[3] = PAGE_COPY;
5965 + protection_map[1] = PAGE_READONLY_NOEXEC;
5966 + protection_map[2] = PAGE_COPY_NOEXEC;
5967 + protection_map[3] = PAGE_COPY_NOEXEC;
5968 protection_map[4] = PAGE_READONLY;
5969 protection_map[5] = PAGE_READONLY;
5970 protection_map[6] = PAGE_COPY;
5971 protection_map[7] = PAGE_COPY;
5972 protection_map[8] = PAGE_NONE;
5973 - protection_map[9] = PAGE_READONLY;
5974 - protection_map[10] = PAGE_SHARED;
5975 - protection_map[11] = PAGE_SHARED;
5976 + protection_map[9] = PAGE_READONLY_NOEXEC;
5977 + protection_map[10] = PAGE_SHARED_NOEXEC;
5978 + protection_map[11] = PAGE_SHARED_NOEXEC;
5979 protection_map[12] = PAGE_READONLY;
5980 protection_map[13] = PAGE_READONLY;
5981 protection_map[14] = PAGE_SHARED;
5982 diff -urNp linux-2.6.32.42/arch/sparc/mm/Makefile linux-2.6.32.42/arch/sparc/mm/Makefile
5983 --- linux-2.6.32.42/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5984 +++ linux-2.6.32.42/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5985 @@ -2,7 +2,7 @@
5986 #
5987
5988 asflags-y := -ansi
5989 -ccflags-y := -Werror
5990 +#ccflags-y := -Werror
5991
5992 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5993 obj-y += fault_$(BITS).o
5994 diff -urNp linux-2.6.32.42/arch/sparc/mm/srmmu.c linux-2.6.32.42/arch/sparc/mm/srmmu.c
5995 --- linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5996 +++ linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5997 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5998 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5999 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6000 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6001 +
6002 +#ifdef CONFIG_PAX_PAGEEXEC
6003 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6004 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6005 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6006 +#endif
6007 +
6008 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6009 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6010
6011 diff -urNp linux-2.6.32.42/arch/um/include/asm/kmap_types.h linux-2.6.32.42/arch/um/include/asm/kmap_types.h
6012 --- linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6013 +++ linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6014 @@ -23,6 +23,7 @@ enum km_type {
6015 KM_IRQ1,
6016 KM_SOFTIRQ0,
6017 KM_SOFTIRQ1,
6018 + KM_CLEARPAGE,
6019 KM_TYPE_NR
6020 };
6021
6022 diff -urNp linux-2.6.32.42/arch/um/include/asm/page.h linux-2.6.32.42/arch/um/include/asm/page.h
6023 --- linux-2.6.32.42/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6024 +++ linux-2.6.32.42/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6025 @@ -14,6 +14,9 @@
6026 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6027 #define PAGE_MASK (~(PAGE_SIZE-1))
6028
6029 +#define ktla_ktva(addr) (addr)
6030 +#define ktva_ktla(addr) (addr)
6031 +
6032 #ifndef __ASSEMBLY__
6033
6034 struct page;
6035 diff -urNp linux-2.6.32.42/arch/um/kernel/process.c linux-2.6.32.42/arch/um/kernel/process.c
6036 --- linux-2.6.32.42/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6037 +++ linux-2.6.32.42/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6038 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6039 return 2;
6040 }
6041
6042 -/*
6043 - * Only x86 and x86_64 have an arch_align_stack().
6044 - * All other arches have "#define arch_align_stack(x) (x)"
6045 - * in their asm/system.h
6046 - * As this is included in UML from asm-um/system-generic.h,
6047 - * we can use it to behave as the subarch does.
6048 - */
6049 -#ifndef arch_align_stack
6050 -unsigned long arch_align_stack(unsigned long sp)
6051 -{
6052 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6053 - sp -= get_random_int() % 8192;
6054 - return sp & ~0xf;
6055 -}
6056 -#endif
6057 -
6058 unsigned long get_wchan(struct task_struct *p)
6059 {
6060 unsigned long stack_page, sp, ip;
6061 diff -urNp linux-2.6.32.42/arch/um/sys-i386/syscalls.c linux-2.6.32.42/arch/um/sys-i386/syscalls.c
6062 --- linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6063 +++ linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6064 @@ -11,6 +11,21 @@
6065 #include "asm/uaccess.h"
6066 #include "asm/unistd.h"
6067
6068 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6069 +{
6070 + unsigned long pax_task_size = TASK_SIZE;
6071 +
6072 +#ifdef CONFIG_PAX_SEGMEXEC
6073 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6074 + pax_task_size = SEGMEXEC_TASK_SIZE;
6075 +#endif
6076 +
6077 + if (len > pax_task_size || addr > pax_task_size - len)
6078 + return -EINVAL;
6079 +
6080 + return 0;
6081 +}
6082 +
6083 /*
6084 * Perform the select(nd, in, out, ex, tv) and mmap() system
6085 * calls. Linux/i386 didn't use to be able to handle more than
6086 diff -urNp linux-2.6.32.42/arch/x86/boot/bitops.h linux-2.6.32.42/arch/x86/boot/bitops.h
6087 --- linux-2.6.32.42/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6088 +++ linux-2.6.32.42/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6089 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6090 u8 v;
6091 const u32 *p = (const u32 *)addr;
6092
6093 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6094 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6095 return v;
6096 }
6097
6098 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6099
6100 static inline void set_bit(int nr, void *addr)
6101 {
6102 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6103 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6104 }
6105
6106 #endif /* BOOT_BITOPS_H */
6107 diff -urNp linux-2.6.32.42/arch/x86/boot/boot.h linux-2.6.32.42/arch/x86/boot/boot.h
6108 --- linux-2.6.32.42/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6109 +++ linux-2.6.32.42/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6110 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6111 static inline u16 ds(void)
6112 {
6113 u16 seg;
6114 - asm("movw %%ds,%0" : "=rm" (seg));
6115 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6116 return seg;
6117 }
6118
6119 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6120 static inline int memcmp(const void *s1, const void *s2, size_t len)
6121 {
6122 u8 diff;
6123 - asm("repe; cmpsb; setnz %0"
6124 + asm volatile("repe; cmpsb; setnz %0"
6125 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6126 return diff;
6127 }
6128 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_32.S linux-2.6.32.42/arch/x86/boot/compressed/head_32.S
6129 --- linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6130 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6131 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6132 notl %eax
6133 andl %eax, %ebx
6134 #else
6135 - movl $LOAD_PHYSICAL_ADDR, %ebx
6136 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6137 #endif
6138
6139 /* Target address to relocate to for decompression */
6140 @@ -149,7 +149,7 @@ relocated:
6141 * and where it was actually loaded.
6142 */
6143 movl %ebp, %ebx
6144 - subl $LOAD_PHYSICAL_ADDR, %ebx
6145 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6146 jz 2f /* Nothing to be done if loaded at compiled addr. */
6147 /*
6148 * Process relocations.
6149 @@ -157,8 +157,7 @@ relocated:
6150
6151 1: subl $4, %edi
6152 movl (%edi), %ecx
6153 - testl %ecx, %ecx
6154 - jz 2f
6155 + jecxz 2f
6156 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6157 jmp 1b
6158 2:
6159 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_64.S linux-2.6.32.42/arch/x86/boot/compressed/head_64.S
6160 --- linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6161 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6162 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6163 notl %eax
6164 andl %eax, %ebx
6165 #else
6166 - movl $LOAD_PHYSICAL_ADDR, %ebx
6167 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6168 #endif
6169
6170 /* Target address to relocate to for decompression */
6171 @@ -183,7 +183,7 @@ no_longmode:
6172 hlt
6173 jmp 1b
6174
6175 -#include "../../kernel/verify_cpu_64.S"
6176 +#include "../../kernel/verify_cpu.S"
6177
6178 /*
6179 * Be careful here startup_64 needs to be at a predictable
6180 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6181 notq %rax
6182 andq %rax, %rbp
6183 #else
6184 - movq $LOAD_PHYSICAL_ADDR, %rbp
6185 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6186 #endif
6187
6188 /* Target address to relocate to for decompression */
6189 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/misc.c linux-2.6.32.42/arch/x86/boot/compressed/misc.c
6190 --- linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6191 +++ linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6192 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6193 case PT_LOAD:
6194 #ifdef CONFIG_RELOCATABLE
6195 dest = output;
6196 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6197 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6198 #else
6199 dest = (void *)(phdr->p_paddr);
6200 #endif
6201 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6202 error("Destination address too large");
6203 #endif
6204 #ifndef CONFIG_RELOCATABLE
6205 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6206 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6207 error("Wrong destination address");
6208 #endif
6209
6210 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c
6211 --- linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6212 +++ linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6213 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6214
6215 offs = (olen > ilen) ? olen - ilen : 0;
6216 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6217 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6218 + offs += 64*1024; /* Add 64K bytes slack */
6219 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6220
6221 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6222 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/relocs.c linux-2.6.32.42/arch/x86/boot/compressed/relocs.c
6223 --- linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6224 +++ linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6225 @@ -10,8 +10,11 @@
6226 #define USE_BSD
6227 #include <endian.h>
6228
6229 +#include "../../../../include/linux/autoconf.h"
6230 +
6231 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6232 static Elf32_Ehdr ehdr;
6233 +static Elf32_Phdr *phdr;
6234 static unsigned long reloc_count, reloc_idx;
6235 static unsigned long *relocs;
6236
6237 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6238
6239 static int is_safe_abs_reloc(const char* sym_name)
6240 {
6241 - int i;
6242 + unsigned int i;
6243
6244 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6245 if (!strcmp(sym_name, safe_abs_relocs[i]))
6246 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6247 }
6248 }
6249
6250 +static void read_phdrs(FILE *fp)
6251 +{
6252 + unsigned int i;
6253 +
6254 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6255 + if (!phdr) {
6256 + die("Unable to allocate %d program headers\n",
6257 + ehdr.e_phnum);
6258 + }
6259 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6260 + die("Seek to %d failed: %s\n",
6261 + ehdr.e_phoff, strerror(errno));
6262 + }
6263 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6264 + die("Cannot read ELF program headers: %s\n",
6265 + strerror(errno));
6266 + }
6267 + for(i = 0; i < ehdr.e_phnum; i++) {
6268 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6269 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6270 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6271 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6272 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6273 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6274 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6275 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6276 + }
6277 +
6278 +}
6279 +
6280 static void read_shdrs(FILE *fp)
6281 {
6282 - int i;
6283 + unsigned int i;
6284 Elf32_Shdr shdr;
6285
6286 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6287 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6288
6289 static void read_strtabs(FILE *fp)
6290 {
6291 - int i;
6292 + unsigned int i;
6293 for (i = 0; i < ehdr.e_shnum; i++) {
6294 struct section *sec = &secs[i];
6295 if (sec->shdr.sh_type != SHT_STRTAB) {
6296 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6297
6298 static void read_symtabs(FILE *fp)
6299 {
6300 - int i,j;
6301 + unsigned int i,j;
6302 for (i = 0; i < ehdr.e_shnum; i++) {
6303 struct section *sec = &secs[i];
6304 if (sec->shdr.sh_type != SHT_SYMTAB) {
6305 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6306
6307 static void read_relocs(FILE *fp)
6308 {
6309 - int i,j;
6310 + unsigned int i,j;
6311 + uint32_t base;
6312 +
6313 for (i = 0; i < ehdr.e_shnum; i++) {
6314 struct section *sec = &secs[i];
6315 if (sec->shdr.sh_type != SHT_REL) {
6316 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6317 die("Cannot read symbol table: %s\n",
6318 strerror(errno));
6319 }
6320 + base = 0;
6321 + for (j = 0; j < ehdr.e_phnum; j++) {
6322 + if (phdr[j].p_type != PT_LOAD )
6323 + continue;
6324 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6325 + continue;
6326 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6327 + break;
6328 + }
6329 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6330 Elf32_Rel *rel = &sec->reltab[j];
6331 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6332 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6333 rel->r_info = elf32_to_cpu(rel->r_info);
6334 }
6335 }
6336 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6337
6338 static void print_absolute_symbols(void)
6339 {
6340 - int i;
6341 + unsigned int i;
6342 printf("Absolute symbols\n");
6343 printf(" Num: Value Size Type Bind Visibility Name\n");
6344 for (i = 0; i < ehdr.e_shnum; i++) {
6345 struct section *sec = &secs[i];
6346 char *sym_strtab;
6347 Elf32_Sym *sh_symtab;
6348 - int j;
6349 + unsigned int j;
6350
6351 if (sec->shdr.sh_type != SHT_SYMTAB) {
6352 continue;
6353 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6354
6355 static void print_absolute_relocs(void)
6356 {
6357 - int i, printed = 0;
6358 + unsigned int i, printed = 0;
6359
6360 for (i = 0; i < ehdr.e_shnum; i++) {
6361 struct section *sec = &secs[i];
6362 struct section *sec_applies, *sec_symtab;
6363 char *sym_strtab;
6364 Elf32_Sym *sh_symtab;
6365 - int j;
6366 + unsigned int j;
6367 if (sec->shdr.sh_type != SHT_REL) {
6368 continue;
6369 }
6370 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6371
6372 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6373 {
6374 - int i;
6375 + unsigned int i;
6376 /* Walk through the relocations */
6377 for (i = 0; i < ehdr.e_shnum; i++) {
6378 char *sym_strtab;
6379 Elf32_Sym *sh_symtab;
6380 struct section *sec_applies, *sec_symtab;
6381 - int j;
6382 + unsigned int j;
6383 struct section *sec = &secs[i];
6384
6385 if (sec->shdr.sh_type != SHT_REL) {
6386 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6387 if (sym->st_shndx == SHN_ABS) {
6388 continue;
6389 }
6390 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6391 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6392 + continue;
6393 +
6394 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6395 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6396 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6397 + continue;
6398 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6399 + continue;
6400 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6401 + continue;
6402 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6403 + continue;
6404 +#endif
6405 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6406 /*
6407 * NONE can be ignored and and PC relative
6408 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6409
6410 static void emit_relocs(int as_text)
6411 {
6412 - int i;
6413 + unsigned int i;
6414 /* Count how many relocations I have and allocate space for them. */
6415 reloc_count = 0;
6416 walk_relocs(count_reloc);
6417 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6418 fname, strerror(errno));
6419 }
6420 read_ehdr(fp);
6421 + read_phdrs(fp);
6422 read_shdrs(fp);
6423 read_strtabs(fp);
6424 read_symtabs(fp);
6425 diff -urNp linux-2.6.32.42/arch/x86/boot/cpucheck.c linux-2.6.32.42/arch/x86/boot/cpucheck.c
6426 --- linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6427 +++ linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6428 @@ -74,7 +74,7 @@ static int has_fpu(void)
6429 u16 fcw = -1, fsw = -1;
6430 u32 cr0;
6431
6432 - asm("movl %%cr0,%0" : "=r" (cr0));
6433 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6434 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6435 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6436 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6437 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6438 {
6439 u32 f0, f1;
6440
6441 - asm("pushfl ; "
6442 + asm volatile("pushfl ; "
6443 "pushfl ; "
6444 "popl %0 ; "
6445 "movl %0,%1 ; "
6446 @@ -115,7 +115,7 @@ static void get_flags(void)
6447 set_bit(X86_FEATURE_FPU, cpu.flags);
6448
6449 if (has_eflag(X86_EFLAGS_ID)) {
6450 - asm("cpuid"
6451 + asm volatile("cpuid"
6452 : "=a" (max_intel_level),
6453 "=b" (cpu_vendor[0]),
6454 "=d" (cpu_vendor[1]),
6455 @@ -124,7 +124,7 @@ static void get_flags(void)
6456
6457 if (max_intel_level >= 0x00000001 &&
6458 max_intel_level <= 0x0000ffff) {
6459 - asm("cpuid"
6460 + asm volatile("cpuid"
6461 : "=a" (tfms),
6462 "=c" (cpu.flags[4]),
6463 "=d" (cpu.flags[0])
6464 @@ -136,7 +136,7 @@ static void get_flags(void)
6465 cpu.model += ((tfms >> 16) & 0xf) << 4;
6466 }
6467
6468 - asm("cpuid"
6469 + asm volatile("cpuid"
6470 : "=a" (max_amd_level)
6471 : "a" (0x80000000)
6472 : "ebx", "ecx", "edx");
6473 @@ -144,7 +144,7 @@ static void get_flags(void)
6474 if (max_amd_level >= 0x80000001 &&
6475 max_amd_level <= 0x8000ffff) {
6476 u32 eax = 0x80000001;
6477 - asm("cpuid"
6478 + asm volatile("cpuid"
6479 : "+a" (eax),
6480 "=c" (cpu.flags[6]),
6481 "=d" (cpu.flags[1])
6482 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6483 u32 ecx = MSR_K7_HWCR;
6484 u32 eax, edx;
6485
6486 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6487 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6488 eax &= ~(1 << 15);
6489 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6490 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6491
6492 get_flags(); /* Make sure it really did something */
6493 err = check_flags();
6494 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6495 u32 ecx = MSR_VIA_FCR;
6496 u32 eax, edx;
6497
6498 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6499 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6500 eax |= (1<<1)|(1<<7);
6501 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6502 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6503
6504 set_bit(X86_FEATURE_CX8, cpu.flags);
6505 err = check_flags();
6506 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6507 u32 eax, edx;
6508 u32 level = 1;
6509
6510 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6511 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6512 - asm("cpuid"
6513 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6514 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6515 + asm volatile("cpuid"
6516 : "+a" (level), "=d" (cpu.flags[0])
6517 : : "ecx", "ebx");
6518 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6519 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6520
6521 err = check_flags();
6522 }
6523 diff -urNp linux-2.6.32.42/arch/x86/boot/header.S linux-2.6.32.42/arch/x86/boot/header.S
6524 --- linux-2.6.32.42/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6525 +++ linux-2.6.32.42/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6526 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6527 # single linked list of
6528 # struct setup_data
6529
6530 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6531 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6532
6533 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6534 #define VO_INIT_SIZE (VO__end - VO__text)
6535 diff -urNp linux-2.6.32.42/arch/x86/boot/memory.c linux-2.6.32.42/arch/x86/boot/memory.c
6536 --- linux-2.6.32.42/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6537 +++ linux-2.6.32.42/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6538 @@ -19,7 +19,7 @@
6539
6540 static int detect_memory_e820(void)
6541 {
6542 - int count = 0;
6543 + unsigned int count = 0;
6544 struct biosregs ireg, oreg;
6545 struct e820entry *desc = boot_params.e820_map;
6546 static struct e820entry buf; /* static so it is zeroed */
6547 diff -urNp linux-2.6.32.42/arch/x86/boot/video.c linux-2.6.32.42/arch/x86/boot/video.c
6548 --- linux-2.6.32.42/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6549 +++ linux-2.6.32.42/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6550 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6551 static unsigned int get_entry(void)
6552 {
6553 char entry_buf[4];
6554 - int i, len = 0;
6555 + unsigned int i, len = 0;
6556 int key;
6557 unsigned int v;
6558
6559 diff -urNp linux-2.6.32.42/arch/x86/boot/video-vesa.c linux-2.6.32.42/arch/x86/boot/video-vesa.c
6560 --- linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6561 +++ linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6562 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6563
6564 boot_params.screen_info.vesapm_seg = oreg.es;
6565 boot_params.screen_info.vesapm_off = oreg.di;
6566 + boot_params.screen_info.vesapm_size = oreg.cx;
6567 }
6568
6569 /*
6570 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_aout.c linux-2.6.32.42/arch/x86/ia32/ia32_aout.c
6571 --- linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6572 +++ linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6573 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6574 unsigned long dump_start, dump_size;
6575 struct user32 dump;
6576
6577 + memset(&dump, 0, sizeof(dump));
6578 +
6579 fs = get_fs();
6580 set_fs(KERNEL_DS);
6581 has_dumped = 1;
6582 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6583 dump_size = dump.u_ssize << PAGE_SHIFT;
6584 DUMP_WRITE(dump_start, dump_size);
6585 }
6586 - /*
6587 - * Finally dump the task struct. Not be used by gdb, but
6588 - * could be useful
6589 - */
6590 - set_fs(KERNEL_DS);
6591 - DUMP_WRITE(current, sizeof(*current));
6592 end_coredump:
6593 set_fs(fs);
6594 return has_dumped;
6595 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32entry.S linux-2.6.32.42/arch/x86/ia32/ia32entry.S
6596 --- linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6597 +++ linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6598 @@ -13,6 +13,7 @@
6599 #include <asm/thread_info.h>
6600 #include <asm/segment.h>
6601 #include <asm/irqflags.h>
6602 +#include <asm/pgtable.h>
6603 #include <linux/linkage.h>
6604
6605 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6606 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6607 ENDPROC(native_irq_enable_sysexit)
6608 #endif
6609
6610 + .macro pax_enter_kernel_user
6611 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6612 + call pax_enter_kernel_user
6613 +#endif
6614 + .endm
6615 +
6616 + .macro pax_exit_kernel_user
6617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6618 + call pax_exit_kernel_user
6619 +#endif
6620 +#ifdef CONFIG_PAX_RANDKSTACK
6621 + pushq %rax
6622 + call pax_randomize_kstack
6623 + popq %rax
6624 +#endif
6625 + pax_erase_kstack
6626 + .endm
6627 +
6628 +.macro pax_erase_kstack
6629 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6630 + call pax_erase_kstack
6631 +#endif
6632 +.endm
6633 +
6634 /*
6635 * 32bit SYSENTER instruction entry.
6636 *
6637 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6638 CFI_REGISTER rsp,rbp
6639 SWAPGS_UNSAFE_STACK
6640 movq PER_CPU_VAR(kernel_stack), %rsp
6641 - addq $(KERNEL_STACK_OFFSET),%rsp
6642 + pax_enter_kernel_user
6643 /*
6644 * No need to follow this irqs on/off section: the syscall
6645 * disabled irqs, here we enable it straight after entry:
6646 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6647 pushfq
6648 CFI_ADJUST_CFA_OFFSET 8
6649 /*CFI_REL_OFFSET rflags,0*/
6650 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6651 + GET_THREAD_INFO(%r10)
6652 + movl TI_sysenter_return(%r10), %r10d
6653 CFI_REGISTER rip,r10
6654 pushq $__USER32_CS
6655 CFI_ADJUST_CFA_OFFSET 8
6656 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6657 SAVE_ARGS 0,0,1
6658 /* no need to do an access_ok check here because rbp has been
6659 32bit zero extended */
6660 +
6661 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6662 + mov $PAX_USER_SHADOW_BASE,%r10
6663 + add %r10,%rbp
6664 +#endif
6665 +
6666 1: movl (%rbp),%ebp
6667 .section __ex_table,"a"
6668 .quad 1b,ia32_badarg
6669 @@ -172,6 +204,7 @@ sysenter_dispatch:
6670 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6671 jnz sysexit_audit
6672 sysexit_from_sys_call:
6673 + pax_exit_kernel_user
6674 andl $~TS_COMPAT,TI_status(%r10)
6675 /* clear IF, that popfq doesn't enable interrupts early */
6676 andl $~0x200,EFLAGS-R11(%rsp)
6677 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6678 movl %eax,%esi /* 2nd arg: syscall number */
6679 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6680 call audit_syscall_entry
6681 +
6682 + pax_erase_kstack
6683 +
6684 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6685 cmpq $(IA32_NR_syscalls-1),%rax
6686 ja ia32_badsys
6687 @@ -252,6 +288,9 @@ sysenter_tracesys:
6688 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6689 movq %rsp,%rdi /* &pt_regs -> arg1 */
6690 call syscall_trace_enter
6691 +
6692 + pax_erase_kstack
6693 +
6694 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6695 RESTORE_REST
6696 cmpq $(IA32_NR_syscalls-1),%rax
6697 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6698 ENTRY(ia32_cstar_target)
6699 CFI_STARTPROC32 simple
6700 CFI_SIGNAL_FRAME
6701 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6702 + CFI_DEF_CFA rsp,0
6703 CFI_REGISTER rip,rcx
6704 /*CFI_REGISTER rflags,r11*/
6705 SWAPGS_UNSAFE_STACK
6706 movl %esp,%r8d
6707 CFI_REGISTER rsp,r8
6708 movq PER_CPU_VAR(kernel_stack),%rsp
6709 +
6710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6711 + pax_enter_kernel_user
6712 +#endif
6713 +
6714 /*
6715 * No need to follow this irqs on/off section: the syscall
6716 * disabled irqs and here we enable it straight after entry:
6717 */
6718 ENABLE_INTERRUPTS(CLBR_NONE)
6719 - SAVE_ARGS 8,1,1
6720 + SAVE_ARGS 8*6,1,1
6721 movl %eax,%eax /* zero extension */
6722 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6723 movq %rcx,RIP-ARGOFFSET(%rsp)
6724 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6725 /* no need to do an access_ok check here because r8 has been
6726 32bit zero extended */
6727 /* hardware stack frame is complete now */
6728 +
6729 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6730 + mov $PAX_USER_SHADOW_BASE,%r10
6731 + add %r10,%r8
6732 +#endif
6733 +
6734 1: movl (%r8),%r9d
6735 .section __ex_table,"a"
6736 .quad 1b,ia32_badarg
6737 @@ -333,6 +383,7 @@ cstar_dispatch:
6738 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6739 jnz sysretl_audit
6740 sysretl_from_sys_call:
6741 + pax_exit_kernel_user
6742 andl $~TS_COMPAT,TI_status(%r10)
6743 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6744 movl RIP-ARGOFFSET(%rsp),%ecx
6745 @@ -370,6 +421,9 @@ cstar_tracesys:
6746 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6747 movq %rsp,%rdi /* &pt_regs -> arg1 */
6748 call syscall_trace_enter
6749 +
6750 + pax_erase_kstack
6751 +
6752 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6753 RESTORE_REST
6754 xchgl %ebp,%r9d
6755 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6756 CFI_REL_OFFSET rip,RIP-RIP
6757 PARAVIRT_ADJUST_EXCEPTION_FRAME
6758 SWAPGS
6759 + pax_enter_kernel_user
6760 /*
6761 * No need to follow this irqs on/off section: the syscall
6762 * disabled irqs and here we enable it straight after entry:
6763 @@ -448,6 +503,9 @@ ia32_tracesys:
6764 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6765 movq %rsp,%rdi /* &pt_regs -> arg1 */
6766 call syscall_trace_enter
6767 +
6768 + pax_erase_kstack
6769 +
6770 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6771 RESTORE_REST
6772 cmpq $(IA32_NR_syscalls-1),%rax
6773 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_signal.c linux-2.6.32.42/arch/x86/ia32/ia32_signal.c
6774 --- linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6775 +++ linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6776 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6777 sp -= frame_size;
6778 /* Align the stack pointer according to the i386 ABI,
6779 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6780 - sp = ((sp + 4) & -16ul) - 4;
6781 + sp = ((sp - 12) & -16ul) - 4;
6782 return (void __user *) sp;
6783 }
6784
6785 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6786 * These are actually not used anymore, but left because some
6787 * gdb versions depend on them as a marker.
6788 */
6789 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6790 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6791 } put_user_catch(err);
6792
6793 if (err)
6794 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6795 0xb8,
6796 __NR_ia32_rt_sigreturn,
6797 0x80cd,
6798 - 0,
6799 + 0
6800 };
6801
6802 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6803 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6804
6805 if (ka->sa.sa_flags & SA_RESTORER)
6806 restorer = ka->sa.sa_restorer;
6807 + else if (current->mm->context.vdso)
6808 + /* Return stub is in 32bit vsyscall page */
6809 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6810 else
6811 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6812 - rt_sigreturn);
6813 + restorer = &frame->retcode;
6814 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6815
6816 /*
6817 * Not actually used anymore, but left because some gdb
6818 * versions need it.
6819 */
6820 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6821 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6822 } put_user_catch(err);
6823
6824 if (err)
6825 diff -urNp linux-2.6.32.42/arch/x86/include/asm/alternative.h linux-2.6.32.42/arch/x86/include/asm/alternative.h
6826 --- linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6827 +++ linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6828 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6829 " .byte 662b-661b\n" /* sourcelen */ \
6830 " .byte 664f-663f\n" /* replacementlen */ \
6831 ".previous\n" \
6832 - ".section .altinstr_replacement, \"ax\"\n" \
6833 + ".section .altinstr_replacement, \"a\"\n" \
6834 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6835 ".previous"
6836
6837 diff -urNp linux-2.6.32.42/arch/x86/include/asm/apm.h linux-2.6.32.42/arch/x86/include/asm/apm.h
6838 --- linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6839 +++ linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6840 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6841 __asm__ __volatile__(APM_DO_ZERO_SEGS
6842 "pushl %%edi\n\t"
6843 "pushl %%ebp\n\t"
6844 - "lcall *%%cs:apm_bios_entry\n\t"
6845 + "lcall *%%ss:apm_bios_entry\n\t"
6846 "setc %%al\n\t"
6847 "popl %%ebp\n\t"
6848 "popl %%edi\n\t"
6849 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6850 __asm__ __volatile__(APM_DO_ZERO_SEGS
6851 "pushl %%edi\n\t"
6852 "pushl %%ebp\n\t"
6853 - "lcall *%%cs:apm_bios_entry\n\t"
6854 + "lcall *%%ss:apm_bios_entry\n\t"
6855 "setc %%bl\n\t"
6856 "popl %%ebp\n\t"
6857 "popl %%edi\n\t"
6858 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_32.h linux-2.6.32.42/arch/x86/include/asm/atomic_32.h
6859 --- linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6860 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6861 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6862 }
6863
6864 /**
6865 + * atomic_read_unchecked - read atomic variable
6866 + * @v: pointer of type atomic_unchecked_t
6867 + *
6868 + * Atomically reads the value of @v.
6869 + */
6870 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6871 +{
6872 + return v->counter;
6873 +}
6874 +
6875 +/**
6876 * atomic_set - set atomic variable
6877 * @v: pointer of type atomic_t
6878 * @i: required value
6879 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6880 }
6881
6882 /**
6883 + * atomic_set_unchecked - set atomic variable
6884 + * @v: pointer of type atomic_unchecked_t
6885 + * @i: required value
6886 + *
6887 + * Atomically sets the value of @v to @i.
6888 + */
6889 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6890 +{
6891 + v->counter = i;
6892 +}
6893 +
6894 +/**
6895 * atomic_add - add integer to atomic variable
6896 * @i: integer value to add
6897 * @v: pointer of type atomic_t
6898 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6899 */
6900 static inline void atomic_add(int i, atomic_t *v)
6901 {
6902 - asm volatile(LOCK_PREFIX "addl %1,%0"
6903 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6904 +
6905 +#ifdef CONFIG_PAX_REFCOUNT
6906 + "jno 0f\n"
6907 + LOCK_PREFIX "subl %1,%0\n"
6908 + "int $4\n0:\n"
6909 + _ASM_EXTABLE(0b, 0b)
6910 +#endif
6911 +
6912 + : "+m" (v->counter)
6913 + : "ir" (i));
6914 +}
6915 +
6916 +/**
6917 + * atomic_add_unchecked - add integer to atomic variable
6918 + * @i: integer value to add
6919 + * @v: pointer of type atomic_unchecked_t
6920 + *
6921 + * Atomically adds @i to @v.
6922 + */
6923 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6924 +{
6925 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6926 : "+m" (v->counter)
6927 : "ir" (i));
6928 }
6929 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6930 */
6931 static inline void atomic_sub(int i, atomic_t *v)
6932 {
6933 - asm volatile(LOCK_PREFIX "subl %1,%0"
6934 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6935 +
6936 +#ifdef CONFIG_PAX_REFCOUNT
6937 + "jno 0f\n"
6938 + LOCK_PREFIX "addl %1,%0\n"
6939 + "int $4\n0:\n"
6940 + _ASM_EXTABLE(0b, 0b)
6941 +#endif
6942 +
6943 + : "+m" (v->counter)
6944 + : "ir" (i));
6945 +}
6946 +
6947 +/**
6948 + * atomic_sub_unchecked - subtract integer from atomic variable
6949 + * @i: integer value to subtract
6950 + * @v: pointer of type atomic_unchecked_t
6951 + *
6952 + * Atomically subtracts @i from @v.
6953 + */
6954 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6955 +{
6956 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6957 : "+m" (v->counter)
6958 : "ir" (i));
6959 }
6960 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6961 {
6962 unsigned char c;
6963
6964 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6965 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6966 +
6967 +#ifdef CONFIG_PAX_REFCOUNT
6968 + "jno 0f\n"
6969 + LOCK_PREFIX "addl %2,%0\n"
6970 + "int $4\n0:\n"
6971 + _ASM_EXTABLE(0b, 0b)
6972 +#endif
6973 +
6974 + "sete %1\n"
6975 : "+m" (v->counter), "=qm" (c)
6976 : "ir" (i) : "memory");
6977 return c;
6978 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6979 */
6980 static inline void atomic_inc(atomic_t *v)
6981 {
6982 - asm volatile(LOCK_PREFIX "incl %0"
6983 + asm volatile(LOCK_PREFIX "incl %0\n"
6984 +
6985 +#ifdef CONFIG_PAX_REFCOUNT
6986 + "jno 0f\n"
6987 + LOCK_PREFIX "decl %0\n"
6988 + "int $4\n0:\n"
6989 + _ASM_EXTABLE(0b, 0b)
6990 +#endif
6991 +
6992 + : "+m" (v->counter));
6993 +}
6994 +
6995 +/**
6996 + * atomic_inc_unchecked - increment atomic variable
6997 + * @v: pointer of type atomic_unchecked_t
6998 + *
6999 + * Atomically increments @v by 1.
7000 + */
7001 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7002 +{
7003 + asm volatile(LOCK_PREFIX "incl %0\n"
7004 : "+m" (v->counter));
7005 }
7006
7007 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7008 */
7009 static inline void atomic_dec(atomic_t *v)
7010 {
7011 - asm volatile(LOCK_PREFIX "decl %0"
7012 + asm volatile(LOCK_PREFIX "decl %0\n"
7013 +
7014 +#ifdef CONFIG_PAX_REFCOUNT
7015 + "jno 0f\n"
7016 + LOCK_PREFIX "incl %0\n"
7017 + "int $4\n0:\n"
7018 + _ASM_EXTABLE(0b, 0b)
7019 +#endif
7020 +
7021 + : "+m" (v->counter));
7022 +}
7023 +
7024 +/**
7025 + * atomic_dec_unchecked - decrement atomic variable
7026 + * @v: pointer of type atomic_unchecked_t
7027 + *
7028 + * Atomically decrements @v by 1.
7029 + */
7030 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7031 +{
7032 + asm volatile(LOCK_PREFIX "decl %0\n"
7033 : "+m" (v->counter));
7034 }
7035
7036 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7037 {
7038 unsigned char c;
7039
7040 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7041 + asm volatile(LOCK_PREFIX "decl %0\n"
7042 +
7043 +#ifdef CONFIG_PAX_REFCOUNT
7044 + "jno 0f\n"
7045 + LOCK_PREFIX "incl %0\n"
7046 + "int $4\n0:\n"
7047 + _ASM_EXTABLE(0b, 0b)
7048 +#endif
7049 +
7050 + "sete %1\n"
7051 : "+m" (v->counter), "=qm" (c)
7052 : : "memory");
7053 return c != 0;
7054 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7055 {
7056 unsigned char c;
7057
7058 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7059 + asm volatile(LOCK_PREFIX "incl %0\n"
7060 +
7061 +#ifdef CONFIG_PAX_REFCOUNT
7062 + "jno 0f\n"
7063 + LOCK_PREFIX "decl %0\n"
7064 + "into\n0:\n"
7065 + _ASM_EXTABLE(0b, 0b)
7066 +#endif
7067 +
7068 + "sete %1\n"
7069 + : "+m" (v->counter), "=qm" (c)
7070 + : : "memory");
7071 + return c != 0;
7072 +}
7073 +
7074 +/**
7075 + * atomic_inc_and_test_unchecked - increment and test
7076 + * @v: pointer of type atomic_unchecked_t
7077 + *
7078 + * Atomically increments @v by 1
7079 + * and returns true if the result is zero, or false for all
7080 + * other cases.
7081 + */
7082 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7083 +{
7084 + unsigned char c;
7085 +
7086 + asm volatile(LOCK_PREFIX "incl %0\n"
7087 + "sete %1\n"
7088 : "+m" (v->counter), "=qm" (c)
7089 : : "memory");
7090 return c != 0;
7091 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7092 {
7093 unsigned char c;
7094
7095 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7096 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7097 +
7098 +#ifdef CONFIG_PAX_REFCOUNT
7099 + "jno 0f\n"
7100 + LOCK_PREFIX "subl %2,%0\n"
7101 + "int $4\n0:\n"
7102 + _ASM_EXTABLE(0b, 0b)
7103 +#endif
7104 +
7105 + "sets %1\n"
7106 : "+m" (v->counter), "=qm" (c)
7107 : "ir" (i) : "memory");
7108 return c;
7109 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7110 #endif
7111 /* Modern 486+ processor */
7112 __i = i;
7113 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7114 +
7115 +#ifdef CONFIG_PAX_REFCOUNT
7116 + "jno 0f\n"
7117 + "movl %0, %1\n"
7118 + "int $4\n0:\n"
7119 + _ASM_EXTABLE(0b, 0b)
7120 +#endif
7121 +
7122 + : "+r" (i), "+m" (v->counter)
7123 + : : "memory");
7124 + return i + __i;
7125 +
7126 +#ifdef CONFIG_M386
7127 +no_xadd: /* Legacy 386 processor */
7128 + local_irq_save(flags);
7129 + __i = atomic_read(v);
7130 + atomic_set(v, i + __i);
7131 + local_irq_restore(flags);
7132 + return i + __i;
7133 +#endif
7134 +}
7135 +
7136 +/**
7137 + * atomic_add_return_unchecked - add integer and return
7138 + * @v: pointer of type atomic_unchecked_t
7139 + * @i: integer value to add
7140 + *
7141 + * Atomically adds @i to @v and returns @i + @v
7142 + */
7143 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7144 +{
7145 + int __i;
7146 +#ifdef CONFIG_M386
7147 + unsigned long flags;
7148 + if (unlikely(boot_cpu_data.x86 <= 3))
7149 + goto no_xadd;
7150 +#endif
7151 + /* Modern 486+ processor */
7152 + __i = i;
7153 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7154 : "+r" (i), "+m" (v->counter)
7155 : : "memory");
7156 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7157 return cmpxchg(&v->counter, old, new);
7158 }
7159
7160 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7161 +{
7162 + return cmpxchg(&v->counter, old, new);
7163 +}
7164 +
7165 static inline int atomic_xchg(atomic_t *v, int new)
7166 {
7167 return xchg(&v->counter, new);
7168 }
7169
7170 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7171 +{
7172 + return xchg(&v->counter, new);
7173 +}
7174 +
7175 /**
7176 * atomic_add_unless - add unless the number is already a given value
7177 * @v: pointer of type atomic_t
7178 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7179 */
7180 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7181 {
7182 - int c, old;
7183 + int c, old, new;
7184 c = atomic_read(v);
7185 for (;;) {
7186 - if (unlikely(c == (u)))
7187 + if (unlikely(c == u))
7188 break;
7189 - old = atomic_cmpxchg((v), c, c + (a));
7190 +
7191 + asm volatile("addl %2,%0\n"
7192 +
7193 +#ifdef CONFIG_PAX_REFCOUNT
7194 + "jno 0f\n"
7195 + "subl %2,%0\n"
7196 + "int $4\n0:\n"
7197 + _ASM_EXTABLE(0b, 0b)
7198 +#endif
7199 +
7200 + : "=r" (new)
7201 + : "0" (c), "ir" (a));
7202 +
7203 + old = atomic_cmpxchg(v, c, new);
7204 if (likely(old == c))
7205 break;
7206 c = old;
7207 }
7208 - return c != (u);
7209 + return c != u;
7210 }
7211
7212 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7213
7214 #define atomic_inc_return(v) (atomic_add_return(1, v))
7215 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7216 +{
7217 + return atomic_add_return_unchecked(1, v);
7218 +}
7219 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7220
7221 /* These are x86-specific, used by some header files */
7222 @@ -266,9 +495,18 @@ typedef struct {
7223 u64 __aligned(8) counter;
7224 } atomic64_t;
7225
7226 +#ifdef CONFIG_PAX_REFCOUNT
7227 +typedef struct {
7228 + u64 __aligned(8) counter;
7229 +} atomic64_unchecked_t;
7230 +#else
7231 +typedef atomic64_t atomic64_unchecked_t;
7232 +#endif
7233 +
7234 #define ATOMIC64_INIT(val) { (val) }
7235
7236 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7237 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7238
7239 /**
7240 * atomic64_xchg - xchg atomic64 variable
7241 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7242 * the old value.
7243 */
7244 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7245 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7246
7247 /**
7248 * atomic64_set - set atomic64 variable
7249 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7250 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7251
7252 /**
7253 + * atomic64_unchecked_set - set atomic64 variable
7254 + * @ptr: pointer to type atomic64_unchecked_t
7255 + * @new_val: value to assign
7256 + *
7257 + * Atomically sets the value of @ptr to @new_val.
7258 + */
7259 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7260 +
7261 +/**
7262 * atomic64_read - read atomic64 variable
7263 * @ptr: pointer to type atomic64_t
7264 *
7265 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7266 return res;
7267 }
7268
7269 -extern u64 atomic64_read(atomic64_t *ptr);
7270 +/**
7271 + * atomic64_read_unchecked - read atomic64 variable
7272 + * @ptr: pointer to type atomic64_unchecked_t
7273 + *
7274 + * Atomically reads the value of @ptr and returns it.
7275 + */
7276 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7277 +{
7278 + u64 res;
7279 +
7280 + /*
7281 + * Note, we inline this atomic64_unchecked_t primitive because
7282 + * it only clobbers EAX/EDX and leaves the others
7283 + * untouched. We also (somewhat subtly) rely on the
7284 + * fact that cmpxchg8b returns the current 64-bit value
7285 + * of the memory location we are touching:
7286 + */
7287 + asm volatile(
7288 + "mov %%ebx, %%eax\n\t"
7289 + "mov %%ecx, %%edx\n\t"
7290 + LOCK_PREFIX "cmpxchg8b %1\n"
7291 + : "=&A" (res)
7292 + : "m" (*ptr)
7293 + );
7294 +
7295 + return res;
7296 +}
7297
7298 /**
7299 * atomic64_add_return - add and return
7300 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7301 * Other variants with different arithmetic operators:
7302 */
7303 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7304 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7305 extern u64 atomic64_inc_return(atomic64_t *ptr);
7306 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7307 extern u64 atomic64_dec_return(atomic64_t *ptr);
7308 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7309
7310 /**
7311 * atomic64_add - add integer to atomic64 variable
7312 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7313 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7314
7315 /**
7316 + * atomic64_add_unchecked - add integer to atomic64 variable
7317 + * @delta: integer value to add
7318 + * @ptr: pointer to type atomic64_unchecked_t
7319 + *
7320 + * Atomically adds @delta to @ptr.
7321 + */
7322 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7323 +
7324 +/**
7325 * atomic64_sub - subtract the atomic64 variable
7326 * @delta: integer value to subtract
7327 * @ptr: pointer to type atomic64_t
7328 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7329 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7330
7331 /**
7332 + * atomic64_sub_unchecked - subtract the atomic64 variable
7333 + * @delta: integer value to subtract
7334 + * @ptr: pointer to type atomic64_unchecked_t
7335 + *
7336 + * Atomically subtracts @delta from @ptr.
7337 + */
7338 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7339 +
7340 +/**
7341 * atomic64_sub_and_test - subtract value from variable and test result
7342 * @delta: integer value to subtract
7343 * @ptr: pointer to type atomic64_t
7344 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7345 extern void atomic64_inc(atomic64_t *ptr);
7346
7347 /**
7348 + * atomic64_inc_unchecked - increment atomic64 variable
7349 + * @ptr: pointer to type atomic64_unchecked_t
7350 + *
7351 + * Atomically increments @ptr by 1.
7352 + */
7353 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7354 +
7355 +/**
7356 * atomic64_dec - decrement atomic64 variable
7357 * @ptr: pointer to type atomic64_t
7358 *
7359 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7360 extern void atomic64_dec(atomic64_t *ptr);
7361
7362 /**
7363 + * atomic64_dec_unchecked - decrement atomic64 variable
7364 + * @ptr: pointer to type atomic64_unchecked_t
7365 + *
7366 + * Atomically decrements @ptr by 1.
7367 + */
7368 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7369 +
7370 +/**
7371 * atomic64_dec_and_test - decrement and test
7372 * @ptr: pointer to type atomic64_t
7373 *
7374 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_64.h linux-2.6.32.42/arch/x86/include/asm/atomic_64.h
7375 --- linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7376 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7377 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7378 }
7379
7380 /**
7381 + * atomic_read_unchecked - read atomic variable
7382 + * @v: pointer of type atomic_unchecked_t
7383 + *
7384 + * Atomically reads the value of @v.
7385 + */
7386 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7387 +{
7388 + return v->counter;
7389 +}
7390 +
7391 +/**
7392 * atomic_set - set atomic variable
7393 * @v: pointer of type atomic_t
7394 * @i: required value
7395 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7396 }
7397
7398 /**
7399 + * atomic_set_unchecked - set atomic variable
7400 + * @v: pointer of type atomic_unchecked_t
7401 + * @i: required value
7402 + *
7403 + * Atomically sets the value of @v to @i.
7404 + */
7405 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7406 +{
7407 + v->counter = i;
7408 +}
7409 +
7410 +/**
7411 * atomic_add - add integer to atomic variable
7412 * @i: integer value to add
7413 * @v: pointer of type atomic_t
7414 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7415 */
7416 static inline void atomic_add(int i, atomic_t *v)
7417 {
7418 - asm volatile(LOCK_PREFIX "addl %1,%0"
7419 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7420 +
7421 +#ifdef CONFIG_PAX_REFCOUNT
7422 + "jno 0f\n"
7423 + LOCK_PREFIX "subl %1,%0\n"
7424 + "int $4\n0:\n"
7425 + _ASM_EXTABLE(0b, 0b)
7426 +#endif
7427 +
7428 + : "=m" (v->counter)
7429 + : "ir" (i), "m" (v->counter));
7430 +}
7431 +
7432 +/**
7433 + * atomic_add_unchecked - add integer to atomic variable
7434 + * @i: integer value to add
7435 + * @v: pointer of type atomic_unchecked_t
7436 + *
7437 + * Atomically adds @i to @v.
7438 + */
7439 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7440 +{
7441 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7442 : "=m" (v->counter)
7443 : "ir" (i), "m" (v->counter));
7444 }
7445 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7446 */
7447 static inline void atomic_sub(int i, atomic_t *v)
7448 {
7449 - asm volatile(LOCK_PREFIX "subl %1,%0"
7450 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7451 +
7452 +#ifdef CONFIG_PAX_REFCOUNT
7453 + "jno 0f\n"
7454 + LOCK_PREFIX "addl %1,%0\n"
7455 + "int $4\n0:\n"
7456 + _ASM_EXTABLE(0b, 0b)
7457 +#endif
7458 +
7459 + : "=m" (v->counter)
7460 + : "ir" (i), "m" (v->counter));
7461 +}
7462 +
7463 +/**
7464 + * atomic_sub_unchecked - subtract the atomic variable
7465 + * @i: integer value to subtract
7466 + * @v: pointer of type atomic_unchecked_t
7467 + *
7468 + * Atomically subtracts @i from @v.
7469 + */
7470 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7471 +{
7472 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7473 : "=m" (v->counter)
7474 : "ir" (i), "m" (v->counter));
7475 }
7476 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7477 {
7478 unsigned char c;
7479
7480 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7481 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7482 +
7483 +#ifdef CONFIG_PAX_REFCOUNT
7484 + "jno 0f\n"
7485 + LOCK_PREFIX "addl %2,%0\n"
7486 + "int $4\n0:\n"
7487 + _ASM_EXTABLE(0b, 0b)
7488 +#endif
7489 +
7490 + "sete %1\n"
7491 : "=m" (v->counter), "=qm" (c)
7492 : "ir" (i), "m" (v->counter) : "memory");
7493 return c;
7494 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7495 */
7496 static inline void atomic_inc(atomic_t *v)
7497 {
7498 - asm volatile(LOCK_PREFIX "incl %0"
7499 + asm volatile(LOCK_PREFIX "incl %0\n"
7500 +
7501 +#ifdef CONFIG_PAX_REFCOUNT
7502 + "jno 0f\n"
7503 + LOCK_PREFIX "decl %0\n"
7504 + "int $4\n0:\n"
7505 + _ASM_EXTABLE(0b, 0b)
7506 +#endif
7507 +
7508 + : "=m" (v->counter)
7509 + : "m" (v->counter));
7510 +}
7511 +
7512 +/**
7513 + * atomic_inc_unchecked - increment atomic variable
7514 + * @v: pointer of type atomic_unchecked_t
7515 + *
7516 + * Atomically increments @v by 1.
7517 + */
7518 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7519 +{
7520 + asm volatile(LOCK_PREFIX "incl %0\n"
7521 : "=m" (v->counter)
7522 : "m" (v->counter));
7523 }
7524 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7525 */
7526 static inline void atomic_dec(atomic_t *v)
7527 {
7528 - asm volatile(LOCK_PREFIX "decl %0"
7529 + asm volatile(LOCK_PREFIX "decl %0\n"
7530 +
7531 +#ifdef CONFIG_PAX_REFCOUNT
7532 + "jno 0f\n"
7533 + LOCK_PREFIX "incl %0\n"
7534 + "int $4\n0:\n"
7535 + _ASM_EXTABLE(0b, 0b)
7536 +#endif
7537 +
7538 + : "=m" (v->counter)
7539 + : "m" (v->counter));
7540 +}
7541 +
7542 +/**
7543 + * atomic_dec_unchecked - decrement atomic variable
7544 + * @v: pointer of type atomic_unchecked_t
7545 + *
7546 + * Atomically decrements @v by 1.
7547 + */
7548 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7549 +{
7550 + asm volatile(LOCK_PREFIX "decl %0\n"
7551 : "=m" (v->counter)
7552 : "m" (v->counter));
7553 }
7554 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7555 {
7556 unsigned char c;
7557
7558 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7559 + asm volatile(LOCK_PREFIX "decl %0\n"
7560 +
7561 +#ifdef CONFIG_PAX_REFCOUNT
7562 + "jno 0f\n"
7563 + LOCK_PREFIX "incl %0\n"
7564 + "int $4\n0:\n"
7565 + _ASM_EXTABLE(0b, 0b)
7566 +#endif
7567 +
7568 + "sete %1\n"
7569 : "=m" (v->counter), "=qm" (c)
7570 : "m" (v->counter) : "memory");
7571 return c != 0;
7572 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7573 {
7574 unsigned char c;
7575
7576 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7577 + asm volatile(LOCK_PREFIX "incl %0\n"
7578 +
7579 +#ifdef CONFIG_PAX_REFCOUNT
7580 + "jno 0f\n"
7581 + LOCK_PREFIX "decl %0\n"
7582 + "int $4\n0:\n"
7583 + _ASM_EXTABLE(0b, 0b)
7584 +#endif
7585 +
7586 + "sete %1\n"
7587 + : "=m" (v->counter), "=qm" (c)
7588 + : "m" (v->counter) : "memory");
7589 + return c != 0;
7590 +}
7591 +
7592 +/**
7593 + * atomic_inc_and_test_unchecked - increment and test
7594 + * @v: pointer of type atomic_unchecked_t
7595 + *
7596 + * Atomically increments @v by 1
7597 + * and returns true if the result is zero, or false for all
7598 + * other cases.
7599 + */
7600 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7601 +{
7602 + unsigned char c;
7603 +
7604 + asm volatile(LOCK_PREFIX "incl %0\n"
7605 + "sete %1\n"
7606 : "=m" (v->counter), "=qm" (c)
7607 : "m" (v->counter) : "memory");
7608 return c != 0;
7609 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7610 {
7611 unsigned char c;
7612
7613 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7614 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7615 +
7616 +#ifdef CONFIG_PAX_REFCOUNT
7617 + "jno 0f\n"
7618 + LOCK_PREFIX "subl %2,%0\n"
7619 + "int $4\n0:\n"
7620 + _ASM_EXTABLE(0b, 0b)
7621 +#endif
7622 +
7623 + "sets %1\n"
7624 : "=m" (v->counter), "=qm" (c)
7625 : "ir" (i), "m" (v->counter) : "memory");
7626 return c;
7627 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7628 static inline int atomic_add_return(int i, atomic_t *v)
7629 {
7630 int __i = i;
7631 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7632 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7633 +
7634 +#ifdef CONFIG_PAX_REFCOUNT
7635 + "jno 0f\n"
7636 + "movl %0, %1\n"
7637 + "int $4\n0:\n"
7638 + _ASM_EXTABLE(0b, 0b)
7639 +#endif
7640 +
7641 + : "+r" (i), "+m" (v->counter)
7642 + : : "memory");
7643 + return i + __i;
7644 +}
7645 +
7646 +/**
7647 + * atomic_add_return_unchecked - add and return
7648 + * @i: integer value to add
7649 + * @v: pointer of type atomic_unchecked_t
7650 + *
7651 + * Atomically adds @i to @v and returns @i + @v
7652 + */
7653 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7654 +{
7655 + int __i = i;
7656 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7657 : "+r" (i), "+m" (v->counter)
7658 : : "memory");
7659 return i + __i;
7660 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7661 }
7662
7663 #define atomic_inc_return(v) (atomic_add_return(1, v))
7664 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7665 +{
7666 + return atomic_add_return_unchecked(1, v);
7667 +}
7668 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7669
7670 /* The 64-bit atomic type */
7671 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7672 }
7673
7674 /**
7675 + * atomic64_read_unchecked - read atomic64 variable
7676 + * @v: pointer of type atomic64_unchecked_t
7677 + *
7678 + * Atomically reads the value of @v.
7679 + * Doesn't imply a read memory barrier.
7680 + */
7681 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7682 +{
7683 + return v->counter;
7684 +}
7685 +
7686 +/**
7687 * atomic64_set - set atomic64 variable
7688 * @v: pointer to type atomic64_t
7689 * @i: required value
7690 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7691 }
7692
7693 /**
7694 + * atomic64_set_unchecked - set atomic64 variable
7695 + * @v: pointer to type atomic64_unchecked_t
7696 + * @i: required value
7697 + *
7698 + * Atomically sets the value of @v to @i.
7699 + */
7700 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7701 +{
7702 + v->counter = i;
7703 +}
7704 +
7705 +/**
7706 * atomic64_add - add integer to atomic64 variable
7707 * @i: integer value to add
7708 * @v: pointer to type atomic64_t
7709 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7710 */
7711 static inline void atomic64_add(long i, atomic64_t *v)
7712 {
7713 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7714 +
7715 +#ifdef CONFIG_PAX_REFCOUNT
7716 + "jno 0f\n"
7717 + LOCK_PREFIX "subq %1,%0\n"
7718 + "int $4\n0:\n"
7719 + _ASM_EXTABLE(0b, 0b)
7720 +#endif
7721 +
7722 + : "=m" (v->counter)
7723 + : "er" (i), "m" (v->counter));
7724 +}
7725 +
7726 +/**
7727 + * atomic64_add_unchecked - add integer to atomic64 variable
7728 + * @i: integer value to add
7729 + * @v: pointer to type atomic64_unchecked_t
7730 + *
7731 + * Atomically adds @i to @v.
7732 + */
7733 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7734 +{
7735 asm volatile(LOCK_PREFIX "addq %1,%0"
7736 : "=m" (v->counter)
7737 : "er" (i), "m" (v->counter));
7738 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7739 */
7740 static inline void atomic64_sub(long i, atomic64_t *v)
7741 {
7742 - asm volatile(LOCK_PREFIX "subq %1,%0"
7743 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7744 +
7745 +#ifdef CONFIG_PAX_REFCOUNT
7746 + "jno 0f\n"
7747 + LOCK_PREFIX "addq %1,%0\n"
7748 + "int $4\n0:\n"
7749 + _ASM_EXTABLE(0b, 0b)
7750 +#endif
7751 +
7752 : "=m" (v->counter)
7753 : "er" (i), "m" (v->counter));
7754 }
7755 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7756 {
7757 unsigned char c;
7758
7759 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7760 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7761 +
7762 +#ifdef CONFIG_PAX_REFCOUNT
7763 + "jno 0f\n"
7764 + LOCK_PREFIX "addq %2,%0\n"
7765 + "int $4\n0:\n"
7766 + _ASM_EXTABLE(0b, 0b)
7767 +#endif
7768 +
7769 + "sete %1\n"
7770 : "=m" (v->counter), "=qm" (c)
7771 : "er" (i), "m" (v->counter) : "memory");
7772 return c;
7773 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7774 */
7775 static inline void atomic64_inc(atomic64_t *v)
7776 {
7777 + asm volatile(LOCK_PREFIX "incq %0\n"
7778 +
7779 +#ifdef CONFIG_PAX_REFCOUNT
7780 + "jno 0f\n"
7781 + LOCK_PREFIX "decq %0\n"
7782 + "int $4\n0:\n"
7783 + _ASM_EXTABLE(0b, 0b)
7784 +#endif
7785 +
7786 + : "=m" (v->counter)
7787 + : "m" (v->counter));
7788 +}
7789 +
7790 +/**
7791 + * atomic64_inc_unchecked - increment atomic64 variable
7792 + * @v: pointer to type atomic64_unchecked_t
7793 + *
7794 + * Atomically increments @v by 1.
7795 + */
7796 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7797 +{
7798 asm volatile(LOCK_PREFIX "incq %0"
7799 : "=m" (v->counter)
7800 : "m" (v->counter));
7801 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7802 */
7803 static inline void atomic64_dec(atomic64_t *v)
7804 {
7805 - asm volatile(LOCK_PREFIX "decq %0"
7806 + asm volatile(LOCK_PREFIX "decq %0\n"
7807 +
7808 +#ifdef CONFIG_PAX_REFCOUNT
7809 + "jno 0f\n"
7810 + LOCK_PREFIX "incq %0\n"
7811 + "int $4\n0:\n"
7812 + _ASM_EXTABLE(0b, 0b)
7813 +#endif
7814 +
7815 + : "=m" (v->counter)
7816 + : "m" (v->counter));
7817 +}
7818 +
7819 +/**
7820 + * atomic64_dec_unchecked - decrement atomic64 variable
7821 + * @v: pointer to type atomic64_t
7822 + *
7823 + * Atomically decrements @v by 1.
7824 + */
7825 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7826 +{
7827 + asm volatile(LOCK_PREFIX "decq %0\n"
7828 : "=m" (v->counter)
7829 : "m" (v->counter));
7830 }
7831 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7832 {
7833 unsigned char c;
7834
7835 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7836 + asm volatile(LOCK_PREFIX "decq %0\n"
7837 +
7838 +#ifdef CONFIG_PAX_REFCOUNT
7839 + "jno 0f\n"
7840 + LOCK_PREFIX "incq %0\n"
7841 + "int $4\n0:\n"
7842 + _ASM_EXTABLE(0b, 0b)
7843 +#endif
7844 +
7845 + "sete %1\n"
7846 : "=m" (v->counter), "=qm" (c)
7847 : "m" (v->counter) : "memory");
7848 return c != 0;
7849 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7850 {
7851 unsigned char c;
7852
7853 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7854 + asm volatile(LOCK_PREFIX "incq %0\n"
7855 +
7856 +#ifdef CONFIG_PAX_REFCOUNT
7857 + "jno 0f\n"
7858 + LOCK_PREFIX "decq %0\n"
7859 + "int $4\n0:\n"
7860 + _ASM_EXTABLE(0b, 0b)
7861 +#endif
7862 +
7863 + "sete %1\n"
7864 : "=m" (v->counter), "=qm" (c)
7865 : "m" (v->counter) : "memory");
7866 return c != 0;
7867 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7868 {
7869 unsigned char c;
7870
7871 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7872 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7873 +
7874 +#ifdef CONFIG_PAX_REFCOUNT
7875 + "jno 0f\n"
7876 + LOCK_PREFIX "subq %2,%0\n"
7877 + "int $4\n0:\n"
7878 + _ASM_EXTABLE(0b, 0b)
7879 +#endif
7880 +
7881 + "sets %1\n"
7882 : "=m" (v->counter), "=qm" (c)
7883 : "er" (i), "m" (v->counter) : "memory");
7884 return c;
7885 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7886 static inline long atomic64_add_return(long i, atomic64_t *v)
7887 {
7888 long __i = i;
7889 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7890 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7891 +
7892 +#ifdef CONFIG_PAX_REFCOUNT
7893 + "jno 0f\n"
7894 + "movq %0, %1\n"
7895 + "int $4\n0:\n"
7896 + _ASM_EXTABLE(0b, 0b)
7897 +#endif
7898 +
7899 + : "+r" (i), "+m" (v->counter)
7900 + : : "memory");
7901 + return i + __i;
7902 +}
7903 +
7904 +/**
7905 + * atomic64_add_return_unchecked - add and return
7906 + * @i: integer value to add
7907 + * @v: pointer to type atomic64_unchecked_t
7908 + *
7909 + * Atomically adds @i to @v and returns @i + @v
7910 + */
7911 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7912 +{
7913 + long __i = i;
7914 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7915 : "+r" (i), "+m" (v->counter)
7916 : : "memory");
7917 return i + __i;
7918 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7919 }
7920
7921 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7922 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7923 +{
7924 + return atomic64_add_return_unchecked(1, v);
7925 +}
7926 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7927
7928 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7929 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7930 return cmpxchg(&v->counter, old, new);
7931 }
7932
7933 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7934 +{
7935 + return cmpxchg(&v->counter, old, new);
7936 +}
7937 +
7938 static inline long atomic64_xchg(atomic64_t *v, long new)
7939 {
7940 return xchg(&v->counter, new);
7941 }
7942
7943 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7944 +{
7945 + return xchg(&v->counter, new);
7946 +}
7947 +
7948 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7949 {
7950 return cmpxchg(&v->counter, old, new);
7951 }
7952
7953 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7954 +{
7955 + return cmpxchg(&v->counter, old, new);
7956 +}
7957 +
7958 static inline long atomic_xchg(atomic_t *v, int new)
7959 {
7960 return xchg(&v->counter, new);
7961 }
7962
7963 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7964 +{
7965 + return xchg(&v->counter, new);
7966 +}
7967 +
7968 /**
7969 * atomic_add_unless - add unless the number is a given value
7970 * @v: pointer of type atomic_t
7971 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7972 */
7973 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7974 {
7975 - int c, old;
7976 + int c, old, new;
7977 c = atomic_read(v);
7978 for (;;) {
7979 - if (unlikely(c == (u)))
7980 + if (unlikely(c == u))
7981 break;
7982 - old = atomic_cmpxchg((v), c, c + (a));
7983 +
7984 + asm volatile("addl %2,%0\n"
7985 +
7986 +#ifdef CONFIG_PAX_REFCOUNT
7987 + "jno 0f\n"
7988 + "subl %2,%0\n"
7989 + "int $4\n0:\n"
7990 + _ASM_EXTABLE(0b, 0b)
7991 +#endif
7992 +
7993 + : "=r" (new)
7994 + : "0" (c), "ir" (a));
7995 +
7996 + old = atomic_cmpxchg(v, c, new);
7997 if (likely(old == c))
7998 break;
7999 c = old;
8000 }
8001 - return c != (u);
8002 + return c != u;
8003 }
8004
8005 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8006 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8007 */
8008 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8009 {
8010 - long c, old;
8011 + long c, old, new;
8012 c = atomic64_read(v);
8013 for (;;) {
8014 - if (unlikely(c == (u)))
8015 + if (unlikely(c == u))
8016 break;
8017 - old = atomic64_cmpxchg((v), c, c + (a));
8018 +
8019 + asm volatile("addq %2,%0\n"
8020 +
8021 +#ifdef CONFIG_PAX_REFCOUNT
8022 + "jno 0f\n"
8023 + "subq %2,%0\n"
8024 + "int $4\n0:\n"
8025 + _ASM_EXTABLE(0b, 0b)
8026 +#endif
8027 +
8028 + : "=r" (new)
8029 + : "0" (c), "er" (a));
8030 +
8031 + old = atomic64_cmpxchg(v, c, new);
8032 if (likely(old == c))
8033 break;
8034 c = old;
8035 }
8036 - return c != (u);
8037 + return c != u;
8038 }
8039
8040 /**
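Every REFCOUNT hunk above follows the same shape: do the locked arithmetic, "jno" past a recovery sequence on the no-overflow path, otherwise undo the operation and raise "int $4" (the overflow exception) so the kernel can report the would-be wrap. A minimal user-space analogue of that pattern, assuming x86 and GCC/Clang inline asm; it returns a flag instead of trapping and is an illustration only, not part of the patch:

#include <limits.h>
#include <stdio.h>

/* Analogue of the PAX_REFCOUNT sequence: locked add, remember OF, and
 * undo the add if the signed result overflowed.  The kernel raises
 * "int $4" and recovers via _ASM_EXTABLE; here we just return a flag. */
static unsigned char checked_atomic_add(int i, volatile int *counter)
{
	unsigned char overflowed;

	asm volatile("lock addl %2, %0\n\t"
		     "seto %1\n\t"		/* SETcc does not change EFLAGS */
		     "jno 1f\n\t"
		     "lock subl %2, %0\n"	/* undo, as the hunks above do */
		     "1:"
		     : "+m" (*counter), "=q" (overflowed)
		     : "ir" (i)
		     : "memory", "cc");
	return overflowed;
}

int main(void)
{
	int refs = INT_MAX;

	if (checked_atomic_add(1, &refs))
		printf("overflow caught, counter restored to %d\n", refs);
	return 0;
}

The *_unchecked variants added alongside simply keep the original, trap-free instruction sequence for counters (statistics, sequence numbers and the like) that are allowed to wrap.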
8041 diff -urNp linux-2.6.32.42/arch/x86/include/asm/bitops.h linux-2.6.32.42/arch/x86/include/asm/bitops.h
8042 --- linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8043 +++ linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8044 @@ -38,7 +38,7 @@
8045 * a mask operation on a byte.
8046 */
8047 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8048 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8049 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8050 #define CONST_MASK(nr) (1 << ((nr) & 7))
8051
8052 /**
8053 diff -urNp linux-2.6.32.42/arch/x86/include/asm/boot.h linux-2.6.32.42/arch/x86/include/asm/boot.h
8054 --- linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8055 +++ linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8056 @@ -11,10 +11,15 @@
8057 #include <asm/pgtable_types.h>
8058
8059 /* Physical address where kernel should be loaded. */
8060 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8061 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8062 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8063 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8064
8065 +#ifndef __ASSEMBLY__
8066 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8067 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8068 +#endif
8069 +
8070 /* Minimum kernel alignment, as a power of two */
8071 #ifdef CONFIG_X86_64
8072 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8073 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cacheflush.h linux-2.6.32.42/arch/x86/include/asm/cacheflush.h
8074 --- linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8075 +++ linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8076 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8077 static inline unsigned long get_page_memtype(struct page *pg)
8078 {
8079 if (!PageUncached(pg) && !PageWC(pg))
8080 - return -1;
8081 + return ~0UL;
8082 else if (!PageUncached(pg) && PageWC(pg))
8083 return _PAGE_CACHE_WC;
8084 else if (PageUncached(pg) && !PageWC(pg))
8085 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8086 SetPageWC(pg);
8087 break;
8088 default:
8089 - case -1:
8090 + case ~0UL:
8091 ClearPageUncached(pg);
8092 ClearPageWC(pg);
8093 break;
8094 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cache.h linux-2.6.32.42/arch/x86/include/asm/cache.h
8095 --- linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8096 +++ linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
8097 @@ -5,9 +5,10 @@
8098
8099 /* L1 cache line size */
8100 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8101 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8102 +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
8103
8104 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8105 +#define __read_only __attribute__((__section__(".data.read_only")))
8106
8107 #ifdef CONFIG_X86_VSMP
8108 /* vSMP Internode cacheline shift */
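The __read_only attribute introduced above only does the placement half of the job: it moves the object into a dedicated .data.read_only section, which other parts of the patch can then write-protect under KERNEXEC. A stand-alone illustration of the section placement, with the macro copied from the hunk; in plain user space the section is not actually protected:

#include <stdio.h>

#define __read_only __attribute__((__section__(".data.read_only")))

static int pax_setting __read_only = 1;	/* placed in .data.read_only */

int main(void)
{
	printf("pax_setting = %d\n", pax_setting);
	return 0;
}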
8109 diff -urNp linux-2.6.32.42/arch/x86/include/asm/checksum_32.h linux-2.6.32.42/arch/x86/include/asm/checksum_32.h
8110 --- linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8111 +++ linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8112 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8113 int len, __wsum sum,
8114 int *src_err_ptr, int *dst_err_ptr);
8115
8116 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8117 + int len, __wsum sum,
8118 + int *src_err_ptr, int *dst_err_ptr);
8119 +
8120 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8121 + int len, __wsum sum,
8122 + int *src_err_ptr, int *dst_err_ptr);
8123 +
8124 /*
8125 * Note: when you get a NULL pointer exception here this means someone
8126 * passed in an incorrect kernel address to one of these functions.
8127 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8128 int *err_ptr)
8129 {
8130 might_sleep();
8131 - return csum_partial_copy_generic((__force void *)src, dst,
8132 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8133 len, sum, err_ptr, NULL);
8134 }
8135
8136 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8137 {
8138 might_sleep();
8139 if (access_ok(VERIFY_WRITE, dst, len))
8140 - return csum_partial_copy_generic(src, (__force void *)dst,
8141 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8142 len, sum, NULL, err_ptr);
8143
8144 if (len)
8145 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc_defs.h linux-2.6.32.42/arch/x86/include/asm/desc_defs.h
8146 --- linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8147 +++ linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8148 @@ -31,6 +31,12 @@ struct desc_struct {
8149 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8150 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8151 };
8152 + struct {
8153 + u16 offset_low;
8154 + u16 seg;
8155 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8156 + unsigned offset_high: 16;
8157 + } gate;
8158 };
8159 } __attribute__((packed));
8160
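The new "gate" view lets pack_gate() (see the desc.h hunk further down) fill interrupt/trap gates through named fields instead of shifting bits into the old a/b words. A stand-alone copy of the layout, just to check that it still describes one 8-byte 32-bit gate (handler offset 0-15, segment selector, 16 bits of type/flags, handler offset 16-31); illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Copy of the gate bitfield layout added above. */
struct gate {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
	unsigned offset_high: 16;
} __attribute__((packed));

_Static_assert(sizeof(struct gate) == 8, "a gate descriptor is 8 bytes");

int main(void)
{
	struct gate g = { .offset_low = 0x5678, .seg = 0x10,
			  .type = 0xE /* 32-bit interrupt gate */,
			  .dpl = 0, .p = 1, .offset_high = 0x1234 };

	printf("handler = %#x\n", (g.offset_high << 16) | g.offset_low);
	return 0;
}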
8161 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc.h linux-2.6.32.42/arch/x86/include/asm/desc.h
8162 --- linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8163 +++ linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8164 @@ -4,6 +4,7 @@
8165 #include <asm/desc_defs.h>
8166 #include <asm/ldt.h>
8167 #include <asm/mmu.h>
8168 +#include <asm/pgtable.h>
8169 #include <linux/smp.h>
8170
8171 static inline void fill_ldt(struct desc_struct *desc,
8172 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8173 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8174 desc->type = (info->read_exec_only ^ 1) << 1;
8175 desc->type |= info->contents << 2;
8176 + desc->type |= info->seg_not_present ^ 1;
8177 desc->s = 1;
8178 desc->dpl = 0x3;
8179 desc->p = info->seg_not_present ^ 1;
8180 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8181 }
8182
8183 extern struct desc_ptr idt_descr;
8184 -extern gate_desc idt_table[];
8185 -
8186 -struct gdt_page {
8187 - struct desc_struct gdt[GDT_ENTRIES];
8188 -} __attribute__((aligned(PAGE_SIZE)));
8189 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8190 +extern gate_desc idt_table[256];
8191
8192 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8193 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8194 {
8195 - return per_cpu(gdt_page, cpu).gdt;
8196 + return cpu_gdt_table[cpu];
8197 }
8198
8199 #ifdef CONFIG_X86_64
8200 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8201 unsigned long base, unsigned dpl, unsigned flags,
8202 unsigned short seg)
8203 {
8204 - gate->a = (seg << 16) | (base & 0xffff);
8205 - gate->b = (base & 0xffff0000) |
8206 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8207 + gate->gate.offset_low = base;
8208 + gate->gate.seg = seg;
8209 + gate->gate.reserved = 0;
8210 + gate->gate.type = type;
8211 + gate->gate.s = 0;
8212 + gate->gate.dpl = dpl;
8213 + gate->gate.p = 1;
8214 + gate->gate.offset_high = base >> 16;
8215 }
8216
8217 #endif
8218 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8219 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8220 const gate_desc *gate)
8221 {
8222 + pax_open_kernel();
8223 memcpy(&idt[entry], gate, sizeof(*gate));
8224 + pax_close_kernel();
8225 }
8226
8227 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8228 const void *desc)
8229 {
8230 + pax_open_kernel();
8231 memcpy(&ldt[entry], desc, 8);
8232 + pax_close_kernel();
8233 }
8234
8235 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8236 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8237 size = sizeof(struct desc_struct);
8238 break;
8239 }
8240 +
8241 + pax_open_kernel();
8242 memcpy(&gdt[entry], desc, size);
8243 + pax_close_kernel();
8244 }
8245
8246 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8247 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8248
8249 static inline void native_load_tr_desc(void)
8250 {
8251 + pax_open_kernel();
8252 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8253 + pax_close_kernel();
8254 }
8255
8256 static inline void native_load_gdt(const struct desc_ptr *dtr)
8257 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8258 unsigned int i;
8259 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8260
8261 + pax_open_kernel();
8262 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8263 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8264 + pax_close_kernel();
8265 }
8266
8267 #define _LDT_empty(info) \
8268 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8269 desc->limit = (limit >> 16) & 0xf;
8270 }
8271
8272 -static inline void _set_gate(int gate, unsigned type, void *addr,
8273 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8274 unsigned dpl, unsigned ist, unsigned seg)
8275 {
8276 gate_desc s;
8277 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8278 * Pentium F0 0F bugfix can have resulted in the mapped
8279 * IDT being write-protected.
8280 */
8281 -static inline void set_intr_gate(unsigned int n, void *addr)
8282 +static inline void set_intr_gate(unsigned int n, const void *addr)
8283 {
8284 BUG_ON((unsigned)n > 0xFF);
8285 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8286 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8287 /*
8288 * This routine sets up an interrupt gate at directory privilege level 3.
8289 */
8290 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8291 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8292 {
8293 BUG_ON((unsigned)n > 0xFF);
8294 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8295 }
8296
8297 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8298 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8299 {
8300 BUG_ON((unsigned)n > 0xFF);
8301 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8302 }
8303
8304 -static inline void set_trap_gate(unsigned int n, void *addr)
8305 +static inline void set_trap_gate(unsigned int n, const void *addr)
8306 {
8307 BUG_ON((unsigned)n > 0xFF);
8308 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8309 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8310 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8311 {
8312 BUG_ON((unsigned)n > 0xFF);
8313 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8314 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8315 }
8316
8317 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8318 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8319 {
8320 BUG_ON((unsigned)n > 0xFF);
8321 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8322 }
8323
8324 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8325 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8326 {
8327 BUG_ON((unsigned)n > 0xFF);
8328 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8329 }
8330
8331 +#ifdef CONFIG_X86_32
8332 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8333 +{
8334 + struct desc_struct d;
8335 +
8336 + if (likely(limit))
8337 + limit = (limit - 1UL) >> PAGE_SHIFT;
8338 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8339 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8340 +}
8341 +#endif
8342 +
8343 #endif /* _ASM_X86_DESC_H */
8344 diff -urNp linux-2.6.32.42/arch/x86/include/asm/device.h linux-2.6.32.42/arch/x86/include/asm/device.h
8345 --- linux-2.6.32.42/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8346 +++ linux-2.6.32.42/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8347 @@ -6,7 +6,7 @@ struct dev_archdata {
8348 void *acpi_handle;
8349 #endif
8350 #ifdef CONFIG_X86_64
8351 -struct dma_map_ops *dma_ops;
8352 + const struct dma_map_ops *dma_ops;
8353 #endif
8354 #ifdef CONFIG_DMAR
8355 void *iommu; /* hook for IOMMU specific extension */
8356 diff -urNp linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h
8357 --- linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8358 +++ linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8359 @@ -25,9 +25,9 @@ extern int iommu_merge;
8360 extern struct device x86_dma_fallback_dev;
8361 extern int panic_on_overflow;
8362
8363 -extern struct dma_map_ops *dma_ops;
8364 +extern const struct dma_map_ops *dma_ops;
8365
8366 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8367 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8368 {
8369 #ifdef CONFIG_X86_32
8370 return dma_ops;
8371 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8372 /* Make sure we keep the same behaviour */
8373 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8374 {
8375 - struct dma_map_ops *ops = get_dma_ops(dev);
8376 + const struct dma_map_ops *ops = get_dma_ops(dev);
8377 if (ops->mapping_error)
8378 return ops->mapping_error(dev, dma_addr);
8379
8380 @@ -122,7 +122,7 @@ static inline void *
8381 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8382 gfp_t gfp)
8383 {
8384 - struct dma_map_ops *ops = get_dma_ops(dev);
8385 + const struct dma_map_ops *ops = get_dma_ops(dev);
8386 void *memory;
8387
8388 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8389 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8390 static inline void dma_free_coherent(struct device *dev, size_t size,
8391 void *vaddr, dma_addr_t bus)
8392 {
8393 - struct dma_map_ops *ops = get_dma_ops(dev);
8394 + const struct dma_map_ops *ops = get_dma_ops(dev);
8395
8396 WARN_ON(irqs_disabled()); /* for portability */
8397
8398 diff -urNp linux-2.6.32.42/arch/x86/include/asm/e820.h linux-2.6.32.42/arch/x86/include/asm/e820.h
8399 --- linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8400 +++ linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8401 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8402 #define ISA_END_ADDRESS 0x100000
8403 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8404
8405 -#define BIOS_BEGIN 0x000a0000
8406 +#define BIOS_BEGIN 0x000c0000
8407 #define BIOS_END 0x00100000
8408
8409 #ifdef __KERNEL__
8410 diff -urNp linux-2.6.32.42/arch/x86/include/asm/elf.h linux-2.6.32.42/arch/x86/include/asm/elf.h
8411 --- linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8412 +++ linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8413 @@ -257,7 +257,25 @@ extern int force_personality32;
8414 the loader. We need to make sure that it is out of the way of the program
8415 that it will "exec", and that there is sufficient room for the brk. */
8416
8417 +#ifdef CONFIG_PAX_SEGMEXEC
8418 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8419 +#else
8420 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8421 +#endif
8422 +
8423 +#ifdef CONFIG_PAX_ASLR
8424 +#ifdef CONFIG_X86_32
8425 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8426 +
8427 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8428 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8429 +#else
8430 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8431 +
8432 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8433 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8434 +#endif
8435 +#endif
8436
8437 /* This yields a mask that user programs can use to figure out what
8438 instruction set this CPU supports. This could be done in user space,
8439 @@ -311,8 +329,7 @@ do { \
8440 #define ARCH_DLINFO \
8441 do { \
8442 if (vdso_enabled) \
8443 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8444 - (unsigned long)current->mm->context.vdso); \
8445 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8446 } while (0)
8447
8448 #define AT_SYSINFO 32
8449 @@ -323,7 +340,7 @@ do { \
8450
8451 #endif /* !CONFIG_X86_32 */
8452
8453 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8454 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8455
8456 #define VDSO_ENTRY \
8457 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8458 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8459 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8460 #define compat_arch_setup_additional_pages syscall32_setup_pages
8461
8462 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8463 -#define arch_randomize_brk arch_randomize_brk
8464 -
8465 #endif /* _ASM_X86_ELF_H */
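PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are bit counts, not byte offsets. A back-of-the-envelope sketch of the resulting mmap-base variation, assuming the usual PaX convention that the delta is that many random bits applied at page (4 KiB) granularity and that TASK_SIZE_MAX_SHIFT is 47 on x86-64; the derived numbers are an illustration, not taken from the patch:

#include <stdio.h>

int main(void)
{
	/* 15/16 come straight from the 32-bit defines above; the 64-bit
	 * value is TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 = 47 - 12 - 3. */
	const unsigned bits[] = { 15, 16, 47 - 12 - 3 };
	const char *config[]  = { "i386 + SEGMEXEC", "i386", "x86-64" };

	for (unsigned i = 0; i < 3; i++)
		printf("%-16s %2u random bits -> %llu MiB of mmap-base variation\n",
		       config[i], bits[i], (1ULL << (bits[i] + 12)) >> 20);
	return 0;
}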
8466 diff -urNp linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h
8467 --- linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8468 +++ linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8469 @@ -15,6 +15,6 @@ enum reboot_type {
8470
8471 extern enum reboot_type reboot_type;
8472
8473 -extern void machine_emergency_restart(void);
8474 +extern void machine_emergency_restart(void) __noreturn;
8475
8476 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8477 diff -urNp linux-2.6.32.42/arch/x86/include/asm/futex.h linux-2.6.32.42/arch/x86/include/asm/futex.h
8478 --- linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8479 +++ linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8480 @@ -12,16 +12,18 @@
8481 #include <asm/system.h>
8482
8483 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8484 + typecheck(u32 *, uaddr); \
8485 asm volatile("1:\t" insn "\n" \
8486 "2:\t.section .fixup,\"ax\"\n" \
8487 "3:\tmov\t%3, %1\n" \
8488 "\tjmp\t2b\n" \
8489 "\t.previous\n" \
8490 _ASM_EXTABLE(1b, 3b) \
8491 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8492 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8493 : "i" (-EFAULT), "0" (oparg), "1" (0))
8494
8495 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8496 + typecheck(u32 *, uaddr); \
8497 asm volatile("1:\tmovl %2, %0\n" \
8498 "\tmovl\t%0, %3\n" \
8499 "\t" insn "\n" \
8500 @@ -34,10 +36,10 @@
8501 _ASM_EXTABLE(1b, 4b) \
8502 _ASM_EXTABLE(2b, 4b) \
8503 : "=&a" (oldval), "=&r" (ret), \
8504 - "+m" (*uaddr), "=&r" (tem) \
8505 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8506 : "r" (oparg), "i" (-EFAULT), "1" (0))
8507
8508 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8509 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8510 {
8511 int op = (encoded_op >> 28) & 7;
8512 int cmp = (encoded_op >> 24) & 15;
8513 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8514
8515 switch (op) {
8516 case FUTEX_OP_SET:
8517 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8518 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8519 break;
8520 case FUTEX_OP_ADD:
8521 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8522 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8523 uaddr, oparg);
8524 break;
8525 case FUTEX_OP_OR:
8526 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8527 return ret;
8528 }
8529
8530 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8531 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8532 int newval)
8533 {
8534
8535 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8536 return -ENOSYS;
8537 #endif
8538
8539 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8540 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8541 return -EFAULT;
8542
8543 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8544 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8545 "2:\t.section .fixup, \"ax\"\n"
8546 "3:\tmov %2, %0\n"
8547 "\tjmp 2b\n"
8548 "\t.previous\n"
8549 _ASM_EXTABLE(1b, 3b)
8550 - : "=a" (oldval), "+m" (*uaddr)
8551 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8552 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8553 : "memory"
8554 );
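The typecheck(u32 *, uaddr) statements added above make the futex macros refuse pointers of the wrong type at compile time while leaving the asm itself unchanged. A self-contained sketch of how that works with GCC/Clang; the macro body is reproduced here in the shape of the kernel's include/linux/typecheck.h so the example builds on its own:

#include <stdint.h>

#define typecheck(type, x) \
({	type __dummy; \
	__typeof__(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

int main(void)
{
	uint32_t word = 0;
	int not_a_u32 = 0;

	(void)typecheck(uint32_t *, &word);	/* ok: both sides are u32 * */
	(void)typecheck(uint32_t *, &not_a_u32);/* warns: distinct pointer types */
	return word + not_a_u32;
}

The second call is the deliberate failure case: with -Werror the mismatch stops the build, which is exactly what the added statements buy the futex code.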
8555 diff -urNp linux-2.6.32.42/arch/x86/include/asm/hw_irq.h linux-2.6.32.42/arch/x86/include/asm/hw_irq.h
8556 --- linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8557 +++ linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8558 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8559 extern void enable_IO_APIC(void);
8560
8561 /* Statistics */
8562 -extern atomic_t irq_err_count;
8563 -extern atomic_t irq_mis_count;
8564 +extern atomic_unchecked_t irq_err_count;
8565 +extern atomic_unchecked_t irq_mis_count;
8566
8567 /* EISA */
8568 extern void eisa_set_level_irq(unsigned int irq);
8569 diff -urNp linux-2.6.32.42/arch/x86/include/asm/i387.h linux-2.6.32.42/arch/x86/include/asm/i387.h
8570 --- linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8571 +++ linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8572 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8573 {
8574 int err;
8575
8576 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8577 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8578 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8579 +#endif
8580 +
8581 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8582 "2:\n"
8583 ".section .fixup,\"ax\"\n"
8584 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8585 {
8586 int err;
8587
8588 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8591 +#endif
8592 +
8593 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8594 "2:\n"
8595 ".section .fixup,\"ax\"\n"
8596 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8597 }
8598
8599 /* We need a safe address that is cheap to find and that is already
8600 - in L1 during context switch. The best choices are unfortunately
8601 - different for UP and SMP */
8602 -#ifdef CONFIG_SMP
8603 -#define safe_address (__per_cpu_offset[0])
8604 -#else
8605 -#define safe_address (kstat_cpu(0).cpustat.user)
8606 -#endif
8607 + in L1 during context switch. */
8608 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8609
8610 /*
8611 * These must be called with preempt disabled
8612 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8613 struct thread_info *me = current_thread_info();
8614 preempt_disable();
8615 if (me->status & TS_USEDFPU)
8616 - __save_init_fpu(me->task);
8617 + __save_init_fpu(current);
8618 else
8619 clts();
8620 }
8621 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_32.h linux-2.6.32.42/arch/x86/include/asm/io_32.h
8622 --- linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8623 +++ linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8624 @@ -3,6 +3,7 @@
8625
8626 #include <linux/string.h>
8627 #include <linux/compiler.h>
8628 +#include <asm/processor.h>
8629
8630 /*
8631 * This file contains the definitions for the x86 IO instructions
8632 @@ -42,6 +43,17 @@
8633
8634 #ifdef __KERNEL__
8635
8636 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8637 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8638 +{
8639 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8640 +}
8641 +
8642 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8643 +{
8644 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645 +}
8646 +
8647 #include <asm-generic/iomap.h>
8648
8649 #include <linux/vmalloc.h>
8650 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_64.h linux-2.6.32.42/arch/x86/include/asm/io_64.h
8651 --- linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8652 +++ linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8653 @@ -140,6 +140,17 @@ __OUTS(l)
8654
8655 #include <linux/vmalloc.h>
8656
8657 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8658 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8659 +{
8660 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8661 +}
8662 +
8663 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8664 +{
8665 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8666 +}
8667 +
8668 #include <asm-generic/iomap.h>
8669
8670 void __memcpy_fromio(void *, unsigned long, unsigned);
8671 diff -urNp linux-2.6.32.42/arch/x86/include/asm/iommu.h linux-2.6.32.42/arch/x86/include/asm/iommu.h
8672 --- linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8673 +++ linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8674 @@ -3,7 +3,7 @@
8675
8676 extern void pci_iommu_shutdown(void);
8677 extern void no_iommu_init(void);
8678 -extern struct dma_map_ops nommu_dma_ops;
8679 +extern const struct dma_map_ops nommu_dma_ops;
8680 extern int force_iommu, no_iommu;
8681 extern int iommu_detected;
8682 extern int iommu_pass_through;
8683 diff -urNp linux-2.6.32.42/arch/x86/include/asm/irqflags.h linux-2.6.32.42/arch/x86/include/asm/irqflags.h
8684 --- linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8685 +++ linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8686 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8687 sti; \
8688 sysexit
8689
8690 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8691 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8692 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8693 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8694 +
8695 #else
8696 #define INTERRUPT_RETURN iret
8697 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8698 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kprobes.h linux-2.6.32.42/arch/x86/include/asm/kprobes.h
8699 --- linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8700 +++ linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8701 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8702 #define BREAKPOINT_INSTRUCTION 0xcc
8703 #define RELATIVEJUMP_INSTRUCTION 0xe9
8704 #define MAX_INSN_SIZE 16
8705 -#define MAX_STACK_SIZE 64
8706 -#define MIN_STACK_SIZE(ADDR) \
8707 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8708 - THREAD_SIZE - (unsigned long)(ADDR))) \
8709 - ? (MAX_STACK_SIZE) \
8710 - : (((unsigned long)current_thread_info()) + \
8711 - THREAD_SIZE - (unsigned long)(ADDR)))
8712 +#define MAX_STACK_SIZE 64UL
8713 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8714
8715 #define flush_insn_slot(p) do { } while (0)
8716
8717 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kvm_host.h linux-2.6.32.42/arch/x86/include/asm/kvm_host.h
8718 --- linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8719 +++ linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8720 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8721 const struct trace_print_flags *exit_reasons_str;
8722 };
8723
8724 -extern struct kvm_x86_ops *kvm_x86_ops;
8725 +extern const struct kvm_x86_ops *kvm_x86_ops;
8726
8727 int kvm_mmu_module_init(void);
8728 void kvm_mmu_module_exit(void);
8729 diff -urNp linux-2.6.32.42/arch/x86/include/asm/local.h linux-2.6.32.42/arch/x86/include/asm/local.h
8730 --- linux-2.6.32.42/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8731 +++ linux-2.6.32.42/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8732 @@ -18,26 +18,58 @@ typedef struct {
8733
8734 static inline void local_inc(local_t *l)
8735 {
8736 - asm volatile(_ASM_INC "%0"
8737 + asm volatile(_ASM_INC "%0\n"
8738 +
8739 +#ifdef CONFIG_PAX_REFCOUNT
8740 + "jno 0f\n"
8741 + _ASM_DEC "%0\n"
8742 + "int $4\n0:\n"
8743 + _ASM_EXTABLE(0b, 0b)
8744 +#endif
8745 +
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_dec(local_t *l)
8750 {
8751 - asm volatile(_ASM_DEC "%0"
8752 + asm volatile(_ASM_DEC "%0\n"
8753 +
8754 +#ifdef CONFIG_PAX_REFCOUNT
8755 + "jno 0f\n"
8756 + _ASM_INC "%0\n"
8757 + "int $4\n0:\n"
8758 + _ASM_EXTABLE(0b, 0b)
8759 +#endif
8760 +
8761 : "+m" (l->a.counter));
8762 }
8763
8764 static inline void local_add(long i, local_t *l)
8765 {
8766 - asm volatile(_ASM_ADD "%1,%0"
8767 + asm volatile(_ASM_ADD "%1,%0\n"
8768 +
8769 +#ifdef CONFIG_PAX_REFCOUNT
8770 + "jno 0f\n"
8771 + _ASM_SUB "%1,%0\n"
8772 + "int $4\n0:\n"
8773 + _ASM_EXTABLE(0b, 0b)
8774 +#endif
8775 +
8776 : "+m" (l->a.counter)
8777 : "ir" (i));
8778 }
8779
8780 static inline void local_sub(long i, local_t *l)
8781 {
8782 - asm volatile(_ASM_SUB "%1,%0"
8783 + asm volatile(_ASM_SUB "%1,%0\n"
8784 +
8785 +#ifdef CONFIG_PAX_REFCOUNT
8786 + "jno 0f\n"
8787 + _ASM_ADD "%1,%0\n"
8788 + "int $4\n0:\n"
8789 + _ASM_EXTABLE(0b, 0b)
8790 +#endif
8791 +
8792 : "+m" (l->a.counter)
8793 : "ir" (i));
8794 }
8795 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8796 {
8797 unsigned char c;
8798
8799 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8800 + asm volatile(_ASM_SUB "%2,%0\n"
8801 +
8802 +#ifdef CONFIG_PAX_REFCOUNT
8803 + "jno 0f\n"
8804 + _ASM_ADD "%2,%0\n"
8805 + "int $4\n0:\n"
8806 + _ASM_EXTABLE(0b, 0b)
8807 +#endif
8808 +
8809 + "sete %1\n"
8810 : "+m" (l->a.counter), "=qm" (c)
8811 : "ir" (i) : "memory");
8812 return c;
8813 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8814 {
8815 unsigned char c;
8816
8817 - asm volatile(_ASM_DEC "%0; sete %1"
8818 + asm volatile(_ASM_DEC "%0\n"
8819 +
8820 +#ifdef CONFIG_PAX_REFCOUNT
8821 + "jno 0f\n"
8822 + _ASM_INC "%0\n"
8823 + "int $4\n0:\n"
8824 + _ASM_EXTABLE(0b, 0b)
8825 +#endif
8826 +
8827 + "sete %1\n"
8828 : "+m" (l->a.counter), "=qm" (c)
8829 : : "memory");
8830 return c != 0;
8831 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8832 {
8833 unsigned char c;
8834
8835 - asm volatile(_ASM_INC "%0; sete %1"
8836 + asm volatile(_ASM_INC "%0\n"
8837 +
8838 +#ifdef CONFIG_PAX_REFCOUNT
8839 + "jno 0f\n"
8840 + _ASM_DEC "%0\n"
8841 + "int $4\n0:\n"
8842 + _ASM_EXTABLE(0b, 0b)
8843 +#endif
8844 +
8845 + "sete %1\n"
8846 : "+m" (l->a.counter), "=qm" (c)
8847 : : "memory");
8848 return c != 0;
8849 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8850 {
8851 unsigned char c;
8852
8853 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8854 + asm volatile(_ASM_ADD "%2,%0\n"
8855 +
8856 +#ifdef CONFIG_PAX_REFCOUNT
8857 + "jno 0f\n"
8858 + _ASM_SUB "%2,%0\n"
8859 + "int $4\n0:\n"
8860 + _ASM_EXTABLE(0b, 0b)
8861 +#endif
8862 +
8863 + "sets %1\n"
8864 : "+m" (l->a.counter), "=qm" (c)
8865 : "ir" (i) : "memory");
8866 return c;
8867 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8868 #endif
8869 /* Modern 486+ processor */
8870 __i = i;
8871 - asm volatile(_ASM_XADD "%0, %1;"
8872 + asm volatile(_ASM_XADD "%0, %1\n"
8873 +
8874 +#ifdef CONFIG_PAX_REFCOUNT
8875 + "jno 0f\n"
8876 + _ASM_MOV "%0,%1\n"
8877 + "int $4\n0:\n"
8878 + _ASM_EXTABLE(0b, 0b)
8879 +#endif
8880 +
8881 : "+r" (i), "+m" (l->a.counter)
8882 : : "memory");
8883 return i + __i;
8884 diff -urNp linux-2.6.32.42/arch/x86/include/asm/microcode.h linux-2.6.32.42/arch/x86/include/asm/microcode.h
8885 --- linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8886 +++ linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8887 @@ -12,13 +12,13 @@ struct device;
8888 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8889
8890 struct microcode_ops {
8891 - enum ucode_state (*request_microcode_user) (int cpu,
8892 + enum ucode_state (* const request_microcode_user) (int cpu,
8893 const void __user *buf, size_t size);
8894
8895 - enum ucode_state (*request_microcode_fw) (int cpu,
8896 + enum ucode_state (* const request_microcode_fw) (int cpu,
8897 struct device *device);
8898
8899 - void (*microcode_fini_cpu) (int cpu);
8900 + void (* const microcode_fini_cpu) (int cpu);
8901
8902 /*
8903 * The generic 'microcode_core' part guarantees that
8904 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8905 extern struct ucode_cpu_info ucode_cpu_info[];
8906
8907 #ifdef CONFIG_MICROCODE_INTEL
8908 -extern struct microcode_ops * __init init_intel_microcode(void);
8909 +extern const struct microcode_ops * __init init_intel_microcode(void);
8910 #else
8911 -static inline struct microcode_ops * __init init_intel_microcode(void)
8912 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8913 {
8914 return NULL;
8915 }
8916 #endif /* CONFIG_MICROCODE_INTEL */
8917
8918 #ifdef CONFIG_MICROCODE_AMD
8919 -extern struct microcode_ops * __init init_amd_microcode(void);
8920 +extern const struct microcode_ops * __init init_amd_microcode(void);
8921 #else
8922 -static inline struct microcode_ops * __init init_amd_microcode(void)
8923 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8924 {
8925 return NULL;
8926 }
8927 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mman.h linux-2.6.32.42/arch/x86/include/asm/mman.h
8928 --- linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8929 +++ linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8930 @@ -5,4 +5,14 @@
8931
8932 #include <asm-generic/mman.h>
8933
8934 +#ifdef __KERNEL__
8935 +#ifndef __ASSEMBLY__
8936 +#ifdef CONFIG_X86_32
8937 +#define arch_mmap_check i386_mmap_check
8938 +int i386_mmap_check(unsigned long addr, unsigned long len,
8939 + unsigned long flags);
8940 +#endif
8941 +#endif
8942 +#endif
8943 +
8944 #endif /* _ASM_X86_MMAN_H */
8945 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu_context.h linux-2.6.32.42/arch/x86/include/asm/mmu_context.h
8946 --- linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8947 +++ linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8948 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8949
8950 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8951 {
8952 +
8953 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8954 + unsigned int i;
8955 + pgd_t *pgd;
8956 +
8957 + pax_open_kernel();
8958 + pgd = get_cpu_pgd(smp_processor_id());
8959 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8960 + if (paravirt_enabled())
8961 + set_pgd(pgd+i, native_make_pgd(0));
8962 + else
8963 + pgd[i] = native_make_pgd(0);
8964 + pax_close_kernel();
8965 +#endif
8966 +
8967 #ifdef CONFIG_SMP
8968 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8969 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8970 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8971 struct task_struct *tsk)
8972 {
8973 unsigned cpu = smp_processor_id();
8974 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8975 + int tlbstate = TLBSTATE_OK;
8976 +#endif
8977
8978 if (likely(prev != next)) {
8979 #ifdef CONFIG_SMP
8980 +#ifdef CONFIG_X86_32
8981 + tlbstate = percpu_read(cpu_tlbstate.state);
8982 +#endif
8983 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8984 percpu_write(cpu_tlbstate.active_mm, next);
8985 #endif
8986 cpumask_set_cpu(cpu, mm_cpumask(next));
8987
8988 /* Re-load page tables */
8989 +#ifdef CONFIG_PAX_PER_CPU_PGD
8990 + pax_open_kernel();
8991 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8992 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8993 + pax_close_kernel();
8994 + load_cr3(get_cpu_pgd(cpu));
8995 +#else
8996 load_cr3(next->pgd);
8997 +#endif
8998
8999 /* stop flush ipis for the previous mm */
9000 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9001 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9002 */
9003 if (unlikely(prev->context.ldt != next->context.ldt))
9004 load_LDT_nolock(&next->context);
9005 - }
9006 +
9007 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9008 + if (!nx_enabled) {
9009 + smp_mb__before_clear_bit();
9010 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9011 + smp_mb__after_clear_bit();
9012 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9013 + }
9014 +#endif
9015 +
9016 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9017 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9018 + prev->context.user_cs_limit != next->context.user_cs_limit))
9019 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9020 #ifdef CONFIG_SMP
9021 + else if (unlikely(tlbstate != TLBSTATE_OK))
9022 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9023 +#endif
9024 +#endif
9025 +
9026 + }
9027 else {
9028 +
9029 +#ifdef CONFIG_PAX_PER_CPU_PGD
9030 + pax_open_kernel();
9031 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9032 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9033 + pax_close_kernel();
9034 + load_cr3(get_cpu_pgd(cpu));
9035 +#endif
9036 +
9037 +#ifdef CONFIG_SMP
9038 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9039 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9040
9041 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9042 * tlb flush IPI delivery. We must reload CR3
9043 * to make sure to use no freed page tables.
9044 */
9045 +
9046 +#ifndef CONFIG_PAX_PER_CPU_PGD
9047 load_cr3(next->pgd);
9048 +#endif
9049 +
9050 load_LDT_nolock(&next->context);
9051 +
9052 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9053 + if (!nx_enabled)
9054 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9055 +#endif
9056 +
9057 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9058 +#ifdef CONFIG_PAX_PAGEEXEC
9059 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9060 +#endif
9061 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9062 +#endif
9063 +
9064 }
9065 - }
9066 #endif
9067 + }
9068 }
9069
9070 #define activate_mm(prev, next) \
9071 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu.h linux-2.6.32.42/arch/x86/include/asm/mmu.h
9072 --- linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9073 +++ linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9074 @@ -9,10 +9,23 @@
9075 * we put the segment information here.
9076 */
9077 typedef struct {
9078 - void *ldt;
9079 + struct desc_struct *ldt;
9080 int size;
9081 struct mutex lock;
9082 - void *vdso;
9083 + unsigned long vdso;
9084 +
9085 +#ifdef CONFIG_X86_32
9086 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9087 + unsigned long user_cs_base;
9088 + unsigned long user_cs_limit;
9089 +
9090 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9091 + cpumask_t cpu_user_cs_mask;
9092 +#endif
9093 +
9094 +#endif
9095 +#endif
9096 +
9097 } mm_context_t;
9098
9099 #ifdef CONFIG_SMP
9100 diff -urNp linux-2.6.32.42/arch/x86/include/asm/module.h linux-2.6.32.42/arch/x86/include/asm/module.h
9101 --- linux-2.6.32.42/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9102 +++ linux-2.6.32.42/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9103 @@ -5,6 +5,7 @@
9104
9105 #ifdef CONFIG_X86_64
9106 /* X86_64 does not define MODULE_PROC_FAMILY */
9107 +#define MODULE_PROC_FAMILY ""
9108 #elif defined CONFIG_M386
9109 #define MODULE_PROC_FAMILY "386 "
9110 #elif defined CONFIG_M486
9111 @@ -59,13 +60,36 @@
9112 #error unknown processor family
9113 #endif
9114
9115 -#ifdef CONFIG_X86_32
9116 -# ifdef CONFIG_4KSTACKS
9117 -# define MODULE_STACKSIZE "4KSTACKS "
9118 -# else
9119 -# define MODULE_STACKSIZE ""
9120 -# endif
9121 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9123 +#define MODULE_PAX_UDEREF "UDEREF "
9124 +#else
9125 +#define MODULE_PAX_UDEREF ""
9126 +#endif
9127 +
9128 +#ifdef CONFIG_PAX_KERNEXEC
9129 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9130 +#else
9131 +#define MODULE_PAX_KERNEXEC ""
9132 +#endif
9133 +
9134 +#ifdef CONFIG_PAX_REFCOUNT
9135 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9136 +#else
9137 +#define MODULE_PAX_REFCOUNT ""
9138 #endif
9139
9140 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9141 +#define MODULE_STACKSIZE "4KSTACKS "
9142 +#else
9143 +#define MODULE_STACKSIZE ""
9144 +#endif
9145 +
9146 +#ifdef CONFIG_GRKERNSEC
9147 +#define MODULE_GRSEC "GRSECURITY "
9148 +#else
9149 +#define MODULE_GRSEC ""
9150 +#endif
9151 +
9152 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9153 +
9154 #endif /* _ASM_X86_MODULE_H */
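The rework above folds the grsecurity/PaX feature set into the module vermagic, so modules built without matching options are rejected at load time instead of being loaded into an incompatible kernel. A small illustration of what the assembled string looks like; the tag values are copied from the hunk, but the particular CONFIG combination (M686, 4K stacks, GRKERNSEC, KERNEXEC and REFCOUNT on, UDEREF off) is an assumption made up for the example:

#include <stdio.h>

#define MODULE_PROC_FAMILY	"686 "
#define MODULE_STACKSIZE	"4KSTACKS "
#define MODULE_GRSEC		"GRSECURITY "
#define MODULE_PAX_KERNEXEC	"KERNEXEC "
#define MODULE_PAX_UDEREF	""
#define MODULE_PAX_REFCOUNT	"REFCOUNT "

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
	/* prints the assembled tag string, e.g.
	 * "686 4KSTACKS GRSECURITY KERNEXEC REFCOUNT " */
	puts(MODULE_ARCH_VERMAGIC);
	return 0;
}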
9155 diff -urNp linux-2.6.32.42/arch/x86/include/asm/page_64_types.h linux-2.6.32.42/arch/x86/include/asm/page_64_types.h
9156 --- linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9157 +++ linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9158 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9159
9160 /* duplicated to the one in bootmem.h */
9161 extern unsigned long max_pfn;
9162 -extern unsigned long phys_base;
9163 +extern const unsigned long phys_base;
9164
9165 extern unsigned long __phys_addr(unsigned long);
9166 #define __phys_reloc_hide(x) (x)
9167 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt.h linux-2.6.32.42/arch/x86/include/asm/paravirt.h
9168 --- linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9169 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9170 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9171 pv_mmu_ops.set_fixmap(idx, phys, flags);
9172 }
9173
9174 +#ifdef CONFIG_PAX_KERNEXEC
9175 +static inline unsigned long pax_open_kernel(void)
9176 +{
9177 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9178 +}
9179 +
9180 +static inline unsigned long pax_close_kernel(void)
9181 +{
9182 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9183 +}
9184 +#else
9185 +static inline unsigned long pax_open_kernel(void) { return 0; }
9186 +static inline unsigned long pax_close_kernel(void) { return 0; }
9187 +#endif
9188 +
9189 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9190
9191 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9192 @@ -945,7 +960,7 @@ extern void default_banner(void);
9193
9194 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9195 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9196 -#define PARA_INDIRECT(addr) *%cs:addr
9197 +#define PARA_INDIRECT(addr) *%ss:addr
9198 #endif
9199
9200 #define INTERRUPT_RETURN \
9201 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9202 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9203 CLBR_NONE, \
9204 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9205 +
9206 +#define GET_CR0_INTO_RDI \
9207 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9208 + mov %rax,%rdi
9209 +
9210 +#define SET_RDI_INTO_CR0 \
9211 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9212 +
9213 +#define GET_CR3_INTO_RDI \
9214 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9215 + mov %rax,%rdi
9216 +
9217 +#define SET_RDI_INTO_CR3 \
9218 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9219 +
9220 #endif /* CONFIG_X86_32 */
9221
9222 #endif /* __ASSEMBLY__ */
9223 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h
9224 --- linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9225 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
9226 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9227 an mfn. We can tell which is which from the index. */
9228 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9229 phys_addr_t phys, pgprot_t flags);
9230 +
9231 +#ifdef CONFIG_PAX_KERNEXEC
9232 + unsigned long (*pax_open_kernel)(void);
9233 + unsigned long (*pax_close_kernel)(void);
9234 +#endif
9235 +
9236 };
9237
9238 struct raw_spinlock;
9239 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pci_x86.h linux-2.6.32.42/arch/x86/include/asm/pci_x86.h
9240 --- linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9241 +++ linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9242 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9243 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9244
9245 struct pci_raw_ops {
9246 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9247 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9248 int reg, int len, u32 *val);
9249 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9250 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9251 int reg, int len, u32 val);
9252 };
9253
9254 -extern struct pci_raw_ops *raw_pci_ops;
9255 -extern struct pci_raw_ops *raw_pci_ext_ops;
9256 +extern const struct pci_raw_ops *raw_pci_ops;
9257 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9258
9259 -extern struct pci_raw_ops pci_direct_conf1;
9260 +extern const struct pci_raw_ops pci_direct_conf1;
9261 extern bool port_cf9_safe;
9262
9263 /* arch_initcall level */
9264 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgalloc.h linux-2.6.32.42/arch/x86/include/asm/pgalloc.h
9265 --- linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9266 +++ linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9267 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9268 pmd_t *pmd, pte_t *pte)
9269 {
9270 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9271 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9272 +}
9273 +
9274 +static inline void pmd_populate_user(struct mm_struct *mm,
9275 + pmd_t *pmd, pte_t *pte)
9276 +{
9277 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9278 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9279 }
9280
9281 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h
9282 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9283 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9284 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9285
9286 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9287 {
9288 + pax_open_kernel();
9289 *pmdp = pmd;
9290 + pax_close_kernel();
9291 }
9292
9293 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9294 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h
9295 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9296 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9297 @@ -26,9 +26,6 @@
9298 struct mm_struct;
9299 struct vm_area_struct;
9300
9301 -extern pgd_t swapper_pg_dir[1024];
9302 -extern pgd_t trampoline_pg_dir[1024];
9303 -
9304 static inline void pgtable_cache_init(void) { }
9305 static inline void check_pgt_cache(void) { }
9306 void paging_init(void);
9307 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9308 # include <asm/pgtable-2level.h>
9309 #endif
9310
9311 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9312 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9313 +#ifdef CONFIG_X86_PAE
9314 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9315 +#endif
9316 +
9317 #if defined(CONFIG_HIGHPTE)
9318 #define __KM_PTE \
9319 (in_nmi() ? KM_NMI_PTE : \
9320 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9321 /* Clear a kernel PTE and flush it from the TLB */
9322 #define kpte_clear_flush(ptep, vaddr) \
9323 do { \
9324 + pax_open_kernel(); \
9325 pte_clear(&init_mm, (vaddr), (ptep)); \
9326 + pax_close_kernel(); \
9327 __flush_tlb_one((vaddr)); \
9328 } while (0)
9329
9330 @@ -85,6 +90,9 @@ do { \
9331
9332 #endif /* !__ASSEMBLY__ */
9333
9334 +#define HAVE_ARCH_UNMAPPED_AREA
9335 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9336 +
9337 /*
9338 * kern_addr_valid() is (1) for FLATMEM and (0) for
9339 * SPARSEMEM and DISCONTIGMEM
9340 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h
9341 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9342 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9343 @@ -8,7 +8,7 @@
9344 */
9345 #ifdef CONFIG_X86_PAE
9346 # include <asm/pgtable-3level_types.h>
9347 -# define PMD_SIZE (1UL << PMD_SHIFT)
9348 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9349 # define PMD_MASK (~(PMD_SIZE - 1))
9350 #else
9351 # include <asm/pgtable-2level_types.h>
9352 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9353 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9354 #endif
9355
9356 +#ifdef CONFIG_PAX_KERNEXEC
9357 +#ifndef __ASSEMBLY__
9358 +extern unsigned char MODULES_EXEC_VADDR[];
9359 +extern unsigned char MODULES_EXEC_END[];
9360 +#endif
9361 +#include <asm/boot.h>
9362 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9363 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9364 +#else
9365 +#define ktla_ktva(addr) (addr)
9366 +#define ktva_ktla(addr) (addr)
9367 +#endif
9368 +
9369 #define MODULES_VADDR VMALLOC_START
9370 #define MODULES_END VMALLOC_END
9371 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
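
For context on the CONFIG_PAX_KERNEXEC hunk above: ktla_ktva()/ktva_ktla() translate a kernel text address between its two mappings by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET. A stand-alone sketch of that arithmetic follows; the two constants use common 32-bit defaults and are assumptions, not values read from this tree's config.

/* Sketch of the ktla_ktva()/ktva_ktla() translation introduced above.
 * PAGE_OFFSET and LOAD_PHYSICAL_ADDR below are assumed defaults. */
#include <stdio.h>

#define PAGE_OFFSET         0xC0000000UL
#define LOAD_PHYSICAL_ADDR  0x01000000UL   /* CONFIG_PHYSICAL_START default */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long ktla = 0x00100000UL;          /* some kernel text address */
    unsigned long ktva = ktla_ktva(ktla);

    /* the two macros are inverses of each other */
    printf("ktla %#lx -> ktva %#lx -> back %#lx\n",
           ktla, ktva, ktva_ktla(ktva));
    return 0;
}
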
9372 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h
9373 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9374 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9375 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9376
9377 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9378 {
9379 + pax_open_kernel();
9380 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9381 + pax_close_kernel();
9382 }
9383
9384 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9385 {
9386 + pax_open_kernel();
9387 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9388 + pax_close_kernel();
9389 }
9390
9391 /*
9392 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h
9393 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9394 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9395 @@ -16,10 +16,13 @@
9396
9397 extern pud_t level3_kernel_pgt[512];
9398 extern pud_t level3_ident_pgt[512];
9399 +extern pud_t level3_vmalloc_pgt[512];
9400 +extern pud_t level3_vmemmap_pgt[512];
9401 +extern pud_t level2_vmemmap_pgt[512];
9402 extern pmd_t level2_kernel_pgt[512];
9403 extern pmd_t level2_fixmap_pgt[512];
9404 -extern pmd_t level2_ident_pgt[512];
9405 -extern pgd_t init_level4_pgt[];
9406 +extern pmd_t level2_ident_pgt[512*2];
9407 +extern pgd_t init_level4_pgt[512];
9408
9409 #define swapper_pg_dir init_level4_pgt
9410
9411 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9412
9413 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9414 {
9415 + pax_open_kernel();
9416 *pmdp = pmd;
9417 + pax_close_kernel();
9418 }
9419
9420 static inline void native_pmd_clear(pmd_t *pmd)
9421 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9422
9423 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9424 {
9425 + pax_open_kernel();
9426 *pgdp = pgd;
9427 + pax_close_kernel();
9428 }
9429
9430 static inline void native_pgd_clear(pgd_t *pgd)
9431 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h
9432 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9433 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9434 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9435 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9436 #define MODULES_END _AC(0xffffffffff000000, UL)
9437 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9438 +#define MODULES_EXEC_VADDR MODULES_VADDR
9439 +#define MODULES_EXEC_END MODULES_END
9440 +
9441 +#define ktla_ktva(addr) (addr)
9442 +#define ktva_ktla(addr) (addr)
9443
9444 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9445 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable.h linux-2.6.32.42/arch/x86/include/asm/pgtable.h
9446 --- linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9447 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9448 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9449
9450 #define arch_end_context_switch(prev) do {} while(0)
9451
9452 +#define pax_open_kernel() native_pax_open_kernel()
9453 +#define pax_close_kernel() native_pax_close_kernel()
9454 #endif /* CONFIG_PARAVIRT */
9455
9456 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9457 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9458 +
9459 +#ifdef CONFIG_PAX_KERNEXEC
9460 +static inline unsigned long native_pax_open_kernel(void)
9461 +{
9462 + unsigned long cr0;
9463 +
9464 + preempt_disable();
9465 + barrier();
9466 + cr0 = read_cr0() ^ X86_CR0_WP;
9467 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9468 + write_cr0(cr0);
9469 + return cr0 ^ X86_CR0_WP;
9470 +}
9471 +
9472 +static inline unsigned long native_pax_close_kernel(void)
9473 +{
9474 + unsigned long cr0;
9475 +
9476 + cr0 = read_cr0() ^ X86_CR0_WP;
9477 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9478 + write_cr0(cr0);
9479 + barrier();
9480 + preempt_enable_no_resched();
9481 + return cr0 ^ X86_CR0_WP;
9482 +}
9483 +#else
9484 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9485 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9486 +#endif
9487 +
9488 /*
9489 * The following only work if pte_present() is true.
9490 * Undefined behaviour if not..
9491 */
9492 +static inline int pte_user(pte_t pte)
9493 +{
9494 + return pte_val(pte) & _PAGE_USER;
9495 +}
9496 +
9497 static inline int pte_dirty(pte_t pte)
9498 {
9499 return pte_flags(pte) & _PAGE_DIRTY;
9500 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9501 return pte_clear_flags(pte, _PAGE_RW);
9502 }
9503
9504 +static inline pte_t pte_mkread(pte_t pte)
9505 +{
9506 + return __pte(pte_val(pte) | _PAGE_USER);
9507 +}
9508 +
9509 static inline pte_t pte_mkexec(pte_t pte)
9510 {
9511 - return pte_clear_flags(pte, _PAGE_NX);
9512 +#ifdef CONFIG_X86_PAE
9513 + if (__supported_pte_mask & _PAGE_NX)
9514 + return pte_clear_flags(pte, _PAGE_NX);
9515 + else
9516 +#endif
9517 + return pte_set_flags(pte, _PAGE_USER);
9518 +}
9519 +
9520 +static inline pte_t pte_exprotect(pte_t pte)
9521 +{
9522 +#ifdef CONFIG_X86_PAE
9523 + if (__supported_pte_mask & _PAGE_NX)
9524 + return pte_set_flags(pte, _PAGE_NX);
9525 + else
9526 +#endif
9527 + return pte_clear_flags(pte, _PAGE_USER);
9528 }
9529
9530 static inline pte_t pte_mkdirty(pte_t pte)
9531 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9532 #endif
9533
9534 #ifndef __ASSEMBLY__
9535 +
9536 +#ifdef CONFIG_PAX_PER_CPU_PGD
9537 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9538 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9539 +{
9540 + return cpu_pgd[cpu];
9541 +}
9542 +#endif
9543 +
9544 #include <linux/mm_types.h>
9545
9546 static inline int pte_none(pte_t pte)
9547 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9548
9549 static inline int pgd_bad(pgd_t pgd)
9550 {
9551 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9552 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9553 }
9554
9555 static inline int pgd_none(pgd_t pgd)
9556 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9557 * pgd_offset() returns a (pgd_t *)
9558 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9559 */
9560 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9561 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9562 +
9563 +#ifdef CONFIG_PAX_PER_CPU_PGD
9564 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9565 +#endif
9566 +
9567 /*
9568 * a shortcut which implies the use of the kernel's pgd, instead
9569 * of a process's
9570 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9571 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9572 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9573
9574 +#ifdef CONFIG_X86_32
9575 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9576 +#else
9577 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9578 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9579 +
9580 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9581 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9582 +#else
9583 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9584 +#endif
9585 +
9586 +#endif
9587 +
9588 #ifndef __ASSEMBLY__
9589
9590 extern int direct_gbpages;
9591 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9592 * dst and src can be on the same page, but the range must not overlap,
9593 * and must not cross a page boundary.
9594 */
9595 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9596 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9597 {
9598 - memcpy(dst, src, count * sizeof(pgd_t));
9599 + pax_open_kernel();
9600 + while (count--)
9601 + *dst++ = *src++;
9602 + pax_close_kernel();
9603 }
9604
9605 +#ifdef CONFIG_PAX_PER_CPU_PGD
9606 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9607 +#endif
9608 +
9609 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9610 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9611 +#else
9612 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9613 +#endif
9614
9615 #include <asm-generic/pgtable.h>
9616 #endif /* __ASSEMBLY__ */
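
The native_pax_open_kernel()/native_pax_close_kernel() pair added above makes read-only kernel pages temporarily writable by clearing CR0.WP, with preemption disabled across the window and BUG_ON() guarding against unbalanced calls. A minimal user-space sketch of the same XOR-toggle logic, using a fake CR0 variable instead of the real control register (illustrative only):

/* Sketch of the CR0.WP toggle pattern behind pax_open_kernel()/
 * pax_close_kernel(). The asserts mirror the BUG_ON() checks in the
 * patch: open must find WP set, close must find WP clear. */
#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long fake_cr0 = 0x80050033UL;   /* typical CR0 with WP set */

static unsigned long read_cr0(void)            { return fake_cr0; }
static void          write_cr0(unsigned long v) { fake_cr0 = v; }

static unsigned long pax_open_kernel_sketch(void)
{
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP; /* clear WP */
    assert(!(cr0 & X86_CR0_WP));                 /* WP had to be set before */
    write_cr0(cr0);
    return cr0 ^ X86_CR0_WP;                     /* previous CR0 value */
}

static unsigned long pax_close_kernel_sketch(void)
{
    unsigned long cr0 = read_cr0() ^ X86_CR0_WP; /* set WP again */
    assert(cr0 & X86_CR0_WP);                    /* WP had to be clear before */
    write_cr0(cr0);
    return cr0 ^ X86_CR0_WP;
}

int main(void)
{
    pax_open_kernel_sketch();   /* ...writes to read-only pages go here... */
    pax_close_kernel_sketch();
    printf("CR0 restored: %#lx\n", fake_cr0);
    return 0;
}
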
9617 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h
9618 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9619 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9620 @@ -16,12 +16,11 @@
9621 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9622 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9623 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9624 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9625 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9626 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9627 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9628 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9629 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9630 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9631 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9632 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9633
9634 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9635 @@ -39,7 +38,6 @@
9636 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9637 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9638 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9639 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9640 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9641 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9642 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9643 @@ -55,8 +53,10 @@
9644
9645 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9646 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9647 -#else
9648 +#elif defined(CONFIG_KMEMCHECK)
9649 #define _PAGE_NX (_AT(pteval_t, 0))
9650 +#else
9651 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9652 #endif
9653
9654 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9655 @@ -93,6 +93,9 @@
9656 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9657 _PAGE_ACCESSED)
9658
9659 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9660 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9661 +
9662 #define __PAGE_KERNEL_EXEC \
9663 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9664 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9665 @@ -103,8 +106,8 @@
9666 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9667 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9668 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9669 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9670 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9671 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9672 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9673 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9674 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9675 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9676 @@ -163,8 +166,8 @@
9677 * bits are combined, this will alow user to access the high address mapped
9678 * VDSO in the presence of CONFIG_COMPAT_VDSO
9679 */
9680 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9681 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9682 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9683 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9684 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9685 #endif
9686
9687 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9688 {
9689 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9690 }
9691 +#endif
9692
9693 +#if PAGETABLE_LEVELS == 3
9694 +#include <asm-generic/pgtable-nopud.h>
9695 +#endif
9696 +
9697 +#if PAGETABLE_LEVELS == 2
9698 +#include <asm-generic/pgtable-nopmd.h>
9699 +#endif
9700 +
9701 +#ifndef __ASSEMBLY__
9702 #if PAGETABLE_LEVELS > 3
9703 typedef struct { pudval_t pud; } pud_t;
9704
9705 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9706 return pud.pud;
9707 }
9708 #else
9709 -#include <asm-generic/pgtable-nopud.h>
9710 -
9711 static inline pudval_t native_pud_val(pud_t pud)
9712 {
9713 return native_pgd_val(pud.pgd);
9714 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9715 return pmd.pmd;
9716 }
9717 #else
9718 -#include <asm-generic/pgtable-nopmd.h>
9719 -
9720 static inline pmdval_t native_pmd_val(pmd_t pmd)
9721 {
9722 return native_pgd_val(pmd.pud.pgd);
9723 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9724
9725 extern pteval_t __supported_pte_mask;
9726 extern void set_nx(void);
9727 +
9728 +#ifdef CONFIG_X86_32
9729 +#ifdef CONFIG_X86_PAE
9730 extern int nx_enabled;
9731 +#else
9732 +#define nx_enabled (0)
9733 +#endif
9734 +#else
9735 +#define nx_enabled (1)
9736 +#endif
9737
9738 #define pgprot_writecombine pgprot_writecombine
9739 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9740 diff -urNp linux-2.6.32.42/arch/x86/include/asm/processor.h linux-2.6.32.42/arch/x86/include/asm/processor.h
9741 --- linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9742 +++ linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9743 @@ -272,7 +272,7 @@ struct tss_struct {
9744
9745 } ____cacheline_aligned;
9746
9747 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9748 +extern struct tss_struct init_tss[NR_CPUS];
9749
9750 /*
9751 * Save the original ist values for checking stack pointers during debugging
9752 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9753 */
9754 #define TASK_SIZE PAGE_OFFSET
9755 #define TASK_SIZE_MAX TASK_SIZE
9756 +
9757 +#ifdef CONFIG_PAX_SEGMEXEC
9758 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9759 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9760 +#else
9761 #define STACK_TOP TASK_SIZE
9762 -#define STACK_TOP_MAX STACK_TOP
9763 +#endif
9764 +
9765 +#define STACK_TOP_MAX TASK_SIZE
9766
9767 #define INIT_THREAD { \
9768 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9769 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9770 .vm86_info = NULL, \
9771 .sysenter_cs = __KERNEL_CS, \
9772 .io_bitmap_ptr = NULL, \
9773 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9774 */
9775 #define INIT_TSS { \
9776 .x86_tss = { \
9777 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9778 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9779 .ss0 = __KERNEL_DS, \
9780 .ss1 = __KERNEL_CS, \
9781 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9782 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9783 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9784
9785 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9786 -#define KSTK_TOP(info) \
9787 -({ \
9788 - unsigned long *__ptr = (unsigned long *)(info); \
9789 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9790 -})
9791 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9792
9793 /*
9794 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9795 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9796 #define task_pt_regs(task) \
9797 ({ \
9798 struct pt_regs *__regs__; \
9799 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9800 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9801 __regs__ - 1; \
9802 })
9803
9804 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9805 /*
9806 * User space process size. 47bits minus one guard page.
9807 */
9808 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9809 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9810
9811 /* This decides where the kernel will search for a free chunk of vm
9812 * space during mmap's.
9813 */
9814 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9815 - 0xc0000000 : 0xFFFFe000)
9816 + 0xc0000000 : 0xFFFFf000)
9817
9818 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9819 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9820 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9821 #define STACK_TOP_MAX TASK_SIZE_MAX
9822
9823 #define INIT_THREAD { \
9824 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9825 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9826 }
9827
9828 #define INIT_TSS { \
9829 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9830 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9831 }
9832
9833 /*
9834 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9835 */
9836 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9837
9838 +#ifdef CONFIG_PAX_SEGMEXEC
9839 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9840 +#endif
9841 +
9842 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9843
9844 /* Get/set a process' ability to use the timestamp counter instruction */
9845 diff -urNp linux-2.6.32.42/arch/x86/include/asm/ptrace.h linux-2.6.32.42/arch/x86/include/asm/ptrace.h
9846 --- linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9847 +++ linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9848 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9849 }
9850
9851 /*
9852 - * user_mode_vm(regs) determines whether a register set came from user mode.
9853 + * user_mode(regs) determines whether a register set came from user mode.
9854 * This is true if V8086 mode was enabled OR if the register set was from
9855 * protected mode with RPL-3 CS value. This tricky test checks that with
9856 * one comparison. Many places in the kernel can bypass this full check
9857 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9858 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9859 + * be used.
9860 */
9861 -static inline int user_mode(struct pt_regs *regs)
9862 +static inline int user_mode_novm(struct pt_regs *regs)
9863 {
9864 #ifdef CONFIG_X86_32
9865 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9866 #else
9867 - return !!(regs->cs & 3);
9868 + return !!(regs->cs & SEGMENT_RPL_MASK);
9869 #endif
9870 }
9871
9872 -static inline int user_mode_vm(struct pt_regs *regs)
9873 +static inline int user_mode(struct pt_regs *regs)
9874 {
9875 #ifdef CONFIG_X86_32
9876 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9877 USER_RPL;
9878 #else
9879 - return user_mode(regs);
9880 + return user_mode_novm(regs);
9881 #endif
9882 }
9883
9884 diff -urNp linux-2.6.32.42/arch/x86/include/asm/reboot.h linux-2.6.32.42/arch/x86/include/asm/reboot.h
9885 --- linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9886 +++ linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9887 @@ -6,19 +6,19 @@
9888 struct pt_regs;
9889
9890 struct machine_ops {
9891 - void (*restart)(char *cmd);
9892 - void (*halt)(void);
9893 - void (*power_off)(void);
9894 + void (* __noreturn restart)(char *cmd);
9895 + void (* __noreturn halt)(void);
9896 + void (* __noreturn power_off)(void);
9897 void (*shutdown)(void);
9898 void (*crash_shutdown)(struct pt_regs *);
9899 - void (*emergency_restart)(void);
9900 + void (* __noreturn emergency_restart)(void);
9901 };
9902
9903 extern struct machine_ops machine_ops;
9904
9905 void native_machine_crash_shutdown(struct pt_regs *regs);
9906 void native_machine_shutdown(void);
9907 -void machine_real_restart(const unsigned char *code, int length);
9908 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9909
9910 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9911 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9912 diff -urNp linux-2.6.32.42/arch/x86/include/asm/rwsem.h linux-2.6.32.42/arch/x86/include/asm/rwsem.h
9913 --- linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9914 +++ linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9915 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9916 {
9917 asm volatile("# beginning down_read\n\t"
9918 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9919 +
9920 +#ifdef CONFIG_PAX_REFCOUNT
9921 + "jno 0f\n"
9922 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9923 + "int $4\n0:\n"
9924 + _ASM_EXTABLE(0b, 0b)
9925 +#endif
9926 +
9927 /* adds 0x00000001, returns the old value */
9928 " jns 1f\n"
9929 " call call_rwsem_down_read_failed\n"
9930 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9931 "1:\n\t"
9932 " mov %1,%2\n\t"
9933 " add %3,%2\n\t"
9934 +
9935 +#ifdef CONFIG_PAX_REFCOUNT
9936 + "jno 0f\n"
9937 + "sub %3,%2\n"
9938 + "int $4\n0:\n"
9939 + _ASM_EXTABLE(0b, 0b)
9940 +#endif
9941 +
9942 " jle 2f\n\t"
9943 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9944 " jnz 1b\n\t"
9945 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9946 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9947 asm volatile("# beginning down_write\n\t"
9948 LOCK_PREFIX " xadd %1,(%2)\n\t"
9949 +
9950 +#ifdef CONFIG_PAX_REFCOUNT
9951 + "jno 0f\n"
9952 + "mov %1,(%2)\n"
9953 + "int $4\n0:\n"
9954 + _ASM_EXTABLE(0b, 0b)
9955 +#endif
9956 +
9957 /* subtract 0x0000ffff, returns the old value */
9958 " test %1,%1\n\t"
9959 /* was the count 0 before? */
9960 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9961 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9962 asm volatile("# beginning __up_read\n\t"
9963 LOCK_PREFIX " xadd %1,(%2)\n\t"
9964 +
9965 +#ifdef CONFIG_PAX_REFCOUNT
9966 + "jno 0f\n"
9967 + "mov %1,(%2)\n"
9968 + "int $4\n0:\n"
9969 + _ASM_EXTABLE(0b, 0b)
9970 +#endif
9971 +
9972 /* subtracts 1, returns the old value */
9973 " jns 1f\n\t"
9974 " call call_rwsem_wake\n"
9975 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9976 rwsem_count_t tmp;
9977 asm volatile("# beginning __up_write\n\t"
9978 LOCK_PREFIX " xadd %1,(%2)\n\t"
9979 +
9980 +#ifdef CONFIG_PAX_REFCOUNT
9981 + "jno 0f\n"
9982 + "mov %1,(%2)\n"
9983 + "int $4\n0:\n"
9984 + _ASM_EXTABLE(0b, 0b)
9985 +#endif
9986 +
9987 /* tries to transition
9988 0xffff0001 -> 0x00000000 */
9989 " jz 1f\n"
9990 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9991 {
9992 asm volatile("# beginning __downgrade_write\n\t"
9993 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9994 +
9995 +#ifdef CONFIG_PAX_REFCOUNT
9996 + "jno 0f\n"
9997 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9998 + "int $4\n0:\n"
9999 + _ASM_EXTABLE(0b, 0b)
10000 +#endif
10001 +
10002 /*
10003 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10004 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10005 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10006 static inline void rwsem_atomic_add(rwsem_count_t delta,
10007 struct rw_semaphore *sem)
10008 {
10009 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10010 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10011 +
10012 +#ifdef CONFIG_PAX_REFCOUNT
10013 + "jno 0f\n"
10014 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10015 + "int $4\n0:\n"
10016 + _ASM_EXTABLE(0b, 0b)
10017 +#endif
10018 +
10019 : "+m" (sem->count)
10020 : "er" (delta));
10021 }
10022 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10023 {
10024 rwsem_count_t tmp = delta;
10025
10026 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10027 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10028 +
10029 +#ifdef CONFIG_PAX_REFCOUNT
10030 + "jno 0f\n"
10031 + "mov %0,%1\n"
10032 + "int $4\n0:\n"
10033 + _ASM_EXTABLE(0b, 0b)
10034 +#endif
10035 +
10036 : "+r" (tmp), "+m" (sem->count)
10037 : : "memory");
10038
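
Every CONFIG_PAX_REFCOUNT hunk above follows one pattern: perform the locked update, then "jno" past a recovery path that undoes the update and raises "int $4" (the overflow trap), with an exception-table entry resuming execution after the check. A rough, non-atomic C analogue of that detect-and-refuse idea, using the GCC overflow builtin in place of the inline asm:

/* Rough analogue of the PAX_REFCOUNT pattern: refuse an increment that
 * would overflow instead of letting the count wrap. Not atomic and not
 * the kernel implementation; illustrative only. */
#include <limits.h>
#include <stdio.h>

static int refcount_inc_checked(int *count)
{
    int old = *count, next;

    if (__builtin_add_overflow(old, 1, &next)) {
        /* overflow detected: leave the counter as-is and report, much
         * as the patch undoes the add and raises the overflow trap */
        fprintf(stderr, "refcount overflow detected\n");
        return -1;
    }
    *count = next;
    return 0;
}

int main(void)
{
    int c = INT_MAX;

    if (refcount_inc_checked(&c))
        printf("increment refused, count still %d\n", c);
    return 0;
}
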
10039 diff -urNp linux-2.6.32.42/arch/x86/include/asm/segment.h linux-2.6.32.42/arch/x86/include/asm/segment.h
10040 --- linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10041 +++ linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10042 @@ -62,8 +62,8 @@
10043 * 26 - ESPFIX small SS
10044 * 27 - per-cpu [ offset to per-cpu data area ]
10045 * 28 - stack_canary-20 [ for stack protector ]
10046 - * 29 - unused
10047 - * 30 - unused
10048 + * 29 - PCI BIOS CS
10049 + * 30 - PCI BIOS DS
10050 * 31 - TSS for double fault handler
10051 */
10052 #define GDT_ENTRY_TLS_MIN 6
10053 @@ -77,6 +77,8 @@
10054
10055 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10056
10057 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10058 +
10059 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10060
10061 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10062 @@ -88,7 +90,7 @@
10063 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10064 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10065
10066 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10067 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10068 #ifdef CONFIG_SMP
10069 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10070 #else
10071 @@ -102,6 +104,12 @@
10072 #define __KERNEL_STACK_CANARY 0
10073 #endif
10074
10075 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10076 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10077 +
10078 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10079 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10080 +
10081 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10082
10083 /*
10084 @@ -139,7 +147,7 @@
10085 */
10086
10087 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10088 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10089 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10090
10091
10092 #else
10093 @@ -163,6 +171,8 @@
10094 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10095 #define __USER32_DS __USER_DS
10096
10097 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10098 +
10099 #define GDT_ENTRY_TSS 8 /* needs two entries */
10100 #define GDT_ENTRY_LDT 10 /* needs two entries */
10101 #define GDT_ENTRY_TLS_MIN 12
10102 @@ -183,6 +193,7 @@
10103 #endif
10104
10105 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10106 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10107 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10108 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10109 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
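
The new GDT slots above (the KERNEXEC kernel CS and the PCI BIOS CS/DS entries) turn into segment selectors the usual way: descriptor index times 8, with the RPL in the low two bits. A quick sketch of the arithmetic, assuming the 32-bit GDT_ENTRY_KERNEL_BASE of 12 used by this kernel generation:

/* Selector arithmetic behind __KERNEXEC_KERNEL_CS and __PCIBIOS_CS/DS.
 * GDT_ENTRY_KERNEL_BASE = 12 is an assumption (32-bit layout). */
#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE        12
#define GDT_ENTRY_KERNEXEC_KERNEL_CS 4
#define GDT_ENTRY_PCIBIOS_CS         (GDT_ENTRY_KERNEL_BASE + 17)  /* slot 29 */
#define GDT_ENTRY_PCIBIOS_DS         (GDT_ENTRY_KERNEL_BASE + 18)  /* slot 30 */

#define SELECTOR(idx) ((idx) * 8)    /* GDT selector, RPL 0 */

int main(void)
{
    printf("__KERNEXEC_KERNEL_CS = %#x\n", (unsigned)SELECTOR(GDT_ENTRY_KERNEXEC_KERNEL_CS));
    printf("__PCIBIOS_CS         = %#x\n", (unsigned)SELECTOR(GDT_ENTRY_PCIBIOS_CS));
    printf("__PCIBIOS_DS         = %#x\n", (unsigned)SELECTOR(GDT_ENTRY_PCIBIOS_DS));
    return 0;
}
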
10110 diff -urNp linux-2.6.32.42/arch/x86/include/asm/smp.h linux-2.6.32.42/arch/x86/include/asm/smp.h
10111 --- linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10112 +++ linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-07-01 19:00:40.000000000 -0400
10113 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10114 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10115 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10116 DECLARE_PER_CPU(u16, cpu_llc_id);
10117 -DECLARE_PER_CPU(int, cpu_number);
10118 +DECLARE_PER_CPU(unsigned int, cpu_number);
10119
10120 static inline struct cpumask *cpu_sibling_mask(int cpu)
10121 {
10122 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10123 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10124
10125 /* Static state in head.S used to set up a CPU */
10126 -extern struct {
10127 - void *sp;
10128 - unsigned short ss;
10129 -} stack_start;
10130 +extern unsigned long stack_start; /* Initial stack pointer address */
10131
10132 struct smp_ops {
10133 void (*smp_prepare_boot_cpu)(void);
10134 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10135 extern int safe_smp_processor_id(void);
10136
10137 #elif defined(CONFIG_X86_64_SMP)
10138 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10139 -
10140 -#define stack_smp_processor_id() \
10141 -({ \
10142 - struct thread_info *ti; \
10143 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10144 - ti->cpu; \
10145 -})
10146 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10147 +#define stack_smp_processor_id() raw_smp_processor_id()
10148 #define safe_smp_processor_id() smp_processor_id()
10149
10150 #endif
10151 diff -urNp linux-2.6.32.42/arch/x86/include/asm/spinlock.h linux-2.6.32.42/arch/x86/include/asm/spinlock.h
10152 --- linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10153 +++ linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10154 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10155 static inline void __raw_read_lock(raw_rwlock_t *rw)
10156 {
10157 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10158 +
10159 +#ifdef CONFIG_PAX_REFCOUNT
10160 + "jno 0f\n"
10161 + LOCK_PREFIX " addl $1,(%0)\n"
10162 + "int $4\n0:\n"
10163 + _ASM_EXTABLE(0b, 0b)
10164 +#endif
10165 +
10166 "jns 1f\n"
10167 "call __read_lock_failed\n\t"
10168 "1:\n"
10169 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10170 static inline void __raw_write_lock(raw_rwlock_t *rw)
10171 {
10172 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10173 +
10174 +#ifdef CONFIG_PAX_REFCOUNT
10175 + "jno 0f\n"
10176 + LOCK_PREFIX " addl %1,(%0)\n"
10177 + "int $4\n0:\n"
10178 + _ASM_EXTABLE(0b, 0b)
10179 +#endif
10180 +
10181 "jz 1f\n"
10182 "call __write_lock_failed\n\t"
10183 "1:\n"
10184 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10185
10186 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10187 {
10188 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10189 + asm volatile(LOCK_PREFIX "incl %0\n"
10190 +
10191 +#ifdef CONFIG_PAX_REFCOUNT
10192 + "jno 0f\n"
10193 + LOCK_PREFIX "decl %0\n"
10194 + "int $4\n0:\n"
10195 + _ASM_EXTABLE(0b, 0b)
10196 +#endif
10197 +
10198 + :"+m" (rw->lock) : : "memory");
10199 }
10200
10201 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10202 {
10203 - asm volatile(LOCK_PREFIX "addl %1, %0"
10204 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10205 +
10206 +#ifdef CONFIG_PAX_REFCOUNT
10207 + "jno 0f\n"
10208 + LOCK_PREFIX "subl %1, %0\n"
10209 + "int $4\n0:\n"
10210 + _ASM_EXTABLE(0b, 0b)
10211 +#endif
10212 +
10213 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10214 }
10215
10216 diff -urNp linux-2.6.32.42/arch/x86/include/asm/stackprotector.h linux-2.6.32.42/arch/x86/include/asm/stackprotector.h
10217 --- linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10218 +++ linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
10219 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10220
10221 static inline void load_stack_canary_segment(void)
10222 {
10223 -#ifdef CONFIG_X86_32
10224 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10225 asm volatile ("mov %0, %%gs" : : "r" (0));
10226 #endif
10227 }
10228 diff -urNp linux-2.6.32.42/arch/x86/include/asm/system.h linux-2.6.32.42/arch/x86/include/asm/system.h
10229 --- linux-2.6.32.42/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10230 +++ linux-2.6.32.42/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10231 @@ -132,7 +132,7 @@ do { \
10232 "thread_return:\n\t" \
10233 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10234 __switch_canary \
10235 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10236 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10237 "movq %%rax,%%rdi\n\t" \
10238 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10239 "jnz ret_from_fork\n\t" \
10240 @@ -143,7 +143,7 @@ do { \
10241 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10242 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10243 [_tif_fork] "i" (_TIF_FORK), \
10244 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10245 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10246 [current_task] "m" (per_cpu_var(current_task)) \
10247 __switch_canary_iparam \
10248 : "memory", "cc" __EXTRA_CLOBBER)
10249 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10250 {
10251 unsigned long __limit;
10252 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10253 - return __limit + 1;
10254 + return __limit;
10255 }
10256
10257 static inline void native_clts(void)
10258 @@ -340,12 +340,12 @@ void enable_hlt(void);
10259
10260 void cpu_idle_wait(void);
10261
10262 -extern unsigned long arch_align_stack(unsigned long sp);
10263 +#define arch_align_stack(x) ((x) & ~0xfUL)
10264 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10265
10266 void default_idle(void);
10267
10268 -void stop_this_cpu(void *dummy);
10269 +void stop_this_cpu(void *dummy) __noreturn;
10270
10271 /*
10272 * Force strict CPU ordering.
10273 diff -urNp linux-2.6.32.42/arch/x86/include/asm/thread_info.h linux-2.6.32.42/arch/x86/include/asm/thread_info.h
10274 --- linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10275 +++ linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10276 @@ -10,6 +10,7 @@
10277 #include <linux/compiler.h>
10278 #include <asm/page.h>
10279 #include <asm/types.h>
10280 +#include <asm/percpu.h>
10281
10282 /*
10283 * low level task data that entry.S needs immediate access to
10284 @@ -24,7 +25,6 @@ struct exec_domain;
10285 #include <asm/atomic.h>
10286
10287 struct thread_info {
10288 - struct task_struct *task; /* main task structure */
10289 struct exec_domain *exec_domain; /* execution domain */
10290 __u32 flags; /* low level flags */
10291 __u32 status; /* thread synchronous flags */
10292 @@ -34,18 +34,12 @@ struct thread_info {
10293 mm_segment_t addr_limit;
10294 struct restart_block restart_block;
10295 void __user *sysenter_return;
10296 -#ifdef CONFIG_X86_32
10297 - unsigned long previous_esp; /* ESP of the previous stack in
10298 - case of nested (IRQ) stacks
10299 - */
10300 - __u8 supervisor_stack[0];
10301 -#endif
10302 + unsigned long lowest_stack;
10303 int uaccess_err;
10304 };
10305
10306 -#define INIT_THREAD_INFO(tsk) \
10307 +#define INIT_THREAD_INFO \
10308 { \
10309 - .task = &tsk, \
10310 .exec_domain = &default_exec_domain, \
10311 .flags = 0, \
10312 .cpu = 0, \
10313 @@ -56,7 +50,7 @@ struct thread_info {
10314 }, \
10315 }
10316
10317 -#define init_thread_info (init_thread_union.thread_info)
10318 +#define init_thread_info (init_thread_union.stack)
10319 #define init_stack (init_thread_union.stack)
10320
10321 #else /* !__ASSEMBLY__ */
10322 @@ -163,6 +157,23 @@ struct thread_info {
10323 #define alloc_thread_info(tsk) \
10324 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10325
10326 +#ifdef __ASSEMBLY__
10327 +/* how to get the thread information struct from ASM */
10328 +#define GET_THREAD_INFO(reg) \
10329 + mov PER_CPU_VAR(current_tinfo), reg
10330 +
10331 +/* use this one if reg already contains %esp */
10332 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10333 +#else
10334 +/* how to get the thread information struct from C */
10335 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10336 +
10337 +static __always_inline struct thread_info *current_thread_info(void)
10338 +{
10339 + return percpu_read_stable(current_tinfo);
10340 +}
10341 +#endif
10342 +
10343 #ifdef CONFIG_X86_32
10344
10345 #define STACK_WARN (THREAD_SIZE/8)
10346 @@ -173,35 +184,13 @@ struct thread_info {
10347 */
10348 #ifndef __ASSEMBLY__
10349
10350 -
10351 /* how to get the current stack pointer from C */
10352 register unsigned long current_stack_pointer asm("esp") __used;
10353
10354 -/* how to get the thread information struct from C */
10355 -static inline struct thread_info *current_thread_info(void)
10356 -{
10357 - return (struct thread_info *)
10358 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10359 -}
10360 -
10361 -#else /* !__ASSEMBLY__ */
10362 -
10363 -/* how to get the thread information struct from ASM */
10364 -#define GET_THREAD_INFO(reg) \
10365 - movl $-THREAD_SIZE, reg; \
10366 - andl %esp, reg
10367 -
10368 -/* use this one if reg already contains %esp */
10369 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10370 - andl $-THREAD_SIZE, reg
10371 -
10372 #endif
10373
10374 #else /* X86_32 */
10375
10376 -#include <asm/percpu.h>
10377 -#define KERNEL_STACK_OFFSET (5*8)
10378 -
10379 /*
10380 * macros/functions for gaining access to the thread information structure
10381 * preempt_count needs to be 1 initially, until the scheduler is functional.
10382 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10383 #ifndef __ASSEMBLY__
10384 DECLARE_PER_CPU(unsigned long, kernel_stack);
10385
10386 -static inline struct thread_info *current_thread_info(void)
10387 -{
10388 - struct thread_info *ti;
10389 - ti = (void *)(percpu_read_stable(kernel_stack) +
10390 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10391 - return ti;
10392 -}
10393 -
10394 -#else /* !__ASSEMBLY__ */
10395 -
10396 -/* how to get the thread information struct from ASM */
10397 -#define GET_THREAD_INFO(reg) \
10398 - movq PER_CPU_VAR(kernel_stack),reg ; \
10399 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10400 -
10401 +/* how to get the current stack pointer from C */
10402 +register unsigned long current_stack_pointer asm("rsp") __used;
10403 #endif
10404
10405 #endif /* !X86_32 */
10406 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10407 extern void free_thread_info(struct thread_info *ti);
10408 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10409 #define arch_task_cache_init arch_task_cache_init
10410 +
10411 +#define __HAVE_THREAD_FUNCTIONS
10412 +#define task_thread_info(task) (&(task)->tinfo)
10413 +#define task_stack_page(task) ((task)->stack)
10414 +#define setup_thread_stack(p, org) do {} while (0)
10415 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10416 +
10417 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10418 +extern struct task_struct *alloc_task_struct(void);
10419 +extern void free_task_struct(struct task_struct *);
10420 +
10421 #endif
10422 #endif /* _ASM_X86_THREAD_INFO_H */
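
The thread_info rework above replaces the stack-pointer-masking lookup with an explicit per-CPU current_tinfo pointer. A plain C sketch contrasting the two lookup schemes, with stand-in types and an assumed 8 KiB THREAD_SIZE:

/* Sketch of the two current_thread_info() schemes this hunk swaps:
 * mask the stack pointer down to the stack base (stock kernel) versus
 * read an explicit per-CPU pointer (patched kernel). Stand-ins only. */
#include <stdio.h>

#define THREAD_SIZE (8 * 1024UL)    /* assumed */

struct thread_info_sketch { int cpu; };

/* old scheme: thread_info lives at the bottom of the kernel stack */
static struct thread_info_sketch *ti_from_stack(unsigned long sp)
{
    return (struct thread_info_sketch *)(sp & ~(THREAD_SIZE - 1));
}

/* patched scheme: one pointer per CPU, updated on context switch */
static struct thread_info_sketch *per_cpu_current_tinfo;

static struct thread_info_sketch *ti_from_percpu(void)
{
    return per_cpu_current_tinfo;
}

int main(void)
{
    static struct thread_info_sketch ti = { .cpu = 0 };

    per_cpu_current_tinfo = &ti;
    printf("per-cpu lookup   : %p\n", (void *)ti_from_percpu());
    printf("stack-mask lookup: %p\n", (void *)ti_from_stack(0x1234UL));
    return 0;
}
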
10423 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h
10424 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10425 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10426 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10427 static __always_inline unsigned long __must_check
10428 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10429 {
10430 + pax_track_stack();
10431 +
10432 + if ((long)n < 0)
10433 + return n;
10434 +
10435 if (__builtin_constant_p(n)) {
10436 unsigned long ret;
10437
10438 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10439 return ret;
10440 }
10441 }
10442 + if (!__builtin_constant_p(n))
10443 + check_object_size(from, n, true);
10444 return __copy_to_user_ll(to, from, n);
10445 }
10446
10447 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10448 __copy_to_user(void __user *to, const void *from, unsigned long n)
10449 {
10450 might_fault();
10451 +
10452 return __copy_to_user_inatomic(to, from, n);
10453 }
10454
10455 static __always_inline unsigned long
10456 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10457 {
10458 + if ((long)n < 0)
10459 + return n;
10460 +
10461 /* Avoid zeroing the tail if the copy fails..
10462 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10463 * but as the zeroing behaviour is only significant when n is not
10464 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10465 __copy_from_user(void *to, const void __user *from, unsigned long n)
10466 {
10467 might_fault();
10468 +
10469 + pax_track_stack();
10470 +
10471 + if ((long)n < 0)
10472 + return n;
10473 +
10474 if (__builtin_constant_p(n)) {
10475 unsigned long ret;
10476
10477 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10478 return ret;
10479 }
10480 }
10481 + if (!__builtin_constant_p(n))
10482 + check_object_size(to, n, false);
10483 return __copy_from_user_ll(to, from, n);
10484 }
10485
10486 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10487 const void __user *from, unsigned long n)
10488 {
10489 might_fault();
10490 +
10491 + if ((long)n < 0)
10492 + return n;
10493 +
10494 if (__builtin_constant_p(n)) {
10495 unsigned long ret;
10496
10497 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10498 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10499 unsigned long n)
10500 {
10501 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10502 + if ((long)n < 0)
10503 + return n;
10504 +
10505 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10506 +}
10507 +
10508 +/**
10509 + * copy_to_user: - Copy a block of data into user space.
10510 + * @to: Destination address, in user space.
10511 + * @from: Source address, in kernel space.
10512 + * @n: Number of bytes to copy.
10513 + *
10514 + * Context: User context only. This function may sleep.
10515 + *
10516 + * Copy data from kernel space to user space.
10517 + *
10518 + * Returns number of bytes that could not be copied.
10519 + * On success, this will be zero.
10520 + */
10521 +static __always_inline unsigned long __must_check
10522 +copy_to_user(void __user *to, const void *from, unsigned long n)
10523 +{
10524 + if (access_ok(VERIFY_WRITE, to, n))
10525 + n = __copy_to_user(to, from, n);
10526 + return n;
10527 +}
10528 +
10529 +/**
10530 + * copy_from_user: - Copy a block of data from user space.
10531 + * @to: Destination address, in kernel space.
10532 + * @from: Source address, in user space.
10533 + * @n: Number of bytes to copy.
10534 + *
10535 + * Context: User context only. This function may sleep.
10536 + *
10537 + * Copy data from user space to kernel space.
10538 + *
10539 + * Returns number of bytes that could not be copied.
10540 + * On success, this will be zero.
10541 + *
10542 + * If some data could not be copied, this function will pad the copied
10543 + * data to the requested size using zero bytes.
10544 + */
10545 +static __always_inline unsigned long __must_check
10546 +copy_from_user(void *to, const void __user *from, unsigned long n)
10547 +{
10548 + if (access_ok(VERIFY_READ, from, n))
10549 + n = __copy_from_user(to, from, n);
10550 + else if ((long)n > 0) {
10551 + if (!__builtin_constant_p(n))
10552 + check_object_size(to, n, false);
10553 + memset(to, 0, n);
10554 + }
10555 + return n;
10556 }
10557
10558 -unsigned long __must_check copy_to_user(void __user *to,
10559 - const void *from, unsigned long n);
10560 -unsigned long __must_check copy_from_user(void *to,
10561 - const void __user *from,
10562 - unsigned long n);
10563 long __must_check strncpy_from_user(char *dst, const char __user *src,
10564 long count);
10565 long __must_check __strncpy_from_user(char *dst,
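
The copy_to_user()/copy_from_user() wrappers that the patch inlines above implement a fixed contract: reject sizes whose sign bit is set, return the number of bytes left uncopied, and zero-fill the kernel destination when the access check fails so stale data is never exposed. A user-space sketch of that contract with stub helpers (the names are placeholders, not kernel APIs):

/* Sketch of the copy_from_user() contract the patch open-codes above.
 * access_ok_stub() and memcpy() stand in for the kernel helpers. */
#include <stdio.h>
#include <string.h>

static int access_ok_stub(const void *p, unsigned long n)
{
    return p != NULL && (long)n >= 0;   /* reject NULL and wrapping sizes */
}

static unsigned long copy_from_user_sketch(void *to, const void *from,
                                           unsigned long n)
{
    if ((long)n < 0)                    /* "negative" size: refuse outright */
        return n;
    if (access_ok_stub(from, n)) {
        memcpy(to, from, n);            /* stands in for __copy_from_user() */
        return 0;                       /* nothing left uncopied */
    }
    memset(to, 0, n);                   /* never leak stale kernel memory */
    return n;                           /* everything was "not copied" */
}

int main(void)
{
    char buf[8];
    unsigned long left = copy_from_user_sketch(buf, NULL, sizeof(buf));

    printf("uncopied: %lu, buf zeroed: %d\n", left, buf[0] == 0);
    return 0;
}
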
10566 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h
10567 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10568 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10569 @@ -9,6 +9,9 @@
10570 #include <linux/prefetch.h>
10571 #include <linux/lockdep.h>
10572 #include <asm/page.h>
10573 +#include <asm/pgtable.h>
10574 +
10575 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10576
10577 /*
10578 * Copy To/From Userspace
10579 @@ -19,113 +22,203 @@ __must_check unsigned long
10580 copy_user_generic(void *to, const void *from, unsigned len);
10581
10582 __must_check unsigned long
10583 -copy_to_user(void __user *to, const void *from, unsigned len);
10584 -__must_check unsigned long
10585 -copy_from_user(void *to, const void __user *from, unsigned len);
10586 -__must_check unsigned long
10587 copy_in_user(void __user *to, const void __user *from, unsigned len);
10588
10589 static __always_inline __must_check
10590 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10591 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10592 {
10593 - int ret = 0;
10594 + unsigned ret = 0;
10595
10596 might_fault();
10597 - if (!__builtin_constant_p(size))
10598 - return copy_user_generic(dst, (__force void *)src, size);
10599 +
10600 + if ((int)size < 0)
10601 + return size;
10602 +
10603 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10604 + if (!__access_ok(VERIFY_READ, src, size))
10605 + return size;
10606 +#endif
10607 +
10608 + if (!__builtin_constant_p(size)) {
10609 + check_object_size(dst, size, false);
10610 +
10611 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10612 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10613 + src += PAX_USER_SHADOW_BASE;
10614 +#endif
10615 +
10616 + return copy_user_generic(dst, (__force const void *)src, size);
10617 + }
10618 switch (size) {
10619 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10620 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10621 ret, "b", "b", "=q", 1);
10622 return ret;
10623 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10624 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10625 ret, "w", "w", "=r", 2);
10626 return ret;
10627 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10628 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10629 ret, "l", "k", "=r", 4);
10630 return ret;
10631 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10632 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10633 ret, "q", "", "=r", 8);
10634 return ret;
10635 case 10:
10636 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10637 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10638 ret, "q", "", "=r", 10);
10639 if (unlikely(ret))
10640 return ret;
10641 __get_user_asm(*(u16 *)(8 + (char *)dst),
10642 - (u16 __user *)(8 + (char __user *)src),
10643 + (const u16 __user *)(8 + (const char __user *)src),
10644 ret, "w", "w", "=r", 2);
10645 return ret;
10646 case 16:
10647 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10648 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10649 ret, "q", "", "=r", 16);
10650 if (unlikely(ret))
10651 return ret;
10652 __get_user_asm(*(u64 *)(8 + (char *)dst),
10653 - (u64 __user *)(8 + (char __user *)src),
10654 + (const u64 __user *)(8 + (const char __user *)src),
10655 ret, "q", "", "=r", 8);
10656 return ret;
10657 default:
10658 - return copy_user_generic(dst, (__force void *)src, size);
10659 +
10660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10661 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10662 + src += PAX_USER_SHADOW_BASE;
10663 +#endif
10664 +
10665 + return copy_user_generic(dst, (__force const void *)src, size);
10666 }
10667 }
10668
10669 static __always_inline __must_check
10670 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10671 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10672 {
10673 - int ret = 0;
10674 + unsigned ret = 0;
10675
10676 might_fault();
10677 - if (!__builtin_constant_p(size))
10678 +
10679 + pax_track_stack();
10680 +
10681 + if ((int)size < 0)
10682 + return size;
10683 +
10684 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10685 + if (!__access_ok(VERIFY_WRITE, dst, size))
10686 + return size;
10687 +#endif
10688 +
10689 + if (!__builtin_constant_p(size)) {
10690 + check_object_size(src, size, true);
10691 +
10692 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10693 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10694 + dst += PAX_USER_SHADOW_BASE;
10695 +#endif
10696 +
10697 return copy_user_generic((__force void *)dst, src, size);
10698 + }
10699 switch (size) {
10700 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10701 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10702 ret, "b", "b", "iq", 1);
10703 return ret;
10704 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10705 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10706 ret, "w", "w", "ir", 2);
10707 return ret;
10708 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10709 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10710 ret, "l", "k", "ir", 4);
10711 return ret;
10712 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10713 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10714 ret, "q", "", "er", 8);
10715 return ret;
10716 case 10:
10717 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10718 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10719 ret, "q", "", "er", 10);
10720 if (unlikely(ret))
10721 return ret;
10722 asm("":::"memory");
10723 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10724 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10725 ret, "w", "w", "ir", 2);
10726 return ret;
10727 case 16:
10728 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10729 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10730 ret, "q", "", "er", 16);
10731 if (unlikely(ret))
10732 return ret;
10733 asm("":::"memory");
10734 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10735 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10736 ret, "q", "", "er", 8);
10737 return ret;
10738 default:
10739 +
10740 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10741 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10742 + dst += PAX_USER_SHADOW_BASE;
10743 +#endif
10744 +
10745 return copy_user_generic((__force void *)dst, src, size);
10746 }
10747 }
10748
10749 static __always_inline __must_check
10750 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10751 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10752 +{
10753 + if (access_ok(VERIFY_WRITE, to, len))
10754 + len = __copy_to_user(to, from, len);
10755 + return len;
10756 +}
10757 +
10758 +static __always_inline __must_check
10759 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10760 +{
10761 + if ((int)len < 0)
10762 + return len;
10763 +
10764 + if (access_ok(VERIFY_READ, from, len))
10765 + len = __copy_from_user(to, from, len);
10766 + else if ((int)len > 0) {
10767 + if (!__builtin_constant_p(len))
10768 + check_object_size(to, len, false);
10769 + memset(to, 0, len);
10770 + }
10771 + return len;
10772 +}
10773 +
10774 +static __always_inline __must_check
10775 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10776 {
10777 - int ret = 0;
10778 + unsigned ret = 0;
10779
10780 might_fault();
10781 - if (!__builtin_constant_p(size))
10782 +
10783 + pax_track_stack();
10784 +
10785 + if ((int)size < 0)
10786 + return size;
10787 +
10788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10789 + if (!__access_ok(VERIFY_READ, src, size))
10790 + return size;
10791 + if (!__access_ok(VERIFY_WRITE, dst, size))
10792 + return size;
10793 +#endif
10794 +
10795 + if (!__builtin_constant_p(size)) {
10796 +
10797 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10798 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10799 + src += PAX_USER_SHADOW_BASE;
10800 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10801 + dst += PAX_USER_SHADOW_BASE;
10802 +#endif
10803 +
10804 return copy_user_generic((__force void *)dst,
10805 - (__force void *)src, size);
10806 + (__force const void *)src, size);
10807 + }
10808 switch (size) {
10809 case 1: {
10810 u8 tmp;
10811 - __get_user_asm(tmp, (u8 __user *)src,
10812 + __get_user_asm(tmp, (const u8 __user *)src,
10813 ret, "b", "b", "=q", 1);
10814 if (likely(!ret))
10815 __put_user_asm(tmp, (u8 __user *)dst,
10816 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10817 }
10818 case 2: {
10819 u16 tmp;
10820 - __get_user_asm(tmp, (u16 __user *)src,
10821 + __get_user_asm(tmp, (const u16 __user *)src,
10822 ret, "w", "w", "=r", 2);
10823 if (likely(!ret))
10824 __put_user_asm(tmp, (u16 __user *)dst,
10825 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10826
10827 case 4: {
10828 u32 tmp;
10829 - __get_user_asm(tmp, (u32 __user *)src,
10830 + __get_user_asm(tmp, (const u32 __user *)src,
10831 ret, "l", "k", "=r", 4);
10832 if (likely(!ret))
10833 __put_user_asm(tmp, (u32 __user *)dst,
10834 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10835 }
10836 case 8: {
10837 u64 tmp;
10838 - __get_user_asm(tmp, (u64 __user *)src,
10839 + __get_user_asm(tmp, (const u64 __user *)src,
10840 ret, "q", "", "=r", 8);
10841 if (likely(!ret))
10842 __put_user_asm(tmp, (u64 __user *)dst,
10843 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10844 return ret;
10845 }
10846 default:
10847 +
10848 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10849 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10850 + src += PAX_USER_SHADOW_BASE;
10851 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10852 + dst += PAX_USER_SHADOW_BASE;
10853 +#endif
10854 +
10855 return copy_user_generic((__force void *)dst,
10856 - (__force void *)src, size);
10857 + (__force const void *)src, size);
10858 }
10859 }
10860
10861 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10862 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10863 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10864
10865 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10866 - unsigned size);
10867 +static __must_check __always_inline unsigned long
10868 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10869 +{
10870 + pax_track_stack();
10871 +
10872 + if ((int)size < 0)
10873 + return size;
10874
10875 -static __must_check __always_inline int
10876 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10877 + if (!__access_ok(VERIFY_READ, src, size))
10878 + return size;
10879 +
10880 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10881 + src += PAX_USER_SHADOW_BASE;
10882 +#endif
10883 +
10884 + return copy_user_generic(dst, (__force const void *)src, size);
10885 +}
10886 +
10887 +static __must_check __always_inline unsigned long
10888 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10889 {
10890 + if ((int)size < 0)
10891 + return size;
10892 +
10893 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10894 + if (!__access_ok(VERIFY_WRITE, dst, size))
10895 + return size;
10896 +
10897 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10898 + dst += PAX_USER_SHADOW_BASE;
10899 +#endif
10900 +
10901 return copy_user_generic((__force void *)dst, src, size);
10902 }
10903
10904 -extern long __copy_user_nocache(void *dst, const void __user *src,
10905 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10906 unsigned size, int zerorest);
10907
10908 -static inline int
10909 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10910 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10911 {
10912 might_sleep();
10913 +
10914 + if ((int)size < 0)
10915 + return size;
10916 +
10917 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10918 + if (!__access_ok(VERIFY_READ, src, size))
10919 + return size;
10920 +#endif
10921 +
10922 return __copy_user_nocache(dst, src, size, 1);
10923 }
10924
10925 -static inline int
10926 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10927 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10928 unsigned size)
10929 {
10930 + if ((int)size < 0)
10931 + return size;
10932 +
10933 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10934 + if (!__access_ok(VERIFY_READ, src, size))
10935 + return size;
10936 +#endif
10937 +
10938 return __copy_user_nocache(dst, src, size, 0);
10939 }
10940
10941 -unsigned long
10942 +extern unsigned long
10943 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10944
10945 #endif /* _ASM_X86_UACCESS_64_H */
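The UDEREF hunks in uaccess_64.h above all follow one pattern: before a userland pointer is handed to copy_user_generic(), any address below PAX_USER_SHADOW_BASE is shifted up into the shadow mapping of userland. An illustrative user-space sketch of that rebasing step (not a patch hunk; SHADOW_BASE_DEMO is a stand-in value, not the kernel's real constant):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for PAX_USER_SHADOW_BASE; the real value comes from the kernel headers */
#define SHADOW_BASE_DEMO 0xffff800000000000ULL

/* rebase a "user" address into the shadow area, as the hunks above do
 * for src/dst right before calling copy_user_generic() */
static uint64_t shadow_rebase(uint64_t addr)
{
	if (addr < SHADOW_BASE_DEMO)
		addr += SHADOW_BASE_DEMO;
	return addr;
}

int main(void)
{
	uint64_t user_addr = 0x00007fff0000f000ULL;	/* typical userland address */

	printf("user %#" PRIx64 " -> shadow %#" PRIx64 "\n",
	       user_addr, shadow_rebase(user_addr));
	return 0;
}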
10946 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess.h linux-2.6.32.42/arch/x86/include/asm/uaccess.h
10947 --- linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
10948 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
10949 @@ -8,12 +8,15 @@
10950 #include <linux/thread_info.h>
10951 #include <linux/prefetch.h>
10952 #include <linux/string.h>
10953 +#include <linux/sched.h>
10954 #include <asm/asm.h>
10955 #include <asm/page.h>
10956
10957 #define VERIFY_READ 0
10958 #define VERIFY_WRITE 1
10959
10960 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10961 +
10962 /*
10963 * The fs value determines whether argument validity checking should be
10964 * performed or not. If get_fs() == USER_DS, checking is performed, with
10965 @@ -29,7 +32,12 @@
10966
10967 #define get_ds() (KERNEL_DS)
10968 #define get_fs() (current_thread_info()->addr_limit)
10969 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10970 +void __set_fs(mm_segment_t x);
10971 +void set_fs(mm_segment_t x);
10972 +#else
10973 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10974 +#endif
10975
10976 #define segment_eq(a, b) ((a).seg == (b).seg)
10977
10978 @@ -77,7 +85,33 @@
10979 * checks that the pointer is in the user space range - after calling
10980 * this function, memory access functions may still return -EFAULT.
10981 */
10982 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10983 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10984 +#define access_ok(type, addr, size) \
10985 +({ \
10986 + long __size = size; \
10987 + unsigned long __addr = (unsigned long)addr; \
10988 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10989 + unsigned long __end_ao = __addr + __size - 1; \
10990 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10991 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10992 + while(__addr_ao <= __end_ao) { \
10993 + char __c_ao; \
10994 + __addr_ao += PAGE_SIZE; \
10995 + if (__size > PAGE_SIZE) \
10996 + cond_resched(); \
10997 + if (__get_user(__c_ao, (char __user *)__addr)) \
10998 + break; \
10999 + if (type != VERIFY_WRITE) { \
11000 + __addr = __addr_ao; \
11001 + continue; \
11002 + } \
11003 + if (__put_user(__c_ao, (char __user *)__addr)) \
11004 + break; \
11005 + __addr = __addr_ao; \
11006 + } \
11007 + } \
11008 + __ret_ao; \
11009 +})
11010
11011 /*
11012 * The exception table consists of pairs of addresses: the first is the
11013 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11014 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11015 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11016
11017 -
11018 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11019 +#define __copyuser_seg "gs;"
11020 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11021 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11022 +#else
11023 +#define __copyuser_seg
11024 +#define __COPYUSER_SET_ES
11025 +#define __COPYUSER_RESTORE_ES
11026 +#endif
11027
11028 #ifdef CONFIG_X86_32
11029 #define __put_user_asm_u64(x, addr, err, errret) \
11030 - asm volatile("1: movl %%eax,0(%2)\n" \
11031 - "2: movl %%edx,4(%2)\n" \
11032 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11033 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11034 "3:\n" \
11035 ".section .fixup,\"ax\"\n" \
11036 "4: movl %3,%0\n" \
11037 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11038 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11039
11040 #define __put_user_asm_ex_u64(x, addr) \
11041 - asm volatile("1: movl %%eax,0(%1)\n" \
11042 - "2: movl %%edx,4(%1)\n" \
11043 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11044 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11045 "3:\n" \
11046 _ASM_EXTABLE(1b, 2b - 1b) \
11047 _ASM_EXTABLE(2b, 3b - 2b) \
11048 @@ -374,7 +416,7 @@ do { \
11049 } while (0)
11050
11051 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11052 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11053 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11054 "2:\n" \
11055 ".section .fixup,\"ax\"\n" \
11056 "3: mov %3,%0\n" \
11057 @@ -382,7 +424,7 @@ do { \
11058 " jmp 2b\n" \
11059 ".previous\n" \
11060 _ASM_EXTABLE(1b, 3b) \
11061 - : "=r" (err), ltype(x) \
11062 + : "=r" (err), ltype (x) \
11063 : "m" (__m(addr)), "i" (errret), "0" (err))
11064
11065 #define __get_user_size_ex(x, ptr, size) \
11066 @@ -407,7 +449,7 @@ do { \
11067 } while (0)
11068
11069 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11070 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11071 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11072 "2:\n" \
11073 _ASM_EXTABLE(1b, 2b - 1b) \
11074 : ltype(x) : "m" (__m(addr)))
11075 @@ -424,13 +466,24 @@ do { \
11076 int __gu_err; \
11077 unsigned long __gu_val; \
11078 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11079 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11080 + (x) = (__typeof__(*(ptr)))__gu_val; \
11081 __gu_err; \
11082 })
11083
11084 /* FIXME: this hack is definitely wrong -AK */
11085 struct __large_struct { unsigned long buf[100]; };
11086 -#define __m(x) (*(struct __large_struct __user *)(x))
11087 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11088 +#define ____m(x) \
11089 +({ \
11090 + unsigned long ____x = (unsigned long)(x); \
11091 + if (____x < PAX_USER_SHADOW_BASE) \
11092 + ____x += PAX_USER_SHADOW_BASE; \
11093 + (void __user *)____x; \
11094 +})
11095 +#else
11096 +#define ____m(x) (x)
11097 +#endif
11098 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11099
11100 /*
11101 * Tell gcc we read from memory instead of writing: this is because
11102 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11103 * aliasing issues.
11104 */
11105 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11106 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11107 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11108 "2:\n" \
11109 ".section .fixup,\"ax\"\n" \
11110 "3: mov %3,%0\n" \
11111 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11112 ".previous\n" \
11113 _ASM_EXTABLE(1b, 3b) \
11114 : "=r"(err) \
11115 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11116 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11117
11118 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11119 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11120 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11121 "2:\n" \
11122 _ASM_EXTABLE(1b, 2b - 1b) \
11123 : : ltype(x), "m" (__m(addr)))
11124 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11125 * On error, the variable @x is set to zero.
11126 */
11127
11128 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11129 +#define __get_user(x, ptr) get_user((x), (ptr))
11130 +#else
11131 #define __get_user(x, ptr) \
11132 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11133 +#endif
11134
11135 /**
11136 * __put_user: - Write a simple value into user space, with less checking.
11137 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11138 * Returns zero on success, or -EFAULT on error.
11139 */
11140
11141 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11142 +#define __put_user(x, ptr) put_user((x), (ptr))
11143 +#else
11144 #define __put_user(x, ptr) \
11145 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11146 +#endif
11147
11148 #define __get_user_unaligned __get_user
11149 #define __put_user_unaligned __put_user
11150 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11151 #define get_user_ex(x, ptr) do { \
11152 unsigned long __gue_val; \
11153 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11154 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11155 + (x) = (__typeof__(*(ptr)))__gue_val; \
11156 } while (0)
11157
11158 #ifdef CONFIG_X86_WP_WORKS_OK
11159 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11160
11161 #define ARCH_HAS_NOCACHE_UACCESS 1
11162
11163 +#define ARCH_HAS_SORT_EXTABLE
11164 #ifdef CONFIG_X86_32
11165 # include "uaccess_32.h"
11166 #else
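The reworked access_ok() above does more than a range check when a request spans several pages: it walks the range one page at a time, reading a byte from each page (and writing it back for VERIFY_WRITE) so that every page is faulted in up front. A stand-alone sketch of that walk (not a patch hunk; probe_byte() is a made-up placeholder for the __get_user()/__put_user() probes in the macro):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

/* placeholder for the per-page __get_user()/__put_user() probe; 0 == success */
static int probe_byte(uintptr_t addr, int writable)
{
	(void)addr;
	(void)writable;
	return 0;
}

static int range_ok(uintptr_t addr, size_t size, int writable)
{
	uintptr_t page = addr & DEMO_PAGE_MASK;
	uintptr_t end = addr + size - 1;

	/* nothing to walk when the range stays within a single page */
	if (((end ^ page) & DEMO_PAGE_MASK) == 0)
		return 1;

	while (page <= end) {
		page += DEMO_PAGE_SIZE;
		if (probe_byte(addr, writable))
			return 0;	/* probe faulted: range not OK */
		addr = page;		/* next probe lands in the next page */
	}
	return 1;
}

int main(void)
{
	printf("%d\n", range_ok(0x1000, 3 * DEMO_PAGE_SIZE, 1));
	return 0;
}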
11167 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vgtod.h linux-2.6.32.42/arch/x86/include/asm/vgtod.h
11168 --- linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11169 +++ linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11170 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11171 int sysctl_enabled;
11172 struct timezone sys_tz;
11173 struct { /* extract of a clocksource struct */
11174 + char name[8];
11175 cycle_t (*vread)(void);
11176 cycle_t cycle_last;
11177 cycle_t mask;
11178 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vmi.h linux-2.6.32.42/arch/x86/include/asm/vmi.h
11179 --- linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11180 +++ linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11181 @@ -191,6 +191,7 @@ struct vrom_header {
11182 u8 reserved[96]; /* Reserved for headers */
11183 char vmi_init[8]; /* VMI_Init jump point */
11184 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11185 + char rom_data[8048]; /* rest of the option ROM */
11186 } __attribute__((packed));
11187
11188 struct pnp_header {
11189 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vsyscall.h linux-2.6.32.42/arch/x86/include/asm/vsyscall.h
11190 --- linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11191 +++ linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11192 @@ -15,9 +15,10 @@ enum vsyscall_num {
11193
11194 #ifdef __KERNEL__
11195 #include <linux/seqlock.h>
11196 +#include <linux/getcpu.h>
11197 +#include <linux/time.h>
11198
11199 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11200 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11201
11202 /* Definitions for CONFIG_GENERIC_TIME definitions */
11203 #define __section_vsyscall_gtod_data __attribute__ \
11204 @@ -31,7 +32,6 @@ enum vsyscall_num {
11205 #define VGETCPU_LSL 2
11206
11207 extern int __vgetcpu_mode;
11208 -extern volatile unsigned long __jiffies;
11209
11210 /* kernel space (writeable) */
11211 extern int vgetcpu_mode;
11212 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11213
11214 extern void map_vsyscall(void);
11215
11216 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11217 +extern time_t vtime(time_t *t);
11218 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11219 #endif /* __KERNEL__ */
11220
11221 #endif /* _ASM_X86_VSYSCALL_H */
11222 diff -urNp linux-2.6.32.42/arch/x86/include/asm/xsave.h linux-2.6.32.42/arch/x86/include/asm/xsave.h
11223 --- linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11224 +++ linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11225 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11226 static inline int xsave_user(struct xsave_struct __user *buf)
11227 {
11228 int err;
11229 +
11230 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11231 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11232 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11233 +#endif
11234 +
11235 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11236 "2:\n"
11237 ".section .fixup,\"ax\"\n"
11238 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11239 u32 lmask = mask;
11240 u32 hmask = mask >> 32;
11241
11242 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11243 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11244 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11245 +#endif
11246 +
11247 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11248 "2:\n"
11249 ".section .fixup,\"ax\"\n"
11250 diff -urNp linux-2.6.32.42/arch/x86/Kconfig linux-2.6.32.42/arch/x86/Kconfig
11251 --- linux-2.6.32.42/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11252 +++ linux-2.6.32.42/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11253 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11254
11255 config X86_32_LAZY_GS
11256 def_bool y
11257 - depends on X86_32 && !CC_STACKPROTECTOR
11258 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11259
11260 config KTIME_SCALAR
11261 def_bool X86_32
11262 @@ -1008,7 +1008,7 @@ choice
11263
11264 config NOHIGHMEM
11265 bool "off"
11266 - depends on !X86_NUMAQ
11267 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11268 ---help---
11269 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11270 However, the address space of 32-bit x86 processors is only 4
11271 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11272
11273 config HIGHMEM4G
11274 bool "4GB"
11275 - depends on !X86_NUMAQ
11276 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11277 ---help---
11278 Select this if you have a 32-bit processor and between 1 and 4
11279 gigabytes of physical RAM.
11280 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11281 hex
11282 default 0xB0000000 if VMSPLIT_3G_OPT
11283 default 0x80000000 if VMSPLIT_2G
11284 - default 0x78000000 if VMSPLIT_2G_OPT
11285 + default 0x70000000 if VMSPLIT_2G_OPT
11286 default 0x40000000 if VMSPLIT_1G
11287 default 0xC0000000
11288 depends on X86_32
11289 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11290
11291 config EFI
11292 bool "EFI runtime service support"
11293 - depends on ACPI
11294 + depends on ACPI && !PAX_KERNEXEC
11295 ---help---
11296 This enables the kernel to use EFI runtime services that are
11297 available (such as the EFI variable services).
11298 @@ -1460,6 +1460,7 @@ config SECCOMP
11299
11300 config CC_STACKPROTECTOR
11301 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11302 + depends on X86_64 || !PAX_MEMORY_UDEREF
11303 ---help---
11304 This option turns on the -fstack-protector GCC feature. This
11305 feature puts, at the beginning of functions, a canary value on
11306 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11307 config PHYSICAL_START
11308 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11309 default "0x1000000"
11310 + range 0x400000 0x40000000
11311 ---help---
11312 This gives the physical address where the kernel is loaded.
11313
11314 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11315 hex
11316 prompt "Alignment value to which kernel should be aligned" if X86_32
11317 default "0x1000000"
11318 + range 0x400000 0x1000000 if PAX_KERNEXEC
11319 range 0x2000 0x1000000
11320 ---help---
11321 This value puts the alignment restrictions on physical address
11322 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11323 Say N if you want to disable CPU hotplug.
11324
11325 config COMPAT_VDSO
11326 - def_bool y
11327 + def_bool n
11328 prompt "Compat VDSO support"
11329 depends on X86_32 || IA32_EMULATION
11330 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11331 ---help---
11332 Map the 32-bit VDSO to the predictable old-style address too.
11333 ---help---
11334 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.cpu linux-2.6.32.42/arch/x86/Kconfig.cpu
11335 --- linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11336 +++ linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11337 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11338
11339 config X86_F00F_BUG
11340 def_bool y
11341 - depends on M586MMX || M586TSC || M586 || M486 || M386
11342 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11343
11344 config X86_WP_WORKS_OK
11345 def_bool y
11346 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11347
11348 config X86_ALIGNMENT_16
11349 def_bool y
11350 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11351 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11352
11353 config X86_INTEL_USERCOPY
11354 def_bool y
11355 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11356 # generates cmov.
11357 config X86_CMOV
11358 def_bool y
11359 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11360 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11361
11362 config X86_MINIMUM_CPU_FAMILY
11363 int
11364 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.debug linux-2.6.32.42/arch/x86/Kconfig.debug
11365 --- linux-2.6.32.42/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11366 +++ linux-2.6.32.42/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11367 @@ -99,7 +99,7 @@ config X86_PTDUMP
11368 config DEBUG_RODATA
11369 bool "Write protect kernel read-only data structures"
11370 default y
11371 - depends on DEBUG_KERNEL
11372 + depends on DEBUG_KERNEL && BROKEN
11373 ---help---
11374 Mark the kernel read-only data as write-protected in the pagetables,
11375 in order to catch accidental (and incorrect) writes to such const
11376 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S
11377 --- linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11378 +++ linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11379 @@ -91,6 +91,9 @@ _start:
11380 /* Do any other stuff... */
11381
11382 #ifndef CONFIG_64BIT
11383 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11384 + call verify_cpu
11385 +
11386 /* This could also be done in C code... */
11387 movl pmode_cr3, %eax
11388 movl %eax, %cr3
11389 @@ -104,7 +107,7 @@ _start:
11390 movl %eax, %ecx
11391 orl %edx, %ecx
11392 jz 1f
11393 - movl $0xc0000080, %ecx
11394 + mov $MSR_EFER, %ecx
11395 wrmsr
11396 1:
11397
11398 @@ -114,6 +117,7 @@ _start:
11399 movl pmode_cr0, %eax
11400 movl %eax, %cr0
11401 jmp pmode_return
11402 +# include "../../verify_cpu.S"
11403 #else
11404 pushw $0
11405 pushw trampoline_segment
11406 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c
11407 --- linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11408 +++ linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11409 @@ -11,11 +11,12 @@
11410 #include <linux/cpumask.h>
11411 #include <asm/segment.h>
11412 #include <asm/desc.h>
11413 +#include <asm/e820.h>
11414
11415 #include "realmode/wakeup.h"
11416 #include "sleep.h"
11417
11418 -unsigned long acpi_wakeup_address;
11419 +unsigned long acpi_wakeup_address = 0x2000;
11420 unsigned long acpi_realmode_flags;
11421
11422 /* address in low memory of the wakeup routine. */
11423 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11424 #else /* CONFIG_64BIT */
11425 header->trampoline_segment = setup_trampoline() >> 4;
11426 #ifdef CONFIG_SMP
11427 - stack_start.sp = temp_stack + sizeof(temp_stack);
11428 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11429 +
11430 + pax_open_kernel();
11431 early_gdt_descr.address =
11432 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11433 + pax_close_kernel();
11434 +
11435 initial_gs = per_cpu_offset(smp_processor_id());
11436 #endif
11437 initial_code = (unsigned long)wakeup_long64;
11438 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11439 return;
11440 }
11441
11442 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11443 -
11444 - if (!acpi_realmode) {
11445 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11446 - return;
11447 - }
11448 -
11449 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11450 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11451 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11452 }
11453
11454
11455 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S
11456 --- linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11457 +++ linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11458 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11459 # and restore the stack ... but you need gdt for this to work
11460 movl saved_context_esp, %esp
11461
11462 - movl %cs:saved_magic, %eax
11463 - cmpl $0x12345678, %eax
11464 + cmpl $0x12345678, saved_magic
11465 jne bogus_magic
11466
11467 # jump to place where we left off
11468 - movl saved_eip, %eax
11469 - jmp *%eax
11470 + jmp *(saved_eip)
11471
11472 bogus_magic:
11473 jmp bogus_magic
11474 diff -urNp linux-2.6.32.42/arch/x86/kernel/alternative.c linux-2.6.32.42/arch/x86/kernel/alternative.c
11475 --- linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11476 +++ linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11477 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11478
11479 BUG_ON(p->len > MAX_PATCH_LEN);
11480 /* prep the buffer with the original instructions */
11481 - memcpy(insnbuf, p->instr, p->len);
11482 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11483 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11484 (unsigned long)p->instr, p->len);
11485
11486 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11487 if (smp_alt_once)
11488 free_init_pages("SMP alternatives",
11489 (unsigned long)__smp_locks,
11490 - (unsigned long)__smp_locks_end);
11491 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11492
11493 restart_nmi();
11494 }
11495 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11496 * instructions. And on the local CPU you need to be protected again NMI or MCE
11497 * handlers seeing an inconsistent instruction while you patch.
11498 */
11499 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11500 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11501 size_t len)
11502 {
11503 unsigned long flags;
11504 local_irq_save(flags);
11505 - memcpy(addr, opcode, len);
11506 +
11507 + pax_open_kernel();
11508 + memcpy(ktla_ktva(addr), opcode, len);
11509 sync_core();
11510 + pax_close_kernel();
11511 +
11512 local_irq_restore(flags);
11513 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11514 that causes hangs on some VIA CPUs. */
11515 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11516 */
11517 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11518 {
11519 - unsigned long flags;
11520 - char *vaddr;
11521 + unsigned char *vaddr = ktla_ktva(addr);
11522 struct page *pages[2];
11523 - int i;
11524 + size_t i;
11525
11526 if (!core_kernel_text((unsigned long)addr)) {
11527 - pages[0] = vmalloc_to_page(addr);
11528 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11529 + pages[0] = vmalloc_to_page(vaddr);
11530 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11531 } else {
11532 - pages[0] = virt_to_page(addr);
11533 + pages[0] = virt_to_page(vaddr);
11534 WARN_ON(!PageReserved(pages[0]));
11535 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11536 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11537 }
11538 BUG_ON(!pages[0]);
11539 - local_irq_save(flags);
11540 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11541 - if (pages[1])
11542 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11543 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11544 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11545 - clear_fixmap(FIX_TEXT_POKE0);
11546 - if (pages[1])
11547 - clear_fixmap(FIX_TEXT_POKE1);
11548 - local_flush_tlb();
11549 - sync_core();
11550 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11551 - that causes hangs on some VIA CPUs. */
11552 + text_poke_early(addr, opcode, len);
11553 for (i = 0; i < len; i++)
11554 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11555 - local_irq_restore(flags);
11556 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11557 return addr;
11558 }
11559 diff -urNp linux-2.6.32.42/arch/x86/kernel/amd_iommu.c linux-2.6.32.42/arch/x86/kernel/amd_iommu.c
11560 --- linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11561 +++ linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11562 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11563 }
11564 }
11565
11566 -static struct dma_map_ops amd_iommu_dma_ops = {
11567 +static const struct dma_map_ops amd_iommu_dma_ops = {
11568 .alloc_coherent = alloc_coherent,
11569 .free_coherent = free_coherent,
11570 .map_page = map_page,
11571 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/apic.c linux-2.6.32.42/arch/x86/kernel/apic/apic.c
11572 --- linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11573 +++ linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11574 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11575 apic_write(APIC_ESR, 0);
11576 v1 = apic_read(APIC_ESR);
11577 ack_APIC_irq();
11578 - atomic_inc(&irq_err_count);
11579 + atomic_inc_unchecked(&irq_err_count);
11580
11581 /*
11582 * Here is what the APIC error bits mean:
11583 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11584 u16 *bios_cpu_apicid;
11585 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11586
11587 + pax_track_stack();
11588 +
11589 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11590 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11591
11592 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c
11593 --- linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11594 +++ linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11595 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11596 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11597 GFP_ATOMIC);
11598 if (!ioapic_entries)
11599 - return 0;
11600 + return NULL;
11601
11602 for (apic = 0; apic < nr_ioapics; apic++) {
11603 ioapic_entries[apic] =
11604 @@ -733,7 +733,7 @@ nomem:
11605 kfree(ioapic_entries[apic]);
11606 kfree(ioapic_entries);
11607
11608 - return 0;
11609 + return NULL;
11610 }
11611
11612 /*
11613 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11614 }
11615 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11616
11617 -void lock_vector_lock(void)
11618 +void lock_vector_lock(void) __acquires(vector_lock)
11619 {
11620 /* Used to the online set of cpus does not change
11621 * during assign_irq_vector.
11622 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11623 spin_lock(&vector_lock);
11624 }
11625
11626 -void unlock_vector_lock(void)
11627 +void unlock_vector_lock(void) __releases(vector_lock)
11628 {
11629 spin_unlock(&vector_lock);
11630 }
11631 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11632 ack_APIC_irq();
11633 }
11634
11635 -atomic_t irq_mis_count;
11636 +atomic_unchecked_t irq_mis_count;
11637
11638 static void ack_apic_level(unsigned int irq)
11639 {
11640 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11641
11642 /* Tail end of version 0x11 I/O APIC bug workaround */
11643 if (!(v & (1 << (i & 0x1f)))) {
11644 - atomic_inc(&irq_mis_count);
11645 + atomic_inc_unchecked(&irq_mis_count);
11646 spin_lock(&ioapic_lock);
11647 __mask_and_edge_IO_APIC_irq(cfg);
11648 __unmask_and_level_IO_APIC_irq(cfg);
11649 diff -urNp linux-2.6.32.42/arch/x86/kernel/apm_32.c linux-2.6.32.42/arch/x86/kernel/apm_32.c
11650 --- linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11651 +++ linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11652 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11653 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11654 * even though they are called in protected mode.
11655 */
11656 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11657 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11658 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11659
11660 static const char driver_version[] = "1.16ac"; /* no spaces */
11661 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11662 BUG_ON(cpu != 0);
11663 gdt = get_cpu_gdt_table(cpu);
11664 save_desc_40 = gdt[0x40 / 8];
11665 +
11666 + pax_open_kernel();
11667 gdt[0x40 / 8] = bad_bios_desc;
11668 + pax_close_kernel();
11669
11670 apm_irq_save(flags);
11671 APM_DO_SAVE_SEGS;
11672 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11673 &call->esi);
11674 APM_DO_RESTORE_SEGS;
11675 apm_irq_restore(flags);
11676 +
11677 + pax_open_kernel();
11678 gdt[0x40 / 8] = save_desc_40;
11679 + pax_close_kernel();
11680 +
11681 put_cpu();
11682
11683 return call->eax & 0xff;
11684 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11685 BUG_ON(cpu != 0);
11686 gdt = get_cpu_gdt_table(cpu);
11687 save_desc_40 = gdt[0x40 / 8];
11688 +
11689 + pax_open_kernel();
11690 gdt[0x40 / 8] = bad_bios_desc;
11691 + pax_close_kernel();
11692
11693 apm_irq_save(flags);
11694 APM_DO_SAVE_SEGS;
11695 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11696 &call->eax);
11697 APM_DO_RESTORE_SEGS;
11698 apm_irq_restore(flags);
11699 +
11700 + pax_open_kernel();
11701 gdt[0x40 / 8] = save_desc_40;
11702 + pax_close_kernel();
11703 +
11704 put_cpu();
11705 return error;
11706 }
11707 @@ -975,7 +989,7 @@ recalc:
11708
11709 static void apm_power_off(void)
11710 {
11711 - unsigned char po_bios_call[] = {
11712 + const unsigned char po_bios_call[] = {
11713 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11714 0x8e, 0xd0, /* movw ax,ss */
11715 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11716 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11717 * code to that CPU.
11718 */
11719 gdt = get_cpu_gdt_table(0);
11720 +
11721 + pax_open_kernel();
11722 set_desc_base(&gdt[APM_CS >> 3],
11723 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11724 set_desc_base(&gdt[APM_CS_16 >> 3],
11725 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11726 set_desc_base(&gdt[APM_DS >> 3],
11727 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11728 + pax_close_kernel();
11729
11730 proc_create("apm", 0, NULL, &apm_file_ops);
11731
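The apm_32.c hunks above bracket every write to the GDT with pax_open_kernel()/pax_close_kernel(), so the descriptor table stays protected except for the brief window in which it is patched. As a loose user-space analogy of that bracketing discipline (not a patch hunk, and not how the kernel primitive is actually implemented), the same shape with mprotect() looks like:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned char *p = mmap(NULL, page, PROT_READ,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	mprotect(p, page, PROT_READ | PROT_WRITE);	/* "open": allow the write */
	memset(p, 0x90, 16);				/* the protected update */
	mprotect(p, page, PROT_READ);			/* "close": re-protect */

	printf("first byte: %#x\n", p[0]);
	munmap(p, page);
	return 0;
}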
11732 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c
11733 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11734 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11735 @@ -51,7 +51,6 @@ void foo(void)
11736 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11737 BLANK();
11738
11739 - OFFSET(TI_task, thread_info, task);
11740 OFFSET(TI_exec_domain, thread_info, exec_domain);
11741 OFFSET(TI_flags, thread_info, flags);
11742 OFFSET(TI_status, thread_info, status);
11743 @@ -60,6 +59,8 @@ void foo(void)
11744 OFFSET(TI_restart_block, thread_info, restart_block);
11745 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11746 OFFSET(TI_cpu, thread_info, cpu);
11747 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11748 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11749 BLANK();
11750
11751 OFFSET(GDS_size, desc_ptr, size);
11752 @@ -99,6 +100,7 @@ void foo(void)
11753
11754 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11755 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11756 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11757 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11758 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11759 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11760 @@ -115,6 +117,11 @@ void foo(void)
11761 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11762 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11763 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11764 +
11765 +#ifdef CONFIG_PAX_KERNEXEC
11766 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11767 +#endif
11768 +
11769 #endif
11770
11771 #ifdef CONFIG_XEN
11772 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c
11773 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11774 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11775 @@ -44,6 +44,8 @@ int main(void)
11776 ENTRY(addr_limit);
11777 ENTRY(preempt_count);
11778 ENTRY(status);
11779 + ENTRY(lowest_stack);
11780 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11781 #ifdef CONFIG_IA32_EMULATION
11782 ENTRY(sysenter_return);
11783 #endif
11784 @@ -63,6 +65,18 @@ int main(void)
11785 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11786 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11787 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11788 +
11789 +#ifdef CONFIG_PAX_KERNEXEC
11790 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11791 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11792 +#endif
11793 +
11794 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11795 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11796 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11797 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11798 +#endif
11799 +
11800 #endif
11801
11802
11803 @@ -115,6 +129,7 @@ int main(void)
11804 ENTRY(cr8);
11805 BLANK();
11806 #undef ENTRY
11807 + DEFINE(TSS_size, sizeof(struct tss_struct));
11808 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11809 BLANK();
11810 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11811 @@ -130,6 +145,7 @@ int main(void)
11812
11813 BLANK();
11814 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11815 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11816 #ifdef CONFIG_XEN
11817 BLANK();
11818 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11819 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/amd.c
11820 --- linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
11821 +++ linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
11822 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
11823 unsigned int size)
11824 {
11825 /* AMD errata T13 (order #21922) */
11826 - if ((c->x86 == 6)) {
11827 + if (c->x86 == 6) {
11828 /* Duron Rev A0 */
11829 if (c->x86_model == 3 && c->x86_mask == 0)
11830 size = 64;
11831 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/common.c linux-2.6.32.42/arch/x86/kernel/cpu/common.c
11832 --- linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11833 +++ linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11834 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11835
11836 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11837
11838 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11839 -#ifdef CONFIG_X86_64
11840 - /*
11841 - * We need valid kernel segments for data and code in long mode too
11842 - * IRET will check the segment types kkeil 2000/10/28
11843 - * Also sysret mandates a special GDT layout
11844 - *
11845 - * TLS descriptors are currently at a different place compared to i386.
11846 - * Hopefully nobody expects them at a fixed place (Wine?)
11847 - */
11848 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11849 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11850 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11851 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11852 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11853 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11854 -#else
11855 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11856 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11857 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11858 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11859 - /*
11860 - * Segments used for calling PnP BIOS have byte granularity.
11861 - * They code segments and data segments have fixed 64k limits,
11862 - * the transfer segment sizes are set at run time.
11863 - */
11864 - /* 32-bit code */
11865 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11866 - /* 16-bit code */
11867 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11868 - /* 16-bit data */
11869 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11870 - /* 16-bit data */
11871 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11872 - /* 16-bit data */
11873 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11874 - /*
11875 - * The APM segments have byte granularity and their bases
11876 - * are set at run time. All have 64k limits.
11877 - */
11878 - /* 32-bit code */
11879 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11880 - /* 16-bit code */
11881 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11882 - /* data */
11883 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11884 -
11885 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11886 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11887 - GDT_STACK_CANARY_INIT
11888 -#endif
11889 -} };
11890 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11891 -
11892 static int __init x86_xsave_setup(char *s)
11893 {
11894 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11895 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11896 {
11897 struct desc_ptr gdt_descr;
11898
11899 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11900 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11901 gdt_descr.size = GDT_SIZE - 1;
11902 load_gdt(&gdt_descr);
11903 /* Reload the per-cpu base */
11904 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11905 /* Filter out anything that depends on CPUID levels we don't have */
11906 filter_cpuid_features(c, true);
11907
11908 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11909 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11910 +#endif
11911 +
11912 /* If the model name is still unset, do table lookup. */
11913 if (!c->x86_model_id[0]) {
11914 const char *p;
11915 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11916 }
11917 __setup("clearcpuid=", setup_disablecpuid);
11918
11919 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11920 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11921 +
11922 #ifdef CONFIG_X86_64
11923 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11924
11925 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11926 EXPORT_PER_CPU_SYMBOL(current_task);
11927
11928 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11929 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11930 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11931 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11932
11933 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11934 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11935 {
11936 memset(regs, 0, sizeof(struct pt_regs));
11937 regs->fs = __KERNEL_PERCPU;
11938 - regs->gs = __KERNEL_STACK_CANARY;
11939 + savesegment(gs, regs->gs);
11940
11941 return regs;
11942 }
11943 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11944 int i;
11945
11946 cpu = stack_smp_processor_id();
11947 - t = &per_cpu(init_tss, cpu);
11948 + t = init_tss + cpu;
11949 orig_ist = &per_cpu(orig_ist, cpu);
11950
11951 #ifdef CONFIG_NUMA
11952 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11953 switch_to_new_gdt(cpu);
11954 loadsegment(fs, 0);
11955
11956 - load_idt((const struct desc_ptr *)&idt_descr);
11957 + load_idt(&idt_descr);
11958
11959 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11960 syscall_init();
11961 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11962 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11963 barrier();
11964
11965 - check_efer();
11966 if (cpu != 0)
11967 enable_x2apic();
11968
11969 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11970 {
11971 int cpu = smp_processor_id();
11972 struct task_struct *curr = current;
11973 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11974 + struct tss_struct *t = init_tss + cpu;
11975 struct thread_struct *thread = &curr->thread;
11976
11977 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11978 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel.c linux-2.6.32.42/arch/x86/kernel/cpu/intel.c
11979 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11980 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11981 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11982 * Update the IDT descriptor and reload the IDT so that
11983 * it uses the read-only mapped virtual address.
11984 */
11985 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11986 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11987 load_idt(&idt_descr);
11988 }
11989 #endif
11990 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c
11991 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11992 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11993 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11994 return ret;
11995 }
11996
11997 -static struct sysfs_ops sysfs_ops = {
11998 +static const struct sysfs_ops sysfs_ops = {
11999 .show = show,
12000 .store = store,
12001 };
12002 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/Makefile linux-2.6.32.42/arch/x86/kernel/cpu/Makefile
12003 --- linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12004 +++ linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12005 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12006 CFLAGS_REMOVE_common.o = -pg
12007 endif
12008
12009 -# Make sure load_percpu_segment has no stackprotector
12010 -nostackp := $(call cc-option, -fno-stack-protector)
12011 -CFLAGS_common.o := $(nostackp)
12012 -
12013 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12014 obj-y += proc.o capflags.o powerflags.o common.o
12015 obj-y += vmware.o hypervisor.o sched.o
12016 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c
12017 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12018 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12019 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12020 return ret;
12021 }
12022
12023 -static struct sysfs_ops threshold_ops = {
12024 +static const struct sysfs_ops threshold_ops = {
12025 .show = show,
12026 .store = store,
12027 };
12028 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c
12029 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12030 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12031 @@ -43,6 +43,7 @@
12032 #include <asm/ipi.h>
12033 #include <asm/mce.h>
12034 #include <asm/msr.h>
12035 +#include <asm/local.h>
12036
12037 #include "mce-internal.h"
12038
12039 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12040 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12041 m->cs, m->ip);
12042
12043 - if (m->cs == __KERNEL_CS)
12044 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12045 print_symbol("{%s}", m->ip);
12046 pr_cont("\n");
12047 }
12048 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12049
12050 #define PANIC_TIMEOUT 5 /* 5 seconds */
12051
12052 -static atomic_t mce_paniced;
12053 +static atomic_unchecked_t mce_paniced;
12054
12055 static int fake_panic;
12056 -static atomic_t mce_fake_paniced;
12057 +static atomic_unchecked_t mce_fake_paniced;
12058
12059 /* Panic in progress. Enable interrupts and wait for final IPI */
12060 static void wait_for_panic(void)
12061 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12062 /*
12063 * Make sure only one CPU runs in machine check panic
12064 */
12065 - if (atomic_inc_return(&mce_paniced) > 1)
12066 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12067 wait_for_panic();
12068 barrier();
12069
12070 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12071 console_verbose();
12072 } else {
12073 /* Don't log too much for fake panic */
12074 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12075 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12076 return;
12077 }
12078 print_mce_head();
12079 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12080 * might have been modified by someone else.
12081 */
12082 rmb();
12083 - if (atomic_read(&mce_paniced))
12084 + if (atomic_read_unchecked(&mce_paniced))
12085 wait_for_panic();
12086 if (!monarch_timeout)
12087 goto out;
12088 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12089 */
12090
12091 static DEFINE_SPINLOCK(mce_state_lock);
12092 -static int open_count; /* #times opened */
12093 +static local_t open_count; /* #times opened */
12094 static int open_exclu; /* already open exclusive? */
12095
12096 static int mce_open(struct inode *inode, struct file *file)
12097 {
12098 spin_lock(&mce_state_lock);
12099
12100 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12101 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12102 spin_unlock(&mce_state_lock);
12103
12104 return -EBUSY;
12105 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12106
12107 if (file->f_flags & O_EXCL)
12108 open_exclu = 1;
12109 - open_count++;
12110 + local_inc(&open_count);
12111
12112 spin_unlock(&mce_state_lock);
12113
12114 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12115 {
12116 spin_lock(&mce_state_lock);
12117
12118 - open_count--;
12119 + local_dec(&open_count);
12120 open_exclu = 0;
12121
12122 spin_unlock(&mce_state_lock);
12123 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12124 static void mce_reset(void)
12125 {
12126 cpu_missing = 0;
12127 - atomic_set(&mce_fake_paniced, 0);
12128 + atomic_set_unchecked(&mce_fake_paniced, 0);
12129 atomic_set(&mce_executing, 0);
12130 atomic_set(&mce_callin, 0);
12131 atomic_set(&global_nwo, 0);
12132 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c
12133 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12134 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12135 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12136 return 0;
12137 }
12138
12139 -static struct mtrr_ops amd_mtrr_ops = {
12140 +static const struct mtrr_ops amd_mtrr_ops = {
12141 .vendor = X86_VENDOR_AMD,
12142 .set = amd_set_mtrr,
12143 .get = amd_get_mtrr,
12144 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c
12145 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12146 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12147 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12148 return 0;
12149 }
12150
12151 -static struct mtrr_ops centaur_mtrr_ops = {
12152 +static const struct mtrr_ops centaur_mtrr_ops = {
12153 .vendor = X86_VENDOR_CENTAUR,
12154 .set = centaur_set_mcr,
12155 .get = centaur_get_mcr,
12156 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c
12157 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12158 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12159 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12160 post_set();
12161 }
12162
12163 -static struct mtrr_ops cyrix_mtrr_ops = {
12164 +static const struct mtrr_ops cyrix_mtrr_ops = {
12165 .vendor = X86_VENDOR_CYRIX,
12166 .set_all = cyrix_set_all,
12167 .set = cyrix_set_arr,
12168 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c
12169 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12170 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12171 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12172 /*
12173 * Generic structure...
12174 */
12175 -struct mtrr_ops generic_mtrr_ops = {
12176 +const struct mtrr_ops generic_mtrr_ops = {
12177 .use_intel_if = 1,
12178 .set_all = generic_set_all,
12179 .get = generic_get_mtrr,
12180 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c
12181 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12182 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12183 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12184 u64 size_or_mask, size_and_mask;
12185 static bool mtrr_aps_delayed_init;
12186
12187 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12188 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12189
12190 -struct mtrr_ops *mtrr_if;
12191 +const struct mtrr_ops *mtrr_if;
12192
12193 static void set_mtrr(unsigned int reg, unsigned long base,
12194 unsigned long size, mtrr_type type);
12195
12196 -void set_mtrr_ops(struct mtrr_ops *ops)
12197 +void set_mtrr_ops(const struct mtrr_ops *ops)
12198 {
12199 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12200 mtrr_ops[ops->vendor] = ops;
12201 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h
12202 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12203 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12204 @@ -12,19 +12,19 @@
12205 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12206
12207 struct mtrr_ops {
12208 - u32 vendor;
12209 - u32 use_intel_if;
12210 - void (*set)(unsigned int reg, unsigned long base,
12211 + const u32 vendor;
12212 + const u32 use_intel_if;
12213 + void (* const set)(unsigned int reg, unsigned long base,
12214 unsigned long size, mtrr_type type);
12215 - void (*set_all)(void);
12216 + void (* const set_all)(void);
12217
12218 - void (*get)(unsigned int reg, unsigned long *base,
12219 + void (* const get)(unsigned int reg, unsigned long *base,
12220 unsigned long *size, mtrr_type *type);
12221 - int (*get_free_region)(unsigned long base, unsigned long size,
12222 + int (* const get_free_region)(unsigned long base, unsigned long size,
12223 int replace_reg);
12224 - int (*validate_add_page)(unsigned long base, unsigned long size,
12225 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12226 unsigned int type);
12227 - int (*have_wrcomb)(void);
12228 + int (* const have_wrcomb)(void);
12229 };
12230
12231 extern int generic_get_free_region(unsigned long base, unsigned long size,
12232 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12233 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12234 unsigned int type);
12235
12236 -extern struct mtrr_ops generic_mtrr_ops;
12237 +extern const struct mtrr_ops generic_mtrr_ops;
12238
12239 extern int positive_have_wrcomb(void);
12240
12241 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12242 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12243 void get_mtrr_state(void);
12244
12245 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12246 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12247
12248 extern u64 size_or_mask, size_and_mask;
12249 -extern struct mtrr_ops *mtrr_if;
12250 +extern const struct mtrr_ops *mtrr_if;
12251
12252 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12253 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12254 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c
12255 --- linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12256 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12257 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12258
12259 /* Interface defining a CPU specific perfctr watchdog */
12260 struct wd_ops {
12261 - int (*reserve)(void);
12262 - void (*unreserve)(void);
12263 - int (*setup)(unsigned nmi_hz);
12264 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12265 - void (*stop)(void);
12266 + int (* const reserve)(void);
12267 + void (* const unreserve)(void);
12268 + int (* const setup)(unsigned nmi_hz);
12269 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12270 + void (* const stop)(void);
12271 unsigned perfctr;
12272 unsigned evntsel;
12273 u64 checkbit;
12274 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12275 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12276 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12277
12278 +/* cannot be const */
12279 static struct wd_ops intel_arch_wd_ops;
12280
12281 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12282 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12283 return 1;
12284 }
12285
12286 +/* cannot be const */
12287 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12288 .reserve = single_msr_reserve,
12289 .unreserve = single_msr_unreserve,
12290 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c
12291 --- linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12292 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12293 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12294 * count to the generic event atomically:
12295 */
12296 again:
12297 - prev_raw_count = atomic64_read(&hwc->prev_count);
12298 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12299 rdmsrl(hwc->event_base + idx, new_raw_count);
12300
12301 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12302 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12303 new_raw_count) != prev_raw_count)
12304 goto again;
12305
12306 @@ -741,7 +741,7 @@ again:
12307 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12308 delta >>= shift;
12309
12310 - atomic64_add(delta, &event->count);
12311 + atomic64_add_unchecked(delta, &event->count);
12312 atomic64_sub(delta, &hwc->period_left);
12313
12314 return new_raw_count;
12315 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12316 * The hw event starts counting from this event offset,
12317 * mark it to be able to extra future deltas:
12318 */
12319 - atomic64_set(&hwc->prev_count, (u64)-left);
12320 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12321
12322 err = checking_wrmsrl(hwc->event_base + idx,
12323 (u64)(-left) & x86_pmu.event_mask);
12324 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12325 break;
12326
12327 callchain_store(entry, frame.return_address);
12328 - fp = frame.next_frame;
12329 + fp = (__force const void __user *)frame.next_frame;
12330 }
12331 }
12332
12333 diff -urNp linux-2.6.32.42/arch/x86/kernel/crash.c linux-2.6.32.42/arch/x86/kernel/crash.c
12334 --- linux-2.6.32.42/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12335 +++ linux-2.6.32.42/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12336 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12337 regs = args->regs;
12338
12339 #ifdef CONFIG_X86_32
12340 - if (!user_mode_vm(regs)) {
12341 + if (!user_mode(regs)) {
12342 crash_fixup_ss_esp(&fixed_regs, regs);
12343 regs = &fixed_regs;
12344 }
12345 diff -urNp linux-2.6.32.42/arch/x86/kernel/doublefault_32.c linux-2.6.32.42/arch/x86/kernel/doublefault_32.c
12346 --- linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12347 +++ linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12348 @@ -11,7 +11,7 @@
12349
12350 #define DOUBLEFAULT_STACKSIZE (1024)
12351 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12352 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12353 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12354
12355 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12356
12357 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12358 unsigned long gdt, tss;
12359
12360 store_gdt(&gdt_desc);
12361 - gdt = gdt_desc.address;
12362 + gdt = (unsigned long)gdt_desc.address;
12363
12364 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12365
12366 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12367 /* 0x2 bit is always set */
12368 .flags = X86_EFLAGS_SF | 0x2,
12369 .sp = STACK_START,
12370 - .es = __USER_DS,
12371 + .es = __KERNEL_DS,
12372 .cs = __KERNEL_CS,
12373 .ss = __KERNEL_DS,
12374 - .ds = __USER_DS,
12375 + .ds = __KERNEL_DS,
12376 .fs = __KERNEL_PERCPU,
12377
12378 .__cr3 = __pa_nodebug(swapper_pg_dir),
12379 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c
12380 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12381 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12382 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12383 #endif
12384
12385 for (;;) {
12386 - struct thread_info *context;
12387 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12388 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12389
12390 - context = (struct thread_info *)
12391 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12392 - bp = print_context_stack(context, stack, bp, ops,
12393 - data, NULL, &graph);
12394 -
12395 - stack = (unsigned long *)context->previous_esp;
12396 - if (!stack)
12397 + if (stack_start == task_stack_page(task))
12398 break;
12399 + stack = *(unsigned long **)stack_start;
12400 if (ops->stack(data, "IRQ") < 0)
12401 break;
12402 touch_nmi_watchdog();
12403 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12404 * When in-kernel, we also print out the stack and code at the
12405 * time of the fault..
12406 */
12407 - if (!user_mode_vm(regs)) {
12408 + if (!user_mode(regs)) {
12409 unsigned int code_prologue = code_bytes * 43 / 64;
12410 unsigned int code_len = code_bytes;
12411 unsigned char c;
12412 u8 *ip;
12413 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12414
12415 printk(KERN_EMERG "Stack:\n");
12416 show_stack_log_lvl(NULL, regs, &regs->sp,
12417 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12418
12419 printk(KERN_EMERG "Code: ");
12420
12421 - ip = (u8 *)regs->ip - code_prologue;
12422 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12423 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12424 /* try starting at IP */
12425 - ip = (u8 *)regs->ip;
12426 + ip = (u8 *)regs->ip + cs_base;
12427 code_len = code_len - code_prologue + 1;
12428 }
12429 for (i = 0; i < code_len; i++, ip++) {
12430 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12431 printk(" Bad EIP value.");
12432 break;
12433 }
12434 - if (ip == (u8 *)regs->ip)
12435 + if (ip == (u8 *)regs->ip + cs_base)
12436 printk("<%02x> ", c);
12437 else
12438 printk("%02x ", c);
12439 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12440 {
12441 unsigned short ud2;
12442
12443 + ip = ktla_ktva(ip);
12444 if (ip < PAGE_OFFSET)
12445 return 0;
12446 if (probe_kernel_address((unsigned short *)ip, ud2))
12447 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c
12448 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12449 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12450 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12451 unsigned long *irq_stack_end =
12452 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12453 unsigned used = 0;
12454 - struct thread_info *tinfo;
12455 int graph = 0;
12456 + void *stack_start;
12457
12458 if (!task)
12459 task = current;
12460 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12461 * current stack address. If the stacks consist of nested
12462 * exceptions
12463 */
12464 - tinfo = task_thread_info(task);
12465 for (;;) {
12466 char *id;
12467 unsigned long *estack_end;
12468 +
12469 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12470 &used, &id);
12471
12472 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12473 if (ops->stack(data, id) < 0)
12474 break;
12475
12476 - bp = print_context_stack(tinfo, stack, bp, ops,
12477 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12478 data, estack_end, &graph);
12479 ops->stack(data, "<EOE>");
12480 /*
12481 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12482 if (stack >= irq_stack && stack < irq_stack_end) {
12483 if (ops->stack(data, "IRQ") < 0)
12484 break;
12485 - bp = print_context_stack(tinfo, stack, bp,
12486 + bp = print_context_stack(task, irq_stack, stack, bp,
12487 ops, data, irq_stack_end, &graph);
12488 /*
12489 * We link to the next stack (which would be
12490 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12491 /*
12492 * This handles the process stack:
12493 */
12494 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12495 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12496 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12497 put_cpu();
12498 }
12499 EXPORT_SYMBOL(dump_trace);
12500 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.c linux-2.6.32.42/arch/x86/kernel/dumpstack.c
12501 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12502 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12503 @@ -2,6 +2,9 @@
12504 * Copyright (C) 1991, 1992 Linus Torvalds
12505 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12506 */
12507 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12508 +#define __INCLUDED_BY_HIDESYM 1
12509 +#endif
12510 #include <linux/kallsyms.h>
12511 #include <linux/kprobes.h>
12512 #include <linux/uaccess.h>
12513 @@ -28,7 +31,7 @@ static int die_counter;
12514
12515 void printk_address(unsigned long address, int reliable)
12516 {
12517 - printk(" [<%p>] %s%pS\n", (void *) address,
12518 + printk(" [<%p>] %s%pA\n", (void *) address,
12519 reliable ? "" : "? ", (void *) address);
12520 }
12521
12522 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12523 static void
12524 print_ftrace_graph_addr(unsigned long addr, void *data,
12525 const struct stacktrace_ops *ops,
12526 - struct thread_info *tinfo, int *graph)
12527 + struct task_struct *task, int *graph)
12528 {
12529 - struct task_struct *task = tinfo->task;
12530 unsigned long ret_addr;
12531 int index = task->curr_ret_stack;
12532
12533 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12534 static inline void
12535 print_ftrace_graph_addr(unsigned long addr, void *data,
12536 const struct stacktrace_ops *ops,
12537 - struct thread_info *tinfo, int *graph)
12538 + struct task_struct *task, int *graph)
12539 { }
12540 #endif
12541
12542 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12543 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12544 */
12545
12546 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12547 - void *p, unsigned int size, void *end)
12548 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12549 {
12550 - void *t = tinfo;
12551 if (end) {
12552 if (p < end && p >= (end-THREAD_SIZE))
12553 return 1;
12554 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12555 }
12556
12557 unsigned long
12558 -print_context_stack(struct thread_info *tinfo,
12559 +print_context_stack(struct task_struct *task, void *stack_start,
12560 unsigned long *stack, unsigned long bp,
12561 const struct stacktrace_ops *ops, void *data,
12562 unsigned long *end, int *graph)
12563 {
12564 struct stack_frame *frame = (struct stack_frame *)bp;
12565
12566 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12567 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12568 unsigned long addr;
12569
12570 addr = *stack;
12571 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12572 } else {
12573 ops->address(data, addr, 0);
12574 }
12575 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12576 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12577 }
12578 stack++;
12579 }
12580 @@ -180,7 +180,7 @@ void dump_stack(void)
12581 #endif
12582
12583 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12584 - current->pid, current->comm, print_tainted(),
12585 + task_pid_nr(current), current->comm, print_tainted(),
12586 init_utsname()->release,
12587 (int)strcspn(init_utsname()->version, " "),
12588 init_utsname()->version);
12589 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12590 return flags;
12591 }
12592
12593 +extern void gr_handle_kernel_exploit(void);
12594 +
12595 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12596 {
12597 if (regs && kexec_should_crash(current))
12598 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12599 panic("Fatal exception in interrupt");
12600 if (panic_on_oops)
12601 panic("Fatal exception");
12602 - do_exit(signr);
12603 +
12604 + gr_handle_kernel_exploit();
12605 +
12606 + do_group_exit(signr);
12607 }
12608
12609 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12610 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12611 unsigned long flags = oops_begin();
12612 int sig = SIGSEGV;
12613
12614 - if (!user_mode_vm(regs))
12615 + if (!user_mode(regs))
12616 report_bug(regs->ip, regs);
12617
12618 if (__die(str, regs, err))
12619 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.h linux-2.6.32.42/arch/x86/kernel/dumpstack.h
12620 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12621 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12622 @@ -15,7 +15,7 @@
12623 #endif
12624
12625 extern unsigned long
12626 -print_context_stack(struct thread_info *tinfo,
12627 +print_context_stack(struct task_struct *task, void *stack_start,
12628 unsigned long *stack, unsigned long bp,
12629 const struct stacktrace_ops *ops, void *data,
12630 unsigned long *end, int *graph);
12631 diff -urNp linux-2.6.32.42/arch/x86/kernel/e820.c linux-2.6.32.42/arch/x86/kernel/e820.c
12632 --- linux-2.6.32.42/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12633 +++ linux-2.6.32.42/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12634 @@ -733,7 +733,7 @@ struct early_res {
12635 };
12636 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12637 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12638 - {}
12639 + { 0, 0, {0}, 0 }
12640 };
12641
12642 static int __init find_overlapped_early(u64 start, u64 end)
12643 diff -urNp linux-2.6.32.42/arch/x86/kernel/early_printk.c linux-2.6.32.42/arch/x86/kernel/early_printk.c
12644 --- linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12645 +++ linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12646 @@ -7,6 +7,7 @@
12647 #include <linux/pci_regs.h>
12648 #include <linux/pci_ids.h>
12649 #include <linux/errno.h>
12650 +#include <linux/sched.h>
12651 #include <asm/io.h>
12652 #include <asm/processor.h>
12653 #include <asm/fcntl.h>
12654 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12655 int n;
12656 va_list ap;
12657
12658 + pax_track_stack();
12659 +
12660 va_start(ap, fmt);
12661 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12662 early_console->write(early_console, buf, n);
12663 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_32.c linux-2.6.32.42/arch/x86/kernel/efi_32.c
12664 --- linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12665 +++ linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12666 @@ -38,70 +38,38 @@
12667 */
12668
12669 static unsigned long efi_rt_eflags;
12670 -static pgd_t efi_bak_pg_dir_pointer[2];
12671 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12672
12673 -void efi_call_phys_prelog(void)
12674 +void __init efi_call_phys_prelog(void)
12675 {
12676 - unsigned long cr4;
12677 - unsigned long temp;
12678 struct desc_ptr gdt_descr;
12679
12680 local_irq_save(efi_rt_eflags);
12681
12682 - /*
12683 - * If I don't have PAE, I should just duplicate two entries in page
12684 - * directory. If I have PAE, I just need to duplicate one entry in
12685 - * page directory.
12686 - */
12687 - cr4 = read_cr4_safe();
12688
12689 - if (cr4 & X86_CR4_PAE) {
12690 - efi_bak_pg_dir_pointer[0].pgd =
12691 - swapper_pg_dir[pgd_index(0)].pgd;
12692 - swapper_pg_dir[0].pgd =
12693 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12694 - } else {
12695 - efi_bak_pg_dir_pointer[0].pgd =
12696 - swapper_pg_dir[pgd_index(0)].pgd;
12697 - efi_bak_pg_dir_pointer[1].pgd =
12698 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12699 - swapper_pg_dir[pgd_index(0)].pgd =
12700 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12701 - temp = PAGE_OFFSET + 0x400000;
12702 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12703 - swapper_pg_dir[pgd_index(temp)].pgd;
12704 - }
12705 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12706 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12707 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12708
12709 /*
12710 * After the lock is released, the original page table is restored.
12711 */
12712 __flush_tlb_all();
12713
12714 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12715 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12716 gdt_descr.size = GDT_SIZE - 1;
12717 load_gdt(&gdt_descr);
12718 }
12719
12720 -void efi_call_phys_epilog(void)
12721 +void __init efi_call_phys_epilog(void)
12722 {
12723 - unsigned long cr4;
12724 struct desc_ptr gdt_descr;
12725
12726 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12727 + gdt_descr.address = get_cpu_gdt_table(0);
12728 gdt_descr.size = GDT_SIZE - 1;
12729 load_gdt(&gdt_descr);
12730
12731 - cr4 = read_cr4_safe();
12732 -
12733 - if (cr4 & X86_CR4_PAE) {
12734 - swapper_pg_dir[pgd_index(0)].pgd =
12735 - efi_bak_pg_dir_pointer[0].pgd;
12736 - } else {
12737 - swapper_pg_dir[pgd_index(0)].pgd =
12738 - efi_bak_pg_dir_pointer[0].pgd;
12739 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12740 - efi_bak_pg_dir_pointer[1].pgd;
12741 - }
12742 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12743
12744 /*
12745 * After the lock is released, the original page table is restored.
12746 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S
12747 --- linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12748 +++ linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12749 @@ -6,6 +6,7 @@
12750 */
12751
12752 #include <linux/linkage.h>
12753 +#include <linux/init.h>
12754 #include <asm/page_types.h>
12755
12756 /*
12757 @@ -20,7 +21,7 @@
12758 * service functions will comply with gcc calling convention, too.
12759 */
12760
12761 -.text
12762 +__INIT
12763 ENTRY(efi_call_phys)
12764 /*
12765 * 0. The function can only be called in Linux kernel. So CS has been
12766 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12767 * The mapping of lower virtual memory has been created in prelog and
12768 * epilog.
12769 */
12770 - movl $1f, %edx
12771 - subl $__PAGE_OFFSET, %edx
12772 - jmp *%edx
12773 + jmp 1f-__PAGE_OFFSET
12774 1:
12775
12776 /*
12777 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12778 * parameter 2, ..., param n. To make things easy, we save the return
12779 * address of efi_call_phys in a global variable.
12780 */
12781 - popl %edx
12782 - movl %edx, saved_return_addr
12783 - /* get the function pointer into ECX*/
12784 - popl %ecx
12785 - movl %ecx, efi_rt_function_ptr
12786 - movl $2f, %edx
12787 - subl $__PAGE_OFFSET, %edx
12788 - pushl %edx
12789 + popl (saved_return_addr)
12790 + popl (efi_rt_function_ptr)
12791
12792 /*
12793 * 3. Clear PG bit in %CR0.
12794 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12795 /*
12796 * 5. Call the physical function.
12797 */
12798 - jmp *%ecx
12799 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12800
12801 -2:
12802 /*
12803 * 6. After EFI runtime service returns, control will return to
12804 * following instruction. We'd better readjust stack pointer first.
12805 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12806 movl %cr0, %edx
12807 orl $0x80000000, %edx
12808 movl %edx, %cr0
12809 - jmp 1f
12810 -1:
12811 +
12812 /*
12813 * 8. Now restore the virtual mode from flat mode by
12814 * adding EIP with PAGE_OFFSET.
12815 */
12816 - movl $1f, %edx
12817 - jmp *%edx
12818 + jmp 1f+__PAGE_OFFSET
12819 1:
12820
12821 /*
12822 * 9. Balance the stack. And because EAX contain the return value,
12823 * we'd better not clobber it.
12824 */
12825 - leal efi_rt_function_ptr, %edx
12826 - movl (%edx), %ecx
12827 - pushl %ecx
12828 + pushl (efi_rt_function_ptr)
12829
12830 /*
12831 - * 10. Push the saved return address onto the stack and return.
12832 + * 10. Return to the saved return address.
12833 */
12834 - leal saved_return_addr, %edx
12835 - movl (%edx), %ecx
12836 - pushl %ecx
12837 - ret
12838 + jmpl *(saved_return_addr)
12839 ENDPROC(efi_call_phys)
12840 .previous
12841
12842 -.data
12843 +__INITDATA
12844 saved_return_addr:
12845 .long 0
12846 efi_rt_function_ptr:
12847 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_32.S linux-2.6.32.42/arch/x86/kernel/entry_32.S
12848 --- linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12849 +++ linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12850 @@ -185,13 +185,146 @@
12851 /*CFI_REL_OFFSET gs, PT_GS*/
12852 .endm
12853 .macro SET_KERNEL_GS reg
12854 +
12855 +#ifdef CONFIG_CC_STACKPROTECTOR
12856 movl $(__KERNEL_STACK_CANARY), \reg
12857 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12858 + movl $(__USER_DS), \reg
12859 +#else
12860 + xorl \reg, \reg
12861 +#endif
12862 +
12863 movl \reg, %gs
12864 .endm
12865
12866 #endif /* CONFIG_X86_32_LAZY_GS */
12867
12868 -.macro SAVE_ALL
12869 +.macro pax_enter_kernel
12870 +#ifdef CONFIG_PAX_KERNEXEC
12871 + call pax_enter_kernel
12872 +#endif
12873 +.endm
12874 +
12875 +.macro pax_exit_kernel
12876 +#ifdef CONFIG_PAX_KERNEXEC
12877 + call pax_exit_kernel
12878 +#endif
12879 +.endm
12880 +
12881 +#ifdef CONFIG_PAX_KERNEXEC
12882 +ENTRY(pax_enter_kernel)
12883 +#ifdef CONFIG_PARAVIRT
12884 + pushl %eax
12885 + pushl %ecx
12886 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12887 + mov %eax, %esi
12888 +#else
12889 + mov %cr0, %esi
12890 +#endif
12891 + bts $16, %esi
12892 + jnc 1f
12893 + mov %cs, %esi
12894 + cmp $__KERNEL_CS, %esi
12895 + jz 3f
12896 + ljmp $__KERNEL_CS, $3f
12897 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12898 +2:
12899 +#ifdef CONFIG_PARAVIRT
12900 + mov %esi, %eax
12901 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12902 +#else
12903 + mov %esi, %cr0
12904 +#endif
12905 +3:
12906 +#ifdef CONFIG_PARAVIRT
12907 + popl %ecx
12908 + popl %eax
12909 +#endif
12910 + ret
12911 +ENDPROC(pax_enter_kernel)
12912 +
12913 +ENTRY(pax_exit_kernel)
12914 +#ifdef CONFIG_PARAVIRT
12915 + pushl %eax
12916 + pushl %ecx
12917 +#endif
12918 + mov %cs, %esi
12919 + cmp $__KERNEXEC_KERNEL_CS, %esi
12920 + jnz 2f
12921 +#ifdef CONFIG_PARAVIRT
12922 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12923 + mov %eax, %esi
12924 +#else
12925 + mov %cr0, %esi
12926 +#endif
12927 + btr $16, %esi
12928 + ljmp $__KERNEL_CS, $1f
12929 +1:
12930 +#ifdef CONFIG_PARAVIRT
12931 + mov %esi, %eax
12932 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12933 +#else
12934 + mov %esi, %cr0
12935 +#endif
12936 +2:
12937 +#ifdef CONFIG_PARAVIRT
12938 + popl %ecx
12939 + popl %eax
12940 +#endif
12941 + ret
12942 +ENDPROC(pax_exit_kernel)
12943 +#endif
12944 +
12945 +.macro pax_erase_kstack
12946 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12947 + call pax_erase_kstack
12948 +#endif
12949 +.endm
12950 +
12951 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12952 +/*
12953 + * ebp: thread_info
12954 + * ecx, edx: can be clobbered
12955 + */
12956 +ENTRY(pax_erase_kstack)
12957 + pushl %edi
12958 + pushl %eax
12959 +
12960 + mov TI_lowest_stack(%ebp), %edi
12961 + mov $-0xBEEF, %eax
12962 + std
12963 +
12964 +1: mov %edi, %ecx
12965 + and $THREAD_SIZE_asm - 1, %ecx
12966 + shr $2, %ecx
12967 + repne scasl
12968 + jecxz 2f
12969 +
12970 + cmp $2*16, %ecx
12971 + jc 2f
12972 +
12973 + mov $2*16, %ecx
12974 + repe scasl
12975 + jecxz 2f
12976 + jne 1b
12977 +
12978 +2: cld
12979 + mov %esp, %ecx
12980 + sub %edi, %ecx
12981 + shr $2, %ecx
12982 + rep stosl
12983 +
12984 + mov TI_task_thread_sp0(%ebp), %edi
12985 + sub $128, %edi
12986 + mov %edi, TI_lowest_stack(%ebp)
12987 +
12988 + popl %eax
12989 + popl %edi
12990 + ret
12991 +ENDPROC(pax_erase_kstack)
12992 +#endif
12993 +
12994 +.macro __SAVE_ALL _DS
12995 cld
12996 PUSH_GS
12997 pushl %fs
12998 @@ -224,7 +357,7 @@
12999 pushl %ebx
13000 CFI_ADJUST_CFA_OFFSET 4
13001 CFI_REL_OFFSET ebx, 0
13002 - movl $(__USER_DS), %edx
13003 + movl $\_DS, %edx
13004 movl %edx, %ds
13005 movl %edx, %es
13006 movl $(__KERNEL_PERCPU), %edx
13007 @@ -232,6 +365,15 @@
13008 SET_KERNEL_GS %edx
13009 .endm
13010
13011 +.macro SAVE_ALL
13012 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13013 + __SAVE_ALL __KERNEL_DS
13014 + pax_enter_kernel
13015 +#else
13016 + __SAVE_ALL __USER_DS
13017 +#endif
13018 +.endm
13019 +
13020 .macro RESTORE_INT_REGS
13021 popl %ebx
13022 CFI_ADJUST_CFA_OFFSET -4
13023 @@ -352,7 +494,15 @@ check_userspace:
13024 movb PT_CS(%esp), %al
13025 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13026 cmpl $USER_RPL, %eax
13027 +
13028 +#ifdef CONFIG_PAX_KERNEXEC
13029 + jae resume_userspace
13030 +
13031 + PAX_EXIT_KERNEL
13032 + jmp resume_kernel
13033 +#else
13034 jb resume_kernel # not returning to v8086 or userspace
13035 +#endif
13036
13037 ENTRY(resume_userspace)
13038 LOCKDEP_SYS_EXIT
13039 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13040 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13041 # int/exception return?
13042 jne work_pending
13043 - jmp restore_all
13044 + jmp restore_all_pax
13045 END(ret_from_exception)
13046
13047 #ifdef CONFIG_PREEMPT
13048 @@ -414,25 +564,36 @@ sysenter_past_esp:
13049 /*CFI_REL_OFFSET cs, 0*/
13050 /*
13051 * Push current_thread_info()->sysenter_return to the stack.
13052 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13053 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13054 */
13055 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13056 + pushl $0
13057 CFI_ADJUST_CFA_OFFSET 4
13058 CFI_REL_OFFSET eip, 0
13059
13060 pushl %eax
13061 CFI_ADJUST_CFA_OFFSET 4
13062 SAVE_ALL
13063 + GET_THREAD_INFO(%ebp)
13064 + movl TI_sysenter_return(%ebp),%ebp
13065 + movl %ebp,PT_EIP(%esp)
13066 ENABLE_INTERRUPTS(CLBR_NONE)
13067
13068 /*
13069 * Load the potential sixth argument from user stack.
13070 * Careful about security.
13071 */
13072 + movl PT_OLDESP(%esp),%ebp
13073 +
13074 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13075 + mov PT_OLDSS(%esp),%ds
13076 +1: movl %ds:(%ebp),%ebp
13077 + push %ss
13078 + pop %ds
13079 +#else
13080 cmpl $__PAGE_OFFSET-3,%ebp
13081 jae syscall_fault
13082 1: movl (%ebp),%ebp
13083 +#endif
13084 +
13085 movl %ebp,PT_EBP(%esp)
13086 .section __ex_table,"a"
13087 .align 4
13088 @@ -455,12 +616,23 @@ sysenter_do_call:
13089 testl $_TIF_ALLWORK_MASK, %ecx
13090 jne sysexit_audit
13091 sysenter_exit:
13092 +
13093 +#ifdef CONFIG_PAX_RANDKSTACK
13094 + pushl_cfi %eax
13095 + call pax_randomize_kstack
13096 + popl_cfi %eax
13097 +#endif
13098 +
13099 + pax_erase_kstack
13100 +
13101 /* if something modifies registers it must also disable sysexit */
13102 movl PT_EIP(%esp), %edx
13103 movl PT_OLDESP(%esp), %ecx
13104 xorl %ebp,%ebp
13105 TRACE_IRQS_ON
13106 1: mov PT_FS(%esp), %fs
13107 +2: mov PT_DS(%esp), %ds
13108 +3: mov PT_ES(%esp), %es
13109 PTGS_TO_GS
13110 ENABLE_INTERRUPTS_SYSEXIT
13111
13112 @@ -477,6 +649,9 @@ sysenter_audit:
13113 movl %eax,%edx /* 2nd arg: syscall number */
13114 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13115 call audit_syscall_entry
13116 +
13117 + pax_erase_kstack
13118 +
13119 pushl %ebx
13120 CFI_ADJUST_CFA_OFFSET 4
13121 movl PT_EAX(%esp),%eax /* reload syscall number */
13122 @@ -504,11 +679,17 @@ sysexit_audit:
13123
13124 CFI_ENDPROC
13125 .pushsection .fixup,"ax"
13126 -2: movl $0,PT_FS(%esp)
13127 +4: movl $0,PT_FS(%esp)
13128 + jmp 1b
13129 +5: movl $0,PT_DS(%esp)
13130 + jmp 1b
13131 +6: movl $0,PT_ES(%esp)
13132 jmp 1b
13133 .section __ex_table,"a"
13134 .align 4
13135 - .long 1b,2b
13136 + .long 1b,4b
13137 + .long 2b,5b
13138 + .long 3b,6b
13139 .popsection
13140 PTGS_TO_GS_EX
13141 ENDPROC(ia32_sysenter_target)
13142 @@ -538,6 +719,14 @@ syscall_exit:
13143 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13144 jne syscall_exit_work
13145
13146 +restore_all_pax:
13147 +
13148 +#ifdef CONFIG_PAX_RANDKSTACK
13149 + call pax_randomize_kstack
13150 +#endif
13151 +
13152 + pax_erase_kstack
13153 +
13154 restore_all:
13155 TRACE_IRQS_IRET
13156 restore_all_notrace:
13157 @@ -602,7 +791,13 @@ ldt_ss:
13158 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13159 mov %dx, %ax /* eax: new kernel esp */
13160 sub %eax, %edx /* offset (low word is 0) */
13161 - PER_CPU(gdt_page, %ebx)
13162 +#ifdef CONFIG_SMP
13163 + movl PER_CPU_VAR(cpu_number), %ebx
13164 + shll $PAGE_SHIFT_asm, %ebx
13165 + addl $cpu_gdt_table, %ebx
13166 +#else
13167 + movl $cpu_gdt_table, %ebx
13168 +#endif
13169 shr $16, %edx
13170 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13171 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13172 @@ -636,31 +831,25 @@ work_resched:
13173 movl TI_flags(%ebp), %ecx
13174 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13175 # than syscall tracing?
13176 - jz restore_all
13177 + jz restore_all_pax
13178 testb $_TIF_NEED_RESCHED, %cl
13179 jnz work_resched
13180
13181 work_notifysig: # deal with pending signals and
13182 # notify-resume requests
13183 + movl %esp, %eax
13184 #ifdef CONFIG_VM86
13185 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13186 - movl %esp, %eax
13187 - jne work_notifysig_v86 # returning to kernel-space or
13188 + jz 1f # returning to kernel-space or
13189 # vm86-space
13190 - xorl %edx, %edx
13191 - call do_notify_resume
13192 - jmp resume_userspace_sig
13193
13194 - ALIGN
13195 -work_notifysig_v86:
13196 pushl %ecx # save ti_flags for do_notify_resume
13197 CFI_ADJUST_CFA_OFFSET 4
13198 call save_v86_state # %eax contains pt_regs pointer
13199 popl %ecx
13200 CFI_ADJUST_CFA_OFFSET -4
13201 movl %eax, %esp
13202 -#else
13203 - movl %esp, %eax
13204 +1:
13205 #endif
13206 xorl %edx, %edx
13207 call do_notify_resume
13208 @@ -673,6 +862,9 @@ syscall_trace_entry:
13209 movl $-ENOSYS,PT_EAX(%esp)
13210 movl %esp, %eax
13211 call syscall_trace_enter
13212 +
13213 + pax_erase_kstack
13214 +
13215 /* What it returned is what we'll actually use. */
13216 cmpl $(nr_syscalls), %eax
13217 jnae syscall_call
13218 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13219
13220 RING0_INT_FRAME # can't unwind into user space anyway
13221 syscall_fault:
13222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13223 + push %ss
13224 + pop %ds
13225 +#endif
13226 GET_THREAD_INFO(%ebp)
13227 movl $-EFAULT,PT_EAX(%esp)
13228 jmp resume_userspace
13229 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13230 PTREGSCALL(vm86)
13231 PTREGSCALL(vm86old)
13232
13233 + ALIGN;
13234 +ENTRY(kernel_execve)
13235 + push %ebp
13236 + sub $PT_OLDSS+4,%esp
13237 + push %edi
13238 + push %ecx
13239 + push %eax
13240 + lea 3*4(%esp),%edi
13241 + mov $PT_OLDSS/4+1,%ecx
13242 + xorl %eax,%eax
13243 + rep stosl
13244 + pop %eax
13245 + pop %ecx
13246 + pop %edi
13247 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13248 + mov %eax,PT_EBX(%esp)
13249 + mov %edx,PT_ECX(%esp)
13250 + mov %ecx,PT_EDX(%esp)
13251 + mov %esp,%eax
13252 + call sys_execve
13253 + GET_THREAD_INFO(%ebp)
13254 + test %eax,%eax
13255 + jz syscall_exit
13256 + add $PT_OLDSS+4,%esp
13257 + pop %ebp
13258 + ret
13259 +
13260 .macro FIXUP_ESPFIX_STACK
13261 /*
13262 * Switch back for ESPFIX stack to the normal zerobased stack
13263 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13264 * normal stack and adjusts ESP with the matching offset.
13265 */
13266 /* fixup the stack */
13267 - PER_CPU(gdt_page, %ebx)
13268 +#ifdef CONFIG_SMP
13269 + movl PER_CPU_VAR(cpu_number), %ebx
13270 + shll $PAGE_SHIFT_asm, %ebx
13271 + addl $cpu_gdt_table, %ebx
13272 +#else
13273 + movl $cpu_gdt_table, %ebx
13274 +#endif
13275 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13276 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13277 shl $16, %eax
13278 @@ -1198,7 +1427,6 @@ return_to_handler:
13279 ret
13280 #endif
13281
13282 -.section .rodata,"a"
13283 #include "syscall_table_32.S"
13284
13285 syscall_table_size=(.-sys_call_table)
13286 @@ -1255,9 +1483,12 @@ error_code:
13287 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13288 REG_TO_PTGS %ecx
13289 SET_KERNEL_GS %ecx
13290 - movl $(__USER_DS), %ecx
13291 + movl $(__KERNEL_DS), %ecx
13292 movl %ecx, %ds
13293 movl %ecx, %es
13294 +
13295 + pax_enter_kernel
13296 +
13297 TRACE_IRQS_OFF
13298 movl %esp,%eax # pt_regs pointer
13299 call *%edi
13300 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13301 xorl %edx,%edx # zero error code
13302 movl %esp,%eax # pt_regs pointer
13303 call do_nmi
13304 +
13305 + pax_exit_kernel
13306 +
13307 jmp restore_all_notrace
13308 CFI_ENDPROC
13309
13310 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13311 FIXUP_ESPFIX_STACK # %eax == %esp
13312 xorl %edx,%edx # zero error code
13313 call do_nmi
13314 +
13315 + pax_exit_kernel
13316 +
13317 RESTORE_REGS
13318 lss 12+4(%esp), %esp # back to espfix stack
13319 CFI_ADJUST_CFA_OFFSET -24
13320 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_64.S linux-2.6.32.42/arch/x86/kernel/entry_64.S
13321 --- linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13322 +++ linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13323 @@ -53,6 +53,7 @@
13324 #include <asm/paravirt.h>
13325 #include <asm/ftrace.h>
13326 #include <asm/percpu.h>
13327 +#include <asm/pgtable.h>
13328
13329 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13330 #include <linux/elf-em.h>
13331 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13332 ENDPROC(native_usergs_sysret64)
13333 #endif /* CONFIG_PARAVIRT */
13334
13335 + .macro ljmpq sel, off
13336 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13337 + .byte 0x48; ljmp *1234f(%rip)
13338 + .pushsection .rodata
13339 + .align 16
13340 + 1234: .quad \off; .word \sel
13341 + .popsection
13342 +#else
13343 + pushq $\sel
13344 + pushq $\off
13345 + lretq
13346 +#endif
13347 + .endm
13348 +
13349 + .macro pax_enter_kernel
13350 +#ifdef CONFIG_PAX_KERNEXEC
13351 + call pax_enter_kernel
13352 +#endif
13353 + .endm
13354 +
13355 + .macro pax_exit_kernel
13356 +#ifdef CONFIG_PAX_KERNEXEC
13357 + call pax_exit_kernel
13358 +#endif
13359 + .endm
13360 +
13361 +#ifdef CONFIG_PAX_KERNEXEC
13362 +ENTRY(pax_enter_kernel)
13363 + pushq %rdi
13364 +
13365 +#ifdef CONFIG_PARAVIRT
13366 + PV_SAVE_REGS(CLBR_RDI)
13367 +#endif
13368 +
13369 + GET_CR0_INTO_RDI
13370 + bts $16,%rdi
13371 + jnc 1f
13372 + mov %cs,%edi
13373 + cmp $__KERNEL_CS,%edi
13374 + jz 3f
13375 + ljmpq __KERNEL_CS,3f
13376 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13377 +2: SET_RDI_INTO_CR0
13378 +3:
13379 +
13380 +#ifdef CONFIG_PARAVIRT
13381 + PV_RESTORE_REGS(CLBR_RDI)
13382 +#endif
13383 +
13384 + popq %rdi
13385 + retq
13386 +ENDPROC(pax_enter_kernel)
13387 +
13388 +ENTRY(pax_exit_kernel)
13389 + pushq %rdi
13390 +
13391 +#ifdef CONFIG_PARAVIRT
13392 + PV_SAVE_REGS(CLBR_RDI)
13393 +#endif
13394 +
13395 + mov %cs,%rdi
13396 + cmp $__KERNEXEC_KERNEL_CS,%edi
13397 + jnz 2f
13398 + GET_CR0_INTO_RDI
13399 + btr $16,%rdi
13400 + ljmpq __KERNEL_CS,1f
13401 +1: SET_RDI_INTO_CR0
13402 +2:
13403 +
13404 +#ifdef CONFIG_PARAVIRT
13405 + PV_RESTORE_REGS(CLBR_RDI);
13406 +#endif
13407 +
13408 + popq %rdi
13409 + retq
13410 +ENDPROC(pax_exit_kernel)
13411 +#endif
13412 +
13413 + .macro pax_enter_kernel_user
13414 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13415 + call pax_enter_kernel_user
13416 +#endif
13417 + .endm
13418 +
13419 + .macro pax_exit_kernel_user
13420 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13421 + call pax_exit_kernel_user
13422 +#endif
13423 +#ifdef CONFIG_PAX_RANDKSTACK
13424 + push %rax
13425 + call pax_randomize_kstack
13426 + pop %rax
13427 +#endif
13428 + pax_erase_kstack
13429 + .endm
13430 +
13431 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13432 +ENTRY(pax_enter_kernel_user)
13433 + pushq %rdi
13434 + pushq %rbx
13435 +
13436 +#ifdef CONFIG_PARAVIRT
13437 + PV_SAVE_REGS(CLBR_RDI)
13438 +#endif
13439 +
13440 + GET_CR3_INTO_RDI
13441 + mov %rdi,%rbx
13442 + add $__START_KERNEL_map,%rbx
13443 + sub phys_base(%rip),%rbx
13444 +
13445 +#ifdef CONFIG_PARAVIRT
13446 + pushq %rdi
13447 + cmpl $0, pv_info+PARAVIRT_enabled
13448 + jz 1f
13449 + i = 0
13450 + .rept USER_PGD_PTRS
13451 + mov i*8(%rbx),%rsi
13452 + mov $0,%sil
13453 + lea i*8(%rbx),%rdi
13454 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13455 + i = i + 1
13456 + .endr
13457 + jmp 2f
13458 +1:
13459 +#endif
13460 +
13461 + i = 0
13462 + .rept USER_PGD_PTRS
13463 + movb $0,i*8(%rbx)
13464 + i = i + 1
13465 + .endr
13466 +
13467 +#ifdef CONFIG_PARAVIRT
13468 +2: popq %rdi
13469 +#endif
13470 + SET_RDI_INTO_CR3
13471 +
13472 +#ifdef CONFIG_PAX_KERNEXEC
13473 + GET_CR0_INTO_RDI
13474 + bts $16,%rdi
13475 + SET_RDI_INTO_CR0
13476 +#endif
13477 +
13478 +#ifdef CONFIG_PARAVIRT
13479 + PV_RESTORE_REGS(CLBR_RDI)
13480 +#endif
13481 +
13482 + popq %rbx
13483 + popq %rdi
13484 + retq
13485 +ENDPROC(pax_enter_kernel_user)
13486 +
13487 +ENTRY(pax_exit_kernel_user)
13488 + push %rdi
13489 +
13490 +#ifdef CONFIG_PARAVIRT
13491 + pushq %rbx
13492 + PV_SAVE_REGS(CLBR_RDI)
13493 +#endif
13494 +
13495 +#ifdef CONFIG_PAX_KERNEXEC
13496 + GET_CR0_INTO_RDI
13497 + btr $16,%rdi
13498 + SET_RDI_INTO_CR0
13499 +#endif
13500 +
13501 + GET_CR3_INTO_RDI
13502 + add $__START_KERNEL_map,%rdi
13503 + sub phys_base(%rip),%rdi
13504 +
13505 +#ifdef CONFIG_PARAVIRT
13506 + cmpl $0, pv_info+PARAVIRT_enabled
13507 + jz 1f
13508 + mov %rdi,%rbx
13509 + i = 0
13510 + .rept USER_PGD_PTRS
13511 + mov i*8(%rbx),%rsi
13512 + mov $0x67,%sil
13513 + lea i*8(%rbx),%rdi
13514 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13515 + i = i + 1
13516 + .endr
13517 + jmp 2f
13518 +1:
13519 +#endif
13520 +
13521 + i = 0
13522 + .rept USER_PGD_PTRS
13523 + movb $0x67,i*8(%rdi)
13524 + i = i + 1
13525 + .endr
13526 +
13527 +#ifdef CONFIG_PARAVIRT
13528 +2: PV_RESTORE_REGS(CLBR_RDI)
13529 + popq %rbx
13530 +#endif
13531 +
13532 + popq %rdi
13533 + retq
13534 +ENDPROC(pax_exit_kernel_user)
13535 +#endif
13536 +
13537 +.macro pax_erase_kstack
13538 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13539 + call pax_erase_kstack
13540 +#endif
13541 +.endm
13542 +
13543 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13544 +/*
13545 + * r10: thread_info
13546 + * rcx, rdx: can be clobbered
13547 + */
13548 +ENTRY(pax_erase_kstack)
13549 + pushq %rdi
13550 + pushq %rax
13551 +
13552 + GET_THREAD_INFO(%r10)
13553 + mov TI_lowest_stack(%r10), %rdi
13554 + mov $-0xBEEF, %rax
13555 + std
13556 +
13557 +1: mov %edi, %ecx
13558 + and $THREAD_SIZE_asm - 1, %ecx
13559 + shr $3, %ecx
13560 + repne scasq
13561 + jecxz 2f
13562 +
13563 + cmp $2*8, %ecx
13564 + jc 2f
13565 +
13566 + mov $2*8, %ecx
13567 + repe scasq
13568 + jecxz 2f
13569 + jne 1b
13570 +
13571 +2: cld
13572 + mov %esp, %ecx
13573 + sub %edi, %ecx
13574 + shr $3, %ecx
13575 + rep stosq
13576 +
13577 + mov TI_task_thread_sp0(%r10), %rdi
13578 + sub $256, %rdi
13579 + mov %rdi, TI_lowest_stack(%r10)
13580 +
13581 + popq %rax
13582 + popq %rdi
13583 + ret
13584 +ENDPROC(pax_erase_kstack)
13585 +#endif
13586
13587 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13588 #ifdef CONFIG_TRACE_IRQFLAGS
13589 @@ -317,7 +569,7 @@ ENTRY(save_args)
13590 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13591 movq_cfi rbp, 8 /* push %rbp */
13592 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13593 - testl $3, CS(%rdi)
13594 + testb $3, CS(%rdi)
13595 je 1f
13596 SWAPGS
13597 /*
13598 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13599
13600 RESTORE_REST
13601
13602 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13603 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13604 je int_ret_from_sys_call
13605
13606 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13607 @@ -455,7 +707,7 @@ END(ret_from_fork)
13608 ENTRY(system_call)
13609 CFI_STARTPROC simple
13610 CFI_SIGNAL_FRAME
13611 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13612 + CFI_DEF_CFA rsp,0
13613 CFI_REGISTER rip,rcx
13614 /*CFI_REGISTER rflags,r11*/
13615 SWAPGS_UNSAFE_STACK
13616 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13617
13618 movq %rsp,PER_CPU_VAR(old_rsp)
13619 movq PER_CPU_VAR(kernel_stack),%rsp
13620 + pax_enter_kernel_user
13621 /*
13622 * No need to follow this irqs off/on section - it's straight
13623 * and short:
13624 */
13625 ENABLE_INTERRUPTS(CLBR_NONE)
13626 - SAVE_ARGS 8,1
13627 + SAVE_ARGS 8*6,1
13628 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13629 movq %rcx,RIP-ARGOFFSET(%rsp)
13630 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13631 @@ -502,6 +755,7 @@ sysret_check:
13632 andl %edi,%edx
13633 jnz sysret_careful
13634 CFI_REMEMBER_STATE
13635 + pax_exit_kernel_user
13636 /*
13637 * sysretq will re-enable interrupts:
13638 */
13639 @@ -562,6 +816,9 @@ auditsys:
13640 movq %rax,%rsi /* 2nd arg: syscall number */
13641 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13642 call audit_syscall_entry
13643 +
13644 + pax_erase_kstack
13645 +
13646 LOAD_ARGS 0 /* reload call-clobbered registers */
13647 jmp system_call_fastpath
13648
13649 @@ -592,6 +849,9 @@ tracesys:
13650 FIXUP_TOP_OF_STACK %rdi
13651 movq %rsp,%rdi
13652 call syscall_trace_enter
13653 +
13654 + pax_erase_kstack
13655 +
13656 /*
13657 * Reload arg registers from stack in case ptrace changed them.
13658 * We don't reload %rax because syscall_trace_enter() returned
13659 @@ -613,7 +873,7 @@ tracesys:
13660 GLOBAL(int_ret_from_sys_call)
13661 DISABLE_INTERRUPTS(CLBR_NONE)
13662 TRACE_IRQS_OFF
13663 - testl $3,CS-ARGOFFSET(%rsp)
13664 + testb $3,CS-ARGOFFSET(%rsp)
13665 je retint_restore_args
13666 movl $_TIF_ALLWORK_MASK,%edi
13667 /* edi: mask to check */
13668 @@ -800,6 +1060,16 @@ END(interrupt)
13669 CFI_ADJUST_CFA_OFFSET 10*8
13670 call save_args
13671 PARTIAL_FRAME 0
13672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13673 + testb $3, CS(%rdi)
13674 + jnz 1f
13675 + pax_enter_kernel
13676 + jmp 2f
13677 +1: pax_enter_kernel_user
13678 +2:
13679 +#else
13680 + pax_enter_kernel
13681 +#endif
13682 call \func
13683 .endm
13684
13685 @@ -822,7 +1092,7 @@ ret_from_intr:
13686 CFI_ADJUST_CFA_OFFSET -8
13687 exit_intr:
13688 GET_THREAD_INFO(%rcx)
13689 - testl $3,CS-ARGOFFSET(%rsp)
13690 + testb $3,CS-ARGOFFSET(%rsp)
13691 je retint_kernel
13692
13693 /* Interrupt came from user space */
13694 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13695 * The iretq could re-enable interrupts:
13696 */
13697 DISABLE_INTERRUPTS(CLBR_ANY)
13698 + pax_exit_kernel_user
13699 TRACE_IRQS_IRETQ
13700 SWAPGS
13701 jmp restore_args
13702
13703 retint_restore_args: /* return to kernel space */
13704 DISABLE_INTERRUPTS(CLBR_ANY)
13705 + pax_exit_kernel
13706 /*
13707 * The iretq could re-enable interrupts:
13708 */
13709 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13710 CFI_ADJUST_CFA_OFFSET 15*8
13711 call error_entry
13712 DEFAULT_FRAME 0
13713 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13714 + testb $3, CS(%rsp)
13715 + jnz 1f
13716 + pax_enter_kernel
13717 + jmp 2f
13718 +1: pax_enter_kernel_user
13719 +2:
13720 +#else
13721 + pax_enter_kernel
13722 +#endif
13723 movq %rsp,%rdi /* pt_regs pointer */
13724 xorl %esi,%esi /* no error code */
13725 call \do_sym
13726 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13727 subq $15*8, %rsp
13728 call save_paranoid
13729 TRACE_IRQS_OFF
13730 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13731 + testb $3, CS(%rsp)
13732 + jnz 1f
13733 + pax_enter_kernel
13734 + jmp 2f
13735 +1: pax_enter_kernel_user
13736 +2:
13737 +#else
13738 + pax_enter_kernel
13739 +#endif
13740 movq %rsp,%rdi /* pt_regs pointer */
13741 xorl %esi,%esi /* no error code */
13742 call \do_sym
13743 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13744 subq $15*8, %rsp
13745 call save_paranoid
13746 TRACE_IRQS_OFF
13747 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13748 + testb $3, CS(%rsp)
13749 + jnz 1f
13750 + pax_enter_kernel
13751 + jmp 2f
13752 +1: pax_enter_kernel_user
13753 +2:
13754 +#else
13755 + pax_enter_kernel
13756 +#endif
13757 movq %rsp,%rdi /* pt_regs pointer */
13758 xorl %esi,%esi /* no error code */
13759 - PER_CPU(init_tss, %rbp)
13760 +#ifdef CONFIG_SMP
13761 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13762 + lea init_tss(%rbp), %rbp
13763 +#else
13764 + lea init_tss(%rip), %rbp
13765 +#endif
13766 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13767 call \do_sym
13768 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13769 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13770 CFI_ADJUST_CFA_OFFSET 15*8
13771 call error_entry
13772 DEFAULT_FRAME 0
13773 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13774 + testb $3, CS(%rsp)
13775 + jnz 1f
13776 + pax_enter_kernel
13777 + jmp 2f
13778 +1: pax_enter_kernel_user
13779 +2:
13780 +#else
13781 + pax_enter_kernel
13782 +#endif
13783 movq %rsp,%rdi /* pt_regs pointer */
13784 movq ORIG_RAX(%rsp),%rsi /* get error code */
13785 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13786 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13787 call save_paranoid
13788 DEFAULT_FRAME 0
13789 TRACE_IRQS_OFF
13790 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13791 + testb $3, CS(%rsp)
13792 + jnz 1f
13793 + pax_enter_kernel
13794 + jmp 2f
13795 +1: pax_enter_kernel_user
13796 +2:
13797 +#else
13798 + pax_enter_kernel
13799 +#endif
13800 movq %rsp,%rdi /* pt_regs pointer */
13801 movq ORIG_RAX(%rsp),%rsi /* get error code */
13802 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13803 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13804 TRACE_IRQS_OFF
13805 testl %ebx,%ebx /* swapgs needed? */
13806 jnz paranoid_restore
13807 - testl $3,CS(%rsp)
13808 + testb $3,CS(%rsp)
13809 jnz paranoid_userspace
13810 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13811 + pax_exit_kernel
13812 + TRACE_IRQS_IRETQ 0
13813 + SWAPGS_UNSAFE_STACK
13814 + RESTORE_ALL 8
13815 + jmp irq_return
13816 +#endif
13817 paranoid_swapgs:
13818 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13819 + pax_exit_kernel_user
13820 +#else
13821 + pax_exit_kernel
13822 +#endif
13823 TRACE_IRQS_IRETQ 0
13824 SWAPGS_UNSAFE_STACK
13825 RESTORE_ALL 8
13826 jmp irq_return
13827 paranoid_restore:
13828 + pax_exit_kernel
13829 TRACE_IRQS_IRETQ 0
13830 RESTORE_ALL 8
13831 jmp irq_return
13832 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13833 movq_cfi r14, R14+8
13834 movq_cfi r15, R15+8
13835 xorl %ebx,%ebx
13836 - testl $3,CS+8(%rsp)
13837 + testb $3,CS+8(%rsp)
13838 je error_kernelspace
13839 error_swapgs:
13840 SWAPGS
13841 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13842 CFI_ADJUST_CFA_OFFSET 15*8
13843 call save_paranoid
13844 DEFAULT_FRAME 0
13845 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13846 + testb $3, CS(%rsp)
13847 + jnz 1f
13848 + pax_enter_kernel
13849 + jmp 2f
13850 +1: pax_enter_kernel_user
13851 +2:
13852 +#else
13853 + pax_enter_kernel
13854 +#endif
13855 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13856 movq %rsp,%rdi
13857 movq $-1,%rsi
13858 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13859 DISABLE_INTERRUPTS(CLBR_NONE)
13860 testl %ebx,%ebx /* swapgs needed? */
13861 jnz nmi_restore
13862 - testl $3,CS(%rsp)
13863 + testb $3,CS(%rsp)
13864 jnz nmi_userspace
13865 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13866 + pax_exit_kernel
13867 + SWAPGS_UNSAFE_STACK
13868 + RESTORE_ALL 8
13869 + jmp irq_return
13870 +#endif
13871 nmi_swapgs:
13872 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13873 + pax_exit_kernel_user
13874 +#else
13875 + pax_exit_kernel
13876 +#endif
13877 SWAPGS_UNSAFE_STACK
13878 + RESTORE_ALL 8
13879 + jmp irq_return
13880 nmi_restore:
13881 + pax_exit_kernel
13882 RESTORE_ALL 8
13883 jmp irq_return
13884 nmi_userspace:
13885 diff -urNp linux-2.6.32.42/arch/x86/kernel/ftrace.c linux-2.6.32.42/arch/x86/kernel/ftrace.c
13886 --- linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13887 +++ linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13888 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13889 static void *mod_code_newcode; /* holds the text to write to the IP */
13890
13891 static unsigned nmi_wait_count;
13892 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13893 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13894
13895 int ftrace_arch_read_dyn_info(char *buf, int size)
13896 {
13897 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13898
13899 r = snprintf(buf, size, "%u %u",
13900 nmi_wait_count,
13901 - atomic_read(&nmi_update_count));
13902 + atomic_read_unchecked(&nmi_update_count));
13903 return r;
13904 }
13905
13906 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13907 {
13908 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13909 smp_rmb();
13910 + pax_open_kernel();
13911 ftrace_mod_code();
13912 - atomic_inc(&nmi_update_count);
13913 + pax_close_kernel();
13914 + atomic_inc_unchecked(&nmi_update_count);
13915 }
13916 /* Must have previous changes seen before executions */
13917 smp_mb();
13918 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13919
13920
13921
13922 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13923 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13924
13925 static unsigned char *ftrace_nop_replace(void)
13926 {
13927 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13928 {
13929 unsigned char replaced[MCOUNT_INSN_SIZE];
13930
13931 + ip = ktla_ktva(ip);
13932 +
13933 /*
13934 * Note: Due to modules and __init, code can
13935 * disappear and change, we need to protect against faulting
13936 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13937 unsigned char old[MCOUNT_INSN_SIZE], *new;
13938 int ret;
13939
13940 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13941 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13942 new = ftrace_call_replace(ip, (unsigned long)func);
13943 ret = ftrace_modify_code(ip, old, new);
13944
13945 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13946 switch (faulted) {
13947 case 0:
13948 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13949 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13950 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13951 break;
13952 case 1:
13953 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13954 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13955 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13956 break;
13957 case 2:
13958 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13959 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13960 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13961 break;
13962 }
13963
13964 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13965 {
13966 unsigned char code[MCOUNT_INSN_SIZE];
13967
13968 + ip = ktla_ktva(ip);
13969 +
13970 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13971 return -EFAULT;
13972
13973 diff -urNp linux-2.6.32.42/arch/x86/kernel/head32.c linux-2.6.32.42/arch/x86/kernel/head32.c
13974 --- linux-2.6.32.42/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13975 +++ linux-2.6.32.42/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13976 @@ -16,6 +16,7 @@
13977 #include <asm/apic.h>
13978 #include <asm/io_apic.h>
13979 #include <asm/bios_ebda.h>
13980 +#include <asm/boot.h>
13981
13982 static void __init i386_default_early_setup(void)
13983 {
13984 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13985 {
13986 reserve_trampoline_memory();
13987
13988 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13989 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13990
13991 #ifdef CONFIG_BLK_DEV_INITRD
13992 /* Reserve INITRD */
13993 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_32.S linux-2.6.32.42/arch/x86/kernel/head_32.S
13994 --- linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13995 +++ linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-07-01 19:09:03.000000000 -0400
13996 @@ -19,10 +19,17 @@
13997 #include <asm/setup.h>
13998 #include <asm/processor-flags.h>
13999 #include <asm/percpu.h>
14000 +#include <asm/msr-index.h>
14001
14002 /* Physical address */
14003 #define pa(X) ((X) - __PAGE_OFFSET)
14004
14005 +#ifdef CONFIG_PAX_KERNEXEC
14006 +#define ta(X) (X)
14007 +#else
14008 +#define ta(X) ((X) - __PAGE_OFFSET)
14009 +#endif
14010 +
14011 /*
14012 * References to members of the new_cpu_data structure.
14013 */
14014 @@ -52,11 +59,7 @@
14015 * and small than max_low_pfn, otherwise will waste some page table entries
14016 */
14017
14018 -#if PTRS_PER_PMD > 1
14019 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14020 -#else
14021 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14022 -#endif
14023 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14024
14025 /* Enough space to fit pagetables for the low memory linear map */
14026 MAPPING_BEYOND_END = \
14027 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14028 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14029
14030 /*
14031 + * Real beginning of normal "text" segment
14032 + */
14033 +ENTRY(stext)
14034 +ENTRY(_stext)
14035 +
14036 +/*
14037 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14038 * %esi points to the real-mode code as a 32-bit pointer.
14039 * CS and DS must be 4 GB flat segments, but we don't depend on
14040 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14041 * can.
14042 */
14043 __HEAD
14044 +
14045 +#ifdef CONFIG_PAX_KERNEXEC
14046 + jmp startup_32
14047 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14048 +.fill PAGE_SIZE-5,1,0xcc
14049 +#endif
14050 +
14051 ENTRY(startup_32)
14052 + movl pa(stack_start),%ecx
14053 +
14054 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14055 us to not reload segments */
14056 testb $(1<<6), BP_loadflags(%esi)
14057 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14058 movl %eax,%es
14059 movl %eax,%fs
14060 movl %eax,%gs
14061 + movl %eax,%ss
14062 2:
14063 + leal -__PAGE_OFFSET(%ecx),%esp
14064 +
14065 +#ifdef CONFIG_SMP
14066 + movl $pa(cpu_gdt_table),%edi
14067 + movl $__per_cpu_load,%eax
14068 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14069 + rorl $16,%eax
14070 + movb %al,__KERNEL_PERCPU + 4(%edi)
14071 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14072 + movl $__per_cpu_end - 1,%eax
14073 + subl $__per_cpu_start,%eax
14074 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14075 +#endif
14076 +
14077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14078 + movl $NR_CPUS,%ecx
14079 + movl $pa(cpu_gdt_table),%edi
14080 +1:
14081 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14082 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14083 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14084 + addl $PAGE_SIZE_asm,%edi
14085 + loop 1b
14086 +#endif
14087 +
14088 +#ifdef CONFIG_PAX_KERNEXEC
14089 + movl $pa(boot_gdt),%edi
14090 + movl $__LOAD_PHYSICAL_ADDR,%eax
14091 + movw %ax,__BOOT_CS + 2(%edi)
14092 + rorl $16,%eax
14093 + movb %al,__BOOT_CS + 4(%edi)
14094 + movb %ah,__BOOT_CS + 7(%edi)
14095 + rorl $16,%eax
14096 +
14097 + ljmp $(__BOOT_CS),$1f
14098 +1:
14099 +
14100 + movl $NR_CPUS,%ecx
14101 + movl $pa(cpu_gdt_table),%edi
14102 + addl $__PAGE_OFFSET,%eax
14103 +1:
14104 + movw %ax,__KERNEL_CS + 2(%edi)
14105 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14106 + rorl $16,%eax
14107 + movb %al,__KERNEL_CS + 4(%edi)
14108 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14109 + movb %ah,__KERNEL_CS + 7(%edi)
14110 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14111 + rorl $16,%eax
14112 + addl $PAGE_SIZE_asm,%edi
14113 + loop 1b
14114 +#endif
14115
14116 /*
14117 * Clear BSS first so that there are no surprises...
14118 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14119 cmpl $num_subarch_entries, %eax
14120 jae bad_subarch
14121
14122 - movl pa(subarch_entries)(,%eax,4), %eax
14123 - subl $__PAGE_OFFSET, %eax
14124 - jmp *%eax
14125 + jmp *pa(subarch_entries)(,%eax,4)
14126
14127 bad_subarch:
14128 WEAK(lguest_entry)
14129 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14130 __INITDATA
14131
14132 subarch_entries:
14133 - .long default_entry /* normal x86/PC */
14134 - .long lguest_entry /* lguest hypervisor */
14135 - .long xen_entry /* Xen hypervisor */
14136 - .long default_entry /* Moorestown MID */
14137 + .long ta(default_entry) /* normal x86/PC */
14138 + .long ta(lguest_entry) /* lguest hypervisor */
14139 + .long ta(xen_entry) /* Xen hypervisor */
14140 + .long ta(default_entry) /* Moorestown MID */
14141 num_subarch_entries = (. - subarch_entries) / 4
14142 .previous
14143 #endif /* CONFIG_PARAVIRT */
14144 @@ -218,8 +287,11 @@ default_entry:
14145 movl %eax, pa(max_pfn_mapped)
14146
14147 /* Do early initialization of the fixmap area */
14148 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14149 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14150 +#ifdef CONFIG_COMPAT_VDSO
14151 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14152 +#else
14153 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14154 +#endif
14155 #else /* Not PAE */
14156
14157 page_pde_offset = (__PAGE_OFFSET >> 20);
14158 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14159 movl %eax, pa(max_pfn_mapped)
14160
14161 /* Do early initialization of the fixmap area */
14162 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14163 - movl %eax,pa(swapper_pg_dir+0xffc)
14164 +#ifdef CONFIG_COMPAT_VDSO
14165 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14166 +#else
14167 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14168 +#endif
14169 #endif
14170 jmp 3f
14171 /*
14172 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14173 movl %eax,%es
14174 movl %eax,%fs
14175 movl %eax,%gs
14176 + movl pa(stack_start),%ecx
14177 + movl %eax,%ss
14178 + leal -__PAGE_OFFSET(%ecx),%esp
14179 #endif /* CONFIG_SMP */
14180 3:
14181
14182 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14183 orl %edx,%eax
14184 movl %eax,%cr4
14185
14186 +#ifdef CONFIG_X86_PAE
14187 btl $5, %eax # check if PAE is enabled
14188 jnc 6f
14189
14190 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14191 cpuid
14192 cmpl $0x80000000, %eax
14193 jbe 6f
14194 +
14195 + /* Clear bogus XD_DISABLE bits */
14196 + call verify_cpu
14197 +
14198 mov $0x80000001, %eax
14199 cpuid
14200 /* Execute Disable bit supported? */
14201 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14202 jnc 6f
14203
14204 /* Setup EFER (Extended Feature Enable Register) */
14205 - movl $0xc0000080, %ecx
14206 + movl $MSR_EFER, %ecx
14207 rdmsr
14208
14209 btsl $11, %eax
14210 /* Make changes effective */
14211 wrmsr
14212
14213 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14214 + movl $1,pa(nx_enabled)
14215 +#endif
14216 +
14217 6:
14218
14219 /*
14220 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14221 movl %eax,%cr0 /* ..and set paging (PG) bit */
14222 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14223 1:
14224 - /* Set up the stack pointer */
14225 - lss stack_start,%esp
14226 + /* Shift the stack pointer to a virtual address */
14227 + addl $__PAGE_OFFSET, %esp
14228
14229 /*
14230 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14231 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14232
14233 #ifdef CONFIG_SMP
14234 cmpb $0, ready
14235 - jz 1f /* Initial CPU cleans BSS */
14236 - jmp checkCPUtype
14237 -1:
14238 + jnz checkCPUtype
14239 #endif /* CONFIG_SMP */
14240
14241 /*
14242 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14243 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14244 movl %eax,%ss # after changing gdt.
14245
14246 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14247 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14248 movl %eax,%ds
14249 movl %eax,%es
14250
14251 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14252 */
14253 cmpb $0,ready
14254 jne 1f
14255 - movl $per_cpu__gdt_page,%eax
14256 + movl $cpu_gdt_table,%eax
14257 movl $per_cpu__stack_canary,%ecx
14258 +#ifdef CONFIG_SMP
14259 + addl $__per_cpu_load,%ecx
14260 +#endif
14261 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14262 shrl $16, %ecx
14263 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14264 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14265 1:
14266 -#endif
14267 movl $(__KERNEL_STACK_CANARY),%eax
14268 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14269 + movl $(__USER_DS),%eax
14270 +#else
14271 + xorl %eax,%eax
14272 +#endif
14273 movl %eax,%gs
14274
14275 xorl %eax,%eax # Clear LDT
14276 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14277
14278 cld # gcc2 wants the direction flag cleared at all times
14279 pushl $0 # fake return address for unwinder
14280 -#ifdef CONFIG_SMP
14281 - movb ready, %cl
14282 movb $1, ready
14283 - cmpb $0,%cl # the first CPU calls start_kernel
14284 - je 1f
14285 - movl (stack_start), %esp
14286 -1:
14287 -#endif /* CONFIG_SMP */
14288 jmp *(initial_code)
14289
14290 /*
14291 @@ -546,22 +631,22 @@ early_page_fault:
14292 jmp early_fault
14293
14294 early_fault:
14295 - cld
14296 #ifdef CONFIG_PRINTK
14297 + cmpl $1,%ss:early_recursion_flag
14298 + je hlt_loop
14299 + incl %ss:early_recursion_flag
14300 + cld
14301 pusha
14302 movl $(__KERNEL_DS),%eax
14303 movl %eax,%ds
14304 movl %eax,%es
14305 - cmpl $2,early_recursion_flag
14306 - je hlt_loop
14307 - incl early_recursion_flag
14308 movl %cr2,%eax
14309 pushl %eax
14310 pushl %edx /* trapno */
14311 pushl $fault_msg
14312 call printk
14313 +; call dump_stack
14314 #endif
14315 - call dump_stack
14316 hlt_loop:
14317 hlt
14318 jmp hlt_loop
14319 @@ -569,8 +654,11 @@ hlt_loop:
14320 /* This is the default interrupt "handler" :-) */
14321 ALIGN
14322 ignore_int:
14323 - cld
14324 #ifdef CONFIG_PRINTK
14325 + cmpl $2,%ss:early_recursion_flag
14326 + je hlt_loop
14327 + incl %ss:early_recursion_flag
14328 + cld
14329 pushl %eax
14330 pushl %ecx
14331 pushl %edx
14332 @@ -579,9 +667,6 @@ ignore_int:
14333 movl $(__KERNEL_DS),%eax
14334 movl %eax,%ds
14335 movl %eax,%es
14336 - cmpl $2,early_recursion_flag
14337 - je hlt_loop
14338 - incl early_recursion_flag
14339 pushl 16(%esp)
14340 pushl 24(%esp)
14341 pushl 32(%esp)
14342 @@ -600,6 +685,8 @@ ignore_int:
14343 #endif
14344 iret
14345
14346 +#include "verify_cpu.S"
14347 +
14348 __REFDATA
14349 .align 4
14350 ENTRY(initial_code)
14351 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14352 /*
14353 * BSS section
14354 */
14355 -__PAGE_ALIGNED_BSS
14356 - .align PAGE_SIZE_asm
14357 #ifdef CONFIG_X86_PAE
14358 +.section .swapper_pg_pmd,"a",@progbits
14359 swapper_pg_pmd:
14360 .fill 1024*KPMDS,4,0
14361 #else
14362 +.section .swapper_pg_dir,"a",@progbits
14363 ENTRY(swapper_pg_dir)
14364 .fill 1024,4,0
14365 #endif
14366 +.section .swapper_pg_fixmap,"a",@progbits
14367 swapper_pg_fixmap:
14368 .fill 1024,4,0
14369 #ifdef CONFIG_X86_TRAMPOLINE
14370 +.section .trampoline_pg_dir,"a",@progbits
14371 ENTRY(trampoline_pg_dir)
14372 +#ifdef CONFIG_X86_PAE
14373 + .fill 4,8,0
14374 +#else
14375 .fill 1024,4,0
14376 #endif
14377 +#endif
14378 +
14379 +.section .empty_zero_page,"a",@progbits
14380 ENTRY(empty_zero_page)
14381 .fill 4096,1,0
14382
14383 /*
14384 + * The IDT has to be page-aligned to simplify the Pentium
14385 + * F0 0F bug workaround.. We have a special link segment
14386 + * for this.
14387 + */
14388 +.section .idt,"a",@progbits
14389 +ENTRY(idt_table)
14390 + .fill 256,8,0
14391 +
14392 +/*
14393 * This starts the data section.
14394 */
14395 #ifdef CONFIG_X86_PAE
14396 -__PAGE_ALIGNED_DATA
14397 - /* Page-aligned for the benefit of paravirt? */
14398 - .align PAGE_SIZE_asm
14399 +.section .swapper_pg_dir,"a",@progbits
14400 +
14401 ENTRY(swapper_pg_dir)
14402 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14403 # if KPMDS == 3
14404 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14405 # error "Kernel PMDs should be 1, 2 or 3"
14406 # endif
14407 .align PAGE_SIZE_asm /* needs to be page-sized too */
14408 +
14409 +#ifdef CONFIG_PAX_PER_CPU_PGD
14410 +ENTRY(cpu_pgd)
14411 + .rept NR_CPUS
14412 + .fill 4,8,0
14413 + .endr
14414 +#endif
14415 +
14416 #endif
14417
14418 .data
14419 +.balign 4
14420 ENTRY(stack_start)
14421 - .long init_thread_union+THREAD_SIZE
14422 - .long __BOOT_DS
14423 + .long init_thread_union+THREAD_SIZE-8
14424
14425 ready: .byte 0
14426
14427 +.section .rodata,"a",@progbits
14428 early_recursion_flag:
14429 .long 0
14430
14431 @@ -697,7 +809,7 @@ fault_msg:
14432 .word 0 # 32 bit align gdt_desc.address
14433 boot_gdt_descr:
14434 .word __BOOT_DS+7
14435 - .long boot_gdt - __PAGE_OFFSET
14436 + .long pa(boot_gdt)
14437
14438 .word 0 # 32-bit align idt_desc.address
14439 idt_descr:
14440 @@ -708,7 +820,7 @@ idt_descr:
14441 .word 0 # 32 bit align gdt_desc.address
14442 ENTRY(early_gdt_descr)
14443 .word GDT_ENTRIES*8-1
14444 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14445 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14446
14447 /*
14448 * The boot_gdt must mirror the equivalent in setup.S and is
14449 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14450 .align L1_CACHE_BYTES
14451 ENTRY(boot_gdt)
14452 .fill GDT_ENTRY_BOOT_CS,8,0
14453 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14454 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14455 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14456 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14457 +
14458 + .align PAGE_SIZE_asm
14459 +ENTRY(cpu_gdt_table)
14460 + .rept NR_CPUS
14461 + .quad 0x0000000000000000 /* NULL descriptor */
14462 + .quad 0x0000000000000000 /* 0x0b reserved */
14463 + .quad 0x0000000000000000 /* 0x13 reserved */
14464 + .quad 0x0000000000000000 /* 0x1b reserved */
14465 +
14466 +#ifdef CONFIG_PAX_KERNEXEC
14467 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14468 +#else
14469 + .quad 0x0000000000000000 /* 0x20 unused */
14470 +#endif
14471 +
14472 + .quad 0x0000000000000000 /* 0x28 unused */
14473 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14474 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14475 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14476 + .quad 0x0000000000000000 /* 0x4b reserved */
14477 + .quad 0x0000000000000000 /* 0x53 reserved */
14478 + .quad 0x0000000000000000 /* 0x5b reserved */
14479 +
14480 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14481 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14482 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14483 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14484 +
14485 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14486 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14487 +
14488 + /*
14489 + * Segments used for calling PnP BIOS have byte granularity.
14490 + * The code segments and data segments have fixed 64k limits,
14491 + * the transfer segment sizes are set at run time.
14492 + */
14493 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14494 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14495 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14496 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14497 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14498 +
14499 + /*
14500 + * The APM segments have byte granularity and their bases
14501 + * are set at run time. All have 64k limits.
14502 + */
14503 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14504 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14505 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14506 +
14507 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14508 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14509 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14510 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14511 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14512 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14513 +
14514 + /* Be sure this is zeroed to avoid false validations in Xen */
14515 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14516 + .endr
14517 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_64.S linux-2.6.32.42/arch/x86/kernel/head_64.S
14518 --- linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14519 +++ linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14520 @@ -19,6 +19,7 @@
14521 #include <asm/cache.h>
14522 #include <asm/processor-flags.h>
14523 #include <asm/percpu.h>
14524 +#include <asm/cpufeature.h>
14525
14526 #ifdef CONFIG_PARAVIRT
14527 #include <asm/asm-offsets.h>
14528 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14529 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14530 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14531 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14532 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14533 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14534 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14535 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14536
14537 .text
14538 __HEAD
14539 @@ -85,35 +90,22 @@ startup_64:
14540 */
14541 addq %rbp, init_level4_pgt + 0(%rip)
14542 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14543 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14544 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14545 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14546
14547 addq %rbp, level3_ident_pgt + 0(%rip)
14548 +#ifndef CONFIG_XEN
14549 + addq %rbp, level3_ident_pgt + 8(%rip)
14550 +#endif
14551
14552 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14553 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14554 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14555
14556 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14557 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14558 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14559
14560 - /* Add an Identity mapping if I am above 1G */
14561 - leaq _text(%rip), %rdi
14562 - andq $PMD_PAGE_MASK, %rdi
14563 -
14564 - movq %rdi, %rax
14565 - shrq $PUD_SHIFT, %rax
14566 - andq $(PTRS_PER_PUD - 1), %rax
14567 - jz ident_complete
14568 -
14569 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14570 - leaq level3_ident_pgt(%rip), %rbx
14571 - movq %rdx, 0(%rbx, %rax, 8)
14572 -
14573 - movq %rdi, %rax
14574 - shrq $PMD_SHIFT, %rax
14575 - andq $(PTRS_PER_PMD - 1), %rax
14576 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14577 - leaq level2_spare_pgt(%rip), %rbx
14578 - movq %rdx, 0(%rbx, %rax, 8)
14579 -ident_complete:
14580 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14581 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14582
14583 /*
14584 * Fixup the kernel text+data virtual addresses. Note that
14585 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14586 * after the boot processor executes this code.
14587 */
14588
14589 - /* Enable PAE mode and PGE */
14590 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14591 + /* Enable PAE mode and PSE/PGE */
14592 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14593 movq %rax, %cr4
14594
14595 /* Setup early boot stage 4 level pagetables. */
14596 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14597 movl $MSR_EFER, %ecx
14598 rdmsr
14599 btsl $_EFER_SCE, %eax /* Enable System Call */
14600 - btl $20,%edi /* No Execute supported? */
14601 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14602 jnc 1f
14603 btsl $_EFER_NX, %eax
14604 + leaq init_level4_pgt(%rip), %rdi
14605 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14606 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14607 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14608 1: wrmsr /* Make changes effective */
14609
14610 /* Setup cr0 */
14611 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14612 .quad x86_64_start_kernel
14613 ENTRY(initial_gs)
14614 .quad INIT_PER_CPU_VAR(irq_stack_union)
14615 - __FINITDATA
14616
14617 ENTRY(stack_start)
14618 .quad init_thread_union+THREAD_SIZE-8
14619 .word 0
14620 + __FINITDATA
14621
14622 bad_address:
14623 jmp bad_address
14624
14625 - .section ".init.text","ax"
14626 + __INIT
14627 #ifdef CONFIG_EARLY_PRINTK
14628 .globl early_idt_handlers
14629 early_idt_handlers:
14630 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14631 #endif /* EARLY_PRINTK */
14632 1: hlt
14633 jmp 1b
14634 + .previous
14635
14636 #ifdef CONFIG_EARLY_PRINTK
14637 + __INITDATA
14638 early_recursion_flag:
14639 .long 0
14640 + .previous
14641
14642 + .section .rodata,"a",@progbits
14643 early_idt_msg:
14644 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14645 early_idt_ripmsg:
14646 .asciz "RIP %s\n"
14647 -#endif /* CONFIG_EARLY_PRINTK */
14648 .previous
14649 +#endif /* CONFIG_EARLY_PRINTK */
14650
14651 + .section .rodata,"a",@progbits
14652 #define NEXT_PAGE(name) \
14653 .balign PAGE_SIZE; \
14654 ENTRY(name)
14655 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14656 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14657 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14658 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14659 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14660 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14661 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14662 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14663 .org init_level4_pgt + L4_START_KERNEL*8, 0
14664 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14665 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14666
14667 +#ifdef CONFIG_PAX_PER_CPU_PGD
14668 +NEXT_PAGE(cpu_pgd)
14669 + .rept NR_CPUS
14670 + .fill 512,8,0
14671 + .endr
14672 +#endif
14673 +
14674 NEXT_PAGE(level3_ident_pgt)
14675 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14676 +#ifdef CONFIG_XEN
14677 .fill 511,8,0
14678 +#else
14679 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14680 + .fill 510,8,0
14681 +#endif
14682 +
14683 +NEXT_PAGE(level3_vmalloc_pgt)
14684 + .fill 512,8,0
14685 +
14686 +NEXT_PAGE(level3_vmemmap_pgt)
14687 + .fill L3_VMEMMAP_START,8,0
14688 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14689
14690 NEXT_PAGE(level3_kernel_pgt)
14691 .fill L3_START_KERNEL,8,0
14692 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14693 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14694 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14695
14696 +NEXT_PAGE(level2_vmemmap_pgt)
14697 + .fill 512,8,0
14698 +
14699 NEXT_PAGE(level2_fixmap_pgt)
14700 - .fill 506,8,0
14701 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14702 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14703 - .fill 5,8,0
14704 + .fill 507,8,0
14705 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14706 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14707 + .fill 4,8,0
14708
14709 -NEXT_PAGE(level1_fixmap_pgt)
14710 +NEXT_PAGE(level1_vsyscall_pgt)
14711 .fill 512,8,0
14712
14713 -NEXT_PAGE(level2_ident_pgt)
14714 - /* Since I easily can, map the first 1G.
14715 + /* Since I easily can, map the first 2G.
14716 * Don't set NX because code runs from these pages.
14717 */
14718 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14719 +NEXT_PAGE(level2_ident_pgt)
14720 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14721
14722 NEXT_PAGE(level2_kernel_pgt)
14723 /*
14724 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14725 * If you want to increase this then increase MODULES_VADDR
14726 * too.)
14727 */
14728 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14729 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14730 -
14731 -NEXT_PAGE(level2_spare_pgt)
14732 - .fill 512, 8, 0
14733 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14734
14735 #undef PMDS
14736 #undef NEXT_PAGE
14737
14738 - .data
14739 + .align PAGE_SIZE
14740 +ENTRY(cpu_gdt_table)
14741 + .rept NR_CPUS
14742 + .quad 0x0000000000000000 /* NULL descriptor */
14743 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14744 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14745 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14746 + .quad 0x00cffb000000ffff /* __USER32_CS */
14747 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14748 + .quad 0x00affb000000ffff /* __USER_CS */
14749 +
14750 +#ifdef CONFIG_PAX_KERNEXEC
14751 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14752 +#else
14753 + .quad 0x0 /* unused */
14754 +#endif
14755 +
14756 + .quad 0,0 /* TSS */
14757 + .quad 0,0 /* LDT */
14758 + .quad 0,0,0 /* three TLS descriptors */
14759 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14760 + /* asm/segment.h:GDT_ENTRIES must match this */
14761 +
14762 + /* zero the remaining page */
14763 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14764 + .endr
14765 +
14766 .align 16
14767 .globl early_gdt_descr
14768 early_gdt_descr:
14769 .word GDT_ENTRIES*8-1
14770 early_gdt_descr_base:
14771 - .quad INIT_PER_CPU_VAR(gdt_page)
14772 + .quad cpu_gdt_table
14773
14774 ENTRY(phys_base)
14775 /* This must match the first entry in level2_kernel_pgt */
14776 .quad 0x0000000000000000
14777
14778 #include "../../x86/xen/xen-head.S"
14779 -
14780 - .section .bss, "aw", @nobits
14781 +
14782 + .section .rodata,"a",@progbits
14783 .align L1_CACHE_BYTES
14784 ENTRY(idt_table)
14785 - .skip IDT_ENTRIES * 16
14786 + .fill 512,8,0
14787
14788 __PAGE_ALIGNED_BSS
14789 .align PAGE_SIZE
14790 diff -urNp linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c
14791 --- linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14792 +++ linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14793 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14794 EXPORT_SYMBOL(cmpxchg8b_emu);
14795 #endif
14796
14797 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14798 +
14799 /* Networking helper routines. */
14800 EXPORT_SYMBOL(csum_partial_copy_generic);
14801 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14802 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14803
14804 EXPORT_SYMBOL(__get_user_1);
14805 EXPORT_SYMBOL(__get_user_2);
14806 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14807
14808 EXPORT_SYMBOL(csum_partial);
14809 EXPORT_SYMBOL(empty_zero_page);
14810 +
14811 +#ifdef CONFIG_PAX_KERNEXEC
14812 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14813 +#endif
14814 diff -urNp linux-2.6.32.42/arch/x86/kernel/i8259.c linux-2.6.32.42/arch/x86/kernel/i8259.c
14815 --- linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14816 +++ linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14817 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14818 "spurious 8259A interrupt: IRQ%d.\n", irq);
14819 spurious_irq_mask |= irqmask;
14820 }
14821 - atomic_inc(&irq_err_count);
14822 + atomic_inc_unchecked(&irq_err_count);
14823 /*
14824 * Theoretically we do not have to handle this IRQ,
14825 * but in Linux this does not cause problems and is
14826 diff -urNp linux-2.6.32.42/arch/x86/kernel/init_task.c linux-2.6.32.42/arch/x86/kernel/init_task.c
14827 --- linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14828 +++ linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14829 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14830 * way process stacks are handled. This is done by having a special
14831 * "init_task" linker map entry..
14832 */
14833 -union thread_union init_thread_union __init_task_data =
14834 - { INIT_THREAD_INFO(init_task) };
14835 +union thread_union init_thread_union __init_task_data;
14836
14837 /*
14838 * Initial task structure.
14839 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14840 * section. Since TSS's are completely CPU-local, we want them
14841 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14842 */
14843 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14844 -
14845 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14846 +EXPORT_SYMBOL(init_tss);
14847 diff -urNp linux-2.6.32.42/arch/x86/kernel/ioport.c linux-2.6.32.42/arch/x86/kernel/ioport.c
14848 --- linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14849 +++ linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14850 @@ -6,6 +6,7 @@
14851 #include <linux/sched.h>
14852 #include <linux/kernel.h>
14853 #include <linux/capability.h>
14854 +#include <linux/security.h>
14855 #include <linux/errno.h>
14856 #include <linux/types.h>
14857 #include <linux/ioport.h>
14858 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14859
14860 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14861 return -EINVAL;
14862 +#ifdef CONFIG_GRKERNSEC_IO
14863 + if (turn_on && grsec_disable_privio) {
14864 + gr_handle_ioperm();
14865 + return -EPERM;
14866 + }
14867 +#endif
14868 if (turn_on && !capable(CAP_SYS_RAWIO))
14869 return -EPERM;
14870
14871 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14872 * because the ->io_bitmap_max value must match the bitmap
14873 * contents:
14874 */
14875 - tss = &per_cpu(init_tss, get_cpu());
14876 + tss = init_tss + get_cpu();
14877
14878 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14879
14880 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14881 return -EINVAL;
14882 /* Trying to gain more privileges? */
14883 if (level > old) {
14884 +#ifdef CONFIG_GRKERNSEC_IO
14885 + if (grsec_disable_privio) {
14886 + gr_handle_iopl();
14887 + return -EPERM;
14888 + }
14889 +#endif
14890 if (!capable(CAP_SYS_RAWIO))
14891 return -EPERM;
14892 }
14893 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq_32.c linux-2.6.32.42/arch/x86/kernel/irq_32.c
14894 --- linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14895 +++ linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14896 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14897 __asm__ __volatile__("andl %%esp,%0" :
14898 "=r" (sp) : "0" (THREAD_SIZE - 1));
14899
14900 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14901 + return sp < STACK_WARN;
14902 }
14903
14904 static void print_stack_overflow(void)
14905 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14906 * per-CPU IRQ handling contexts (thread information and stack)
14907 */
14908 union irq_ctx {
14909 - struct thread_info tinfo;
14910 - u32 stack[THREAD_SIZE/sizeof(u32)];
14911 -} __attribute__((aligned(PAGE_SIZE)));
14912 + unsigned long previous_esp;
14913 + u32 stack[THREAD_SIZE/sizeof(u32)];
14914 +} __attribute__((aligned(THREAD_SIZE)));
14915
14916 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14917 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14918 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14919 static inline int
14920 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14921 {
14922 - union irq_ctx *curctx, *irqctx;
14923 + union irq_ctx *irqctx;
14924 u32 *isp, arg1, arg2;
14925
14926 - curctx = (union irq_ctx *) current_thread_info();
14927 irqctx = __get_cpu_var(hardirq_ctx);
14928
14929 /*
14930 @@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14931 * handler) we can't do that and just have to keep using the
14932 * current stack (which is the irq stack already after all)
14933 */
14934 - if (unlikely(curctx == irqctx))
14935 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14936 return 0;
14937
14938 /* build the stack frame on the IRQ stack */
14939 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14940 - irqctx->tinfo.task = curctx->tinfo.task;
14941 - irqctx->tinfo.previous_esp = current_stack_pointer;
14942 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14943 + irqctx->previous_esp = current_stack_pointer;
14944 + add_preempt_count(HARDIRQ_OFFSET);
14945
14946 - /*
14947 - * Copy the softirq bits in preempt_count so that the
14948 - * softirq checks work in the hardirq context.
14949 - */
14950 - irqctx->tinfo.preempt_count =
14951 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14952 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14953 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14954 + __set_fs(MAKE_MM_SEG(0));
14955 +#endif
14956
14957 if (unlikely(overflow))
14958 call_on_stack(print_stack_overflow, isp);
14959 @@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14960 : "0" (irq), "1" (desc), "2" (isp),
14961 "D" (desc->handle_irq)
14962 : "memory", "cc", "ecx");
14963 +
14964 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14965 + __set_fs(current_thread_info()->addr_limit);
14966 +#endif
14967 +
14968 + sub_preempt_count(HARDIRQ_OFFSET);
14969 return 1;
14970 }
14971
14972 @@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14973 */
14974 void __cpuinit irq_ctx_init(int cpu)
14975 {
14976 - union irq_ctx *irqctx;
14977 -
14978 if (per_cpu(hardirq_ctx, cpu))
14979 return;
14980
14981 - irqctx = &per_cpu(hardirq_stack, cpu);
14982 - irqctx->tinfo.task = NULL;
14983 - irqctx->tinfo.exec_domain = NULL;
14984 - irqctx->tinfo.cpu = cpu;
14985 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14986 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14987 -
14988 - per_cpu(hardirq_ctx, cpu) = irqctx;
14989 -
14990 - irqctx = &per_cpu(softirq_stack, cpu);
14991 - irqctx->tinfo.task = NULL;
14992 - irqctx->tinfo.exec_domain = NULL;
14993 - irqctx->tinfo.cpu = cpu;
14994 - irqctx->tinfo.preempt_count = 0;
14995 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14996 -
14997 - per_cpu(softirq_ctx, cpu) = irqctx;
14998 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14999 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15000
15001 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15002 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15003 @@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
15004 asmlinkage void do_softirq(void)
15005 {
15006 unsigned long flags;
15007 - struct thread_info *curctx;
15008 union irq_ctx *irqctx;
15009 u32 *isp;
15010
15011 @@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
15012 local_irq_save(flags);
15013
15014 if (local_softirq_pending()) {
15015 - curctx = current_thread_info();
15016 irqctx = __get_cpu_var(softirq_ctx);
15017 - irqctx->tinfo.task = curctx->task;
15018 - irqctx->tinfo.previous_esp = current_stack_pointer;
15019 + irqctx->previous_esp = current_stack_pointer;
15020
15021 /* build the stack frame on the softirq stack */
15022 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15023 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15024 +
15025 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15026 + __set_fs(MAKE_MM_SEG(0));
15027 +#endif
15028
15029 call_on_stack(__do_softirq, isp);
15030 +
15031 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15032 + __set_fs(current_thread_info()->addr_limit);
15033 +#endif
15034 +
15035 /*
15036 * Shouldnt happen, we returned above if in_interrupt():
15037 */
15038 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq.c linux-2.6.32.42/arch/x86/kernel/irq.c
15039 --- linux-2.6.32.42/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15040 +++ linux-2.6.32.42/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15041 @@ -15,7 +15,7 @@
15042 #include <asm/mce.h>
15043 #include <asm/hw_irq.h>
15044
15045 -atomic_t irq_err_count;
15046 +atomic_unchecked_t irq_err_count;
15047
15048 /* Function pointer for generic interrupt vector handling */
15049 void (*generic_interrupt_extension)(void) = NULL;
15050 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15051 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15052 seq_printf(p, " Machine check polls\n");
15053 #endif
15054 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15055 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15056 #if defined(CONFIG_X86_IO_APIC)
15057 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15058 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15059 #endif
15060 return 0;
15061 }
15062 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15063
15064 u64 arch_irq_stat(void)
15065 {
15066 - u64 sum = atomic_read(&irq_err_count);
15067 + u64 sum = atomic_read_unchecked(&irq_err_count);
15068
15069 #ifdef CONFIG_X86_IO_APIC
15070 - sum += atomic_read(&irq_mis_count);
15071 + sum += atomic_read_unchecked(&irq_mis_count);
15072 #endif
15073 return sum;
15074 }
15075 diff -urNp linux-2.6.32.42/arch/x86/kernel/kgdb.c linux-2.6.32.42/arch/x86/kernel/kgdb.c
15076 --- linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15077 +++ linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15078 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15079
15080 /* clear the trace bit */
15081 linux_regs->flags &= ~X86_EFLAGS_TF;
15082 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15083 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15084
15085 /* set the trace bit if we're stepping */
15086 if (remcomInBuffer[0] == 's') {
15087 linux_regs->flags |= X86_EFLAGS_TF;
15088 kgdb_single_step = 1;
15089 - atomic_set(&kgdb_cpu_doing_single_step,
15090 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15091 raw_smp_processor_id());
15092 }
15093
15094 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15095 break;
15096
15097 case DIE_DEBUG:
15098 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15099 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15100 raw_smp_processor_id()) {
15101 if (user_mode(regs))
15102 return single_step_cont(regs, args);
15103 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15104 return instruction_pointer(regs);
15105 }
15106
15107 -struct kgdb_arch arch_kgdb_ops = {
15108 +const struct kgdb_arch arch_kgdb_ops = {
15109 /* Breakpoint instruction: */
15110 .gdb_bpt_instr = { 0xcc },
15111 .flags = KGDB_HW_BREAKPOINT,
15112 diff -urNp linux-2.6.32.42/arch/x86/kernel/kprobes.c linux-2.6.32.42/arch/x86/kernel/kprobes.c
15113 --- linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15114 +++ linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15115 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15116 char op;
15117 s32 raddr;
15118 } __attribute__((packed)) * jop;
15119 - jop = (struct __arch_jmp_op *)from;
15120 +
15121 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15122 +
15123 + pax_open_kernel();
15124 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15125 jop->op = RELATIVEJUMP_INSTRUCTION;
15126 + pax_close_kernel();
15127 }
15128
15129 /*
15130 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15131 kprobe_opcode_t opcode;
15132 kprobe_opcode_t *orig_opcodes = opcodes;
15133
15134 - if (search_exception_tables((unsigned long)opcodes))
15135 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15136 return 0; /* Page fault may occur on this address. */
15137
15138 retry:
15139 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15140 disp = (u8 *) p->addr + *((s32 *) insn) -
15141 (u8 *) p->ainsn.insn;
15142 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15143 + pax_open_kernel();
15144 *(s32 *)insn = (s32) disp;
15145 + pax_close_kernel();
15146 }
15147 }
15148 #endif
15149 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15150
15151 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15152 {
15153 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15154 + pax_open_kernel();
15155 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15156 + pax_close_kernel();
15157
15158 fix_riprel(p);
15159
15160 - if (can_boost(p->addr))
15161 + if (can_boost(ktla_ktva(p->addr)))
15162 p->ainsn.boostable = 0;
15163 else
15164 p->ainsn.boostable = -1;
15165
15166 - p->opcode = *p->addr;
15167 + p->opcode = *(ktla_ktva(p->addr));
15168 }
15169
15170 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15171 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15172 if (p->opcode == BREAKPOINT_INSTRUCTION)
15173 regs->ip = (unsigned long)p->addr;
15174 else
15175 - regs->ip = (unsigned long)p->ainsn.insn;
15176 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15177 }
15178
15179 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15180 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15181 if (p->ainsn.boostable == 1 && !p->post_handler) {
15182 /* Boost up -- we can execute copied instructions directly */
15183 reset_current_kprobe();
15184 - regs->ip = (unsigned long)p->ainsn.insn;
15185 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15186 preempt_enable_no_resched();
15187 return;
15188 }
15189 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15190 struct kprobe_ctlblk *kcb;
15191
15192 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15193 - if (*addr != BREAKPOINT_INSTRUCTION) {
15194 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15195 /*
15196 * The breakpoint instruction was removed right
15197 * after we hit it. Another cpu has removed
15198 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15199 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15200 {
15201 unsigned long *tos = stack_addr(regs);
15202 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15203 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15204 unsigned long orig_ip = (unsigned long)p->addr;
15205 kprobe_opcode_t *insn = p->ainsn.insn;
15206
15207 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15208 struct die_args *args = data;
15209 int ret = NOTIFY_DONE;
15210
15211 - if (args->regs && user_mode_vm(args->regs))
15212 + if (args->regs && user_mode(args->regs))
15213 return ret;
15214
15215 switch (val) {
15216 diff -urNp linux-2.6.32.42/arch/x86/kernel/ldt.c linux-2.6.32.42/arch/x86/kernel/ldt.c
15217 --- linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15218 +++ linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15219 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15220 if (reload) {
15221 #ifdef CONFIG_SMP
15222 preempt_disable();
15223 - load_LDT(pc);
15224 + load_LDT_nolock(pc);
15225 if (!cpumask_equal(mm_cpumask(current->mm),
15226 cpumask_of(smp_processor_id())))
15227 smp_call_function(flush_ldt, current->mm, 1);
15228 preempt_enable();
15229 #else
15230 - load_LDT(pc);
15231 + load_LDT_nolock(pc);
15232 #endif
15233 }
15234 if (oldsize) {
15235 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15236 return err;
15237
15238 for (i = 0; i < old->size; i++)
15239 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15240 + write_ldt_entry(new->ldt, i, old->ldt + i);
15241 return 0;
15242 }
15243
15244 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15245 retval = copy_ldt(&mm->context, &old_mm->context);
15246 mutex_unlock(&old_mm->context.lock);
15247 }
15248 +
15249 + if (tsk == current) {
15250 + mm->context.vdso = 0;
15251 +
15252 +#ifdef CONFIG_X86_32
15253 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15254 + mm->context.user_cs_base = 0UL;
15255 + mm->context.user_cs_limit = ~0UL;
15256 +
15257 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15258 + cpus_clear(mm->context.cpu_user_cs_mask);
15259 +#endif
15260 +
15261 +#endif
15262 +#endif
15263 +
15264 + }
15265 +
15266 return retval;
15267 }
15268
15269 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15270 }
15271 }
15272
15273 +#ifdef CONFIG_PAX_SEGMEXEC
15274 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15275 + error = -EINVAL;
15276 + goto out_unlock;
15277 + }
15278 +#endif
15279 +
15280 fill_ldt(&ldt, &ldt_info);
15281 if (oldmode)
15282 ldt.avl = 0;
15283 diff -urNp linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c
15284 --- linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15285 +++ linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15286 @@ -26,7 +26,7 @@
15287 #include <asm/system.h>
15288 #include <asm/cacheflush.h>
15289
15290 -static void set_idt(void *newidt, __u16 limit)
15291 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15292 {
15293 struct desc_ptr curidt;
15294
15295 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15296 }
15297
15298
15299 -static void set_gdt(void *newgdt, __u16 limit)
15300 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15301 {
15302 struct desc_ptr curgdt;
15303
15304 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15305 }
15306
15307 control_page = page_address(image->control_code_page);
15308 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15309 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15310
15311 relocate_kernel_ptr = control_page;
15312 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15313 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_amd.c linux-2.6.32.42/arch/x86/kernel/microcode_amd.c
15314 --- linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15315 +++ linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15316 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15317 uci->mc = NULL;
15318 }
15319
15320 -static struct microcode_ops microcode_amd_ops = {
15321 +static const struct microcode_ops microcode_amd_ops = {
15322 .request_microcode_user = request_microcode_user,
15323 .request_microcode_fw = request_microcode_fw,
15324 .collect_cpu_info = collect_cpu_info_amd,
15325 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15326 .microcode_fini_cpu = microcode_fini_cpu_amd,
15327 };
15328
15329 -struct microcode_ops * __init init_amd_microcode(void)
15330 +const struct microcode_ops * __init init_amd_microcode(void)
15331 {
15332 return &microcode_amd_ops;
15333 }
15334 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_core.c linux-2.6.32.42/arch/x86/kernel/microcode_core.c
15335 --- linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15336 +++ linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15337 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15338
15339 #define MICROCODE_VERSION "2.00"
15340
15341 -static struct microcode_ops *microcode_ops;
15342 +static const struct microcode_ops *microcode_ops;
15343
15344 /*
15345 * Synchronization.
15346 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_intel.c linux-2.6.32.42/arch/x86/kernel/microcode_intel.c
15347 --- linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15348 +++ linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15349 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15350
15351 static int get_ucode_user(void *to, const void *from, size_t n)
15352 {
15353 - return copy_from_user(to, from, n);
15354 + return copy_from_user(to, (__force const void __user *)from, n);
15355 }
15356
15357 static enum ucode_state
15358 request_microcode_user(int cpu, const void __user *buf, size_t size)
15359 {
15360 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15361 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15362 }
15363
15364 static void microcode_fini_cpu(int cpu)
15365 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15366 uci->mc = NULL;
15367 }
15368
15369 -static struct microcode_ops microcode_intel_ops = {
15370 +static const struct microcode_ops microcode_intel_ops = {
15371 .request_microcode_user = request_microcode_user,
15372 .request_microcode_fw = request_microcode_fw,
15373 .collect_cpu_info = collect_cpu_info,
15374 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15375 .microcode_fini_cpu = microcode_fini_cpu,
15376 };
15377
15378 -struct microcode_ops * __init init_intel_microcode(void)
15379 +const struct microcode_ops * __init init_intel_microcode(void)
15380 {
15381 return &microcode_intel_ops;
15382 }
15383 diff -urNp linux-2.6.32.42/arch/x86/kernel/module.c linux-2.6.32.42/arch/x86/kernel/module.c
15384 --- linux-2.6.32.42/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15385 +++ linux-2.6.32.42/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15386 @@ -34,7 +34,7 @@
15387 #define DEBUGP(fmt...)
15388 #endif
15389
15390 -void *module_alloc(unsigned long size)
15391 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15392 {
15393 struct vm_struct *area;
15394
15395 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15396 if (!area)
15397 return NULL;
15398
15399 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15400 - PAGE_KERNEL_EXEC);
15401 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15402 +}
15403 +
15404 +void *module_alloc(unsigned long size)
15405 +{
15406 +
15407 +#ifdef CONFIG_PAX_KERNEXEC
15408 + return __module_alloc(size, PAGE_KERNEL);
15409 +#else
15410 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15411 +#endif
15412 +
15413 }
15414
15415 /* Free memory returned from module_alloc */
15416 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15417 vfree(module_region);
15418 }
15419
15420 +#ifdef CONFIG_PAX_KERNEXEC
15421 +#ifdef CONFIG_X86_32
15422 +void *module_alloc_exec(unsigned long size)
15423 +{
15424 + struct vm_struct *area;
15425 +
15426 + if (size == 0)
15427 + return NULL;
15428 +
15429 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15430 + return area ? area->addr : NULL;
15431 +}
15432 +EXPORT_SYMBOL(module_alloc_exec);
15433 +
15434 +void module_free_exec(struct module *mod, void *module_region)
15435 +{
15436 + vunmap(module_region);
15437 +}
15438 +EXPORT_SYMBOL(module_free_exec);
15439 +#else
15440 +void module_free_exec(struct module *mod, void *module_region)
15441 +{
15442 + module_free(mod, module_region);
15443 +}
15444 +EXPORT_SYMBOL(module_free_exec);
15445 +
15446 +void *module_alloc_exec(unsigned long size)
15447 +{
15448 + return __module_alloc(size, PAGE_KERNEL_RX);
15449 +}
15450 +EXPORT_SYMBOL(module_alloc_exec);
15451 +#endif
15452 +#endif
15453 +
15454 /* We don't need anything special. */
15455 int module_frob_arch_sections(Elf_Ehdr *hdr,
15456 Elf_Shdr *sechdrs,
15457 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15458 unsigned int i;
15459 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15460 Elf32_Sym *sym;
15461 - uint32_t *location;
15462 + uint32_t *plocation, location;
15463
15464 DEBUGP("Applying relocate section %u to %u\n", relsec,
15465 sechdrs[relsec].sh_info);
15466 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15467 /* This is where to make the change */
15468 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15469 - + rel[i].r_offset;
15470 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15471 + location = (uint32_t)plocation;
15472 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15473 + plocation = ktla_ktva((void *)plocation);
15474 /* This is the symbol it is referring to. Note that all
15475 undefined symbols have been resolved. */
15476 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15477 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15478 switch (ELF32_R_TYPE(rel[i].r_info)) {
15479 case R_386_32:
15480 /* We add the value into the location given */
15481 - *location += sym->st_value;
15482 + pax_open_kernel();
15483 + *plocation += sym->st_value;
15484 + pax_close_kernel();
15485 break;
15486 case R_386_PC32:
15487 /* Add the value, subtract its postition */
15488 - *location += sym->st_value - (uint32_t)location;
15489 + pax_open_kernel();
15490 + *plocation += sym->st_value - location;
15491 + pax_close_kernel();
15492 break;
15493 default:
15494 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15495 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15496 case R_X86_64_NONE:
15497 break;
15498 case R_X86_64_64:
15499 + pax_open_kernel();
15500 *(u64 *)loc = val;
15501 + pax_close_kernel();
15502 break;
15503 case R_X86_64_32:
15504 + pax_open_kernel();
15505 *(u32 *)loc = val;
15506 + pax_close_kernel();
15507 if (val != *(u32 *)loc)
15508 goto overflow;
15509 break;
15510 case R_X86_64_32S:
15511 + pax_open_kernel();
15512 *(s32 *)loc = val;
15513 + pax_close_kernel();
15514 if ((s64)val != *(s32 *)loc)
15515 goto overflow;
15516 break;
15517 case R_X86_64_PC32:
15518 val -= (u64)loc;
15519 + pax_open_kernel();
15520 *(u32 *)loc = val;
15521 + pax_close_kernel();
15522 +
15523 #if 0
15524 if ((s64)val != *(s32 *)loc)
15525 goto overflow;
15526 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt.c linux-2.6.32.42/arch/x86/kernel/paravirt.c
15527 --- linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15528 +++ linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15529 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15530 * corresponding structure. */
15531 static void *get_call_destination(u8 type)
15532 {
15533 - struct paravirt_patch_template tmpl = {
15534 + const struct paravirt_patch_template tmpl = {
15535 .pv_init_ops = pv_init_ops,
15536 .pv_time_ops = pv_time_ops,
15537 .pv_cpu_ops = pv_cpu_ops,
15538 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15539 .pv_lock_ops = pv_lock_ops,
15540 #endif
15541 };
15542 +
15543 + pax_track_stack();
15544 +
15545 return *((void **)&tmpl + type);
15546 }
15547
15548 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15549 if (opfunc == NULL)
15550 /* If there's no function, patch it with a ud2a (BUG) */
15551 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15552 - else if (opfunc == _paravirt_nop)
15553 + else if (opfunc == (void *)_paravirt_nop)
15554 /* If the operation is a nop, then nop the callsite */
15555 ret = paravirt_patch_nop();
15556
15557 /* identity functions just return their single argument */
15558 - else if (opfunc == _paravirt_ident_32)
15559 + else if (opfunc == (void *)_paravirt_ident_32)
15560 ret = paravirt_patch_ident_32(insnbuf, len);
15561 - else if (opfunc == _paravirt_ident_64)
15562 + else if (opfunc == (void *)_paravirt_ident_64)
15563 ret = paravirt_patch_ident_64(insnbuf, len);
15564
15565 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15566 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15567 if (insn_len > len || start == NULL)
15568 insn_len = len;
15569 else
15570 - memcpy(insnbuf, start, insn_len);
15571 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15572
15573 return insn_len;
15574 }
15575 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15576 preempt_enable();
15577 }
15578
15579 -struct pv_info pv_info = {
15580 +struct pv_info pv_info __read_only = {
15581 .name = "bare hardware",
15582 .paravirt_enabled = 0,
15583 .kernel_rpl = 0,
15584 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15585 };
15586
15587 -struct pv_init_ops pv_init_ops = {
15588 +struct pv_init_ops pv_init_ops __read_only = {
15589 .patch = native_patch,
15590 };
15591
15592 -struct pv_time_ops pv_time_ops = {
15593 +struct pv_time_ops pv_time_ops __read_only = {
15594 .sched_clock = native_sched_clock,
15595 };
15596
15597 -struct pv_irq_ops pv_irq_ops = {
15598 +struct pv_irq_ops pv_irq_ops __read_only = {
15599 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15600 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15601 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15602 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15603 #endif
15604 };
15605
15606 -struct pv_cpu_ops pv_cpu_ops = {
15607 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15608 .cpuid = native_cpuid,
15609 .get_debugreg = native_get_debugreg,
15610 .set_debugreg = native_set_debugreg,
15611 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15612 .end_context_switch = paravirt_nop,
15613 };
15614
15615 -struct pv_apic_ops pv_apic_ops = {
15616 +struct pv_apic_ops pv_apic_ops __read_only = {
15617 #ifdef CONFIG_X86_LOCAL_APIC
15618 .startup_ipi_hook = paravirt_nop,
15619 #endif
15620 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15621 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15622 #endif
15623
15624 -struct pv_mmu_ops pv_mmu_ops = {
15625 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15626
15627 .read_cr2 = native_read_cr2,
15628 .write_cr2 = native_write_cr2,
15629 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15630 },
15631
15632 .set_fixmap = native_set_fixmap,
15633 +
15634 +#ifdef CONFIG_PAX_KERNEXEC
15635 + .pax_open_kernel = native_pax_open_kernel,
15636 + .pax_close_kernel = native_pax_close_kernel,
15637 +#endif
15638 +
15639 };
15640
15641 EXPORT_SYMBOL_GPL(pv_time_ops);
15642 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c
15643 --- linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15644 +++ linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15645 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15646 __raw_spin_lock(lock);
15647 }
15648
15649 -struct pv_lock_ops pv_lock_ops = {
15650 +struct pv_lock_ops pv_lock_ops __read_only = {
15651 #ifdef CONFIG_SMP
15652 .spin_is_locked = __ticket_spin_is_locked,
15653 .spin_is_contended = __ticket_spin_is_contended,
15654 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c
15655 --- linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15656 +++ linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15657 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15658 free_pages((unsigned long)vaddr, get_order(size));
15659 }
15660
15661 -static struct dma_map_ops calgary_dma_ops = {
15662 +static const struct dma_map_ops calgary_dma_ops = {
15663 .alloc_coherent = calgary_alloc_coherent,
15664 .free_coherent = calgary_free_coherent,
15665 .map_sg = calgary_map_sg,
15666 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-dma.c linux-2.6.32.42/arch/x86/kernel/pci-dma.c
15667 --- linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15668 +++ linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15669 @@ -14,7 +14,7 @@
15670
15671 static int forbid_dac __read_mostly;
15672
15673 -struct dma_map_ops *dma_ops;
15674 +const struct dma_map_ops *dma_ops;
15675 EXPORT_SYMBOL(dma_ops);
15676
15677 static int iommu_sac_force __read_mostly;
15678 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15679
15680 int dma_supported(struct device *dev, u64 mask)
15681 {
15682 - struct dma_map_ops *ops = get_dma_ops(dev);
15683 + const struct dma_map_ops *ops = get_dma_ops(dev);
15684
15685 #ifdef CONFIG_PCI
15686 if (mask > 0xffffffff && forbid_dac > 0) {
15687 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c
15688 --- linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15689 +++ linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15690 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15691 return -1;
15692 }
15693
15694 -static struct dma_map_ops gart_dma_ops = {
15695 +static const struct dma_map_ops gart_dma_ops = {
15696 .map_sg = gart_map_sg,
15697 .unmap_sg = gart_unmap_sg,
15698 .map_page = gart_map_page,
15699 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-nommu.c linux-2.6.32.42/arch/x86/kernel/pci-nommu.c
15700 --- linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15701 +++ linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15702 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15703 flush_write_buffers();
15704 }
15705
15706 -struct dma_map_ops nommu_dma_ops = {
15707 +const struct dma_map_ops nommu_dma_ops = {
15708 .alloc_coherent = dma_generic_alloc_coherent,
15709 .free_coherent = nommu_free_coherent,
15710 .map_sg = nommu_map_sg,
15711 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c
15712 --- linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15713 +++ linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15714 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15715 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15716 }
15717
15718 -static struct dma_map_ops swiotlb_dma_ops = {
15719 +static const struct dma_map_ops swiotlb_dma_ops = {
15720 .mapping_error = swiotlb_dma_mapping_error,
15721 .alloc_coherent = x86_swiotlb_alloc_coherent,
15722 .free_coherent = swiotlb_free_coherent,
15723 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_32.c linux-2.6.32.42/arch/x86/kernel/process_32.c
15724 --- linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15725 +++ linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15726 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15727 unsigned long thread_saved_pc(struct task_struct *tsk)
15728 {
15729 return ((unsigned long *)tsk->thread.sp)[3];
15730 +//XXX return tsk->thread.eip;
15731 }
15732
15733 #ifndef CONFIG_SMP
15734 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15735 unsigned short ss, gs;
15736 const char *board;
15737
15738 - if (user_mode_vm(regs)) {
15739 + if (user_mode(regs)) {
15740 sp = regs->sp;
15741 ss = regs->ss & 0xffff;
15742 - gs = get_user_gs(regs);
15743 } else {
15744 sp = (unsigned long) (&regs->sp);
15745 savesegment(ss, ss);
15746 - savesegment(gs, gs);
15747 }
15748 + gs = get_user_gs(regs);
15749
15750 printk("\n");
15751
15752 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15753 regs.bx = (unsigned long) fn;
15754 regs.dx = (unsigned long) arg;
15755
15756 - regs.ds = __USER_DS;
15757 - regs.es = __USER_DS;
15758 + regs.ds = __KERNEL_DS;
15759 + regs.es = __KERNEL_DS;
15760 regs.fs = __KERNEL_PERCPU;
15761 - regs.gs = __KERNEL_STACK_CANARY;
15762 + savesegment(gs, regs.gs);
15763 regs.orig_ax = -1;
15764 regs.ip = (unsigned long) kernel_thread_helper;
15765 regs.cs = __KERNEL_CS | get_kernel_rpl();
15766 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15767 struct task_struct *tsk;
15768 int err;
15769
15770 - childregs = task_pt_regs(p);
15771 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15772 *childregs = *regs;
15773 childregs->ax = 0;
15774 childregs->sp = sp;
15775
15776 p->thread.sp = (unsigned long) childregs;
15777 p->thread.sp0 = (unsigned long) (childregs+1);
15778 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15779
15780 p->thread.ip = (unsigned long) ret_from_fork;
15781
15782 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
15783 struct thread_struct *prev = &prev_p->thread,
15784 *next = &next_p->thread;
15785 int cpu = smp_processor_id();
15786 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15787 + struct tss_struct *tss = init_tss + cpu;
15788 bool preload_fpu;
15789
15790 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15791 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
15792 */
15793 lazy_save_gs(prev->gs);
15794
15795 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15796 + __set_fs(task_thread_info(next_p)->addr_limit);
15797 +#endif
15798 +
15799 /*
15800 * Load the per-thread Thread-Local Storage descriptor.
15801 */
15802 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
15803 */
15804 arch_end_context_switch(next_p);
15805
15806 + percpu_write(current_task, next_p);
15807 + percpu_write(current_tinfo, &next_p->tinfo);
15808 +
15809 if (preload_fpu)
15810 __math_state_restore();
15811
15812 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
15813 if (prev->gs | next->gs)
15814 lazy_load_gs(next->gs);
15815
15816 - percpu_write(current_task, next_p);
15817 -
15818 return prev_p;
15819 }
15820
15821 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
15822 } while (count++ < 16);
15823 return 0;
15824 }
15825 -
15826 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_64.c linux-2.6.32.42/arch/x86/kernel/process_64.c
15827 --- linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
15828 +++ linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
15829 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15830 void exit_idle(void)
15831 {
15832 /* idle loop has pid 0 */
15833 - if (current->pid)
15834 + if (task_pid_nr(current))
15835 return;
15836 __exit_idle();
15837 }
15838 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15839 if (!board)
15840 board = "";
15841 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15842 - current->pid, current->comm, print_tainted(),
15843 + task_pid_nr(current), current->comm, print_tainted(),
15844 init_utsname()->release,
15845 (int)strcspn(init_utsname()->version, " "),
15846 init_utsname()->version, board);
15847 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15848 struct pt_regs *childregs;
15849 struct task_struct *me = current;
15850
15851 - childregs = ((struct pt_regs *)
15852 - (THREAD_SIZE + task_stack_page(p))) - 1;
15853 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15854 *childregs = *regs;
15855
15856 childregs->ax = 0;
15857 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15858 p->thread.sp = (unsigned long) childregs;
15859 p->thread.sp0 = (unsigned long) (childregs+1);
15860 p->thread.usersp = me->thread.usersp;
15861 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15862
15863 set_tsk_thread_flag(p, TIF_FORK);
15864
15865 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
15866 struct thread_struct *prev = &prev_p->thread;
15867 struct thread_struct *next = &next_p->thread;
15868 int cpu = smp_processor_id();
15869 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15870 + struct tss_struct *tss = init_tss + cpu;
15871 unsigned fsindex, gsindex;
15872 bool preload_fpu;
15873
15874 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
15875 prev->usersp = percpu_read(old_rsp);
15876 percpu_write(old_rsp, next->usersp);
15877 percpu_write(current_task, next_p);
15878 + percpu_write(current_tinfo, &next_p->tinfo);
15879
15880 - percpu_write(kernel_stack,
15881 - (unsigned long)task_stack_page(next_p) +
15882 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15883 + percpu_write(kernel_stack, next->sp0);
15884
15885 /*
15886 * Now maybe reload the debug registers and handle I/O bitmaps
15887 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
15888 if (!p || p == current || p->state == TASK_RUNNING)
15889 return 0;
15890 stack = (unsigned long)task_stack_page(p);
15891 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15892 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15893 return 0;
15894 fp = *(u64 *)(p->thread.sp);
15895 do {
15896 - if (fp < (unsigned long)stack ||
15897 - fp >= (unsigned long)stack+THREAD_SIZE)
15898 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15899 return 0;
15900 ip = *(u64 *)(fp+8);
15901 if (!in_sched_functions(ip))
15902 diff -urNp linux-2.6.32.42/arch/x86/kernel/process.c linux-2.6.32.42/arch/x86/kernel/process.c
15903 --- linux-2.6.32.42/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15904 +++ linux-2.6.32.42/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15905 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15906
15907 void free_thread_info(struct thread_info *ti)
15908 {
15909 - free_thread_xstate(ti->task);
15910 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15911 }
15912
15913 +static struct kmem_cache *task_struct_cachep;
15914 +
15915 void arch_task_cache_init(void)
15916 {
15917 - task_xstate_cachep =
15918 - kmem_cache_create("task_xstate", xstate_size,
15919 + /* create a slab on which task_structs can be allocated */
15920 + task_struct_cachep =
15921 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15922 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15923 +
15924 + task_xstate_cachep =
15925 + kmem_cache_create("task_xstate", xstate_size,
15926 __alignof__(union thread_xstate),
15927 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15928 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15929 +}
15930 +
15931 +struct task_struct *alloc_task_struct(void)
15932 +{
15933 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15934 +}
15935 +
15936 +void free_task_struct(struct task_struct *task)
15937 +{
15938 + free_thread_xstate(task);
15939 + kmem_cache_free(task_struct_cachep, task);
15940 }
15941
15942 /*
15943 @@ -73,7 +90,7 @@ void exit_thread(void)
15944 unsigned long *bp = t->io_bitmap_ptr;
15945
15946 if (bp) {
15947 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15948 + struct tss_struct *tss = init_tss + get_cpu();
15949
15950 t->io_bitmap_ptr = NULL;
15951 clear_thread_flag(TIF_IO_BITMAP);
15952 @@ -93,6 +110,9 @@ void flush_thread(void)
15953
15954 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15955
15956 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15957 + loadsegment(gs, 0);
15958 +#endif
15959 tsk->thread.debugreg0 = 0;
15960 tsk->thread.debugreg1 = 0;
15961 tsk->thread.debugreg2 = 0;
15962 @@ -307,7 +327,7 @@ void default_idle(void)
15963 EXPORT_SYMBOL(default_idle);
15964 #endif
15965
15966 -void stop_this_cpu(void *dummy)
15967 +__noreturn void stop_this_cpu(void *dummy)
15968 {
15969 local_irq_disable();
15970 /*
15971 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15972 }
15973 early_param("idle", idle_setup);
15974
15975 -unsigned long arch_align_stack(unsigned long sp)
15976 +#ifdef CONFIG_PAX_RANDKSTACK
15977 +asmlinkage void pax_randomize_kstack(void)
15978 {
15979 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15980 - sp -= get_random_int() % 8192;
15981 - return sp & ~0xf;
15982 -}
15983 + struct thread_struct *thread = &current->thread;
15984 + unsigned long time;
15985
15986 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15987 -{
15988 - unsigned long range_end = mm->brk + 0x02000000;
15989 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15990 + if (!randomize_va_space)
15991 + return;
15992 +
15993 + rdtscl(time);
15994 +
15995 + /* P4 seems to return a 0 LSB, ignore it */
15996 +#ifdef CONFIG_MPENTIUM4
15997 + time &= 0x3EUL;
15998 + time <<= 2;
15999 +#elif defined(CONFIG_X86_64)
16000 + time &= 0xFUL;
16001 + time <<= 4;
16002 +#else
16003 + time &= 0x1FUL;
16004 + time <<= 3;
16005 +#endif
16006 +
16007 + thread->sp0 ^= time;
16008 + load_sp0(init_tss + smp_processor_id(), thread);
16009 +
16010 +#ifdef CONFIG_X86_64
16011 + percpu_write(kernel_stack, thread->sp0);
16012 +#endif
16013 }
16014 +#endif
16015
16016 diff -urNp linux-2.6.32.42/arch/x86/kernel/ptrace.c linux-2.6.32.42/arch/x86/kernel/ptrace.c
16017 --- linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16018 +++ linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16019 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16020 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16021 {
16022 int ret;
16023 - unsigned long __user *datap = (unsigned long __user *)data;
16024 + unsigned long __user *datap = (__force unsigned long __user *)data;
16025
16026 switch (request) {
16027 /* read the word at location addr in the USER area. */
16028 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16029 if (addr < 0)
16030 return -EIO;
16031 ret = do_get_thread_area(child, addr,
16032 - (struct user_desc __user *) data);
16033 + (__force struct user_desc __user *) data);
16034 break;
16035
16036 case PTRACE_SET_THREAD_AREA:
16037 if (addr < 0)
16038 return -EIO;
16039 ret = do_set_thread_area(child, addr,
16040 - (struct user_desc __user *) data, 0);
16041 + (__force struct user_desc __user *) data, 0);
16042 break;
16043 #endif
16044
16045 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16046 #ifdef CONFIG_X86_PTRACE_BTS
16047 case PTRACE_BTS_CONFIG:
16048 ret = ptrace_bts_config
16049 - (child, data, (struct ptrace_bts_config __user *)addr);
16050 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16051 break;
16052
16053 case PTRACE_BTS_STATUS:
16054 ret = ptrace_bts_status
16055 - (child, data, (struct ptrace_bts_config __user *)addr);
16056 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16057 break;
16058
16059 case PTRACE_BTS_SIZE:
16060 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16061
16062 case PTRACE_BTS_GET:
16063 ret = ptrace_bts_read_record
16064 - (child, data, (struct bts_struct __user *) addr);
16065 + (child, data, (__force struct bts_struct __user *) addr);
16066 break;
16067
16068 case PTRACE_BTS_CLEAR:
16069 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16070
16071 case PTRACE_BTS_DRAIN:
16072 ret = ptrace_bts_drain
16073 - (child, data, (struct bts_struct __user *) addr);
16074 + (child, data, (__force struct bts_struct __user *) addr);
16075 break;
16076 #endif /* CONFIG_X86_PTRACE_BTS */
16077
16078 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16079 info.si_code = si_code;
16080
16081 /* User-mode ip? */
16082 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16083 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16084
16085 /* Send us the fake SIGTRAP */
16086 force_sig_info(SIGTRAP, &info, tsk);
16087 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16088 * We must return the syscall number to actually look up in the table.
16089 * This can be -1L to skip running any syscall at all.
16090 */
16091 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16092 +long syscall_trace_enter(struct pt_regs *regs)
16093 {
16094 long ret = 0;
16095
16096 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16097 return ret ?: regs->orig_ax;
16098 }
16099
16100 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16101 +void syscall_trace_leave(struct pt_regs *regs)
16102 {
16103 if (unlikely(current->audit_context))
16104 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16105 diff -urNp linux-2.6.32.42/arch/x86/kernel/reboot.c linux-2.6.32.42/arch/x86/kernel/reboot.c
16106 --- linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
16107 +++ linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
16108 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16109 EXPORT_SYMBOL(pm_power_off);
16110
16111 static const struct desc_ptr no_idt = {};
16112 -static int reboot_mode;
16113 +static unsigned short reboot_mode;
16114 enum reboot_type reboot_type = BOOT_KBD;
16115 int reboot_force;
16116
16117 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16118 controller to pulse the CPU reset line, which is more thorough, but
16119 doesn't work with at least one type of 486 motherboard. It is easy
16120 to stop this code working; hence the copious comments. */
16121 -static const unsigned long long
16122 -real_mode_gdt_entries [3] =
16123 +static struct desc_struct
16124 +real_mode_gdt_entries [3] __read_only =
16125 {
16126 - 0x0000000000000000ULL, /* Null descriptor */
16127 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16128 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16129 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16130 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16131 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16132 };
16133
16134 static const struct desc_ptr
16135 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16136 * specified by the code and length parameters.
16137 * We assume that length will aways be less that 100!
16138 */
16139 -void machine_real_restart(const unsigned char *code, int length)
16140 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16141 {
16142 local_irq_disable();
16143
16144 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16145 /* Remap the kernel at virtual address zero, as well as offset zero
16146 from the kernel segment. This assumes the kernel segment starts at
16147 virtual address PAGE_OFFSET. */
16148 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16149 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16150 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16151 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16152
16153 /*
16154 * Use `swapper_pg_dir' as our page directory.
16155 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16156 boot)". This seems like a fairly standard thing that gets set by
16157 REBOOT.COM programs, and the previous reset routine did this
16158 too. */
16159 - *((unsigned short *)0x472) = reboot_mode;
16160 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16161
16162 /* For the switch to real mode, copy some code to low memory. It has
16163 to be in the first 64k because it is running in 16-bit mode, and it
16164 has to have the same physical and virtual address, because it turns
16165 off paging. Copy it near the end of the first page, out of the way
16166 of BIOS variables. */
16167 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16168 - real_mode_switch, sizeof (real_mode_switch));
16169 - memcpy((void *)(0x1000 - 100), code, length);
16170 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16171 + memcpy(__va(0x1000 - 100), code, length);
16172
16173 /* Set up the IDT for real mode. */
16174 load_idt(&real_mode_idt);
16175 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16176 __asm__ __volatile__ ("ljmp $0x0008,%0"
16177 :
16178 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16179 + do { } while (1);
16180 }
16181 #ifdef CONFIG_APM_MODULE
16182 EXPORT_SYMBOL(machine_real_restart);
16183 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
16184 {
16185 }
16186
16187 -static void native_machine_emergency_restart(void)
16188 +__noreturn static void native_machine_emergency_restart(void)
16189 {
16190 int i;
16191
16192 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
16193 #endif
16194 }
16195
16196 -static void __machine_emergency_restart(int emergency)
16197 +static __noreturn void __machine_emergency_restart(int emergency)
16198 {
16199 reboot_emergency = emergency;
16200 machine_ops.emergency_restart();
16201 }
16202
16203 -static void native_machine_restart(char *__unused)
16204 +static __noreturn void native_machine_restart(char *__unused)
16205 {
16206 printk("machine restart\n");
16207
16208 @@ -666,7 +666,7 @@ static void native_machine_restart(char
16209 __machine_emergency_restart(0);
16210 }
16211
16212 -static void native_machine_halt(void)
16213 +static __noreturn void native_machine_halt(void)
16214 {
16215 /* stop other cpus and apics */
16216 machine_shutdown();
16217 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
16218 stop_this_cpu(NULL);
16219 }
16220
16221 -static void native_machine_power_off(void)
16222 +__noreturn static void native_machine_power_off(void)
16223 {
16224 if (pm_power_off) {
16225 if (!reboot_force)
16226 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
16227 }
16228 /* a fallback in case there is no PM info available */
16229 tboot_shutdown(TB_SHUTDOWN_HALT);
16230 + do { } while (1);
16231 }
16232
16233 struct machine_ops machine_ops = {
16234 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup.c linux-2.6.32.42/arch/x86/kernel/setup.c
16235 --- linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16236 +++ linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16237 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16238
16239 if (!boot_params.hdr.root_flags)
16240 root_mountflags &= ~MS_RDONLY;
16241 - init_mm.start_code = (unsigned long) _text;
16242 - init_mm.end_code = (unsigned long) _etext;
16243 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16244 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16245 init_mm.end_data = (unsigned long) _edata;
16246 init_mm.brk = _brk_end;
16247
16248 - code_resource.start = virt_to_phys(_text);
16249 - code_resource.end = virt_to_phys(_etext)-1;
16250 - data_resource.start = virt_to_phys(_etext);
16251 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16252 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16253 + data_resource.start = virt_to_phys(_sdata);
16254 data_resource.end = virt_to_phys(_edata)-1;
16255 bss_resource.start = virt_to_phys(&__bss_start);
16256 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16257 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup_percpu.c linux-2.6.32.42/arch/x86/kernel/setup_percpu.c
16258 --- linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16259 +++ linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16260 @@ -25,19 +25,17 @@
16261 # define DBG(x...)
16262 #endif
16263
16264 -DEFINE_PER_CPU(int, cpu_number);
16265 +#ifdef CONFIG_SMP
16266 +DEFINE_PER_CPU(unsigned int, cpu_number);
16267 EXPORT_PER_CPU_SYMBOL(cpu_number);
16268 +#endif
16269
16270 -#ifdef CONFIG_X86_64
16271 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16272 -#else
16273 -#define BOOT_PERCPU_OFFSET 0
16274 -#endif
16275
16276 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16277 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16278
16279 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16280 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16281 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16282 };
16283 EXPORT_SYMBOL(__per_cpu_offset);
16284 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16285 {
16286 #ifdef CONFIG_X86_32
16287 struct desc_struct gdt;
16288 + unsigned long base = per_cpu_offset(cpu);
16289
16290 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16291 - 0x2 | DESCTYPE_S, 0x8);
16292 - gdt.s = 1;
16293 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16294 + 0x83 | DESCTYPE_S, 0xC);
16295 write_gdt_entry(get_cpu_gdt_table(cpu),
16296 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16297 #endif
16298 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16299 /* alrighty, percpu areas up and running */
16300 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16301 for_each_possible_cpu(cpu) {
16302 +#ifdef CONFIG_CC_STACKPROTECTOR
16303 +#ifdef CONFIG_X86_32
16304 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16305 +#endif
16306 +#endif
16307 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16308 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16309 per_cpu(cpu_number, cpu) = cpu;
16310 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16311 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16312 #endif
16313 #endif
16314 +#ifdef CONFIG_CC_STACKPROTECTOR
16315 +#ifdef CONFIG_X86_32
16316 + if (!cpu)
16317 + per_cpu(stack_canary.canary, cpu) = canary;
16318 +#endif
16319 +#endif
16320 /*
16321 * Up to this point, the boot CPU has been using .data.init
16322 * area. Reload any changed state for the boot CPU.
16323 diff -urNp linux-2.6.32.42/arch/x86/kernel/signal.c linux-2.6.32.42/arch/x86/kernel/signal.c
16324 --- linux-2.6.32.42/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16325 +++ linux-2.6.32.42/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16326 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16327 * Align the stack pointer according to the i386 ABI,
16328 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16329 */
16330 - sp = ((sp + 4) & -16ul) - 4;
16331 + sp = ((sp - 12) & -16ul) - 4;
16332 #else /* !CONFIG_X86_32 */
16333 sp = round_down(sp, 16) - 8;
16334 #endif
16335 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16336 * Return an always-bogus address instead so we will die with SIGSEGV.
16337 */
16338 if (onsigstack && !likely(on_sig_stack(sp)))
16339 - return (void __user *)-1L;
16340 + return (__force void __user *)-1L;
16341
16342 /* save i387 state */
16343 if (used_math() && save_i387_xstate(*fpstate) < 0)
16344 - return (void __user *)-1L;
16345 + return (__force void __user *)-1L;
16346
16347 return (void __user *)sp;
16348 }
16349 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16350 }
16351
16352 if (current->mm->context.vdso)
16353 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16354 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16355 else
16356 - restorer = &frame->retcode;
16357 + restorer = (void __user *)&frame->retcode;
16358 if (ka->sa.sa_flags & SA_RESTORER)
16359 restorer = ka->sa.sa_restorer;
16360
16361 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16362 * reasons and because gdb uses it as a signature to notice
16363 * signal handler stack frames.
16364 */
16365 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16366 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16367
16368 if (err)
16369 return -EFAULT;
16370 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16371 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16372
16373 /* Set up to return from userspace. */
16374 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16375 + if (current->mm->context.vdso)
16376 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16377 + else
16378 + restorer = (void __user *)&frame->retcode;
16379 if (ka->sa.sa_flags & SA_RESTORER)
16380 restorer = ka->sa.sa_restorer;
16381 put_user_ex(restorer, &frame->pretcode);
16382 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16383 * reasons and because gdb uses it as a signature to notice
16384 * signal handler stack frames.
16385 */
16386 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16387 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16388 } put_user_catch(err);
16389
16390 if (err)
16391 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16392 int signr;
16393 sigset_t *oldset;
16394
16395 + pax_track_stack();
16396 +
16397 /*
16398 * We want the common case to go fast, which is why we may in certain
16399 * cases get here from kernel mode. Just return without doing anything
16400 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16401 * X86_32: vm86 regs switched out by assembly code before reaching
16402 * here, so testing against kernel CS suffices.
16403 */
16404 - if (!user_mode(regs))
16405 + if (!user_mode_novm(regs))
16406 return;
16407
16408 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16409 diff -urNp linux-2.6.32.42/arch/x86/kernel/smpboot.c linux-2.6.32.42/arch/x86/kernel/smpboot.c
16410 --- linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16411 +++ linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16412 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16413 */
16414 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16415
16416 -void cpu_hotplug_driver_lock()
16417 +void cpu_hotplug_driver_lock(void)
16418 {
16419 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16420 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16421 }
16422
16423 -void cpu_hotplug_driver_unlock()
16424 +void cpu_hotplug_driver_unlock(void)
16425 {
16426 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16427 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16428 }
16429
16430 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16431 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16432 * target processor state.
16433 */
16434 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16435 - (unsigned long)stack_start.sp);
16436 + stack_start);
16437
16438 /*
16439 * Run STARTUP IPI loop.
16440 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16441 set_idle_for_cpu(cpu, c_idle.idle);
16442 do_rest:
16443 per_cpu(current_task, cpu) = c_idle.idle;
16444 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16445 #ifdef CONFIG_X86_32
16446 /* Stack for startup_32 can be just as for start_secondary onwards */
16447 irq_ctx_init(cpu);
16448 @@ -750,13 +751,15 @@ do_rest:
16449 #else
16450 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16451 initial_gs = per_cpu_offset(cpu);
16452 - per_cpu(kernel_stack, cpu) =
16453 - (unsigned long)task_stack_page(c_idle.idle) -
16454 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16455 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16456 #endif
16457 +
16458 + pax_open_kernel();
16459 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16460 + pax_close_kernel();
16461 +
16462 initial_code = (unsigned long)start_secondary;
16463 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16464 + stack_start = c_idle.idle->thread.sp;
16465
16466 /* start_ip had better be page-aligned! */
16467 start_ip = setup_trampoline();
16468 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16469
16470 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16471
16472 +#ifdef CONFIG_PAX_PER_CPU_PGD
16473 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16474 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16475 + KERNEL_PGD_PTRS);
16476 +#endif
16477 +
16478 err = do_boot_cpu(apicid, cpu);
16479
16480 if (err) {
16481 diff -urNp linux-2.6.32.42/arch/x86/kernel/step.c linux-2.6.32.42/arch/x86/kernel/step.c
16482 --- linux-2.6.32.42/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16483 +++ linux-2.6.32.42/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16484 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16485 struct desc_struct *desc;
16486 unsigned long base;
16487
16488 - seg &= ~7UL;
16489 + seg >>= 3;
16490
16491 mutex_lock(&child->mm->context.lock);
16492 - if (unlikely((seg >> 3) >= child->mm->context.size))
16493 + if (unlikely(seg >= child->mm->context.size))
16494 addr = -1L; /* bogus selector, access would fault */
16495 else {
16496 desc = child->mm->context.ldt + seg;
16497 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16498 addr += base;
16499 }
16500 mutex_unlock(&child->mm->context.lock);
16501 - }
16502 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16503 + addr = ktla_ktva(addr);
16504
16505 return addr;
16506 }
16507 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16508 unsigned char opcode[15];
16509 unsigned long addr = convert_ip_to_linear(child, regs);
16510
16511 + if (addr == -EINVAL)
16512 + return 0;
16513 +
16514 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16515 for (i = 0; i < copied; i++) {
16516 switch (opcode[i]) {
16517 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16518
16519 #ifdef CONFIG_X86_64
16520 case 0x40 ... 0x4f:
16521 - if (regs->cs != __USER_CS)
16522 + if ((regs->cs & 0xffff) != __USER_CS)
16523 /* 32-bit mode: register increment */
16524 return 0;
16525 /* 64-bit mode: REX prefix */
16526 diff -urNp linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S
16527 --- linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16528 +++ linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16529 @@ -1,3 +1,4 @@
16530 +.section .rodata,"a",@progbits
16531 ENTRY(sys_call_table)
16532 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16533 .long sys_exit
16534 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c
16535 --- linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16536 +++ linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16537 @@ -24,6 +24,21 @@
16538
16539 #include <asm/syscalls.h>
16540
16541 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16542 +{
16543 + unsigned long pax_task_size = TASK_SIZE;
16544 +
16545 +#ifdef CONFIG_PAX_SEGMEXEC
16546 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16547 + pax_task_size = SEGMEXEC_TASK_SIZE;
16548 +#endif
16549 +
16550 + if (len > pax_task_size || addr > pax_task_size - len)
16551 + return -EINVAL;
16552 +
16553 + return 0;
16554 +}
16555 +
16556 /*
16557 * Perform the select(nd, in, out, ex, tv) and mmap() system
16558 * calls. Linux/i386 didn't use to be able to handle more than
16559 @@ -58,6 +73,212 @@ out:
16560 return err;
16561 }
16562
16563 +unsigned long
16564 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16565 + unsigned long len, unsigned long pgoff, unsigned long flags)
16566 +{
16567 + struct mm_struct *mm = current->mm;
16568 + struct vm_area_struct *vma;
16569 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16570 +
16571 +#ifdef CONFIG_PAX_SEGMEXEC
16572 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16573 + pax_task_size = SEGMEXEC_TASK_SIZE;
16574 +#endif
16575 +
16576 + pax_task_size -= PAGE_SIZE;
16577 +
16578 + if (len > pax_task_size)
16579 + return -ENOMEM;
16580 +
16581 + if (flags & MAP_FIXED)
16582 + return addr;
16583 +
16584 +#ifdef CONFIG_PAX_RANDMMAP
16585 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16586 +#endif
16587 +
16588 + if (addr) {
16589 + addr = PAGE_ALIGN(addr);
16590 + if (pax_task_size - len >= addr) {
16591 + vma = find_vma(mm, addr);
16592 + if (check_heap_stack_gap(vma, addr, len))
16593 + return addr;
16594 + }
16595 + }
16596 + if (len > mm->cached_hole_size) {
16597 + start_addr = addr = mm->free_area_cache;
16598 + } else {
16599 + start_addr = addr = mm->mmap_base;
16600 + mm->cached_hole_size = 0;
16601 + }
16602 +
16603 +#ifdef CONFIG_PAX_PAGEEXEC
16604 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16605 + start_addr = 0x00110000UL;
16606 +
16607 +#ifdef CONFIG_PAX_RANDMMAP
16608 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16609 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16610 +#endif
16611 +
16612 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16613 + start_addr = addr = mm->mmap_base;
16614 + else
16615 + addr = start_addr;
16616 + }
16617 +#endif
16618 +
16619 +full_search:
16620 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16621 + /* At this point: (!vma || addr < vma->vm_end). */
16622 + if (pax_task_size - len < addr) {
16623 + /*
16624 + * Start a new search - just in case we missed
16625 + * some holes.
16626 + */
16627 + if (start_addr != mm->mmap_base) {
16628 + start_addr = addr = mm->mmap_base;
16629 + mm->cached_hole_size = 0;
16630 + goto full_search;
16631 + }
16632 + return -ENOMEM;
16633 + }
16634 + if (check_heap_stack_gap(vma, addr, len))
16635 + break;
16636 + if (addr + mm->cached_hole_size < vma->vm_start)
16637 + mm->cached_hole_size = vma->vm_start - addr;
16638 + addr = vma->vm_end;
16639 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16640 + start_addr = addr = mm->mmap_base;
16641 + mm->cached_hole_size = 0;
16642 + goto full_search;
16643 + }
16644 + }
16645 +
16646 + /*
16647 + * Remember the place where we stopped the search:
16648 + */
16649 + mm->free_area_cache = addr + len;
16650 + return addr;
16651 +}
16652 +
16653 +unsigned long
16654 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16655 + const unsigned long len, const unsigned long pgoff,
16656 + const unsigned long flags)
16657 +{
16658 + struct vm_area_struct *vma;
16659 + struct mm_struct *mm = current->mm;
16660 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16661 +
16662 +#ifdef CONFIG_PAX_SEGMEXEC
16663 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16664 + pax_task_size = SEGMEXEC_TASK_SIZE;
16665 +#endif
16666 +
16667 + pax_task_size -= PAGE_SIZE;
16668 +
16669 + /* requested length too big for entire address space */
16670 + if (len > pax_task_size)
16671 + return -ENOMEM;
16672 +
16673 + if (flags & MAP_FIXED)
16674 + return addr;
16675 +
16676 +#ifdef CONFIG_PAX_PAGEEXEC
16677 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16678 + goto bottomup;
16679 +#endif
16680 +
16681 +#ifdef CONFIG_PAX_RANDMMAP
16682 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16683 +#endif
16684 +
16685 + /* requesting a specific address */
16686 + if (addr) {
16687 + addr = PAGE_ALIGN(addr);
16688 + if (pax_task_size - len >= addr) {
16689 + vma = find_vma(mm, addr);
16690 + if (check_heap_stack_gap(vma, addr, len))
16691 + return addr;
16692 + }
16693 + }
16694 +
16695 + /* check if free_area_cache is useful for us */
16696 + if (len <= mm->cached_hole_size) {
16697 + mm->cached_hole_size = 0;
16698 + mm->free_area_cache = mm->mmap_base;
16699 + }
16700 +
16701 + /* either no address requested or can't fit in requested address hole */
16702 + addr = mm->free_area_cache;
16703 +
16704 + /* make sure it can fit in the remaining address space */
16705 + if (addr > len) {
16706 + vma = find_vma(mm, addr-len);
16707 + if (check_heap_stack_gap(vma, addr - len, len))
16708 + /* remember the address as a hint for next time */
16709 + return (mm->free_area_cache = addr-len);
16710 + }
16711 +
16712 + if (mm->mmap_base < len)
16713 + goto bottomup;
16714 +
16715 + addr = mm->mmap_base-len;
16716 +
16717 + do {
16718 + /*
16719 + * Lookup failure means no vma is above this address,
16720 + * else if new region fits below vma->vm_start,
16721 + * return with success:
16722 + */
16723 + vma = find_vma(mm, addr);
16724 + if (check_heap_stack_gap(vma, addr, len))
16725 + /* remember the address as a hint for next time */
16726 + return (mm->free_area_cache = addr);
16727 +
16728 + /* remember the largest hole we saw so far */
16729 + if (addr + mm->cached_hole_size < vma->vm_start)
16730 + mm->cached_hole_size = vma->vm_start - addr;
16731 +
16732 + /* try just below the current vma->vm_start */
16733 + addr = skip_heap_stack_gap(vma, len);
16734 + } while (!IS_ERR_VALUE(addr));
16735 +
16736 +bottomup:
16737 + /*
16738 + * A failed mmap() very likely causes application failure,
16739 + * so fall back to the bottom-up function here. This scenario
16740 + * can happen with large stack limits and large mmap()
16741 + * allocations.
16742 + */
16743 +
16744 +#ifdef CONFIG_PAX_SEGMEXEC
16745 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16746 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16747 + else
16748 +#endif
16749 +
16750 + mm->mmap_base = TASK_UNMAPPED_BASE;
16751 +
16752 +#ifdef CONFIG_PAX_RANDMMAP
16753 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16754 + mm->mmap_base += mm->delta_mmap;
16755 +#endif
16756 +
16757 + mm->free_area_cache = mm->mmap_base;
16758 + mm->cached_hole_size = ~0UL;
16759 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16760 + /*
16761 + * Restore the topdown base:
16762 + */
16763 + mm->mmap_base = base;
16764 + mm->free_area_cache = base;
16765 + mm->cached_hole_size = ~0UL;
16766 +
16767 + return addr;
16768 +}
16769
16770 struct sel_arg_struct {
16771 unsigned long n;
16772 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16773 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16774 case SEMTIMEDOP:
16775 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16776 - (const struct timespec __user *)fifth);
16777 + (__force const struct timespec __user *)fifth);
16778
16779 case SEMGET:
16780 return sys_semget(first, second, third);
16781 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16782 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16783 if (ret)
16784 return ret;
16785 - return put_user(raddr, (ulong __user *) third);
16786 + return put_user(raddr, (__force ulong __user *) third);
16787 }
16788 case 1: /* iBCS2 emulator entry point */
16789 if (!segment_eq(get_fs(), get_ds()))
16790 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16791
16792 return error;
16793 }
16794 -
16795 -
16796 -/*
16797 - * Do a system call from kernel instead of calling sys_execve so we
16798 - * end up with proper pt_regs.
16799 - */
16800 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16801 -{
16802 - long __res;
16803 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16804 - : "=a" (__res)
16805 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16806 - return __res;
16807 -}
16808 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c
16809 --- linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16810 +++ linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16811 @@ -32,8 +32,8 @@ out:
16812 return error;
16813 }
16814
16815 -static void find_start_end(unsigned long flags, unsigned long *begin,
16816 - unsigned long *end)
16817 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16818 + unsigned long *begin, unsigned long *end)
16819 {
16820 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16821 unsigned long new_begin;
16822 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16823 *begin = new_begin;
16824 }
16825 } else {
16826 - *begin = TASK_UNMAPPED_BASE;
16827 + *begin = mm->mmap_base;
16828 *end = TASK_SIZE;
16829 }
16830 }
16831 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16832 if (flags & MAP_FIXED)
16833 return addr;
16834
16835 - find_start_end(flags, &begin, &end);
16836 + find_start_end(mm, flags, &begin, &end);
16837
16838 if (len > end)
16839 return -ENOMEM;
16840
16841 +#ifdef CONFIG_PAX_RANDMMAP
16842 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16843 +#endif
16844 +
16845 if (addr) {
16846 addr = PAGE_ALIGN(addr);
16847 vma = find_vma(mm, addr);
16848 - if (end - len >= addr &&
16849 - (!vma || addr + len <= vma->vm_start))
16850 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16851 return addr;
16852 }
16853 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16854 @@ -106,7 +109,7 @@ full_search:
16855 }
16856 return -ENOMEM;
16857 }
16858 - if (!vma || addr + len <= vma->vm_start) {
16859 + if (check_heap_stack_gap(vma, addr, len)) {
16860 /*
16861 * Remember the place where we stopped the search:
16862 */
16863 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16864 {
16865 struct vm_area_struct *vma;
16866 struct mm_struct *mm = current->mm;
16867 - unsigned long addr = addr0;
16868 + unsigned long base = mm->mmap_base, addr = addr0;
16869
16870 /* requested length too big for entire address space */
16871 if (len > TASK_SIZE)
16872 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16873 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16874 goto bottomup;
16875
16876 +#ifdef CONFIG_PAX_RANDMMAP
16877 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16878 +#endif
16879 +
16880 /* requesting a specific address */
16881 if (addr) {
16882 addr = PAGE_ALIGN(addr);
16883 - vma = find_vma(mm, addr);
16884 - if (TASK_SIZE - len >= addr &&
16885 - (!vma || addr + len <= vma->vm_start))
16886 - return addr;
16887 + if (TASK_SIZE - len >= addr) {
16888 + vma = find_vma(mm, addr);
16889 + if (check_heap_stack_gap(vma, addr, len))
16890 + return addr;
16891 + }
16892 }
16893
16894 /* check if free_area_cache is useful for us */
16895 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16896 /* make sure it can fit in the remaining address space */
16897 if (addr > len) {
16898 vma = find_vma(mm, addr-len);
16899 - if (!vma || addr <= vma->vm_start)
16900 + if (check_heap_stack_gap(vma, addr - len, len))
16901 /* remember the address as a hint for next time */
16902 return mm->free_area_cache = addr-len;
16903 }
16904 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16905 * return with success:
16906 */
16907 vma = find_vma(mm, addr);
16908 - if (!vma || addr+len <= vma->vm_start)
16909 + if (check_heap_stack_gap(vma, addr, len))
16910 /* remember the address as a hint for next time */
16911 return mm->free_area_cache = addr;
16912
16913 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16914 mm->cached_hole_size = vma->vm_start - addr;
16915
16916 /* try just below the current vma->vm_start */
16917 - addr = vma->vm_start-len;
16918 - } while (len < vma->vm_start);
16919 + addr = skip_heap_stack_gap(vma, len);
16920 + } while (!IS_ERR_VALUE(addr));
16921
16922 bottomup:
16923 /*
16924 @@ -198,13 +206,21 @@ bottomup:
16925 * can happen with large stack limits and large mmap()
16926 * allocations.
16927 */
16928 + mm->mmap_base = TASK_UNMAPPED_BASE;
16929 +
16930 +#ifdef CONFIG_PAX_RANDMMAP
16931 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16932 + mm->mmap_base += mm->delta_mmap;
16933 +#endif
16934 +
16935 + mm->free_area_cache = mm->mmap_base;
16936 mm->cached_hole_size = ~0UL;
16937 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16938 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16939 /*
16940 * Restore the topdown base:
16941 */
16942 - mm->free_area_cache = mm->mmap_base;
16943 + mm->mmap_base = base;
16944 + mm->free_area_cache = base;
16945 mm->cached_hole_size = ~0UL;
16946
16947 return addr;
16948 diff -urNp linux-2.6.32.42/arch/x86/kernel/tboot.c linux-2.6.32.42/arch/x86/kernel/tboot.c
16949 --- linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16950 +++ linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16951 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16952
16953 void tboot_shutdown(u32 shutdown_type)
16954 {
16955 - void (*shutdown)(void);
16956 + void (* __noreturn shutdown)(void);
16957
16958 if (!tboot_enabled())
16959 return;
16960 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16961
16962 switch_to_tboot_pt();
16963
16964 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16965 + shutdown = (void *)tboot->shutdown_entry;
16966 shutdown();
16967
16968 /* should not reach here */
16969 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16970 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16971 }
16972
16973 -static atomic_t ap_wfs_count;
16974 +static atomic_unchecked_t ap_wfs_count;
16975
16976 static int tboot_wait_for_aps(int num_aps)
16977 {
16978 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16979 {
16980 switch (action) {
16981 case CPU_DYING:
16982 - atomic_inc(&ap_wfs_count);
16983 + atomic_inc_unchecked(&ap_wfs_count);
16984 if (num_online_cpus() == 1)
16985 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16986 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16987 return NOTIFY_BAD;
16988 break;
16989 }
16990 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16991
16992 tboot_create_trampoline();
16993
16994 - atomic_set(&ap_wfs_count, 0);
16995 + atomic_set_unchecked(&ap_wfs_count, 0);
16996 register_hotcpu_notifier(&tboot_cpu_notifier);
16997 return 0;
16998 }
16999 diff -urNp linux-2.6.32.42/arch/x86/kernel/time.c linux-2.6.32.42/arch/x86/kernel/time.c
17000 --- linux-2.6.32.42/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17001 +++ linux-2.6.32.42/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17002 @@ -26,17 +26,13 @@
17003 int timer_ack;
17004 #endif
17005
17006 -#ifdef CONFIG_X86_64
17007 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17008 -#endif
17009 -
17010 unsigned long profile_pc(struct pt_regs *regs)
17011 {
17012 unsigned long pc = instruction_pointer(regs);
17013
17014 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17015 + if (!user_mode(regs) && in_lock_functions(pc)) {
17016 #ifdef CONFIG_FRAME_POINTER
17017 - return *(unsigned long *)(regs->bp + sizeof(long));
17018 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17019 #else
17020 unsigned long *sp =
17021 (unsigned long *)kernel_stack_pointer(regs);
17022 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17023 * or above a saved flags. Eflags has bits 22-31 zero,
17024 * kernel addresses don't.
17025 */
17026 +
17027 +#ifdef CONFIG_PAX_KERNEXEC
17028 + return ktla_ktva(sp[0]);
17029 +#else
17030 if (sp[0] >> 22)
17031 return sp[0];
17032 if (sp[1] >> 22)
17033 return sp[1];
17034 #endif
17035 +
17036 +#endif
17037 }
17038 return pc;
17039 }
17040 diff -urNp linux-2.6.32.42/arch/x86/kernel/tls.c linux-2.6.32.42/arch/x86/kernel/tls.c
17041 --- linux-2.6.32.42/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17042 +++ linux-2.6.32.42/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17043 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17044 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17045 return -EINVAL;
17046
17047 +#ifdef CONFIG_PAX_SEGMEXEC
17048 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17049 + return -EINVAL;
17050 +#endif
17051 +
17052 set_tls_desc(p, idx, &info, 1);
17053
17054 return 0;
17055 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_32.S linux-2.6.32.42/arch/x86/kernel/trampoline_32.S
17056 --- linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17057 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17058 @@ -32,6 +32,12 @@
17059 #include <asm/segment.h>
17060 #include <asm/page_types.h>
17061
17062 +#ifdef CONFIG_PAX_KERNEXEC
17063 +#define ta(X) (X)
17064 +#else
17065 +#define ta(X) ((X) - __PAGE_OFFSET)
17066 +#endif
17067 +
17068 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17069 __CPUINITRODATA
17070 .code16
17071 @@ -60,7 +66,7 @@ r_base = .
17072 inc %ax # protected mode (PE) bit
17073 lmsw %ax # into protected mode
17074 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17075 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17076 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17077
17078 # These need to be in the same 64K segment as the above;
17079 # hence we don't use the boot_gdt_descr defined in head.S
17080 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_64.S linux-2.6.32.42/arch/x86/kernel/trampoline_64.S
17081 --- linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17082 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17083 @@ -91,7 +91,7 @@ startup_32:
17084 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17085 movl %eax, %ds
17086
17087 - movl $X86_CR4_PAE, %eax
17088 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17089 movl %eax, %cr4 # Enable PAE mode
17090
17091 # Setup trampoline 4 level pagetables
17092 @@ -127,7 +127,7 @@ startup_64:
17093 no_longmode:
17094 hlt
17095 jmp no_longmode
17096 -#include "verify_cpu_64.S"
17097 +#include "verify_cpu.S"
17098
17099 # Careful these need to be in the same 64K segment as the above;
17100 tidt:
17101 @@ -138,7 +138,7 @@ tidt:
17102 # so the kernel can live anywhere
17103 .balign 4
17104 tgdt:
17105 - .short tgdt_end - tgdt # gdt limit
17106 + .short tgdt_end - tgdt - 1 # gdt limit
17107 .long tgdt - r_base
17108 .short 0
17109 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17110 diff -urNp linux-2.6.32.42/arch/x86/kernel/traps.c linux-2.6.32.42/arch/x86/kernel/traps.c
17111 --- linux-2.6.32.42/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17112 +++ linux-2.6.32.42/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
17113 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17114
17115 /* Do we ignore FPU interrupts ? */
17116 char ignore_fpu_irq;
17117 -
17118 -/*
17119 - * The IDT has to be page-aligned to simplify the Pentium
17120 - * F0 0F bug workaround.
17121 - */
17122 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17123 #endif
17124
17125 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17126 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17127 static inline void
17128 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17129 {
17130 - if (!user_mode_vm(regs))
17131 + if (!user_mode(regs))
17132 die(str, regs, err);
17133 }
17134 #endif
17135
17136 static void __kprobes
17137 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17138 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17139 long error_code, siginfo_t *info)
17140 {
17141 struct task_struct *tsk = current;
17142
17143 #ifdef CONFIG_X86_32
17144 - if (regs->flags & X86_VM_MASK) {
17145 + if (v8086_mode(regs)) {
17146 /*
17147 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17148 * On nmi (interrupt 2), do_trap should not be called.
17149 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17150 }
17151 #endif
17152
17153 - if (!user_mode(regs))
17154 + if (!user_mode_novm(regs))
17155 goto kernel_trap;
17156
17157 #ifdef CONFIG_X86_32
17158 @@ -158,7 +152,7 @@ trap_signal:
17159 printk_ratelimit()) {
17160 printk(KERN_INFO
17161 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17162 - tsk->comm, tsk->pid, str,
17163 + tsk->comm, task_pid_nr(tsk), str,
17164 regs->ip, regs->sp, error_code);
17165 print_vma_addr(" in ", regs->ip);
17166 printk("\n");
17167 @@ -175,8 +169,20 @@ kernel_trap:
17168 if (!fixup_exception(regs)) {
17169 tsk->thread.error_code = error_code;
17170 tsk->thread.trap_no = trapnr;
17171 +
17172 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17173 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17174 + str = "PAX: suspicious stack segment fault";
17175 +#endif
17176 +
17177 die(str, regs, error_code);
17178 }
17179 +
17180 +#ifdef CONFIG_PAX_REFCOUNT
17181 + if (trapnr == 4)
17182 + pax_report_refcount_overflow(regs);
17183 +#endif
17184 +
17185 return;
17186
17187 #ifdef CONFIG_X86_32
17188 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17189 conditional_sti(regs);
17190
17191 #ifdef CONFIG_X86_32
17192 - if (regs->flags & X86_VM_MASK)
17193 + if (v8086_mode(regs))
17194 goto gp_in_vm86;
17195 #endif
17196
17197 tsk = current;
17198 - if (!user_mode(regs))
17199 + if (!user_mode_novm(regs))
17200 goto gp_in_kernel;
17201
17202 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17203 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17204 + struct mm_struct *mm = tsk->mm;
17205 + unsigned long limit;
17206 +
17207 + down_write(&mm->mmap_sem);
17208 + limit = mm->context.user_cs_limit;
17209 + if (limit < TASK_SIZE) {
17210 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17211 + up_write(&mm->mmap_sem);
17212 + return;
17213 + }
17214 + up_write(&mm->mmap_sem);
17215 + }
17216 +#endif
17217 +
17218 tsk->thread.error_code = error_code;
17219 tsk->thread.trap_no = 13;
17220
17221 @@ -305,6 +327,13 @@ gp_in_kernel:
17222 if (notify_die(DIE_GPF, "general protection fault", regs,
17223 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17224 return;
17225 +
17226 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17227 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17228 + die("PAX: suspicious general protection fault", regs, error_code);
17229 + else
17230 +#endif
17231 +
17232 die("general protection fault", regs, error_code);
17233 }
17234
17235 @@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
17236 }
17237
17238 #ifdef CONFIG_X86_32
17239 - if (regs->flags & X86_VM_MASK)
17240 + if (v8086_mode(regs))
17241 goto debug_vm86;
17242 #endif
17243
17244 @@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
17245 * kernel space (but re-enable TF when returning to user mode).
17246 */
17247 if (condition & DR_STEP) {
17248 - if (!user_mode(regs))
17249 + if (!user_mode_novm(regs))
17250 goto clear_TF_reenable;
17251 }
17252
17253 @@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
17254 * Handle strange cache flush from user space exception
17255 * in all other cases. This is undocumented behaviour.
17256 */
17257 - if (regs->flags & X86_VM_MASK) {
17258 + if (v8086_mode(regs)) {
17259 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17260 return;
17261 }
17262 @@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
17263 void __math_state_restore(void)
17264 {
17265 struct thread_info *thread = current_thread_info();
17266 - struct task_struct *tsk = thread->task;
17267 + struct task_struct *tsk = current;
17268
17269 /*
17270 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17271 @@ -825,8 +854,7 @@ void __math_state_restore(void)
17272 */
17273 asmlinkage void math_state_restore(void)
17274 {
17275 - struct thread_info *thread = current_thread_info();
17276 - struct task_struct *tsk = thread->task;
17277 + struct task_struct *tsk = current;
17278
17279 if (!tsk_used_math(tsk)) {
17280 local_irq_enable();
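
For context on the hunks above that swap user_mode()/X86_VM_MASK tests for user_mode_novm()/v8086_mode(): on 32-bit, v8086_mode() amounts to testing the VM bit (bit 17) of the EFLAGS image saved in pt_regs. A minimal standalone C sketch of that test, assuming only the architectural EFLAGS.VM definition (the helper name below is made up, not part of the patch):

#include <stdio.h>

#define X86_EFLAGS_VM 0x00020000UL   /* EFLAGS bit 17: virtual-8086 mode */

/* Sketch only: stand-in for the kernel's v8086_mode(regs) check. */
static int v8086_mode_sketch(unsigned long saved_flags)
{
        return (saved_flags & X86_EFLAGS_VM) != 0;
}

int main(void)
{
        printf("%d\n", v8086_mode_sketch(0x00020246UL));   /* prints 1 */
        return 0;
}
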
17281 diff -urNp linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S
17282 --- linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17283 +++ linux-2.6.32.42/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17284 @@ -1,105 +0,0 @@
17285 -/*
17286 - *
17287 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17288 - * code has been borrowed from boot/setup.S and was introduced by
17289 - * Andi Kleen.
17290 - *
17291 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17292 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17293 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17294 - *
17295 - * This source code is licensed under the GNU General Public License,
17296 - * Version 2. See the file COPYING for more details.
17297 - *
17298 - * This is a common code for verification whether CPU supports
17299 - * long mode and SSE or not. It is not called directly instead this
17300 - * file is included at various places and compiled in that context.
17301 - * Following are the current usage.
17302 - *
17303 - * This file is included by both 16bit and 32bit code.
17304 - *
17305 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17306 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17307 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17308 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17309 - *
17310 - * verify_cpu, returns the status of cpu check in register %eax.
17311 - * 0: Success 1: Failure
17312 - *
17313 - * The caller needs to check for the error code and take the action
17314 - * appropriately. Either display a message or halt.
17315 - */
17316 -
17317 -#include <asm/cpufeature.h>
17318 -
17319 -verify_cpu:
17320 - pushfl # Save caller passed flags
17321 - pushl $0 # Kill any dangerous flags
17322 - popfl
17323 -
17324 - pushfl # standard way to check for cpuid
17325 - popl %eax
17326 - movl %eax,%ebx
17327 - xorl $0x200000,%eax
17328 - pushl %eax
17329 - popfl
17330 - pushfl
17331 - popl %eax
17332 - cmpl %eax,%ebx
17333 - jz verify_cpu_no_longmode # cpu has no cpuid
17334 -
17335 - movl $0x0,%eax # See if cpuid 1 is implemented
17336 - cpuid
17337 - cmpl $0x1,%eax
17338 - jb verify_cpu_no_longmode # no cpuid 1
17339 -
17340 - xor %di,%di
17341 - cmpl $0x68747541,%ebx # AuthenticAMD
17342 - jnz verify_cpu_noamd
17343 - cmpl $0x69746e65,%edx
17344 - jnz verify_cpu_noamd
17345 - cmpl $0x444d4163,%ecx
17346 - jnz verify_cpu_noamd
17347 - mov $1,%di # cpu is from AMD
17348 -
17349 -verify_cpu_noamd:
17350 - movl $0x1,%eax # Does the cpu have what it takes
17351 - cpuid
17352 - andl $REQUIRED_MASK0,%edx
17353 - xorl $REQUIRED_MASK0,%edx
17354 - jnz verify_cpu_no_longmode
17355 -
17356 - movl $0x80000000,%eax # See if extended cpuid is implemented
17357 - cpuid
17358 - cmpl $0x80000001,%eax
17359 - jb verify_cpu_no_longmode # no extended cpuid
17360 -
17361 - movl $0x80000001,%eax # Does the cpu have what it takes
17362 - cpuid
17363 - andl $REQUIRED_MASK1,%edx
17364 - xorl $REQUIRED_MASK1,%edx
17365 - jnz verify_cpu_no_longmode
17366 -
17367 -verify_cpu_sse_test:
17368 - movl $1,%eax
17369 - cpuid
17370 - andl $SSE_MASK,%edx
17371 - cmpl $SSE_MASK,%edx
17372 - je verify_cpu_sse_ok
17373 - test %di,%di
17374 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17375 - movl $0xc0010015,%ecx # HWCR
17376 - rdmsr
17377 - btr $15,%eax # enable SSE
17378 - wrmsr
17379 - xor %di,%di # don't loop
17380 - jmp verify_cpu_sse_test # try again
17381 -
17382 -verify_cpu_no_longmode:
17383 - popfl # Restore caller passed flags
17384 - movl $1,%eax
17385 - ret
17386 -verify_cpu_sse_ok:
17387 - popfl # Restore caller passed flags
17388 - xorl %eax, %eax
17389 - ret
17390 diff -urNp linux-2.6.32.42/arch/x86/kernel/verify_cpu.S linux-2.6.32.42/arch/x86/kernel/verify_cpu.S
17391 --- linux-2.6.32.42/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17392 +++ linux-2.6.32.42/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17393 @@ -0,0 +1,140 @@
17394 +/*
17395 + *
17396 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17397 + * code has been borrowed from boot/setup.S and was introduced by
17398 + * Andi Kleen.
17399 + *
17400 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17401 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17402 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17403 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17404 + *
17405 + * This source code is licensed under the GNU General Public License,
17406 + * Version 2. See the file COPYING for more details.
17407 + *
17408 + * This is a common code for verification whether CPU supports
17409 + * long mode and SSE or not. It is not called directly instead this
17410 + * file is included at various places and compiled in that context.
17411 + * This file is expected to run in 32bit code. Currently:
17412 + *
17413 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17414 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17415 + * arch/x86/kernel/head_32.S: processor startup
17416 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17417 + *
17418 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17419 + * 0: Success 1: Failure
17420 + *
17421 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17422 + *
17423 + * The caller needs to check for the error code and take the action
17424 + * appropriately. Either display a message or halt.
17425 + */
17426 +
17427 +#include <asm/cpufeature.h>
17428 +#include <asm/msr-index.h>
17429 +
17430 +verify_cpu:
17431 + pushfl # Save caller passed flags
17432 + pushl $0 # Kill any dangerous flags
17433 + popfl
17434 +
17435 + pushfl # standard way to check for cpuid
17436 + popl %eax
17437 + movl %eax,%ebx
17438 + xorl $0x200000,%eax
17439 + pushl %eax
17440 + popfl
17441 + pushfl
17442 + popl %eax
17443 + cmpl %eax,%ebx
17444 + jz verify_cpu_no_longmode # cpu has no cpuid
17445 +
17446 + movl $0x0,%eax # See if cpuid 1 is implemented
17447 + cpuid
17448 + cmpl $0x1,%eax
17449 + jb verify_cpu_no_longmode # no cpuid 1
17450 +
17451 + xor %di,%di
17452 + cmpl $0x68747541,%ebx # AuthenticAMD
17453 + jnz verify_cpu_noamd
17454 + cmpl $0x69746e65,%edx
17455 + jnz verify_cpu_noamd
17456 + cmpl $0x444d4163,%ecx
17457 + jnz verify_cpu_noamd
17458 + mov $1,%di # cpu is from AMD
17459 + jmp verify_cpu_check
17460 +
17461 +verify_cpu_noamd:
17462 + cmpl $0x756e6547,%ebx # GenuineIntel?
17463 + jnz verify_cpu_check
17464 + cmpl $0x49656e69,%edx
17465 + jnz verify_cpu_check
17466 + cmpl $0x6c65746e,%ecx
17467 + jnz verify_cpu_check
17468 +
17469 + # only call IA32_MISC_ENABLE when:
17470 + # family > 6 || (family == 6 && model >= 0xd)
17471 + movl $0x1, %eax # check CPU family and model
17472 + cpuid
17473 + movl %eax, %ecx
17474 +
17475 + andl $0x0ff00f00, %eax # mask family and extended family
17476 + shrl $8, %eax
17477 + cmpl $6, %eax
17478 + ja verify_cpu_clear_xd # family > 6, ok
17479 + jb verify_cpu_check # family < 6, skip
17480 +
17481 + andl $0x000f00f0, %ecx # mask model and extended model
17482 + shrl $4, %ecx
17483 + cmpl $0xd, %ecx
17484 + jb verify_cpu_check # family == 6, model < 0xd, skip
17485 +
17486 +verify_cpu_clear_xd:
17487 + movl $MSR_IA32_MISC_ENABLE, %ecx
17488 + rdmsr
17489 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17490 + jnc verify_cpu_check # only write MSR if bit was changed
17491 + wrmsr
17492 +
17493 +verify_cpu_check:
17494 + movl $0x1,%eax # Does the cpu have what it takes
17495 + cpuid
17496 + andl $REQUIRED_MASK0,%edx
17497 + xorl $REQUIRED_MASK0,%edx
17498 + jnz verify_cpu_no_longmode
17499 +
17500 + movl $0x80000000,%eax # See if extended cpuid is implemented
17501 + cpuid
17502 + cmpl $0x80000001,%eax
17503 + jb verify_cpu_no_longmode # no extended cpuid
17504 +
17505 + movl $0x80000001,%eax # Does the cpu have what it takes
17506 + cpuid
17507 + andl $REQUIRED_MASK1,%edx
17508 + xorl $REQUIRED_MASK1,%edx
17509 + jnz verify_cpu_no_longmode
17510 +
17511 +verify_cpu_sse_test:
17512 + movl $1,%eax
17513 + cpuid
17514 + andl $SSE_MASK,%edx
17515 + cmpl $SSE_MASK,%edx
17516 + je verify_cpu_sse_ok
17517 + test %di,%di
17518 + jz verify_cpu_no_longmode # only try to force SSE on AMD
17519 + movl $MSR_K7_HWCR,%ecx
17520 + rdmsr
17521 + btr $15,%eax # enable SSE
17522 + wrmsr
17523 + xor %di,%di # don't loop
17524 + jmp verify_cpu_sse_test # try again
17525 +
17526 +verify_cpu_no_longmode:
17527 + popfl # Restore caller passed flags
17528 + movl $1,%eax
17529 + ret
17530 +verify_cpu_sse_ok:
17531 + popfl # Restore caller passed flags
17532 + xorl %eax, %eax
17533 + ret
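
The new verify_cpu.S above adds an Intel-only step that clears XD_DISABLE in IA32_MISC_ENABLE before the long-mode checks: the btrl $2,%edx operates on the MSR's high dword, i.e. bit 34 of the 64-bit register, and wrmsr runs only when the bit was actually set. A rough C rendering of that step, assuming kernel context and the kernel's rdmsrl()/wrmsrl() helpers; the function name is illustrative:

#define MSR_IA32_MISC_ENABLE        0x000001a0
#define MISC_ENABLE_XD_DISABLE_BIT  34          /* "XD Bit Disable" */

/* Sketch only: C equivalent of the verify_cpu_clear_xd step. */
static void clear_xd_disable_sketch(void)
{
        unsigned long long misc;

        rdmsrl(MSR_IA32_MISC_ENABLE, misc);
        if (misc & (1ULL << MISC_ENABLE_XD_DISABLE_BIT)) {
                misc &= ~(1ULL << MISC_ENABLE_XD_DISABLE_BIT);
                wrmsrl(MSR_IA32_MISC_ENABLE, misc);   /* write back only when changed */
        }
}
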
17534 diff -urNp linux-2.6.32.42/arch/x86/kernel/vm86_32.c linux-2.6.32.42/arch/x86/kernel/vm86_32.c
17535 --- linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17536 +++ linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17537 @@ -41,6 +41,7 @@
17538 #include <linux/ptrace.h>
17539 #include <linux/audit.h>
17540 #include <linux/stddef.h>
17541 +#include <linux/grsecurity.h>
17542
17543 #include <asm/uaccess.h>
17544 #include <asm/io.h>
17545 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17546 do_exit(SIGSEGV);
17547 }
17548
17549 - tss = &per_cpu(init_tss, get_cpu());
17550 + tss = init_tss + get_cpu();
17551 current->thread.sp0 = current->thread.saved_sp0;
17552 current->thread.sysenter_cs = __KERNEL_CS;
17553 load_sp0(tss, &current->thread);
17554 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17555 struct task_struct *tsk;
17556 int tmp, ret = -EPERM;
17557
17558 +#ifdef CONFIG_GRKERNSEC_VM86
17559 + if (!capable(CAP_SYS_RAWIO)) {
17560 + gr_handle_vm86();
17561 + goto out;
17562 + }
17563 +#endif
17564 +
17565 tsk = current;
17566 if (tsk->thread.saved_sp0)
17567 goto out;
17568 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17569 int tmp, ret;
17570 struct vm86plus_struct __user *v86;
17571
17572 +#ifdef CONFIG_GRKERNSEC_VM86
17573 + if (!capable(CAP_SYS_RAWIO)) {
17574 + gr_handle_vm86();
17575 + ret = -EPERM;
17576 + goto out;
17577 + }
17578 +#endif
17579 +
17580 tsk = current;
17581 switch (regs->bx) {
17582 case VM86_REQUEST_IRQ:
17583 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17584 tsk->thread.saved_fs = info->regs32->fs;
17585 tsk->thread.saved_gs = get_user_gs(info->regs32);
17586
17587 - tss = &per_cpu(init_tss, get_cpu());
17588 + tss = init_tss + get_cpu();
17589 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17590 if (cpu_has_sep)
17591 tsk->thread.sysenter_cs = 0;
17592 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17593 goto cannot_handle;
17594 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17595 goto cannot_handle;
17596 - intr_ptr = (unsigned long __user *) (i << 2);
17597 + intr_ptr = (__force unsigned long __user *) (i << 2);
17598 if (get_user(segoffs, intr_ptr))
17599 goto cannot_handle;
17600 if ((segoffs >> 16) == BIOSSEG)
17601 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmi_32.c linux-2.6.32.42/arch/x86/kernel/vmi_32.c
17602 --- linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17603 +++ linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
17604 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17605 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17606
17607 #define call_vrom_func(rom,func) \
17608 - (((VROMFUNC *)(rom->func))())
17609 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17610
17611 #define call_vrom_long_func(rom,func,arg) \
17612 - (((VROMLONGFUNC *)(rom->func)) (arg))
17613 +({\
17614 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17615 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17616 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17617 + __reloc;\
17618 +})
17619
17620 -static struct vrom_header *vmi_rom;
17621 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17622 static int disable_pge;
17623 static int disable_pse;
17624 static int disable_sep;
17625 @@ -76,10 +81,10 @@ static struct {
17626 void (*set_initial_ap_state)(int, int);
17627 void (*halt)(void);
17628 void (*set_lazy_mode)(int mode);
17629 -} vmi_ops;
17630 +} vmi_ops __read_only;
17631
17632 /* Cached VMI operations */
17633 -struct vmi_timer_ops vmi_timer_ops;
17634 +struct vmi_timer_ops vmi_timer_ops __read_only;
17635
17636 /*
17637 * VMI patching routines.
17638 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17639 static inline void patch_offset(void *insnbuf,
17640 unsigned long ip, unsigned long dest)
17641 {
17642 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17643 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17644 }
17645
17646 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17647 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17648 {
17649 u64 reloc;
17650 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17651 +
17652 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17653 switch(rel->type) {
17654 case VMI_RELOCATION_CALL_REL:
17655 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17656
17657 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17658 {
17659 - const pte_t pte = { .pte = 0 };
17660 + const pte_t pte = __pte(0ULL);
17661 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17662 }
17663
17664 static void vmi_pmd_clear(pmd_t *pmd)
17665 {
17666 - const pte_t pte = { .pte = 0 };
17667 + const pte_t pte = __pte(0ULL);
17668 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17669 }
17670 #endif
17671 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17672 ap.ss = __KERNEL_DS;
17673 ap.esp = (unsigned long) start_esp;
17674
17675 - ap.ds = __USER_DS;
17676 - ap.es = __USER_DS;
17677 + ap.ds = __KERNEL_DS;
17678 + ap.es = __KERNEL_DS;
17679 ap.fs = __KERNEL_PERCPU;
17680 - ap.gs = __KERNEL_STACK_CANARY;
17681 + savesegment(gs, ap.gs);
17682
17683 ap.eflags = 0;
17684
17685 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17686 paravirt_leave_lazy_mmu();
17687 }
17688
17689 +#ifdef CONFIG_PAX_KERNEXEC
17690 +static unsigned long vmi_pax_open_kernel(void)
17691 +{
17692 + return 0;
17693 +}
17694 +
17695 +static unsigned long vmi_pax_close_kernel(void)
17696 +{
17697 + return 0;
17698 +}
17699 +#endif
17700 +
17701 static inline int __init check_vmi_rom(struct vrom_header *rom)
17702 {
17703 struct pci_header *pci;
17704 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17705 return 0;
17706 if (rom->vrom_signature != VMI_SIGNATURE)
17707 return 0;
17708 + if (rom->rom_length * 512 > sizeof(*rom)) {
17709 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17710 + return 0;
17711 + }
17712 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17713 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17714 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17715 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17716 struct vrom_header *romstart;
17717 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17718 if (check_vmi_rom(romstart)) {
17719 - vmi_rom = romstart;
17720 + vmi_rom = *romstart;
17721 return 1;
17722 }
17723 }
17724 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17725
17726 para_fill(pv_irq_ops.safe_halt, Halt);
17727
17728 +#ifdef CONFIG_PAX_KERNEXEC
17729 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17730 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17731 +#endif
17732 +
17733 /*
17734 * Alternative instruction rewriting doesn't happen soon enough
17735 * to convert VMI_IRET to a call instead of a jump; so we have
17736 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17737
17738 void __init vmi_init(void)
17739 {
17740 - if (!vmi_rom)
17741 + if (!vmi_rom.rom_signature)
17742 probe_vmi_rom();
17743 else
17744 - check_vmi_rom(vmi_rom);
17745 + check_vmi_rom(&vmi_rom);
17746
17747 /* In case probing for or validating the ROM failed, basil */
17748 - if (!vmi_rom)
17749 + if (!vmi_rom.rom_signature)
17750 return;
17751
17752 - reserve_top_address(-vmi_rom->virtual_top);
17753 + reserve_top_address(-vmi_rom.virtual_top);
17754
17755 #ifdef CONFIG_X86_IO_APIC
17756 /* This is virtual hardware; timer routing is wired correctly */
17757 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17758 {
17759 unsigned long flags;
17760
17761 - if (!vmi_rom)
17762 + if (!vmi_rom.rom_signature)
17763 return;
17764
17765 local_irq_save(flags);
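
In the vmi_32.c hunks above, vmi_rom changes from a pointer into the ISA ROM window to a page-aligned copy placed in a dedicated .vmi.rom section, and check_vmi_rom() now rejects ROMs whose declared length would not fit the embedded copy. A hedged sketch of that validate-and-copy idea; the struct fields are taken from the surrounding code, and the function name is made up:

/* Sketch only: replaces "vmi_rom = romstart" with a bounded copy. */
static int adopt_vmi_rom_sketch(struct vrom_header *dst,
                                const struct vrom_header *src)
{
        /* rom_length counts 512-byte units; refuse anything larger than
         * the fixed-size, page-aligned destination object. */
        if ((unsigned long)src->rom_length * 512 > sizeof(*dst))
                return -1;

        *dst = *src;    /* header now lives in kernel data, not ISA space */
        return 0;
}
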
17766 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S
17767 --- linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17768 +++ linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17769 @@ -26,6 +26,13 @@
17770 #include <asm/page_types.h>
17771 #include <asm/cache.h>
17772 #include <asm/boot.h>
17773 +#include <asm/segment.h>
17774 +
17775 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17776 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17777 +#else
17778 +#define __KERNEL_TEXT_OFFSET 0
17779 +#endif
17780
17781 #undef i386 /* in case the preprocessor is a 32bit one */
17782
17783 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17784 #ifdef CONFIG_X86_32
17785 OUTPUT_ARCH(i386)
17786 ENTRY(phys_startup_32)
17787 -jiffies = jiffies_64;
17788 #else
17789 OUTPUT_ARCH(i386:x86-64)
17790 ENTRY(phys_startup_64)
17791 -jiffies_64 = jiffies;
17792 #endif
17793
17794 PHDRS {
17795 text PT_LOAD FLAGS(5); /* R_E */
17796 - data PT_LOAD FLAGS(7); /* RWE */
17797 +#ifdef CONFIG_X86_32
17798 + module PT_LOAD FLAGS(5); /* R_E */
17799 +#endif
17800 +#ifdef CONFIG_XEN
17801 + rodata PT_LOAD FLAGS(5); /* R_E */
17802 +#else
17803 + rodata PT_LOAD FLAGS(4); /* R__ */
17804 +#endif
17805 + data PT_LOAD FLAGS(6); /* RW_ */
17806 #ifdef CONFIG_X86_64
17807 user PT_LOAD FLAGS(5); /* R_E */
17808 +#endif
17809 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17810 #ifdef CONFIG_SMP
17811 percpu PT_LOAD FLAGS(6); /* RW_ */
17812 #endif
17813 + text.init PT_LOAD FLAGS(5); /* R_E */
17814 + text.exit PT_LOAD FLAGS(5); /* R_E */
17815 init PT_LOAD FLAGS(7); /* RWE */
17816 -#endif
17817 note PT_NOTE FLAGS(0); /* ___ */
17818 }
17819
17820 SECTIONS
17821 {
17822 #ifdef CONFIG_X86_32
17823 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17824 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17825 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17826 #else
17827 - . = __START_KERNEL;
17828 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17829 + . = __START_KERNEL;
17830 #endif
17831
17832 /* Text and read-only data */
17833 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17834 - _text = .;
17835 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17836 /* bootstrapping code */
17837 +#ifdef CONFIG_X86_32
17838 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17839 +#else
17840 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17841 +#endif
17842 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17843 + _text = .;
17844 HEAD_TEXT
17845 #ifdef CONFIG_X86_32
17846 . = ALIGN(PAGE_SIZE);
17847 @@ -82,28 +102,71 @@ SECTIONS
17848 IRQENTRY_TEXT
17849 *(.fixup)
17850 *(.gnu.warning)
17851 - /* End of text section */
17852 - _etext = .;
17853 } :text = 0x9090
17854
17855 - NOTES :text :note
17856 + . += __KERNEL_TEXT_OFFSET;
17857 +
17858 +#ifdef CONFIG_X86_32
17859 + . = ALIGN(PAGE_SIZE);
17860 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17861 + *(.vmi.rom)
17862 + } :module
17863 +
17864 + . = ALIGN(PAGE_SIZE);
17865 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17866 +
17867 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17868 + MODULES_EXEC_VADDR = .;
17869 + BYTE(0)
17870 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17871 + . = ALIGN(HPAGE_SIZE);
17872 + MODULES_EXEC_END = . - 1;
17873 +#endif
17874 +
17875 + } :module
17876 +#endif
17877
17878 - EXCEPTION_TABLE(16) :text = 0x9090
17879 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17880 + /* End of text section */
17881 + _etext = . - __KERNEL_TEXT_OFFSET;
17882 + }
17883 +
17884 +#ifdef CONFIG_X86_32
17885 + . = ALIGN(PAGE_SIZE);
17886 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17887 + *(.idt)
17888 + . = ALIGN(PAGE_SIZE);
17889 + *(.empty_zero_page)
17890 + *(.swapper_pg_fixmap)
17891 + *(.swapper_pg_pmd)
17892 + *(.swapper_pg_dir)
17893 + *(.trampoline_pg_dir)
17894 + } :rodata
17895 +#endif
17896 +
17897 + . = ALIGN(PAGE_SIZE);
17898 + NOTES :rodata :note
17899 +
17900 + EXCEPTION_TABLE(16) :rodata
17901
17902 RO_DATA(PAGE_SIZE)
17903
17904 /* Data */
17905 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17906 +
17907 +#ifdef CONFIG_PAX_KERNEXEC
17908 + . = ALIGN(HPAGE_SIZE);
17909 +#else
17910 + . = ALIGN(PAGE_SIZE);
17911 +#endif
17912 +
17913 /* Start of data section */
17914 _sdata = .;
17915
17916 /* init_task */
17917 INIT_TASK_DATA(THREAD_SIZE)
17918
17919 -#ifdef CONFIG_X86_32
17920 - /* 32 bit has nosave before _edata */
17921 NOSAVE_DATA
17922 -#endif
17923
17924 PAGE_ALIGNED_DATA(PAGE_SIZE)
17925
17926 @@ -112,6 +175,8 @@ SECTIONS
17927 DATA_DATA
17928 CONSTRUCTORS
17929
17930 + jiffies = jiffies_64;
17931 +
17932 /* rarely changed data like cpu maps */
17933 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17934
17935 @@ -166,12 +231,6 @@ SECTIONS
17936 }
17937 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17938
17939 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17940 - .jiffies : AT(VLOAD(.jiffies)) {
17941 - *(.jiffies)
17942 - }
17943 - jiffies = VVIRT(.jiffies);
17944 -
17945 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17946 *(.vsyscall_3)
17947 }
17948 @@ -187,12 +246,19 @@ SECTIONS
17949 #endif /* CONFIG_X86_64 */
17950
17951 /* Init code and data - will be freed after init */
17952 - . = ALIGN(PAGE_SIZE);
17953 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17954 + BYTE(0)
17955 +
17956 +#ifdef CONFIG_PAX_KERNEXEC
17957 + . = ALIGN(HPAGE_SIZE);
17958 +#else
17959 + . = ALIGN(PAGE_SIZE);
17960 +#endif
17961 +
17962 __init_begin = .; /* paired with __init_end */
17963 - }
17964 + } :init.begin
17965
17966 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17967 +#ifdef CONFIG_SMP
17968 /*
17969 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17970 * output PHDR, so the next output section - .init.text - should
17971 @@ -201,12 +267,27 @@ SECTIONS
17972 PERCPU_VADDR(0, :percpu)
17973 #endif
17974
17975 - INIT_TEXT_SECTION(PAGE_SIZE)
17976 -#ifdef CONFIG_X86_64
17977 - :init
17978 -#endif
17979 + . = ALIGN(PAGE_SIZE);
17980 + init_begin = .;
17981 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17982 + VMLINUX_SYMBOL(_sinittext) = .;
17983 + INIT_TEXT
17984 + VMLINUX_SYMBOL(_einittext) = .;
17985 + . = ALIGN(PAGE_SIZE);
17986 + } :text.init
17987
17988 - INIT_DATA_SECTION(16)
17989 + /*
17990 + * .exit.text is discard at runtime, not link time, to deal with
17991 + * references from .altinstructions and .eh_frame
17992 + */
17993 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17994 + EXIT_TEXT
17995 + . = ALIGN(16);
17996 + } :text.exit
17997 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17998 +
17999 + . = ALIGN(PAGE_SIZE);
18000 + INIT_DATA_SECTION(16) :init
18001
18002 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18003 __x86_cpu_dev_start = .;
18004 @@ -232,19 +313,11 @@ SECTIONS
18005 *(.altinstr_replacement)
18006 }
18007
18008 - /*
18009 - * .exit.text is discard at runtime, not link time, to deal with
18010 - * references from .altinstructions and .eh_frame
18011 - */
18012 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18013 - EXIT_TEXT
18014 - }
18015 -
18016 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18017 EXIT_DATA
18018 }
18019
18020 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18021 +#ifndef CONFIG_SMP
18022 PERCPU(PAGE_SIZE)
18023 #endif
18024
18025 @@ -267,12 +340,6 @@ SECTIONS
18026 . = ALIGN(PAGE_SIZE);
18027 }
18028
18029 -#ifdef CONFIG_X86_64
18030 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18031 - NOSAVE_DATA
18032 - }
18033 -#endif
18034 -
18035 /* BSS */
18036 . = ALIGN(PAGE_SIZE);
18037 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18038 @@ -288,6 +355,7 @@ SECTIONS
18039 __brk_base = .;
18040 . += 64 * 1024; /* 64k alignment slop space */
18041 *(.brk_reservation) /* areas brk users have reserved */
18042 + . = ALIGN(HPAGE_SIZE);
18043 __brk_limit = .;
18044 }
18045
18046 @@ -316,13 +384,12 @@ SECTIONS
18047 * for the boot processor.
18048 */
18049 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18050 -INIT_PER_CPU(gdt_page);
18051 INIT_PER_CPU(irq_stack_union);
18052
18053 /*
18054 * Build-time check on the image size:
18055 */
18056 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18057 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18058 "kernel image bigger than KERNEL_IMAGE_SIZE");
18059
18060 #ifdef CONFIG_SMP
18061 diff -urNp linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c
18062 --- linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18063 +++ linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18064 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18065
18066 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18067 /* copy vsyscall data */
18068 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18069 vsyscall_gtod_data.clock.vread = clock->vread;
18070 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18071 vsyscall_gtod_data.clock.mask = clock->mask;
18072 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18073 We do this here because otherwise user space would do it on
18074 its own in a likely inferior way (no access to jiffies).
18075 If you don't like it pass NULL. */
18076 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18077 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18078 p = tcache->blob[1];
18079 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18080 /* Load per CPU data from RDTSCP */
18081 diff -urNp linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c
18082 --- linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18083 +++ linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18084 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18085
18086 EXPORT_SYMBOL(copy_user_generic);
18087 EXPORT_SYMBOL(__copy_user_nocache);
18088 -EXPORT_SYMBOL(copy_from_user);
18089 -EXPORT_SYMBOL(copy_to_user);
18090 EXPORT_SYMBOL(__copy_from_user_inatomic);
18091
18092 EXPORT_SYMBOL(copy_page);
18093 diff -urNp linux-2.6.32.42/arch/x86/kernel/xsave.c linux-2.6.32.42/arch/x86/kernel/xsave.c
18094 --- linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18095 +++ linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18096 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18097 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18098 return -1;
18099
18100 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18101 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18102 fx_sw_user->extended_size -
18103 FP_XSTATE_MAGIC2_SIZE));
18104 /*
18105 @@ -196,7 +196,7 @@ fx_only:
18106 * the other extended state.
18107 */
18108 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18109 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18110 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18111 }
18112
18113 /*
18114 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18115 if (task_thread_info(tsk)->status & TS_XSAVE)
18116 err = restore_user_xstate(buf);
18117 else
18118 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18119 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18120 buf);
18121 if (unlikely(err)) {
18122 /*
18123 diff -urNp linux-2.6.32.42/arch/x86/kvm/emulate.c linux-2.6.32.42/arch/x86/kvm/emulate.c
18124 --- linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18125 +++ linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18126 @@ -81,8 +81,8 @@
18127 #define Src2CL (1<<29)
18128 #define Src2ImmByte (2<<29)
18129 #define Src2One (3<<29)
18130 -#define Src2Imm16 (4<<29)
18131 -#define Src2Mask (7<<29)
18132 +#define Src2Imm16 (4U<<29)
18133 +#define Src2Mask (7U<<29)
18134
18135 enum {
18136 Group1_80, Group1_81, Group1_82, Group1_83,
18137 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18138
18139 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18140 do { \
18141 + unsigned long _tmp; \
18142 __asm__ __volatile__ ( \
18143 _PRE_EFLAGS("0", "4", "2") \
18144 _op _suffix " %"_x"3,%1; " \
18145 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18146 /* Raw emulation: instruction has two explicit operands. */
18147 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18148 do { \
18149 - unsigned long _tmp; \
18150 - \
18151 switch ((_dst).bytes) { \
18152 case 2: \
18153 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18154 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18155
18156 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18157 do { \
18158 - unsigned long _tmp; \
18159 switch ((_dst).bytes) { \
18160 case 1: \
18161 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18162 diff -urNp linux-2.6.32.42/arch/x86/kvm/lapic.c linux-2.6.32.42/arch/x86/kvm/lapic.c
18163 --- linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18164 +++ linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18165 @@ -52,7 +52,7 @@
18166 #define APIC_BUS_CYCLE_NS 1
18167
18168 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18169 -#define apic_debug(fmt, arg...)
18170 +#define apic_debug(fmt, arg...) do {} while (0)
18171
18172 #define APIC_LVT_NUM 6
18173 /* 14 is the version for Xeon and Pentium 8.4.8*/
18174 diff -urNp linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h
18175 --- linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18176 +++ linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18177 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18178 int level = PT_PAGE_TABLE_LEVEL;
18179 unsigned long mmu_seq;
18180
18181 + pax_track_stack();
18182 +
18183 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18184 kvm_mmu_audit(vcpu, "pre page fault");
18185
18186 diff -urNp linux-2.6.32.42/arch/x86/kvm/svm.c linux-2.6.32.42/arch/x86/kvm/svm.c
18187 --- linux-2.6.32.42/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18188 +++ linux-2.6.32.42/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
18189 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
18190 static void reload_tss(struct kvm_vcpu *vcpu)
18191 {
18192 int cpu = raw_smp_processor_id();
18193 -
18194 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18195 +
18196 + pax_open_kernel();
18197 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18198 + pax_close_kernel();
18199 +
18200 load_TR_desc();
18201 }
18202
18203 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
18204 return true;
18205 }
18206
18207 -static struct kvm_x86_ops svm_x86_ops = {
18208 +static const struct kvm_x86_ops svm_x86_ops = {
18209 .cpu_has_kvm_support = has_svm,
18210 .disabled_by_bios = is_disabled,
18211 .hardware_setup = svm_hardware_setup,
18212 diff -urNp linux-2.6.32.42/arch/x86/kvm/vmx.c linux-2.6.32.42/arch/x86/kvm/vmx.c
18213 --- linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18214 +++ linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18215 @@ -570,7 +570,11 @@ static void reload_tss(void)
18216
18217 kvm_get_gdt(&gdt);
18218 descs = (void *)gdt.base;
18219 +
18220 + pax_open_kernel();
18221 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18222 + pax_close_kernel();
18223 +
18224 load_TR_desc();
18225 }
18226
18227 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18228 if (!cpu_has_vmx_flexpriority())
18229 flexpriority_enabled = 0;
18230
18231 - if (!cpu_has_vmx_tpr_shadow())
18232 - kvm_x86_ops->update_cr8_intercept = NULL;
18233 + if (!cpu_has_vmx_tpr_shadow()) {
18234 + pax_open_kernel();
18235 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18236 + pax_close_kernel();
18237 + }
18238
18239 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18240 kvm_disable_largepages();
18241 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18242 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18243
18244 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18245 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18246 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18247 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18248 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18249 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18250 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18251 "jmp .Lkvm_vmx_return \n\t"
18252 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18253 ".Lkvm_vmx_return: "
18254 +
18255 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18256 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18257 + ".Lkvm_vmx_return2: "
18258 +#endif
18259 +
18260 /* Save guest registers, load host registers, keep flags */
18261 "xchg %0, (%%"R"sp) \n\t"
18262 "mov %%"R"ax, %c[rax](%0) \n\t"
18263 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18264 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18265 #endif
18266 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18267 +
18268 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18269 + ,[cs]"i"(__KERNEL_CS)
18270 +#endif
18271 +
18272 : "cc", "memory"
18273 - , R"bx", R"di", R"si"
18274 + , R"ax", R"bx", R"di", R"si"
18275 #ifdef CONFIG_X86_64
18276 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18277 #endif
18278 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18279 if (vmx->rmode.irq.pending)
18280 fixup_rmode_irq(vmx);
18281
18282 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18283 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18284 +
18285 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18286 + loadsegment(fs, __KERNEL_PERCPU);
18287 +#endif
18288 +
18289 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18290 + __set_fs(current_thread_info()->addr_limit);
18291 +#endif
18292 +
18293 vmx->launched = 1;
18294
18295 vmx_complete_interrupts(vmx);
18296 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18297 return false;
18298 }
18299
18300 -static struct kvm_x86_ops vmx_x86_ops = {
18301 +static const struct kvm_x86_ops vmx_x86_ops = {
18302 .cpu_has_kvm_support = cpu_has_kvm_support,
18303 .disabled_by_bios = vmx_disabled_by_bios,
18304 .hardware_setup = hardware_setup,
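
Both reload_tss() changes above (svm.c and vmx.c) follow the same KERNEXEC idiom: a one-off write into a descriptor that this patch makes read-only is bracketed with pax_open_kernel()/pax_close_kernel(), which are defined elsewhere in the patch. A minimal sketch of the idiom; the descriptor type and field name here are assumptions for illustration:

/* Sketch only: briefly lift write protection around a single RO-data write. */
static void mark_tss_available_sketch(struct desc_struct *tss_desc)
{
        pax_open_kernel();
        tss_desc->type = 9;     /* available 32/64-bit TSS */
        pax_close_kernel();
}
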
18305 diff -urNp linux-2.6.32.42/arch/x86/kvm/x86.c linux-2.6.32.42/arch/x86/kvm/x86.c
18306 --- linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18307 +++ linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18308 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18309 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18310 struct kvm_cpuid_entry2 __user *entries);
18311
18312 -struct kvm_x86_ops *kvm_x86_ops;
18313 +const struct kvm_x86_ops *kvm_x86_ops;
18314 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18315
18316 int ignore_msrs = 0;
18317 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18318 struct kvm_cpuid2 *cpuid,
18319 struct kvm_cpuid_entry2 __user *entries)
18320 {
18321 - int r;
18322 + int r, i;
18323
18324 r = -E2BIG;
18325 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18326 goto out;
18327 r = -EFAULT;
18328 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18329 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18330 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18331 goto out;
18332 + for (i = 0; i < cpuid->nent; ++i) {
18333 + struct kvm_cpuid_entry2 cpuid_entry;
18334 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18335 + goto out;
18336 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18337 + }
18338 vcpu->arch.cpuid_nent = cpuid->nent;
18339 kvm_apic_set_version(vcpu);
18340 return 0;
18341 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18342 struct kvm_cpuid2 *cpuid,
18343 struct kvm_cpuid_entry2 __user *entries)
18344 {
18345 - int r;
18346 + int r, i;
18347
18348 vcpu_load(vcpu);
18349 r = -E2BIG;
18350 if (cpuid->nent < vcpu->arch.cpuid_nent)
18351 goto out;
18352 r = -EFAULT;
18353 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18354 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18355 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18356 goto out;
18357 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18358 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18359 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18360 + goto out;
18361 + }
18362 return 0;
18363
18364 out:
18365 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18366 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18367 struct kvm_interrupt *irq)
18368 {
18369 - if (irq->irq < 0 || irq->irq >= 256)
18370 + if (irq->irq >= 256)
18371 return -EINVAL;
18372 if (irqchip_in_kernel(vcpu->kvm))
18373 return -ENXIO;
18374 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18375 .notifier_call = kvmclock_cpufreq_notifier
18376 };
18377
18378 -int kvm_arch_init(void *opaque)
18379 +int kvm_arch_init(const void *opaque)
18380 {
18381 int r, cpu;
18382 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18383 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18384
18385 if (kvm_x86_ops) {
18386 printk(KERN_ERR "kvm: already loaded the other module\n");
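
The kvm_vcpu_ioctl_set_cpuid2()/get_cpuid2() hunks above replace one large copy_{from,to}_user() into a fixed-size kernel array with a single access_ok() check followed by a bounded, per-entry __copy_{from,to}_user() loop. A hedged sketch of the read-side pattern, assuming kernel context (access_ok(), __copy_from_user(), the usual errno constants); struct entry, the limit and the function name are placeholders, not the KVM types:

struct entry { int data; };     /* placeholder, not kvm_cpuid_entry2 */

/* Sketch only: bounded per-entry copy after one access_ok() check. */
static int copy_entries_sketch(struct entry *dst, unsigned int nent,
                               const struct entry __user *src,
                               unsigned int max_entries)
{
        unsigned int i;

        if (nent > max_entries)
                return -E2BIG;
        if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
                return -EFAULT;

        for (i = 0; i < nent; i++) {
                struct entry tmp;

                if (__copy_from_user(&tmp, src + i, sizeof(tmp)))
                        return -EFAULT;
                dst[i] = tmp;
        }
        return 0;
}
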
18387 diff -urNp linux-2.6.32.42/arch/x86/lib/atomic64_32.c linux-2.6.32.42/arch/x86/lib/atomic64_32.c
18388 --- linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18389 +++ linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18390 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18391 }
18392 EXPORT_SYMBOL(atomic64_cmpxchg);
18393
18394 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18395 +{
18396 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18397 +}
18398 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18399 +
18400 /**
18401 * atomic64_xchg - xchg atomic64 variable
18402 * @ptr: pointer to type atomic64_t
18403 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18404 EXPORT_SYMBOL(atomic64_xchg);
18405
18406 /**
18407 + * atomic64_xchg_unchecked - xchg atomic64 variable
18408 + * @ptr: pointer to type atomic64_unchecked_t
18409 + * @new_val: value to assign
18410 + *
18411 + * Atomically xchgs the value of @ptr to @new_val and returns
18412 + * the old value.
18413 + */
18414 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18415 +{
18416 + /*
18417 + * Try first with a (possibly incorrect) assumption about
18418 + * what we have there. We'll do two loops most likely,
18419 + * but we'll get an ownership MESI transaction straight away
18420 + * instead of a read transaction followed by a
18421 + * flush-for-ownership transaction:
18422 + */
18423 + u64 old_val, real_val = 0;
18424 +
18425 + do {
18426 + old_val = real_val;
18427 +
18428 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18429 +
18430 + } while (real_val != old_val);
18431 +
18432 + return old_val;
18433 +}
18434 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18435 +
18436 +/**
18437 * atomic64_set - set atomic64 variable
18438 * @ptr: pointer to type atomic64_t
18439 * @new_val: value to assign
18440 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18441 EXPORT_SYMBOL(atomic64_set);
18442
18443 /**
18444 -EXPORT_SYMBOL(atomic64_read);
18445 + * atomic64_unchecked_set - set atomic64 variable
18446 + * @ptr: pointer to type atomic64_unchecked_t
18447 + * @new_val: value to assign
18448 + *
18449 + * Atomically sets the value of @ptr to @new_val.
18450 + */
18451 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18452 +{
18453 + atomic64_xchg_unchecked(ptr, new_val);
18454 +}
18455 +EXPORT_SYMBOL(atomic64_set_unchecked);
18456 +
18457 +/**
18458 * atomic64_add_return - add and return
18459 * @delta: integer value to add
18460 * @ptr: pointer to type atomic64_t
18461 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18462 }
18463 EXPORT_SYMBOL(atomic64_add_return);
18464
18465 +/**
18466 + * atomic64_add_return_unchecked - add and return
18467 + * @delta: integer value to add
18468 + * @ptr: pointer to type atomic64_unchecked_t
18469 + *
18470 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18471 + */
18472 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18473 +{
18474 + /*
18475 + * Try first with a (possibly incorrect) assumption about
18476 + * what we have there. We'll do two loops most likely,
18477 + * but we'll get an ownership MESI transaction straight away
18478 + * instead of a read transaction followed by a
18479 + * flush-for-ownership transaction:
18480 + */
18481 + u64 old_val, new_val, real_val = 0;
18482 +
18483 + do {
18484 + old_val = real_val;
18485 + new_val = old_val + delta;
18486 +
18487 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18488 +
18489 + } while (real_val != old_val);
18490 +
18491 + return new_val;
18492 +}
18493 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18494 +
18495 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18496 {
18497 return atomic64_add_return(-delta, ptr);
18498 }
18499 EXPORT_SYMBOL(atomic64_sub_return);
18500
18501 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18502 +{
18503 + return atomic64_add_return_unchecked(-delta, ptr);
18504 +}
18505 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18506 +
18507 u64 atomic64_inc_return(atomic64_t *ptr)
18508 {
18509 return atomic64_add_return(1, ptr);
18510 }
18511 EXPORT_SYMBOL(atomic64_inc_return);
18512
18513 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18514 +{
18515 + return atomic64_add_return_unchecked(1, ptr);
18516 +}
18517 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18518 +
18519 u64 atomic64_dec_return(atomic64_t *ptr)
18520 {
18521 return atomic64_sub_return(1, ptr);
18522 }
18523 EXPORT_SYMBOL(atomic64_dec_return);
18524
18525 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18526 +{
18527 + return atomic64_sub_return_unchecked(1, ptr);
18528 +}
18529 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18530 +
18531 /**
18532 * atomic64_add - add integer to atomic64 variable
18533 * @delta: integer value to add
18534 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18535 EXPORT_SYMBOL(atomic64_add);
18536
18537 /**
18538 + * atomic64_add_unchecked - add integer to atomic64 variable
18539 + * @delta: integer value to add
18540 + * @ptr: pointer to type atomic64_unchecked_t
18541 + *
18542 + * Atomically adds @delta to @ptr.
18543 + */
18544 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18545 +{
18546 + atomic64_add_return_unchecked(delta, ptr);
18547 +}
18548 +EXPORT_SYMBOL(atomic64_add_unchecked);
18549 +
18550 +/**
18551 * atomic64_sub - subtract the atomic64 variable
18552 * @delta: integer value to subtract
18553 * @ptr: pointer to type atomic64_t
18554 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18555 EXPORT_SYMBOL(atomic64_sub);
18556
18557 /**
18558 + * atomic64_sub_unchecked - subtract the atomic64 variable
18559 + * @delta: integer value to subtract
18560 + * @ptr: pointer to type atomic64_unchecked_t
18561 + *
18562 + * Atomically subtracts @delta from @ptr.
18563 + */
18564 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18565 +{
18566 + atomic64_add_unchecked(-delta, ptr);
18567 +}
18568 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18569 +
18570 +/**
18571 * atomic64_sub_and_test - subtract value from variable and test result
18572 * @delta: integer value to subtract
18573 * @ptr: pointer to type atomic64_t
18574 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18575 EXPORT_SYMBOL(atomic64_inc);
18576
18577 /**
18578 + * atomic64_inc_unchecked - increment atomic64 variable
18579 + * @ptr: pointer to type atomic64_unchecked_t
18580 + *
18581 + * Atomically increments @ptr by 1.
18582 + */
18583 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18584 +{
18585 + atomic64_add_unchecked(1, ptr);
18586 +}
18587 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18588 +
18589 +/**
18590 * atomic64_dec - decrement atomic64 variable
18591 * @ptr: pointer to type atomic64_t
18592 *
18593 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18594 EXPORT_SYMBOL(atomic64_dec);
18595
18596 /**
18597 + * atomic64_dec_unchecked - decrement atomic64 variable
18598 + * @ptr: pointer to type atomic64_unchecked_t
18599 + *
18600 + * Atomically decrements @ptr by 1.
18601 + */
18602 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18603 +{
18604 + atomic64_sub_unchecked(1, ptr);
18605 +}
18606 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18607 +
18608 +/**
18609 * atomic64_dec_and_test - decrement and test
18610 * @ptr: pointer to type atomic64_t
18611 *
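
All of the *_unchecked atomic64 helpers added above reduce to the same optimistic cmpxchg8b retry loop that the in-code comments describe. A standalone C sketch of that loop, using the GCC __sync builtin as a stand-in for cmpxchg8b (the function name is made up):

/* Sketch only: the retry loop behind atomic64_add_return_unchecked(). */
static unsigned long long add_return_sketch(unsigned long long delta,
                                            unsigned long long *ptr)
{
        unsigned long long old_val, new_val, real_val = 0;

        do {
                old_val = real_val;
                new_val = old_val + delta;
                /* returns the value actually found at *ptr */
                real_val = __sync_val_compare_and_swap(ptr, old_val, new_val);
        } while (real_val != old_val);

        return new_val;
}
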
18612 diff -urNp linux-2.6.32.42/arch/x86/lib/checksum_32.S linux-2.6.32.42/arch/x86/lib/checksum_32.S
18613 --- linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18614 +++ linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18615 @@ -28,7 +28,8 @@
18616 #include <linux/linkage.h>
18617 #include <asm/dwarf2.h>
18618 #include <asm/errno.h>
18619 -
18620 +#include <asm/segment.h>
18621 +
18622 /*
18623 * computes a partial checksum, e.g. for TCP/UDP fragments
18624 */
18625 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18626
18627 #define ARGBASE 16
18628 #define FP 12
18629 -
18630 -ENTRY(csum_partial_copy_generic)
18631 +
18632 +ENTRY(csum_partial_copy_generic_to_user)
18633 CFI_STARTPROC
18634 +
18635 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18636 + pushl %gs
18637 + CFI_ADJUST_CFA_OFFSET 4
18638 + popl %es
18639 + CFI_ADJUST_CFA_OFFSET -4
18640 + jmp csum_partial_copy_generic
18641 +#endif
18642 +
18643 +ENTRY(csum_partial_copy_generic_from_user)
18644 +
18645 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18646 + pushl %gs
18647 + CFI_ADJUST_CFA_OFFSET 4
18648 + popl %ds
18649 + CFI_ADJUST_CFA_OFFSET -4
18650 +#endif
18651 +
18652 +ENTRY(csum_partial_copy_generic)
18653 subl $4,%esp
18654 CFI_ADJUST_CFA_OFFSET 4
18655 pushl %edi
18656 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18657 jmp 4f
18658 SRC(1: movw (%esi), %bx )
18659 addl $2, %esi
18660 -DST( movw %bx, (%edi) )
18661 +DST( movw %bx, %es:(%edi) )
18662 addl $2, %edi
18663 addw %bx, %ax
18664 adcl $0, %eax
18665 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18666 SRC(1: movl (%esi), %ebx )
18667 SRC( movl 4(%esi), %edx )
18668 adcl %ebx, %eax
18669 -DST( movl %ebx, (%edi) )
18670 +DST( movl %ebx, %es:(%edi) )
18671 adcl %edx, %eax
18672 -DST( movl %edx, 4(%edi) )
18673 +DST( movl %edx, %es:4(%edi) )
18674
18675 SRC( movl 8(%esi), %ebx )
18676 SRC( movl 12(%esi), %edx )
18677 adcl %ebx, %eax
18678 -DST( movl %ebx, 8(%edi) )
18679 +DST( movl %ebx, %es:8(%edi) )
18680 adcl %edx, %eax
18681 -DST( movl %edx, 12(%edi) )
18682 +DST( movl %edx, %es:12(%edi) )
18683
18684 SRC( movl 16(%esi), %ebx )
18685 SRC( movl 20(%esi), %edx )
18686 adcl %ebx, %eax
18687 -DST( movl %ebx, 16(%edi) )
18688 +DST( movl %ebx, %es:16(%edi) )
18689 adcl %edx, %eax
18690 -DST( movl %edx, 20(%edi) )
18691 +DST( movl %edx, %es:20(%edi) )
18692
18693 SRC( movl 24(%esi), %ebx )
18694 SRC( movl 28(%esi), %edx )
18695 adcl %ebx, %eax
18696 -DST( movl %ebx, 24(%edi) )
18697 +DST( movl %ebx, %es:24(%edi) )
18698 adcl %edx, %eax
18699 -DST( movl %edx, 28(%edi) )
18700 +DST( movl %edx, %es:28(%edi) )
18701
18702 lea 32(%esi), %esi
18703 lea 32(%edi), %edi
18704 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18705 shrl $2, %edx # This clears CF
18706 SRC(3: movl (%esi), %ebx )
18707 adcl %ebx, %eax
18708 -DST( movl %ebx, (%edi) )
18709 +DST( movl %ebx, %es:(%edi) )
18710 lea 4(%esi), %esi
18711 lea 4(%edi), %edi
18712 dec %edx
18713 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18714 jb 5f
18715 SRC( movw (%esi), %cx )
18716 leal 2(%esi), %esi
18717 -DST( movw %cx, (%edi) )
18718 +DST( movw %cx, %es:(%edi) )
18719 leal 2(%edi), %edi
18720 je 6f
18721 shll $16,%ecx
18722 SRC(5: movb (%esi), %cl )
18723 -DST( movb %cl, (%edi) )
18724 +DST( movb %cl, %es:(%edi) )
18725 6: addl %ecx, %eax
18726 adcl $0, %eax
18727 7:
18728 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18729
18730 6001:
18731 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18732 - movl $-EFAULT, (%ebx)
18733 + movl $-EFAULT, %ss:(%ebx)
18734
18735 # zero the complete destination - computing the rest
18736 # is too much work
18737 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18738
18739 6002:
18740 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18741 - movl $-EFAULT,(%ebx)
18742 + movl $-EFAULT,%ss:(%ebx)
18743 jmp 5000b
18744
18745 .previous
18746
18747 + pushl %ss
18748 + CFI_ADJUST_CFA_OFFSET 4
18749 + popl %ds
18750 + CFI_ADJUST_CFA_OFFSET -4
18751 + pushl %ss
18752 + CFI_ADJUST_CFA_OFFSET 4
18753 + popl %es
18754 + CFI_ADJUST_CFA_OFFSET -4
18755 popl %ebx
18756 CFI_ADJUST_CFA_OFFSET -4
18757 CFI_RESTORE ebx
18758 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18759 CFI_ADJUST_CFA_OFFSET -4
18760 ret
18761 CFI_ENDPROC
18762 -ENDPROC(csum_partial_copy_generic)
18763 +ENDPROC(csum_partial_copy_generic_to_user)
18764
18765 #else
18766
18767 /* Version for PentiumII/PPro */
18768
18769 #define ROUND1(x) \
18770 + nop; nop; nop; \
18771 SRC(movl x(%esi), %ebx ) ; \
18772 addl %ebx, %eax ; \
18773 - DST(movl %ebx, x(%edi) ) ;
18774 + DST(movl %ebx, %es:x(%edi)) ;
18775
18776 #define ROUND(x) \
18777 + nop; nop; nop; \
18778 SRC(movl x(%esi), %ebx ) ; \
18779 adcl %ebx, %eax ; \
18780 - DST(movl %ebx, x(%edi) ) ;
18781 + DST(movl %ebx, %es:x(%edi)) ;
18782
18783 #define ARGBASE 12
18784 -
18785 -ENTRY(csum_partial_copy_generic)
18786 +
18787 +ENTRY(csum_partial_copy_generic_to_user)
18788 CFI_STARTPROC
18789 +
18790 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18791 + pushl %gs
18792 + CFI_ADJUST_CFA_OFFSET 4
18793 + popl %es
18794 + CFI_ADJUST_CFA_OFFSET -4
18795 + jmp csum_partial_copy_generic
18796 +#endif
18797 +
18798 +ENTRY(csum_partial_copy_generic_from_user)
18799 +
18800 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18801 + pushl %gs
18802 + CFI_ADJUST_CFA_OFFSET 4
18803 + popl %ds
18804 + CFI_ADJUST_CFA_OFFSET -4
18805 +#endif
18806 +
18807 +ENTRY(csum_partial_copy_generic)
18808 pushl %ebx
18809 CFI_ADJUST_CFA_OFFSET 4
18810 CFI_REL_OFFSET ebx, 0
18811 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18812 subl %ebx, %edi
18813 lea -1(%esi),%edx
18814 andl $-32,%edx
18815 - lea 3f(%ebx,%ebx), %ebx
18816 + lea 3f(%ebx,%ebx,2), %ebx
18817 testl %esi, %esi
18818 jmp *%ebx
18819 1: addl $64,%esi
18820 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18821 jb 5f
18822 SRC( movw (%esi), %dx )
18823 leal 2(%esi), %esi
18824 -DST( movw %dx, (%edi) )
18825 +DST( movw %dx, %es:(%edi) )
18826 leal 2(%edi), %edi
18827 je 6f
18828 shll $16,%edx
18829 5:
18830 SRC( movb (%esi), %dl )
18831 -DST( movb %dl, (%edi) )
18832 +DST( movb %dl, %es:(%edi) )
18833 6: addl %edx, %eax
18834 adcl $0, %eax
18835 7:
18836 .section .fixup, "ax"
18837 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18838 - movl $-EFAULT, (%ebx)
18839 + movl $-EFAULT, %ss:(%ebx)
18840 # zero the complete destination (computing the rest is too much work)
18841 movl ARGBASE+8(%esp),%edi # dst
18842 movl ARGBASE+12(%esp),%ecx # len
18843 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18844 rep; stosb
18845 jmp 7b
18846 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18847 - movl $-EFAULT, (%ebx)
18848 + movl $-EFAULT, %ss:(%ebx)
18849 jmp 7b
18850 .previous
18851
18852 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18853 + pushl %ss
18854 + CFI_ADJUST_CFA_OFFSET 4
18855 + popl %ds
18856 + CFI_ADJUST_CFA_OFFSET -4
18857 + pushl %ss
18858 + CFI_ADJUST_CFA_OFFSET 4
18859 + popl %es
18860 + CFI_ADJUST_CFA_OFFSET -4
18861 +#endif
18862 +
18863 popl %esi
18864 CFI_ADJUST_CFA_OFFSET -4
18865 CFI_RESTORE esi
18866 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18867 CFI_RESTORE ebx
18868 ret
18869 CFI_ENDPROC
18870 -ENDPROC(csum_partial_copy_generic)
18871 +ENDPROC(csum_partial_copy_generic_to_user)
18872
18873 #undef ROUND
18874 #undef ROUND1
18875 diff -urNp linux-2.6.32.42/arch/x86/lib/clear_page_64.S linux-2.6.32.42/arch/x86/lib/clear_page_64.S
18876 --- linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18877 +++ linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18878 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18879
18880 #include <asm/cpufeature.h>
18881
18882 - .section .altinstr_replacement,"ax"
18883 + .section .altinstr_replacement,"a"
18884 1: .byte 0xeb /* jmp <disp8> */
18885 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18886 2:
18887 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_page_64.S linux-2.6.32.42/arch/x86/lib/copy_page_64.S
18888 --- linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18889 +++ linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18890 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18891
18892 #include <asm/cpufeature.h>
18893
18894 - .section .altinstr_replacement,"ax"
18895 + .section .altinstr_replacement,"a"
18896 1: .byte 0xeb /* jmp <disp8> */
18897 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18898 2:
18899 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_64.S linux-2.6.32.42/arch/x86/lib/copy_user_64.S
18900 --- linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
18901 +++ linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
18902 @@ -15,13 +15,14 @@
18903 #include <asm/asm-offsets.h>
18904 #include <asm/thread_info.h>
18905 #include <asm/cpufeature.h>
18906 +#include <asm/pgtable.h>
18907
18908 .macro ALTERNATIVE_JUMP feature,orig,alt
18909 0:
18910 .byte 0xe9 /* 32bit jump */
18911 .long \orig-1f /* by default jump to orig */
18912 1:
18913 - .section .altinstr_replacement,"ax"
18914 + .section .altinstr_replacement,"a"
18915 2: .byte 0xe9 /* near jump with 32bit immediate */
18916 .long \alt-1b /* offset */ /* or alternatively to alt */
18917 .previous
18918 @@ -64,49 +65,19 @@
18919 #endif
18920 .endm
18921
18922 -/* Standard copy_to_user with segment limit checking */
18923 -ENTRY(copy_to_user)
18924 - CFI_STARTPROC
18925 - GET_THREAD_INFO(%rax)
18926 - movq %rdi,%rcx
18927 - addq %rdx,%rcx
18928 - jc bad_to_user
18929 - cmpq TI_addr_limit(%rax),%rcx
18930 - ja bad_to_user
18931 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18932 - CFI_ENDPROC
18933 -ENDPROC(copy_to_user)
18934 -
18935 -/* Standard copy_from_user with segment limit checking */
18936 -ENTRY(copy_from_user)
18937 - CFI_STARTPROC
18938 - GET_THREAD_INFO(%rax)
18939 - movq %rsi,%rcx
18940 - addq %rdx,%rcx
18941 - jc bad_from_user
18942 - cmpq TI_addr_limit(%rax),%rcx
18943 - ja bad_from_user
18944 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18945 - CFI_ENDPROC
18946 -ENDPROC(copy_from_user)
18947 -
18948 ENTRY(copy_user_generic)
18949 CFI_STARTPROC
18950 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18951 CFI_ENDPROC
18952 ENDPROC(copy_user_generic)
18953
18954 -ENTRY(__copy_from_user_inatomic)
18955 - CFI_STARTPROC
18956 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18957 - CFI_ENDPROC
18958 -ENDPROC(__copy_from_user_inatomic)
18959 -
18960 .section .fixup,"ax"
18961 /* must zero dest */
18962 ENTRY(bad_from_user)
18963 bad_from_user:
18964 CFI_STARTPROC
18965 + testl %edx,%edx
18966 + js bad_to_user
18967 movl %edx,%ecx
18968 xorl %eax,%eax
18969 rep
18970 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S
18971 --- linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18972 +++ linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18973 @@ -14,6 +14,7 @@
18974 #include <asm/current.h>
18975 #include <asm/asm-offsets.h>
18976 #include <asm/thread_info.h>
18977 +#include <asm/pgtable.h>
18978
18979 .macro ALIGN_DESTINATION
18980 #ifdef FIX_ALIGNMENT
18981 @@ -50,6 +51,15 @@
18982 */
18983 ENTRY(__copy_user_nocache)
18984 CFI_STARTPROC
18985 +
18986 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18987 + mov $PAX_USER_SHADOW_BASE,%rcx
18988 + cmp %rcx,%rsi
18989 + jae 1f
18990 + add %rcx,%rsi
18991 +1:
18992 +#endif
18993 +
18994 cmpl $8,%edx
18995 jb 20f /* less then 8 bytes, go to byte copy loop */
18996 ALIGN_DESTINATION
18997 diff -urNp linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c
18998 --- linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18999 +++ linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19000 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19001 len -= 2;
19002 }
19003 }
19004 +
19005 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19006 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19007 + src += PAX_USER_SHADOW_BASE;
19008 +#endif
19009 +
19010 isum = csum_partial_copy_generic((__force const void *)src,
19011 dst, len, isum, errp, NULL);
19012 if (unlikely(*errp))
19013 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19014 }
19015
19016 *errp = 0;
19017 +
19018 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19019 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19020 + dst += PAX_USER_SHADOW_BASE;
19021 +#endif
19022 +
19023 return csum_partial_copy_generic(src, (void __force *)dst,
19024 len, isum, NULL, errp);
19025 }
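Note on the UDEREF idiom used in the two hunks above: both the copy_user_nocache assembly and the csum wrappers shift a userland pointer that still lies below PAX_USER_SHADOW_BASE up by that base before handing it to the low-level copy routine, presumably so that only addresses going through the shadow mapping are ever dereferenced from kernel context. A minimal user-space sketch of that fixup follows; the base value is a placeholder, not the kernel's real constant.

#include <stdio.h>

#define FAKE_USER_SHADOW_BASE 0x100000000000UL /* stand-in for PAX_USER_SHADOW_BASE */

/* same branch as the "cmp / jae / add" sequence in the .S hunk and the
 * conditional add in the csum wrappers: only addresses still below the
 * shadow base get relocated */
static unsigned long uderef_fixup(unsigned long uaddr)
{
	if (uaddr < FAKE_USER_SHADOW_BASE)
		uaddr += FAKE_USER_SHADOW_BASE;
	return uaddr;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x7fffdeadb000UL, uderef_fixup(0x7fffdeadb000UL));
	printf("%#lx -> %#lx\n", FAKE_USER_SHADOW_BASE + 0x1000UL,
	       uderef_fixup(FAKE_USER_SHADOW_BASE + 0x1000UL));
	return 0;
}

The putuser.S hunks further down reach the same effective address differently: %_ASM_BX ends up holding either the shadow base or zero, and the (%_ASM_CX,%_ASM_BX) memory operand adds it implicitly.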
19026 diff -urNp linux-2.6.32.42/arch/x86/lib/getuser.S linux-2.6.32.42/arch/x86/lib/getuser.S
19027 --- linux-2.6.32.42/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19028 +++ linux-2.6.32.42/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19029 @@ -33,14 +33,35 @@
19030 #include <asm/asm-offsets.h>
19031 #include <asm/thread_info.h>
19032 #include <asm/asm.h>
19033 +#include <asm/segment.h>
19034 +#include <asm/pgtable.h>
19035 +
19036 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19037 +#define __copyuser_seg gs;
19038 +#else
19039 +#define __copyuser_seg
19040 +#endif
19041
19042 .text
19043 ENTRY(__get_user_1)
19044 CFI_STARTPROC
19045 +
19046 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19047 GET_THREAD_INFO(%_ASM_DX)
19048 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19049 jae bad_get_user
19050 -1: movzb (%_ASM_AX),%edx
19051 +
19052 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19053 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19054 + cmp %_ASM_DX,%_ASM_AX
19055 + jae 1234f
19056 + add %_ASM_DX,%_ASM_AX
19057 +1234:
19058 +#endif
19059 +
19060 +#endif
19061 +
19062 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19063 xor %eax,%eax
19064 ret
19065 CFI_ENDPROC
19066 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19067 ENTRY(__get_user_2)
19068 CFI_STARTPROC
19069 add $1,%_ASM_AX
19070 +
19071 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19072 jc bad_get_user
19073 GET_THREAD_INFO(%_ASM_DX)
19074 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19075 jae bad_get_user
19076 -2: movzwl -1(%_ASM_AX),%edx
19077 +
19078 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19079 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19080 + cmp %_ASM_DX,%_ASM_AX
19081 + jae 1234f
19082 + add %_ASM_DX,%_ASM_AX
19083 +1234:
19084 +#endif
19085 +
19086 +#endif
19087 +
19088 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19089 xor %eax,%eax
19090 ret
19091 CFI_ENDPROC
19092 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19093 ENTRY(__get_user_4)
19094 CFI_STARTPROC
19095 add $3,%_ASM_AX
19096 +
19097 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19098 jc bad_get_user
19099 GET_THREAD_INFO(%_ASM_DX)
19100 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19101 jae bad_get_user
19102 -3: mov -3(%_ASM_AX),%edx
19103 +
19104 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19105 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19106 + cmp %_ASM_DX,%_ASM_AX
19107 + jae 1234f
19108 + add %_ASM_DX,%_ASM_AX
19109 +1234:
19110 +#endif
19111 +
19112 +#endif
19113 +
19114 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19115 xor %eax,%eax
19116 ret
19117 CFI_ENDPROC
19118 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19119 GET_THREAD_INFO(%_ASM_DX)
19120 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19121 jae bad_get_user
19122 +
19123 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19124 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19125 + cmp %_ASM_DX,%_ASM_AX
19126 + jae 1234f
19127 + add %_ASM_DX,%_ASM_AX
19128 +1234:
19129 +#endif
19130 +
19131 4: movq -7(%_ASM_AX),%_ASM_DX
19132 xor %eax,%eax
19133 ret
19134 diff -urNp linux-2.6.32.42/arch/x86/lib/memcpy_64.S linux-2.6.32.42/arch/x86/lib/memcpy_64.S
19135 --- linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19136 +++ linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19137 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19138 * It is also a lot simpler. Use this when possible:
19139 */
19140
19141 - .section .altinstr_replacement, "ax"
19142 + .section .altinstr_replacement, "a"
19143 1: .byte 0xeb /* jmp <disp8> */
19144 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19145 2:
19146 diff -urNp linux-2.6.32.42/arch/x86/lib/memset_64.S linux-2.6.32.42/arch/x86/lib/memset_64.S
19147 --- linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19148 +++ linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19149 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19150
19151 #include <asm/cpufeature.h>
19152
19153 - .section .altinstr_replacement,"ax"
19154 + .section .altinstr_replacement,"a"
19155 1: .byte 0xeb /* jmp <disp8> */
19156 .byte (memset_c - memset) - (2f - 1b) /* offset */
19157 2:
19158 diff -urNp linux-2.6.32.42/arch/x86/lib/mmx_32.c linux-2.6.32.42/arch/x86/lib/mmx_32.c
19159 --- linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19160 +++ linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19161 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19162 {
19163 void *p;
19164 int i;
19165 + unsigned long cr0;
19166
19167 if (unlikely(in_interrupt()))
19168 return __memcpy(to, from, len);
19169 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19170 kernel_fpu_begin();
19171
19172 __asm__ __volatile__ (
19173 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19174 - " prefetch 64(%0)\n"
19175 - " prefetch 128(%0)\n"
19176 - " prefetch 192(%0)\n"
19177 - " prefetch 256(%0)\n"
19178 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19179 + " prefetch 64(%1)\n"
19180 + " prefetch 128(%1)\n"
19181 + " prefetch 192(%1)\n"
19182 + " prefetch 256(%1)\n"
19183 "2: \n"
19184 ".section .fixup, \"ax\"\n"
19185 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19186 + "3: \n"
19187 +
19188 +#ifdef CONFIG_PAX_KERNEXEC
19189 + " movl %%cr0, %0\n"
19190 + " movl %0, %%eax\n"
19191 + " andl $0xFFFEFFFF, %%eax\n"
19192 + " movl %%eax, %%cr0\n"
19193 +#endif
19194 +
19195 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19196 +
19197 +#ifdef CONFIG_PAX_KERNEXEC
19198 + " movl %0, %%cr0\n"
19199 +#endif
19200 +
19201 " jmp 2b\n"
19202 ".previous\n"
19203 _ASM_EXTABLE(1b, 3b)
19204 - : : "r" (from));
19205 + : "=&r" (cr0) : "r" (from) : "ax");
19206
19207 for ( ; i > 5; i--) {
19208 __asm__ __volatile__ (
19209 - "1: prefetch 320(%0)\n"
19210 - "2: movq (%0), %%mm0\n"
19211 - " movq 8(%0), %%mm1\n"
19212 - " movq 16(%0), %%mm2\n"
19213 - " movq 24(%0), %%mm3\n"
19214 - " movq %%mm0, (%1)\n"
19215 - " movq %%mm1, 8(%1)\n"
19216 - " movq %%mm2, 16(%1)\n"
19217 - " movq %%mm3, 24(%1)\n"
19218 - " movq 32(%0), %%mm0\n"
19219 - " movq 40(%0), %%mm1\n"
19220 - " movq 48(%0), %%mm2\n"
19221 - " movq 56(%0), %%mm3\n"
19222 - " movq %%mm0, 32(%1)\n"
19223 - " movq %%mm1, 40(%1)\n"
19224 - " movq %%mm2, 48(%1)\n"
19225 - " movq %%mm3, 56(%1)\n"
19226 + "1: prefetch 320(%1)\n"
19227 + "2: movq (%1), %%mm0\n"
19228 + " movq 8(%1), %%mm1\n"
19229 + " movq 16(%1), %%mm2\n"
19230 + " movq 24(%1), %%mm3\n"
19231 + " movq %%mm0, (%2)\n"
19232 + " movq %%mm1, 8(%2)\n"
19233 + " movq %%mm2, 16(%2)\n"
19234 + " movq %%mm3, 24(%2)\n"
19235 + " movq 32(%1), %%mm0\n"
19236 + " movq 40(%1), %%mm1\n"
19237 + " movq 48(%1), %%mm2\n"
19238 + " movq 56(%1), %%mm3\n"
19239 + " movq %%mm0, 32(%2)\n"
19240 + " movq %%mm1, 40(%2)\n"
19241 + " movq %%mm2, 48(%2)\n"
19242 + " movq %%mm3, 56(%2)\n"
19243 ".section .fixup, \"ax\"\n"
19244 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19245 + "3:\n"
19246 +
19247 +#ifdef CONFIG_PAX_KERNEXEC
19248 + " movl %%cr0, %0\n"
19249 + " movl %0, %%eax\n"
19250 + " andl $0xFFFEFFFF, %%eax\n"
19251 + " movl %%eax, %%cr0\n"
19252 +#endif
19253 +
19254 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19255 +
19256 +#ifdef CONFIG_PAX_KERNEXEC
19257 + " movl %0, %%cr0\n"
19258 +#endif
19259 +
19260 " jmp 2b\n"
19261 ".previous\n"
19262 _ASM_EXTABLE(1b, 3b)
19263 - : : "r" (from), "r" (to) : "memory");
19264 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19265
19266 from += 64;
19267 to += 64;
19268 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19269 static void fast_copy_page(void *to, void *from)
19270 {
19271 int i;
19272 + unsigned long cr0;
19273
19274 kernel_fpu_begin();
19275
19276 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19277 * but that is for later. -AV
19278 */
19279 __asm__ __volatile__(
19280 - "1: prefetch (%0)\n"
19281 - " prefetch 64(%0)\n"
19282 - " prefetch 128(%0)\n"
19283 - " prefetch 192(%0)\n"
19284 - " prefetch 256(%0)\n"
19285 + "1: prefetch (%1)\n"
19286 + " prefetch 64(%1)\n"
19287 + " prefetch 128(%1)\n"
19288 + " prefetch 192(%1)\n"
19289 + " prefetch 256(%1)\n"
19290 "2: \n"
19291 ".section .fixup, \"ax\"\n"
19292 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19293 + "3: \n"
19294 +
19295 +#ifdef CONFIG_PAX_KERNEXEC
19296 + " movl %%cr0, %0\n"
19297 + " movl %0, %%eax\n"
19298 + " andl $0xFFFEFFFF, %%eax\n"
19299 + " movl %%eax, %%cr0\n"
19300 +#endif
19301 +
19302 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19303 +
19304 +#ifdef CONFIG_PAX_KERNEXEC
19305 + " movl %0, %%cr0\n"
19306 +#endif
19307 +
19308 " jmp 2b\n"
19309 ".previous\n"
19310 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19311 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19312
19313 for (i = 0; i < (4096-320)/64; i++) {
19314 __asm__ __volatile__ (
19315 - "1: prefetch 320(%0)\n"
19316 - "2: movq (%0), %%mm0\n"
19317 - " movntq %%mm0, (%1)\n"
19318 - " movq 8(%0), %%mm1\n"
19319 - " movntq %%mm1, 8(%1)\n"
19320 - " movq 16(%0), %%mm2\n"
19321 - " movntq %%mm2, 16(%1)\n"
19322 - " movq 24(%0), %%mm3\n"
19323 - " movntq %%mm3, 24(%1)\n"
19324 - " movq 32(%0), %%mm4\n"
19325 - " movntq %%mm4, 32(%1)\n"
19326 - " movq 40(%0), %%mm5\n"
19327 - " movntq %%mm5, 40(%1)\n"
19328 - " movq 48(%0), %%mm6\n"
19329 - " movntq %%mm6, 48(%1)\n"
19330 - " movq 56(%0), %%mm7\n"
19331 - " movntq %%mm7, 56(%1)\n"
19332 + "1: prefetch 320(%1)\n"
19333 + "2: movq (%1), %%mm0\n"
19334 + " movntq %%mm0, (%2)\n"
19335 + " movq 8(%1), %%mm1\n"
19336 + " movntq %%mm1, 8(%2)\n"
19337 + " movq 16(%1), %%mm2\n"
19338 + " movntq %%mm2, 16(%2)\n"
19339 + " movq 24(%1), %%mm3\n"
19340 + " movntq %%mm3, 24(%2)\n"
19341 + " movq 32(%1), %%mm4\n"
19342 + " movntq %%mm4, 32(%2)\n"
19343 + " movq 40(%1), %%mm5\n"
19344 + " movntq %%mm5, 40(%2)\n"
19345 + " movq 48(%1), %%mm6\n"
19346 + " movntq %%mm6, 48(%2)\n"
19347 + " movq 56(%1), %%mm7\n"
19348 + " movntq %%mm7, 56(%2)\n"
19349 ".section .fixup, \"ax\"\n"
19350 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19351 + "3:\n"
19352 +
19353 +#ifdef CONFIG_PAX_KERNEXEC
19354 + " movl %%cr0, %0\n"
19355 + " movl %0, %%eax\n"
19356 + " andl $0xFFFEFFFF, %%eax\n"
19357 + " movl %%eax, %%cr0\n"
19358 +#endif
19359 +
19360 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19361 +
19362 +#ifdef CONFIG_PAX_KERNEXEC
19363 + " movl %0, %%cr0\n"
19364 +#endif
19365 +
19366 " jmp 2b\n"
19367 ".previous\n"
19368 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19369 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19370
19371 from += 64;
19372 to += 64;
19373 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19374 static void fast_copy_page(void *to, void *from)
19375 {
19376 int i;
19377 + unsigned long cr0;
19378
19379 kernel_fpu_begin();
19380
19381 __asm__ __volatile__ (
19382 - "1: prefetch (%0)\n"
19383 - " prefetch 64(%0)\n"
19384 - " prefetch 128(%0)\n"
19385 - " prefetch 192(%0)\n"
19386 - " prefetch 256(%0)\n"
19387 + "1: prefetch (%1)\n"
19388 + " prefetch 64(%1)\n"
19389 + " prefetch 128(%1)\n"
19390 + " prefetch 192(%1)\n"
19391 + " prefetch 256(%1)\n"
19392 "2: \n"
19393 ".section .fixup, \"ax\"\n"
19394 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19395 + "3: \n"
19396 +
19397 +#ifdef CONFIG_PAX_KERNEXEC
19398 + " movl %%cr0, %0\n"
19399 + " movl %0, %%eax\n"
19400 + " andl $0xFFFEFFFF, %%eax\n"
19401 + " movl %%eax, %%cr0\n"
19402 +#endif
19403 +
19404 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19405 +
19406 +#ifdef CONFIG_PAX_KERNEXEC
19407 + " movl %0, %%cr0\n"
19408 +#endif
19409 +
19410 " jmp 2b\n"
19411 ".previous\n"
19412 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19413 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19414
19415 for (i = 0; i < 4096/64; i++) {
19416 __asm__ __volatile__ (
19417 - "1: prefetch 320(%0)\n"
19418 - "2: movq (%0), %%mm0\n"
19419 - " movq 8(%0), %%mm1\n"
19420 - " movq 16(%0), %%mm2\n"
19421 - " movq 24(%0), %%mm3\n"
19422 - " movq %%mm0, (%1)\n"
19423 - " movq %%mm1, 8(%1)\n"
19424 - " movq %%mm2, 16(%1)\n"
19425 - " movq %%mm3, 24(%1)\n"
19426 - " movq 32(%0), %%mm0\n"
19427 - " movq 40(%0), %%mm1\n"
19428 - " movq 48(%0), %%mm2\n"
19429 - " movq 56(%0), %%mm3\n"
19430 - " movq %%mm0, 32(%1)\n"
19431 - " movq %%mm1, 40(%1)\n"
19432 - " movq %%mm2, 48(%1)\n"
19433 - " movq %%mm3, 56(%1)\n"
19434 + "1: prefetch 320(%1)\n"
19435 + "2: movq (%1), %%mm0\n"
19436 + " movq 8(%1), %%mm1\n"
19437 + " movq 16(%1), %%mm2\n"
19438 + " movq 24(%1), %%mm3\n"
19439 + " movq %%mm0, (%2)\n"
19440 + " movq %%mm1, 8(%2)\n"
19441 + " movq %%mm2, 16(%2)\n"
19442 + " movq %%mm3, 24(%2)\n"
19443 + " movq 32(%1), %%mm0\n"
19444 + " movq 40(%1), %%mm1\n"
19445 + " movq 48(%1), %%mm2\n"
19446 + " movq 56(%1), %%mm3\n"
19447 + " movq %%mm0, 32(%2)\n"
19448 + " movq %%mm1, 40(%2)\n"
19449 + " movq %%mm2, 48(%2)\n"
19450 + " movq %%mm3, 56(%2)\n"
19451 ".section .fixup, \"ax\"\n"
19452 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19453 + "3:\n"
19454 +
19455 +#ifdef CONFIG_PAX_KERNEXEC
19456 + " movl %%cr0, %0\n"
19457 + " movl %0, %%eax\n"
19458 + " andl $0xFFFEFFFF, %%eax\n"
19459 + " movl %%eax, %%cr0\n"
19460 +#endif
19461 +
19462 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19463 +
19464 +#ifdef CONFIG_PAX_KERNEXEC
19465 + " movl %0, %%cr0\n"
19466 +#endif
19467 +
19468 " jmp 2b\n"
19469 ".previous\n"
19470 _ASM_EXTABLE(1b, 3b)
19471 - : : "r" (from), "r" (to) : "memory");
19472 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19473
19474 from += 64;
19475 to += 64;
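Note on the KERNEXEC blocks added throughout mmx_32.c above: the original fixup paths patch a two-byte short jmp ($0x1AEB or $0x05EB) over the faulting prefetch at label 1b, that is, they write into kernel text. With KERNEXEC that text is read-only, so the new code saves %cr0, clears its WP bit around the movw, and restores it; the extra "=&r"(cr0) output and "ax" clobber account for the registers the added instructions use. The 0xFFFEFFFF constant is simply the mask that clears CR0.WP (bit 16); a quick stand-alone check, using an arbitrary example CR0 value:

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

int main(void)
{
	unsigned long cr0 = 0x8005003bUL;       /* example value, for illustration only */
	unsigned long masked = cr0 & 0xFFFEFFFFUL;

	/* the literal mask and "clear bit 16" are the same operation here */
	assert(masked == (cr0 & ~X86_CR0_WP));
	printf("cr0=%#lx, with WP cleared=%#lx\n", cr0, masked);
	return 0;
}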
19476 diff -urNp linux-2.6.32.42/arch/x86/lib/putuser.S linux-2.6.32.42/arch/x86/lib/putuser.S
19477 --- linux-2.6.32.42/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19478 +++ linux-2.6.32.42/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19479 @@ -15,7 +15,8 @@
19480 #include <asm/thread_info.h>
19481 #include <asm/errno.h>
19482 #include <asm/asm.h>
19483 -
19484 +#include <asm/segment.h>
19485 +#include <asm/pgtable.h>
19486
19487 /*
19488 * __put_user_X
19489 @@ -29,52 +30,119 @@
19490 * as they get called from within inline assembly.
19491 */
19492
19493 -#define ENTER CFI_STARTPROC ; \
19494 - GET_THREAD_INFO(%_ASM_BX)
19495 +#define ENTER CFI_STARTPROC
19496 #define EXIT ret ; \
19497 CFI_ENDPROC
19498
19499 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19500 +#define _DEST %_ASM_CX,%_ASM_BX
19501 +#else
19502 +#define _DEST %_ASM_CX
19503 +#endif
19504 +
19505 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19506 +#define __copyuser_seg gs;
19507 +#else
19508 +#define __copyuser_seg
19509 +#endif
19510 +
19511 .text
19512 ENTRY(__put_user_1)
19513 ENTER
19514 +
19515 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19516 + GET_THREAD_INFO(%_ASM_BX)
19517 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19518 jae bad_put_user
19519 -1: movb %al,(%_ASM_CX)
19520 +
19521 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19522 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19523 + cmp %_ASM_BX,%_ASM_CX
19524 + jb 1234f
19525 + xor %ebx,%ebx
19526 +1234:
19527 +#endif
19528 +
19529 +#endif
19530 +
19531 +1: __copyuser_seg movb %al,(_DEST)
19532 xor %eax,%eax
19533 EXIT
19534 ENDPROC(__put_user_1)
19535
19536 ENTRY(__put_user_2)
19537 ENTER
19538 +
19539 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19540 + GET_THREAD_INFO(%_ASM_BX)
19541 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19542 sub $1,%_ASM_BX
19543 cmp %_ASM_BX,%_ASM_CX
19544 jae bad_put_user
19545 -2: movw %ax,(%_ASM_CX)
19546 +
19547 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19548 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19549 + cmp %_ASM_BX,%_ASM_CX
19550 + jb 1234f
19551 + xor %ebx,%ebx
19552 +1234:
19553 +#endif
19554 +
19555 +#endif
19556 +
19557 +2: __copyuser_seg movw %ax,(_DEST)
19558 xor %eax,%eax
19559 EXIT
19560 ENDPROC(__put_user_2)
19561
19562 ENTRY(__put_user_4)
19563 ENTER
19564 +
19565 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19566 + GET_THREAD_INFO(%_ASM_BX)
19567 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19568 sub $3,%_ASM_BX
19569 cmp %_ASM_BX,%_ASM_CX
19570 jae bad_put_user
19571 -3: movl %eax,(%_ASM_CX)
19572 +
19573 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19574 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19575 + cmp %_ASM_BX,%_ASM_CX
19576 + jb 1234f
19577 + xor %ebx,%ebx
19578 +1234:
19579 +#endif
19580 +
19581 +#endif
19582 +
19583 +3: __copyuser_seg movl %eax,(_DEST)
19584 xor %eax,%eax
19585 EXIT
19586 ENDPROC(__put_user_4)
19587
19588 ENTRY(__put_user_8)
19589 ENTER
19590 +
19591 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19592 + GET_THREAD_INFO(%_ASM_BX)
19593 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19594 sub $7,%_ASM_BX
19595 cmp %_ASM_BX,%_ASM_CX
19596 jae bad_put_user
19597 -4: mov %_ASM_AX,(%_ASM_CX)
19598 +
19599 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19600 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19601 + cmp %_ASM_BX,%_ASM_CX
19602 + jb 1234f
19603 + xor %ebx,%ebx
19604 +1234:
19605 +#endif
19606 +
19607 +#endif
19608 +
19609 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19610 #ifdef CONFIG_X86_32
19611 -5: movl %edx,4(%_ASM_CX)
19612 +5: __copyuser_seg movl %edx,4(_DEST)
19613 #endif
19614 xor %eax,%eax
19615 EXIT
19616 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_32.c linux-2.6.32.42/arch/x86/lib/usercopy_32.c
19617 --- linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19618 +++ linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19619 @@ -43,7 +43,7 @@ do { \
19620 __asm__ __volatile__( \
19621 " testl %1,%1\n" \
19622 " jz 2f\n" \
19623 - "0: lodsb\n" \
19624 + "0: "__copyuser_seg"lodsb\n" \
19625 " stosb\n" \
19626 " testb %%al,%%al\n" \
19627 " jz 1f\n" \
19628 @@ -128,10 +128,12 @@ do { \
19629 int __d0; \
19630 might_fault(); \
19631 __asm__ __volatile__( \
19632 + __COPYUSER_SET_ES \
19633 "0: rep; stosl\n" \
19634 " movl %2,%0\n" \
19635 "1: rep; stosb\n" \
19636 "2:\n" \
19637 + __COPYUSER_RESTORE_ES \
19638 ".section .fixup,\"ax\"\n" \
19639 "3: lea 0(%2,%0,4),%0\n" \
19640 " jmp 2b\n" \
19641 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19642 might_fault();
19643
19644 __asm__ __volatile__(
19645 + __COPYUSER_SET_ES
19646 " testl %0, %0\n"
19647 " jz 3f\n"
19648 " andl %0,%%ecx\n"
19649 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19650 " subl %%ecx,%0\n"
19651 " addl %0,%%eax\n"
19652 "1:\n"
19653 + __COPYUSER_RESTORE_ES
19654 ".section .fixup,\"ax\"\n"
19655 "2: xorl %%eax,%%eax\n"
19656 " jmp 1b\n"
19657 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19658
19659 #ifdef CONFIG_X86_INTEL_USERCOPY
19660 static unsigned long
19661 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19662 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19663 {
19664 int d0, d1;
19665 __asm__ __volatile__(
19666 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19667 " .align 2,0x90\n"
19668 "3: movl 0(%4), %%eax\n"
19669 "4: movl 4(%4), %%edx\n"
19670 - "5: movl %%eax, 0(%3)\n"
19671 - "6: movl %%edx, 4(%3)\n"
19672 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19673 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19674 "7: movl 8(%4), %%eax\n"
19675 "8: movl 12(%4),%%edx\n"
19676 - "9: movl %%eax, 8(%3)\n"
19677 - "10: movl %%edx, 12(%3)\n"
19678 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19679 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19680 "11: movl 16(%4), %%eax\n"
19681 "12: movl 20(%4), %%edx\n"
19682 - "13: movl %%eax, 16(%3)\n"
19683 - "14: movl %%edx, 20(%3)\n"
19684 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19685 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19686 "15: movl 24(%4), %%eax\n"
19687 "16: movl 28(%4), %%edx\n"
19688 - "17: movl %%eax, 24(%3)\n"
19689 - "18: movl %%edx, 28(%3)\n"
19690 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19691 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19692 "19: movl 32(%4), %%eax\n"
19693 "20: movl 36(%4), %%edx\n"
19694 - "21: movl %%eax, 32(%3)\n"
19695 - "22: movl %%edx, 36(%3)\n"
19696 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19697 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19698 "23: movl 40(%4), %%eax\n"
19699 "24: movl 44(%4), %%edx\n"
19700 - "25: movl %%eax, 40(%3)\n"
19701 - "26: movl %%edx, 44(%3)\n"
19702 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19703 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19704 "27: movl 48(%4), %%eax\n"
19705 "28: movl 52(%4), %%edx\n"
19706 - "29: movl %%eax, 48(%3)\n"
19707 - "30: movl %%edx, 52(%3)\n"
19708 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19709 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19710 "31: movl 56(%4), %%eax\n"
19711 "32: movl 60(%4), %%edx\n"
19712 - "33: movl %%eax, 56(%3)\n"
19713 - "34: movl %%edx, 60(%3)\n"
19714 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19715 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19716 " addl $-64, %0\n"
19717 " addl $64, %4\n"
19718 " addl $64, %3\n"
19719 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19720 " shrl $2, %0\n"
19721 " andl $3, %%eax\n"
19722 " cld\n"
19723 + __COPYUSER_SET_ES
19724 "99: rep; movsl\n"
19725 "36: movl %%eax, %0\n"
19726 "37: rep; movsb\n"
19727 "100:\n"
19728 + __COPYUSER_RESTORE_ES
19729 + ".section .fixup,\"ax\"\n"
19730 + "101: lea 0(%%eax,%0,4),%0\n"
19731 + " jmp 100b\n"
19732 + ".previous\n"
19733 + ".section __ex_table,\"a\"\n"
19734 + " .align 4\n"
19735 + " .long 1b,100b\n"
19736 + " .long 2b,100b\n"
19737 + " .long 3b,100b\n"
19738 + " .long 4b,100b\n"
19739 + " .long 5b,100b\n"
19740 + " .long 6b,100b\n"
19741 + " .long 7b,100b\n"
19742 + " .long 8b,100b\n"
19743 + " .long 9b,100b\n"
19744 + " .long 10b,100b\n"
19745 + " .long 11b,100b\n"
19746 + " .long 12b,100b\n"
19747 + " .long 13b,100b\n"
19748 + " .long 14b,100b\n"
19749 + " .long 15b,100b\n"
19750 + " .long 16b,100b\n"
19751 + " .long 17b,100b\n"
19752 + " .long 18b,100b\n"
19753 + " .long 19b,100b\n"
19754 + " .long 20b,100b\n"
19755 + " .long 21b,100b\n"
19756 + " .long 22b,100b\n"
19757 + " .long 23b,100b\n"
19758 + " .long 24b,100b\n"
19759 + " .long 25b,100b\n"
19760 + " .long 26b,100b\n"
19761 + " .long 27b,100b\n"
19762 + " .long 28b,100b\n"
19763 + " .long 29b,100b\n"
19764 + " .long 30b,100b\n"
19765 + " .long 31b,100b\n"
19766 + " .long 32b,100b\n"
19767 + " .long 33b,100b\n"
19768 + " .long 34b,100b\n"
19769 + " .long 35b,100b\n"
19770 + " .long 36b,100b\n"
19771 + " .long 37b,100b\n"
19772 + " .long 99b,101b\n"
19773 + ".previous"
19774 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19775 + : "1"(to), "2"(from), "0"(size)
19776 + : "eax", "edx", "memory");
19777 + return size;
19778 +}
19779 +
19780 +static unsigned long
19781 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19782 +{
19783 + int d0, d1;
19784 + __asm__ __volatile__(
19785 + " .align 2,0x90\n"
19786 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19787 + " cmpl $67, %0\n"
19788 + " jbe 3f\n"
19789 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19790 + " .align 2,0x90\n"
19791 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19792 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19793 + "5: movl %%eax, 0(%3)\n"
19794 + "6: movl %%edx, 4(%3)\n"
19795 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19796 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19797 + "9: movl %%eax, 8(%3)\n"
19798 + "10: movl %%edx, 12(%3)\n"
19799 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19800 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19801 + "13: movl %%eax, 16(%3)\n"
19802 + "14: movl %%edx, 20(%3)\n"
19803 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19804 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19805 + "17: movl %%eax, 24(%3)\n"
19806 + "18: movl %%edx, 28(%3)\n"
19807 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19808 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19809 + "21: movl %%eax, 32(%3)\n"
19810 + "22: movl %%edx, 36(%3)\n"
19811 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19812 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19813 + "25: movl %%eax, 40(%3)\n"
19814 + "26: movl %%edx, 44(%3)\n"
19815 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19816 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19817 + "29: movl %%eax, 48(%3)\n"
19818 + "30: movl %%edx, 52(%3)\n"
19819 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19820 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19821 + "33: movl %%eax, 56(%3)\n"
19822 + "34: movl %%edx, 60(%3)\n"
19823 + " addl $-64, %0\n"
19824 + " addl $64, %4\n"
19825 + " addl $64, %3\n"
19826 + " cmpl $63, %0\n"
19827 + " ja 1b\n"
19828 + "35: movl %0, %%eax\n"
19829 + " shrl $2, %0\n"
19830 + " andl $3, %%eax\n"
19831 + " cld\n"
19832 + "99: rep; "__copyuser_seg" movsl\n"
19833 + "36: movl %%eax, %0\n"
19834 + "37: rep; "__copyuser_seg" movsb\n"
19835 + "100:\n"
19836 ".section .fixup,\"ax\"\n"
19837 "101: lea 0(%%eax,%0,4),%0\n"
19838 " jmp 100b\n"
19839 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19840 int d0, d1;
19841 __asm__ __volatile__(
19842 " .align 2,0x90\n"
19843 - "0: movl 32(%4), %%eax\n"
19844 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19845 " cmpl $67, %0\n"
19846 " jbe 2f\n"
19847 - "1: movl 64(%4), %%eax\n"
19848 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19849 " .align 2,0x90\n"
19850 - "2: movl 0(%4), %%eax\n"
19851 - "21: movl 4(%4), %%edx\n"
19852 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19853 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19854 " movl %%eax, 0(%3)\n"
19855 " movl %%edx, 4(%3)\n"
19856 - "3: movl 8(%4), %%eax\n"
19857 - "31: movl 12(%4),%%edx\n"
19858 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19859 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19860 " movl %%eax, 8(%3)\n"
19861 " movl %%edx, 12(%3)\n"
19862 - "4: movl 16(%4), %%eax\n"
19863 - "41: movl 20(%4), %%edx\n"
19864 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19865 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19866 " movl %%eax, 16(%3)\n"
19867 " movl %%edx, 20(%3)\n"
19868 - "10: movl 24(%4), %%eax\n"
19869 - "51: movl 28(%4), %%edx\n"
19870 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19871 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19872 " movl %%eax, 24(%3)\n"
19873 " movl %%edx, 28(%3)\n"
19874 - "11: movl 32(%4), %%eax\n"
19875 - "61: movl 36(%4), %%edx\n"
19876 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19877 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19878 " movl %%eax, 32(%3)\n"
19879 " movl %%edx, 36(%3)\n"
19880 - "12: movl 40(%4), %%eax\n"
19881 - "71: movl 44(%4), %%edx\n"
19882 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19883 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19884 " movl %%eax, 40(%3)\n"
19885 " movl %%edx, 44(%3)\n"
19886 - "13: movl 48(%4), %%eax\n"
19887 - "81: movl 52(%4), %%edx\n"
19888 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19889 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19890 " movl %%eax, 48(%3)\n"
19891 " movl %%edx, 52(%3)\n"
19892 - "14: movl 56(%4), %%eax\n"
19893 - "91: movl 60(%4), %%edx\n"
19894 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19895 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19896 " movl %%eax, 56(%3)\n"
19897 " movl %%edx, 60(%3)\n"
19898 " addl $-64, %0\n"
19899 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19900 " shrl $2, %0\n"
19901 " andl $3, %%eax\n"
19902 " cld\n"
19903 - "6: rep; movsl\n"
19904 + "6: rep; "__copyuser_seg" movsl\n"
19905 " movl %%eax,%0\n"
19906 - "7: rep; movsb\n"
19907 + "7: rep; "__copyuser_seg" movsb\n"
19908 "8:\n"
19909 ".section .fixup,\"ax\"\n"
19910 "9: lea 0(%%eax,%0,4),%0\n"
19911 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19912
19913 __asm__ __volatile__(
19914 " .align 2,0x90\n"
19915 - "0: movl 32(%4), %%eax\n"
19916 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19917 " cmpl $67, %0\n"
19918 " jbe 2f\n"
19919 - "1: movl 64(%4), %%eax\n"
19920 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19921 " .align 2,0x90\n"
19922 - "2: movl 0(%4), %%eax\n"
19923 - "21: movl 4(%4), %%edx\n"
19924 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19925 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19926 " movnti %%eax, 0(%3)\n"
19927 " movnti %%edx, 4(%3)\n"
19928 - "3: movl 8(%4), %%eax\n"
19929 - "31: movl 12(%4),%%edx\n"
19930 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19931 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19932 " movnti %%eax, 8(%3)\n"
19933 " movnti %%edx, 12(%3)\n"
19934 - "4: movl 16(%4), %%eax\n"
19935 - "41: movl 20(%4), %%edx\n"
19936 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19937 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19938 " movnti %%eax, 16(%3)\n"
19939 " movnti %%edx, 20(%3)\n"
19940 - "10: movl 24(%4), %%eax\n"
19941 - "51: movl 28(%4), %%edx\n"
19942 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19943 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19944 " movnti %%eax, 24(%3)\n"
19945 " movnti %%edx, 28(%3)\n"
19946 - "11: movl 32(%4), %%eax\n"
19947 - "61: movl 36(%4), %%edx\n"
19948 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19949 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19950 " movnti %%eax, 32(%3)\n"
19951 " movnti %%edx, 36(%3)\n"
19952 - "12: movl 40(%4), %%eax\n"
19953 - "71: movl 44(%4), %%edx\n"
19954 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19955 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19956 " movnti %%eax, 40(%3)\n"
19957 " movnti %%edx, 44(%3)\n"
19958 - "13: movl 48(%4), %%eax\n"
19959 - "81: movl 52(%4), %%edx\n"
19960 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19961 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19962 " movnti %%eax, 48(%3)\n"
19963 " movnti %%edx, 52(%3)\n"
19964 - "14: movl 56(%4), %%eax\n"
19965 - "91: movl 60(%4), %%edx\n"
19966 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19967 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19968 " movnti %%eax, 56(%3)\n"
19969 " movnti %%edx, 60(%3)\n"
19970 " addl $-64, %0\n"
19971 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19972 " shrl $2, %0\n"
19973 " andl $3, %%eax\n"
19974 " cld\n"
19975 - "6: rep; movsl\n"
19976 + "6: rep; "__copyuser_seg" movsl\n"
19977 " movl %%eax,%0\n"
19978 - "7: rep; movsb\n"
19979 + "7: rep; "__copyuser_seg" movsb\n"
19980 "8:\n"
19981 ".section .fixup,\"ax\"\n"
19982 "9: lea 0(%%eax,%0,4),%0\n"
19983 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19984
19985 __asm__ __volatile__(
19986 " .align 2,0x90\n"
19987 - "0: movl 32(%4), %%eax\n"
19988 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19989 " cmpl $67, %0\n"
19990 " jbe 2f\n"
19991 - "1: movl 64(%4), %%eax\n"
19992 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19993 " .align 2,0x90\n"
19994 - "2: movl 0(%4), %%eax\n"
19995 - "21: movl 4(%4), %%edx\n"
19996 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19997 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19998 " movnti %%eax, 0(%3)\n"
19999 " movnti %%edx, 4(%3)\n"
20000 - "3: movl 8(%4), %%eax\n"
20001 - "31: movl 12(%4),%%edx\n"
20002 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20003 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20004 " movnti %%eax, 8(%3)\n"
20005 " movnti %%edx, 12(%3)\n"
20006 - "4: movl 16(%4), %%eax\n"
20007 - "41: movl 20(%4), %%edx\n"
20008 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20009 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20010 " movnti %%eax, 16(%3)\n"
20011 " movnti %%edx, 20(%3)\n"
20012 - "10: movl 24(%4), %%eax\n"
20013 - "51: movl 28(%4), %%edx\n"
20014 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20015 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20016 " movnti %%eax, 24(%3)\n"
20017 " movnti %%edx, 28(%3)\n"
20018 - "11: movl 32(%4), %%eax\n"
20019 - "61: movl 36(%4), %%edx\n"
20020 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20021 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20022 " movnti %%eax, 32(%3)\n"
20023 " movnti %%edx, 36(%3)\n"
20024 - "12: movl 40(%4), %%eax\n"
20025 - "71: movl 44(%4), %%edx\n"
20026 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20027 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20028 " movnti %%eax, 40(%3)\n"
20029 " movnti %%edx, 44(%3)\n"
20030 - "13: movl 48(%4), %%eax\n"
20031 - "81: movl 52(%4), %%edx\n"
20032 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20033 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20034 " movnti %%eax, 48(%3)\n"
20035 " movnti %%edx, 52(%3)\n"
20036 - "14: movl 56(%4), %%eax\n"
20037 - "91: movl 60(%4), %%edx\n"
20038 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20039 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20040 " movnti %%eax, 56(%3)\n"
20041 " movnti %%edx, 60(%3)\n"
20042 " addl $-64, %0\n"
20043 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20044 " shrl $2, %0\n"
20045 " andl $3, %%eax\n"
20046 " cld\n"
20047 - "6: rep; movsl\n"
20048 + "6: rep; "__copyuser_seg" movsl\n"
20049 " movl %%eax,%0\n"
20050 - "7: rep; movsb\n"
20051 + "7: rep; "__copyuser_seg" movsb\n"
20052 "8:\n"
20053 ".section .fixup,\"ax\"\n"
20054 "9: lea 0(%%eax,%0,4),%0\n"
20055 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20056 */
20057 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20058 unsigned long size);
20059 -unsigned long __copy_user_intel(void __user *to, const void *from,
20060 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20061 + unsigned long size);
20062 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20063 unsigned long size);
20064 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20065 const void __user *from, unsigned long size);
20066 #endif /* CONFIG_X86_INTEL_USERCOPY */
20067
20068 /* Generic arbitrary sized copy. */
20069 -#define __copy_user(to, from, size) \
20070 +#define __copy_user(to, from, size, prefix, set, restore) \
20071 do { \
20072 int __d0, __d1, __d2; \
20073 __asm__ __volatile__( \
20074 + set \
20075 " cmp $7,%0\n" \
20076 " jbe 1f\n" \
20077 " movl %1,%0\n" \
20078 " negl %0\n" \
20079 " andl $7,%0\n" \
20080 " subl %0,%3\n" \
20081 - "4: rep; movsb\n" \
20082 + "4: rep; "prefix"movsb\n" \
20083 " movl %3,%0\n" \
20084 " shrl $2,%0\n" \
20085 " andl $3,%3\n" \
20086 " .align 2,0x90\n" \
20087 - "0: rep; movsl\n" \
20088 + "0: rep; "prefix"movsl\n" \
20089 " movl %3,%0\n" \
20090 - "1: rep; movsb\n" \
20091 + "1: rep; "prefix"movsb\n" \
20092 "2:\n" \
20093 + restore \
20094 ".section .fixup,\"ax\"\n" \
20095 "5: addl %3,%0\n" \
20096 " jmp 2b\n" \
20097 @@ -682,14 +799,14 @@ do { \
20098 " negl %0\n" \
20099 " andl $7,%0\n" \
20100 " subl %0,%3\n" \
20101 - "4: rep; movsb\n" \
20102 + "4: rep; "__copyuser_seg"movsb\n" \
20103 " movl %3,%0\n" \
20104 " shrl $2,%0\n" \
20105 " andl $3,%3\n" \
20106 " .align 2,0x90\n" \
20107 - "0: rep; movsl\n" \
20108 + "0: rep; "__copyuser_seg"movsl\n" \
20109 " movl %3,%0\n" \
20110 - "1: rep; movsb\n" \
20111 + "1: rep; "__copyuser_seg"movsb\n" \
20112 "2:\n" \
20113 ".section .fixup,\"ax\"\n" \
20114 "5: addl %3,%0\n" \
20115 @@ -775,9 +892,9 @@ survive:
20116 }
20117 #endif
20118 if (movsl_is_ok(to, from, n))
20119 - __copy_user(to, from, n);
20120 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20121 else
20122 - n = __copy_user_intel(to, from, n);
20123 + n = __generic_copy_to_user_intel(to, from, n);
20124 return n;
20125 }
20126 EXPORT_SYMBOL(__copy_to_user_ll);
20127 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20128 unsigned long n)
20129 {
20130 if (movsl_is_ok(to, from, n))
20131 - __copy_user(to, from, n);
20132 + __copy_user(to, from, n, __copyuser_seg, "", "");
20133 else
20134 - n = __copy_user_intel((void __user *)to,
20135 - (const void *)from, n);
20136 + n = __generic_copy_from_user_intel(to, from, n);
20137 return n;
20138 }
20139 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20140 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20141 if (n > 64 && cpu_has_xmm2)
20142 n = __copy_user_intel_nocache(to, from, n);
20143 else
20144 - __copy_user(to, from, n);
20145 + __copy_user(to, from, n, __copyuser_seg, "", "");
20146 #else
20147 - __copy_user(to, from, n);
20148 + __copy_user(to, from, n, __copyuser_seg, "", "");
20149 #endif
20150 return n;
20151 }
20152 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20153
20154 -/**
20155 - * copy_to_user: - Copy a block of data into user space.
20156 - * @to: Destination address, in user space.
20157 - * @from: Source address, in kernel space.
20158 - * @n: Number of bytes to copy.
20159 - *
20160 - * Context: User context only. This function may sleep.
20161 - *
20162 - * Copy data from kernel space to user space.
20163 - *
20164 - * Returns number of bytes that could not be copied.
20165 - * On success, this will be zero.
20166 - */
20167 -unsigned long
20168 -copy_to_user(void __user *to, const void *from, unsigned long n)
20169 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20170 +void __set_fs(mm_segment_t x)
20171 {
20172 - if (access_ok(VERIFY_WRITE, to, n))
20173 - n = __copy_to_user(to, from, n);
20174 - return n;
20175 + switch (x.seg) {
20176 + case 0:
20177 + loadsegment(gs, 0);
20178 + break;
20179 + case TASK_SIZE_MAX:
20180 + loadsegment(gs, __USER_DS);
20181 + break;
20182 + case -1UL:
20183 + loadsegment(gs, __KERNEL_DS);
20184 + break;
20185 + default:
20186 + BUG();
20187 + }
20188 + return;
20189 }
20190 -EXPORT_SYMBOL(copy_to_user);
20191 +EXPORT_SYMBOL(__set_fs);
20192
20193 -/**
20194 - * copy_from_user: - Copy a block of data from user space.
20195 - * @to: Destination address, in kernel space.
20196 - * @from: Source address, in user space.
20197 - * @n: Number of bytes to copy.
20198 - *
20199 - * Context: User context only. This function may sleep.
20200 - *
20201 - * Copy data from user space to kernel space.
20202 - *
20203 - * Returns number of bytes that could not be copied.
20204 - * On success, this will be zero.
20205 - *
20206 - * If some data could not be copied, this function will pad the copied
20207 - * data to the requested size using zero bytes.
20208 - */
20209 -unsigned long
20210 -copy_from_user(void *to, const void __user *from, unsigned long n)
20211 +void set_fs(mm_segment_t x)
20212 {
20213 - if (access_ok(VERIFY_READ, from, n))
20214 - n = __copy_from_user(to, from, n);
20215 - else
20216 - memset(to, 0, n);
20217 - return n;
20218 + current_thread_info()->addr_limit = x;
20219 + __set_fs(x);
20220 }
20221 -EXPORT_SYMBOL(copy_from_user);
20222 +EXPORT_SYMBOL(set_fs);
20223 +#endif
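Note on the usercopy_32.c hunks above: __copyuser_seg is spliced into the inline-asm templates by plain C string-literal concatenation, so with 32-bit UDEREF every userland access picks up a %gs segment override while kernel-side accesses stay unprefixed; __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES presumably play the analogous role for the rep-string instructions that address through %es implicitly. A toy illustration of the concatenation mechanism, with a demo macro standing in for the real __copyuser_seg definition from the uaccess headers:

#include <stdio.h>

#ifdef DEMO_UDEREF_32
#define COPYUSER_SEG_DEMO "gs;"        /* stand-in for __copyuser_seg */
#else
#define COPYUSER_SEG_DEMO ""
#endif

int main(void)
{
	/* adjacent string literals merge at compile time, so the override
	 * ends up inside the instruction template handed to the assembler */
	const char *tmpl = "0: " COPYUSER_SEG_DEMO "lodsb\n";
	fputs(tmpl, stdout);
	return 0;
}

Building with -DDEMO_UDEREF_32 prints the prefixed form; without it, the plain instruction.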
20224 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_64.c linux-2.6.32.42/arch/x86/lib/usercopy_64.c
20225 --- linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20226 +++ linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20227 @@ -42,6 +42,12 @@ long
20228 __strncpy_from_user(char *dst, const char __user *src, long count)
20229 {
20230 long res;
20231 +
20232 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20233 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20234 + src += PAX_USER_SHADOW_BASE;
20235 +#endif
20236 +
20237 __do_strncpy_from_user(dst, src, count, res);
20238 return res;
20239 }
20240 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20241 {
20242 long __d0;
20243 might_fault();
20244 +
20245 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20246 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20247 + addr += PAX_USER_SHADOW_BASE;
20248 +#endif
20249 +
20250 /* no memory constraint because it doesn't change any memory gcc knows
20251 about */
20252 asm volatile(
20253 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20254
20255 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20256 {
20257 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20258 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20259 +
20260 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20261 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20262 + to += PAX_USER_SHADOW_BASE;
20263 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20264 + from += PAX_USER_SHADOW_BASE;
20265 +#endif
20266 +
20267 return copy_user_generic((__force void *)to, (__force void *)from, len);
20268 - }
20269 - return len;
20270 + }
20271 + return len;
20272 }
20273 EXPORT_SYMBOL(copy_in_user);
20274
20275 diff -urNp linux-2.6.32.42/arch/x86/Makefile linux-2.6.32.42/arch/x86/Makefile
20276 --- linux-2.6.32.42/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20277 +++ linux-2.6.32.42/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
20278 @@ -189,3 +189,12 @@ define archhelp
20279 echo ' FDARGS="..." arguments for the booted kernel'
20280 echo ' FDINITRD=file initrd for the booted kernel'
20281 endef
20282 +
20283 +define OLD_LD
20284 +
20285 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20286 +*** Please upgrade your binutils to 2.18 or newer
20287 +endef
20288 +
20289 +archprepare:
20290 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20291 diff -urNp linux-2.6.32.42/arch/x86/mm/extable.c linux-2.6.32.42/arch/x86/mm/extable.c
20292 --- linux-2.6.32.42/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20293 +++ linux-2.6.32.42/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20294 @@ -1,14 +1,71 @@
20295 #include <linux/module.h>
20296 #include <linux/spinlock.h>
20297 +#include <linux/sort.h>
20298 #include <asm/uaccess.h>
20299 +#include <asm/pgtable.h>
20300
20301 +/*
20302 + * The exception table needs to be sorted so that the binary
20303 + * search that we use to find entries in it works properly.
20304 + * This is used both for the kernel exception table and for
20305 + * the exception tables of modules that get loaded.
20306 + */
20307 +static int cmp_ex(const void *a, const void *b)
20308 +{
20309 + const struct exception_table_entry *x = a, *y = b;
20310 +
20311 + /* avoid overflow */
20312 + if (x->insn > y->insn)
20313 + return 1;
20314 + if (x->insn < y->insn)
20315 + return -1;
20316 + return 0;
20317 +}
20318 +
20319 +static void swap_ex(void *a, void *b, int size)
20320 +{
20321 + struct exception_table_entry t, *x = a, *y = b;
20322 +
20323 + t = *x;
20324 +
20325 + pax_open_kernel();
20326 + *x = *y;
20327 + *y = t;
20328 + pax_close_kernel();
20329 +}
20330 +
20331 +void sort_extable(struct exception_table_entry *start,
20332 + struct exception_table_entry *finish)
20333 +{
20334 + sort(start, finish - start, sizeof(struct exception_table_entry),
20335 + cmp_ex, swap_ex);
20336 +}
20337 +
20338 +#ifdef CONFIG_MODULES
20339 +/*
20340 + * If the exception table is sorted, any referring to the module init
20341 + * will be at the beginning or the end.
20342 + */
20343 +void trim_init_extable(struct module *m)
20344 +{
20345 + /*trim the beginning*/
20346 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20347 + m->extable++;
20348 + m->num_exentries--;
20349 + }
20350 + /*trim the end*/
20351 + while (m->num_exentries &&
20352 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20353 + m->num_exentries--;
20354 +}
20355 +#endif /* CONFIG_MODULES */
20356
20357 int fixup_exception(struct pt_regs *regs)
20358 {
20359 const struct exception_table_entry *fixup;
20360
20361 #ifdef CONFIG_PNPBIOS
20362 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20363 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20364 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20365 extern u32 pnp_bios_is_utter_crap;
20366 pnp_bios_is_utter_crap = 1;
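Note on the sort_extable() replacement above: it feeds cmp_ex()/swap_ex() to the kernel's sort(), where the comparator deliberately avoids returning x->insn - y->insn (which could overflow when truncated to int, hence the "avoid overflow" comment) and the swap helper is bracketed by pax_open_kernel()/pax_close_kernel() so the table, read-only under KERNEXEC, can still be rewritten. A user-space sketch of the overflow-safe comparator, with qsort() standing in for the in-kernel sort():

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned long insn; unsigned long fixup; };

/* mirrors cmp_ex() above: explicit compares instead of a subtraction */
static int cmp_entry(const void *a, const void *b)
{
	const struct entry *x = a, *y = b;

	if (x->insn > y->insn)
		return 1;
	if (x->insn < y->insn)
		return -1;
	return 0;
}

int main(void)
{
	struct entry tab[] = { { 0x3000, 10 }, { 0x1000, 11 }, { 0x2000, 12 } };
	unsigned i;

	qsort(tab, sizeof(tab) / sizeof(tab[0]), sizeof(tab[0]), cmp_entry);
	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		printf("%#lx -> fixup %lu\n", tab[i].insn, tab[i].fixup);
	return 0;
}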
20367 diff -urNp linux-2.6.32.42/arch/x86/mm/fault.c linux-2.6.32.42/arch/x86/mm/fault.c
20368 --- linux-2.6.32.42/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20369 +++ linux-2.6.32.42/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20370 @@ -11,10 +11,19 @@
20371 #include <linux/kprobes.h> /* __kprobes, ... */
20372 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20373 #include <linux/perf_event.h> /* perf_sw_event */
20374 +#include <linux/unistd.h>
20375 +#include <linux/compiler.h>
20376
20377 #include <asm/traps.h> /* dotraplinkage, ... */
20378 #include <asm/pgalloc.h> /* pgd_*(), ... */
20379 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20380 +#include <asm/vsyscall.h>
20381 +#include <asm/tlbflush.h>
20382 +
20383 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20384 +#include <asm/stacktrace.h>
20385 +#include "../kernel/dumpstack.h"
20386 +#endif
20387
20388 /*
20389 * Page fault error code bits:
20390 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20391 int ret = 0;
20392
20393 /* kprobe_running() needs smp_processor_id() */
20394 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20395 + if (kprobes_built_in() && !user_mode(regs)) {
20396 preempt_disable();
20397 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20398 ret = 1;
20399 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20400 return !instr_lo || (instr_lo>>1) == 1;
20401 case 0x00:
20402 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20403 - if (probe_kernel_address(instr, opcode))
20404 + if (user_mode(regs)) {
20405 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20406 + return 0;
20407 + } else if (probe_kernel_address(instr, opcode))
20408 return 0;
20409
20410 *prefetch = (instr_lo == 0xF) &&
20411 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20412 while (instr < max_instr) {
20413 unsigned char opcode;
20414
20415 - if (probe_kernel_address(instr, opcode))
20416 + if (user_mode(regs)) {
20417 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20418 + break;
20419 + } else if (probe_kernel_address(instr, opcode))
20420 break;
20421
20422 instr++;
20423 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20424 force_sig_info(si_signo, &info, tsk);
20425 }
20426
20427 +#ifdef CONFIG_PAX_EMUTRAMP
20428 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20429 +#endif
20430 +
20431 +#ifdef CONFIG_PAX_PAGEEXEC
20432 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20433 +{
20434 + pgd_t *pgd;
20435 + pud_t *pud;
20436 + pmd_t *pmd;
20437 +
20438 + pgd = pgd_offset(mm, address);
20439 + if (!pgd_present(*pgd))
20440 + return NULL;
20441 + pud = pud_offset(pgd, address);
20442 + if (!pud_present(*pud))
20443 + return NULL;
20444 + pmd = pmd_offset(pud, address);
20445 + if (!pmd_present(*pmd))
20446 + return NULL;
20447 + return pmd;
20448 +}
20449 +#endif
20450 +
20451 DEFINE_SPINLOCK(pgd_lock);
20452 LIST_HEAD(pgd_list);
20453
20454 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20455 address += PMD_SIZE) {
20456
20457 unsigned long flags;
20458 +
20459 +#ifdef CONFIG_PAX_PER_CPU_PGD
20460 + unsigned long cpu;
20461 +#else
20462 struct page *page;
20463 +#endif
20464
20465 spin_lock_irqsave(&pgd_lock, flags);
20466 +
20467 +#ifdef CONFIG_PAX_PER_CPU_PGD
20468 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20469 + pgd_t *pgd = get_cpu_pgd(cpu);
20470 +#else
20471 list_for_each_entry(page, &pgd_list, lru) {
20472 - if (!vmalloc_sync_one(page_address(page), address))
20473 + pgd_t *pgd = page_address(page);
20474 +#endif
20475 +
20476 + if (!vmalloc_sync_one(pgd, address))
20477 break;
20478 }
20479 spin_unlock_irqrestore(&pgd_lock, flags);
20480 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20481 * an interrupt in the middle of a task switch..
20482 */
20483 pgd_paddr = read_cr3();
20484 +
20485 +#ifdef CONFIG_PAX_PER_CPU_PGD
20486 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20487 +#endif
20488 +
20489 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20490 if (!pmd_k)
20491 return -1;
20492 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20493
20494 const pgd_t *pgd_ref = pgd_offset_k(address);
20495 unsigned long flags;
20496 +
20497 +#ifdef CONFIG_PAX_PER_CPU_PGD
20498 + unsigned long cpu;
20499 +#else
20500 struct page *page;
20501 +#endif
20502
20503 if (pgd_none(*pgd_ref))
20504 continue;
20505
20506 spin_lock_irqsave(&pgd_lock, flags);
20507 +
20508 +#ifdef CONFIG_PAX_PER_CPU_PGD
20509 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20510 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20511 +#else
20512 list_for_each_entry(page, &pgd_list, lru) {
20513 pgd_t *pgd;
20514 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20515 +#endif
20516 +
20517 if (pgd_none(*pgd))
20518 set_pgd(pgd, *pgd_ref);
20519 else
20520 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20521 * happen within a race in page table update. In the later
20522 * case just flush:
20523 */
20524 +
20525 +#ifdef CONFIG_PAX_PER_CPU_PGD
20526 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20527 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20528 +#else
20529 pgd = pgd_offset(current->active_mm, address);
20530 +#endif
20531 +
20532 pgd_ref = pgd_offset_k(address);
20533 if (pgd_none(*pgd_ref))
20534 return -1;
20535 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20536 static int is_errata100(struct pt_regs *regs, unsigned long address)
20537 {
20538 #ifdef CONFIG_X86_64
20539 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20540 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20541 return 1;
20542 #endif
20543 return 0;
20544 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20545 }
20546
20547 static const char nx_warning[] = KERN_CRIT
20548 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20549 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20550
20551 static void
20552 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20553 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20554 if (!oops_may_print())
20555 return;
20556
20557 - if (error_code & PF_INSTR) {
20558 + if (nx_enabled && (error_code & PF_INSTR)) {
20559 unsigned int level;
20560
20561 pte_t *pte = lookup_address(address, &level);
20562
20563 if (pte && pte_present(*pte) && !pte_exec(*pte))
20564 - printk(nx_warning, current_uid());
20565 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20566 }
20567
20568 +#ifdef CONFIG_PAX_KERNEXEC
20569 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20570 + if (current->signal->curr_ip)
20571 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20572 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20573 + else
20574 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20575 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20576 + }
20577 +#endif
20578 +
20579 printk(KERN_ALERT "BUG: unable to handle kernel ");
20580 if (address < PAGE_SIZE)
20581 printk(KERN_CONT "NULL pointer dereference");
20582 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20583 unsigned long address, int si_code)
20584 {
20585 struct task_struct *tsk = current;
20586 + struct mm_struct *mm = tsk->mm;
20587 +
20588 +#ifdef CONFIG_X86_64
20589 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20590 + if (regs->ip == (unsigned long)vgettimeofday) {
20591 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20592 + return;
20593 + } else if (regs->ip == (unsigned long)vtime) {
20594 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20595 + return;
20596 + } else if (regs->ip == (unsigned long)vgetcpu) {
20597 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20598 + return;
20599 + }
20600 + }
20601 +#endif
20602 +
20603 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20604 + if (mm && (error_code & PF_USER)) {
20605 + unsigned long ip = regs->ip;
20606 +
20607 + if (v8086_mode(regs))
20608 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20609 +
20610 + /*
20611 + * It's possible to have interrupts off here:
20612 + */
20613 + local_irq_enable();
20614 +
20615 +#ifdef CONFIG_PAX_PAGEEXEC
20616 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20617 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20618 +
20619 +#ifdef CONFIG_PAX_EMUTRAMP
20620 + switch (pax_handle_fetch_fault(regs)) {
20621 + case 2:
20622 + return;
20623 + }
20624 +#endif
20625 +
20626 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20627 + do_group_exit(SIGKILL);
20628 + }
20629 +#endif
20630 +
20631 +#ifdef CONFIG_PAX_SEGMEXEC
20632 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20633 +
20634 +#ifdef CONFIG_PAX_EMUTRAMP
20635 + switch (pax_handle_fetch_fault(regs)) {
20636 + case 2:
20637 + return;
20638 + }
20639 +#endif
20640 +
20641 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20642 + do_group_exit(SIGKILL);
20643 + }
20644 +#endif
20645 +
20646 + }
20647 +#endif
20648
20649 /* User mode accesses just cause a SIGSEGV */
20650 if (error_code & PF_USER) {
20651 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20652 return 1;
20653 }
20654
20655 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20656 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20657 +{
20658 + pte_t *pte;
20659 + pmd_t *pmd;
20660 + spinlock_t *ptl;
20661 + unsigned char pte_mask;
20662 +
20663 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20664 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20665 + return 0;
20666 +
20667 + /* PaX: it's our fault, let's handle it if we can */
20668 +
20669 + /* PaX: take a look at read faults before acquiring any locks */
20670 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20671 + /* instruction fetch attempt from a protected page in user mode */
20672 + up_read(&mm->mmap_sem);
20673 +
20674 +#ifdef CONFIG_PAX_EMUTRAMP
20675 + switch (pax_handle_fetch_fault(regs)) {
20676 + case 2:
20677 + return 1;
20678 + }
20679 +#endif
20680 +
20681 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20682 + do_group_exit(SIGKILL);
20683 + }
20684 +
20685 + pmd = pax_get_pmd(mm, address);
20686 + if (unlikely(!pmd))
20687 + return 0;
20688 +
20689 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20690 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20691 + pte_unmap_unlock(pte, ptl);
20692 + return 0;
20693 + }
20694 +
20695 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20696 + /* write attempt to a protected page in user mode */
20697 + pte_unmap_unlock(pte, ptl);
20698 + return 0;
20699 + }
20700 +
20701 +#ifdef CONFIG_SMP
20702 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20703 +#else
20704 + if (likely(address > get_limit(regs->cs)))
20705 +#endif
20706 + {
20707 + set_pte(pte, pte_mkread(*pte));
20708 + __flush_tlb_one(address);
20709 + pte_unmap_unlock(pte, ptl);
20710 + up_read(&mm->mmap_sem);
20711 + return 1;
20712 + }
20713 +
20714 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20715 +
20716 + /*
20717 + * PaX: fill DTLB with user rights and retry
20718 + */
20719 + __asm__ __volatile__ (
20720 + "orb %2,(%1)\n"
20721 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20722 +/*
20723 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20724 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20725 + * page fault when examined during a TLB load attempt. this is true not only
20726 + * for PTEs holding a non-present entry but also present entries that will
20727 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20728 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20729 + * for our target pages since their PTEs are simply not in the TLBs at all.
20730 +
20731 + * the best thing in omitting it is that we gain around 15-20% speed in the
20732 + * fast path of the page fault handler and can get rid of tracing since we
20733 + * can no longer flush unintended entries.
20734 + */
20735 + "invlpg (%0)\n"
20736 +#endif
20737 + __copyuser_seg"testb $0,(%0)\n"
20738 + "xorb %3,(%1)\n"
20739 + :
20740 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20741 + : "memory", "cc");
20742 + pte_unmap_unlock(pte, ptl);
20743 + up_read(&mm->mmap_sem);
20744 + return 1;
20745 +}
20746 +#endif
20747 +
20748 /*
20749 * Handle a spurious fault caused by a stale TLB entry.
20750 *
20751 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20752 static inline int
20753 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20754 {
20755 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20756 + return 1;
20757 +
20758 if (write) {
20759 /* write, present and write, not present: */
20760 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20761 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20762 {
20763 struct vm_area_struct *vma;
20764 struct task_struct *tsk;
20765 - unsigned long address;
20766 struct mm_struct *mm;
20767 int write;
20768 int fault;
20769
20770 + /* Get the faulting address: */
20771 + unsigned long address = read_cr2();
20772 +
20773 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20774 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20775 + if (!search_exception_tables(regs->ip)) {
20776 + bad_area_nosemaphore(regs, error_code, address);
20777 + return;
20778 + }
20779 + if (address < PAX_USER_SHADOW_BASE) {
20780 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20781 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20782 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20783 + } else
20784 + address -= PAX_USER_SHADOW_BASE;
20785 + }
20786 +#endif
20787 +
20788 tsk = current;
20789 mm = tsk->mm;
20790
20791 - /* Get the faulting address: */
20792 - address = read_cr2();
20793 -
20794 /*
20795 * Detect and handle instructions that would cause a page fault for
20796 * both a tracked kernel page and a userspace page.
20797 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20798 * User-mode registers count as a user access even for any
20799 * potential system fault or CPU buglet:
20800 */
20801 - if (user_mode_vm(regs)) {
20802 + if (user_mode(regs)) {
20803 local_irq_enable();
20804 error_code |= PF_USER;
20805 } else {
20806 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20807 might_sleep();
20808 }
20809
20810 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20811 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20812 + return;
20813 +#endif
20814 +
20815 vma = find_vma(mm, address);
20816 if (unlikely(!vma)) {
20817 bad_area(regs, error_code, address);
20818 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20819 bad_area(regs, error_code, address);
20820 return;
20821 }
20822 - if (error_code & PF_USER) {
20823 - /*
20824 - * Accessing the stack below %sp is always a bug.
20825 - * The large cushion allows instructions like enter
20826 - * and pusha to work. ("enter $65535, $31" pushes
20827 - * 32 pointers and then decrements %sp by 65535.)
20828 - */
20829 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20830 - bad_area(regs, error_code, address);
20831 - return;
20832 - }
20833 + /*
20834 + * Accessing the stack below %sp is always a bug.
20835 + * The large cushion allows instructions like enter
20836 + * and pusha to work. ("enter $65535, $31" pushes
20837 + * 32 pointers and then decrements %sp by 65535.)
20838 + */
20839 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20840 + bad_area(regs, error_code, address);
20841 + return;
20842 + }
20843 +
20844 +#ifdef CONFIG_PAX_SEGMEXEC
20845 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20846 + bad_area(regs, error_code, address);
20847 + return;
20848 }
20849 +#endif
20850 +
20851 if (unlikely(expand_stack(vma, address))) {
20852 bad_area(regs, error_code, address);
20853 return;
20854 @@ -1146,3 +1416,199 @@ good_area:
20855
20856 up_read(&mm->mmap_sem);
20857 }
20858 +
20859 +#ifdef CONFIG_PAX_EMUTRAMP
20860 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20861 +{
20862 + int err;
20863 +
20864 + do { /* PaX: gcc trampoline emulation #1 */
20865 + unsigned char mov1, mov2;
20866 + unsigned short jmp;
20867 + unsigned int addr1, addr2;
20868 +
20869 +#ifdef CONFIG_X86_64
20870 + if ((regs->ip + 11) >> 32)
20871 + break;
20872 +#endif
20873 +
20874 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20875 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20876 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20877 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20878 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20879 +
20880 + if (err)
20881 + break;
20882 +
20883 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20884 + regs->cx = addr1;
20885 + regs->ax = addr2;
20886 + regs->ip = addr2;
20887 + return 2;
20888 + }
20889 + } while (0);
20890 +
20891 + do { /* PaX: gcc trampoline emulation #2 */
20892 + unsigned char mov, jmp;
20893 + unsigned int addr1, addr2;
20894 +
20895 +#ifdef CONFIG_X86_64
20896 + if ((regs->ip + 9) >> 32)
20897 + break;
20898 +#endif
20899 +
20900 + err = get_user(mov, (unsigned char __user *)regs->ip);
20901 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20902 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20903 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20904 +
20905 + if (err)
20906 + break;
20907 +
20908 + if (mov == 0xB9 && jmp == 0xE9) {
20909 + regs->cx = addr1;
20910 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20911 + return 2;
20912 + }
20913 + } while (0);
20914 +
20915 + return 1; /* PaX in action */
20916 +}
20917 +
20918 +#ifdef CONFIG_X86_64
20919 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20920 +{
20921 + int err;
20922 +
20923 + do { /* PaX: gcc trampoline emulation #1 */
20924 + unsigned short mov1, mov2, jmp1;
20925 + unsigned char jmp2;
20926 + unsigned int addr1;
20927 + unsigned long addr2;
20928 +
20929 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20930 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20931 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20932 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20933 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20934 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20935 +
20936 + if (err)
20937 + break;
20938 +
20939 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20940 + regs->r11 = addr1;
20941 + regs->r10 = addr2;
20942 + regs->ip = addr1;
20943 + return 2;
20944 + }
20945 + } while (0);
20946 +
20947 + do { /* PaX: gcc trampoline emulation #2 */
20948 + unsigned short mov1, mov2, jmp1;
20949 + unsigned char jmp2;
20950 + unsigned long addr1, addr2;
20951 +
20952 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20953 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20954 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20955 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20956 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20957 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20958 +
20959 + if (err)
20960 + break;
20961 +
20962 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20963 + regs->r11 = addr1;
20964 + regs->r10 = addr2;
20965 + regs->ip = addr1;
20966 + return 2;
20967 + }
20968 + } while (0);
20969 +
20970 + return 1; /* PaX in action */
20971 +}
20972 +#endif
20973 +
20974 +/*
20975 + * PaX: decide what to do with offenders (regs->ip = fault address)
20976 + *
20977 + * returns 1 when task should be killed
20978 + * 2 when gcc trampoline was detected
20979 + */
20980 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20981 +{
20982 + if (v8086_mode(regs))
20983 + return 1;
20984 +
20985 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20986 + return 1;
20987 +
20988 +#ifdef CONFIG_X86_32
20989 + return pax_handle_fetch_fault_32(regs);
20990 +#else
20991 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20992 + return pax_handle_fetch_fault_32(regs);
20993 + else
20994 + return pax_handle_fetch_fault_64(regs);
20995 +#endif
20996 +}
20997 +#endif
20998 +
20999 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21000 +void pax_report_insns(void *pc, void *sp)
21001 +{
21002 + long i;
21003 +
21004 + printk(KERN_ERR "PAX: bytes at PC: ");
21005 + for (i = 0; i < 20; i++) {
21006 + unsigned char c;
21007 + if (get_user(c, (__force unsigned char __user *)pc+i))
21008 + printk(KERN_CONT "?? ");
21009 + else
21010 + printk(KERN_CONT "%02x ", c);
21011 + }
21012 + printk("\n");
21013 +
21014 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21015 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21016 + unsigned long c;
21017 + if (get_user(c, (__force unsigned long __user *)sp+i))
21018 +#ifdef CONFIG_X86_32
21019 + printk(KERN_CONT "???????? ");
21020 +#else
21021 + printk(KERN_CONT "???????????????? ");
21022 +#endif
21023 + else
21024 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21025 + }
21026 + printk("\n");
21027 +}
21028 +#endif
21029 +
21030 +/**
21031 + * probe_kernel_write(): safely attempt to write to a location
21032 + * @dst: address to write to
21033 + * @src: pointer to the data that shall be written
21034 + * @size: size of the data chunk
21035 + *
21036 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21037 + * happens, handle that and return -EFAULT.
21038 + */
21039 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21040 +{
21041 + long ret;
21042 + mm_segment_t old_fs = get_fs();
21043 +
21044 + set_fs(KERNEL_DS);
21045 + pagefault_disable();
21046 + pax_open_kernel();
21047 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21048 + pax_close_kernel();
21049 + pagefault_enable();
21050 + set_fs(old_fs);
21051 +
21052 + return ret ? -EFAULT : 0;
21053 +}
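
pax_handle_fetch_fault_32() above recognizes the two instruction sequences gcc emits for nested-function trampolines on i386 ("mov $imm32,%ecx; mov $imm32,%eax; jmp *%eax" and "mov $imm32,%ecx; jmp rel32") and emulates them instead of killing the task under EMUTRAMP. A small userspace sketch of the byte-pattern match for the first form; the buffer and helper names are illustrative, the opcodes (0xB9, 0xB8, 0xFF 0xE0) are the ones tested above, and a little-endian host is assumed, as on the i386 code it mirrors:

    /* Match the 12-byte i386 trampoline "mov $a,%ecx; mov $b,%eax; jmp *%eax"
     * and extract the two 32-bit immediates, as the EMUTRAMP path does. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int match_trampoline(const unsigned char *p, uint32_t *ctx, uint32_t *target)
    {
        if (p[0] != 0xB9 || p[5] != 0xB8 || p[10] != 0xFF || p[11] != 0xE0)
            return 0;
        memcpy(ctx,    p + 1, 4);   /* immediate of "mov $imm32,%ecx" (static chain)   */
        memcpy(target, p + 6, 4);   /* immediate of "mov $imm32,%eax" = jump target    */
        return 1;
    }

    int main(void)
    {
        /* mov $0x11223344,%ecx ; mov $0x08049000,%eax ; jmp *%eax */
        const unsigned char code[12] = {
            0xB9, 0x44, 0x33, 0x22, 0x11,
            0xB8, 0x00, 0x90, 0x04, 0x08,
            0xFF, 0xE0
        };
        uint32_t ctx, target;

        if (match_trampoline(code, &ctx, &target))
            printf("trampoline: ecx=%#x, jump to %#x\n",
                   (unsigned int)ctx, (unsigned int)target);
        return 0;
    }
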
21054 diff -urNp linux-2.6.32.42/arch/x86/mm/gup.c linux-2.6.32.42/arch/x86/mm/gup.c
21055 --- linux-2.6.32.42/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21056 +++ linux-2.6.32.42/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21057 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21058 addr = start;
21059 len = (unsigned long) nr_pages << PAGE_SHIFT;
21060 end = start + len;
21061 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21062 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21063 (void __user *)start, len)))
21064 return 0;
21065
21066 diff -urNp linux-2.6.32.42/arch/x86/mm/highmem_32.c linux-2.6.32.42/arch/x86/mm/highmem_32.c
21067 --- linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21068 +++ linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21069 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21070 idx = type + KM_TYPE_NR*smp_processor_id();
21071 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21072 BUG_ON(!pte_none(*(kmap_pte-idx)));
21073 +
21074 + pax_open_kernel();
21075 set_pte(kmap_pte-idx, mk_pte(page, prot));
21076 + pax_close_kernel();
21077
21078 return (void *)vaddr;
21079 }
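
This hunk, like the later iomap_32.c, pgtable_32.c and pageattr.c ones, brackets set_pte() with pax_open_kernel()/pax_close_kernel(): under KERNEXEC the kernel page tables are kept non-writable and the pair briefly opens a write window for the update. A rough userspace analogy only (the kernel mechanism is different, toggling write protection on kernel mappings rather than calling mprotect) showing the same keep-read-only, open-briefly-to-write pattern:

    /* Userspace analogy to pax_open_kernel()/pax_close_kernel(): data stays
     * read-only by default; a short write window is opened for one update. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;

        strcpy((char *)page, "initial");
        mprotect(page, pagesz, PROT_READ);              /* "closed": read-only        */

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue */
        strcpy((char *)page, "updated");                /* the one permitted store    */
        mprotect(page, pagesz, PROT_READ);              /* pax_close_kernel() analogue */

        printf("%s\n", (char *)page);
        munmap(page, pagesz);
        return 0;
    }
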
21080 diff -urNp linux-2.6.32.42/arch/x86/mm/hugetlbpage.c linux-2.6.32.42/arch/x86/mm/hugetlbpage.c
21081 --- linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21082 +++ linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21083 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21084 struct hstate *h = hstate_file(file);
21085 struct mm_struct *mm = current->mm;
21086 struct vm_area_struct *vma;
21087 - unsigned long start_addr;
21088 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21089 +
21090 +#ifdef CONFIG_PAX_SEGMEXEC
21091 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21092 + pax_task_size = SEGMEXEC_TASK_SIZE;
21093 +#endif
21094 +
21095 + pax_task_size -= PAGE_SIZE;
21096
21097 if (len > mm->cached_hole_size) {
21098 - start_addr = mm->free_area_cache;
21099 + start_addr = mm->free_area_cache;
21100 } else {
21101 - start_addr = TASK_UNMAPPED_BASE;
21102 - mm->cached_hole_size = 0;
21103 + start_addr = mm->mmap_base;
21104 + mm->cached_hole_size = 0;
21105 }
21106
21107 full_search:
21108 @@ -281,26 +288,27 @@ full_search:
21109
21110 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21111 /* At this point: (!vma || addr < vma->vm_end). */
21112 - if (TASK_SIZE - len < addr) {
21113 + if (pax_task_size - len < addr) {
21114 /*
21115 * Start a new search - just in case we missed
21116 * some holes.
21117 */
21118 - if (start_addr != TASK_UNMAPPED_BASE) {
21119 - start_addr = TASK_UNMAPPED_BASE;
21120 + if (start_addr != mm->mmap_base) {
21121 + start_addr = mm->mmap_base;
21122 mm->cached_hole_size = 0;
21123 goto full_search;
21124 }
21125 return -ENOMEM;
21126 }
21127 - if (!vma || addr + len <= vma->vm_start) {
21128 - mm->free_area_cache = addr + len;
21129 - return addr;
21130 - }
21131 + if (check_heap_stack_gap(vma, addr, len))
21132 + break;
21133 if (addr + mm->cached_hole_size < vma->vm_start)
21134 mm->cached_hole_size = vma->vm_start - addr;
21135 addr = ALIGN(vma->vm_end, huge_page_size(h));
21136 }
21137 +
21138 + mm->free_area_cache = addr + len;
21139 + return addr;
21140 }
21141
21142 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21143 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21144 {
21145 struct hstate *h = hstate_file(file);
21146 struct mm_struct *mm = current->mm;
21147 - struct vm_area_struct *vma, *prev_vma;
21148 - unsigned long base = mm->mmap_base, addr = addr0;
21149 + struct vm_area_struct *vma;
21150 + unsigned long base = mm->mmap_base, addr;
21151 unsigned long largest_hole = mm->cached_hole_size;
21152 - int first_time = 1;
21153
21154 /* don't allow allocations above current base */
21155 if (mm->free_area_cache > base)
21156 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21157 largest_hole = 0;
21158 mm->free_area_cache = base;
21159 }
21160 -try_again:
21161 +
21162 /* make sure it can fit in the remaining address space */
21163 if (mm->free_area_cache < len)
21164 goto fail;
21165
21166 /* either no address requested or cant fit in requested address hole */
21167 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21168 + addr = (mm->free_area_cache - len);
21169 do {
21170 + addr &= huge_page_mask(h);
21171 + vma = find_vma(mm, addr);
21172 /*
21173 * Lookup failure means no vma is above this address,
21174 * i.e. return with success:
21175 - */
21176 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21177 - return addr;
21178 -
21179 - /*
21180 * new region fits between prev_vma->vm_end and
21181 * vma->vm_start, use it:
21182 */
21183 - if (addr + len <= vma->vm_start &&
21184 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21185 + if (check_heap_stack_gap(vma, addr, len)) {
21186 /* remember the address as a hint for next time */
21187 - mm->cached_hole_size = largest_hole;
21188 - return (mm->free_area_cache = addr);
21189 - } else {
21190 - /* pull free_area_cache down to the first hole */
21191 - if (mm->free_area_cache == vma->vm_end) {
21192 - mm->free_area_cache = vma->vm_start;
21193 - mm->cached_hole_size = largest_hole;
21194 - }
21195 + mm->cached_hole_size = largest_hole;
21196 + return (mm->free_area_cache = addr);
21197 + }
21198 + /* pull free_area_cache down to the first hole */
21199 + if (mm->free_area_cache == vma->vm_end) {
21200 + mm->free_area_cache = vma->vm_start;
21201 + mm->cached_hole_size = largest_hole;
21202 }
21203
21204 /* remember the largest hole we saw so far */
21205 if (addr + largest_hole < vma->vm_start)
21206 - largest_hole = vma->vm_start - addr;
21207 + largest_hole = vma->vm_start - addr;
21208
21209 /* try just below the current vma->vm_start */
21210 - addr = (vma->vm_start - len) & huge_page_mask(h);
21211 - } while (len <= vma->vm_start);
21212 + addr = skip_heap_stack_gap(vma, len);
21213 + } while (!IS_ERR_VALUE(addr));
21214
21215 fail:
21216 /*
21217 - * if hint left us with no space for the requested
21218 - * mapping then try again:
21219 - */
21220 - if (first_time) {
21221 - mm->free_area_cache = base;
21222 - largest_hole = 0;
21223 - first_time = 0;
21224 - goto try_again;
21225 - }
21226 - /*
21227 * A failed mmap() very likely causes application failure,
21228 * so fall back to the bottom-up function here. This scenario
21229 * can happen with large stack limits and large mmap()
21230 * allocations.
21231 */
21232 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21233 +
21234 +#ifdef CONFIG_PAX_SEGMEXEC
21235 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21236 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21237 + else
21238 +#endif
21239 +
21240 + mm->mmap_base = TASK_UNMAPPED_BASE;
21241 +
21242 +#ifdef CONFIG_PAX_RANDMMAP
21243 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21244 + mm->mmap_base += mm->delta_mmap;
21245 +#endif
21246 +
21247 + mm->free_area_cache = mm->mmap_base;
21248 mm->cached_hole_size = ~0UL;
21249 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21250 len, pgoff, flags);
21251 @@ -387,6 +393,7 @@ fail:
21252 /*
21253 * Restore the topdown base:
21254 */
21255 + mm->mmap_base = base;
21256 mm->free_area_cache = base;
21257 mm->cached_hole_size = ~0UL;
21258
21259 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21260 struct hstate *h = hstate_file(file);
21261 struct mm_struct *mm = current->mm;
21262 struct vm_area_struct *vma;
21263 + unsigned long pax_task_size = TASK_SIZE;
21264
21265 if (len & ~huge_page_mask(h))
21266 return -EINVAL;
21267 - if (len > TASK_SIZE)
21268 +
21269 +#ifdef CONFIG_PAX_SEGMEXEC
21270 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21271 + pax_task_size = SEGMEXEC_TASK_SIZE;
21272 +#endif
21273 +
21274 + pax_task_size -= PAGE_SIZE;
21275 +
21276 + if (len > pax_task_size)
21277 return -ENOMEM;
21278
21279 if (flags & MAP_FIXED) {
21280 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21281 if (addr) {
21282 addr = ALIGN(addr, huge_page_size(h));
21283 vma = find_vma(mm, addr);
21284 - if (TASK_SIZE - len >= addr &&
21285 - (!vma || addr + len <= vma->vm_start))
21286 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21287 return addr;
21288 }
21289 if (mm->get_unmapped_area == arch_get_unmapped_area)
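
The hugetlbpage.c hunks above replace the plain "addr + len <= vma->vm_start" test with check_heap_stack_gap(), which in addition rejects candidate ranges that would end right below a downward-growing stack VMA. A simplified sketch of that kind of check; the guard size, struct layout and function name here are illustrative, not the helper added by the patch:

    /* Simplified "does [addr, addr+len) fit below this VMA?" test in the
     * spirit of check_heap_stack_gap(): demand extra room under a stack. */
    #include <stdbool.h>
    #include <stdio.h>

    #define GUARD_GAP (64UL * 1024)   /* illustrative guard size */

    struct vma {
        unsigned long start;
        unsigned long end;
        bool grows_down;              /* VM_GROWSDOWN-style stack VMA */
    };

    static bool fits_below(const struct vma *next, unsigned long addr, unsigned long len)
    {
        if (!next)                                    /* nothing above: always fits    */
            return true;
        if (next->grows_down)                         /* keep a guard gap below stacks */
            return addr + len + GUARD_GAP <= next->start;
        return addr + len <= next->start;             /* ordinary neighbour            */
    }

    int main(void)
    {
        struct vma stack = { 0xbf800000UL, 0xc0000000UL, true };

        printf("%d\n", fits_below(&stack, 0xbf000000UL, 0x00400000UL)); /* 1: gap kept  */
        printf("%d\n", fits_below(&stack, 0xbf000000UL, 0x007f8000UL)); /* 0: too close */
        return 0;
    }
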
21290 diff -urNp linux-2.6.32.42/arch/x86/mm/init_32.c linux-2.6.32.42/arch/x86/mm/init_32.c
21291 --- linux-2.6.32.42/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21292 +++ linux-2.6.32.42/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21293 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21294 }
21295
21296 /*
21297 - * Creates a middle page table and puts a pointer to it in the
21298 - * given global directory entry. This only returns the gd entry
21299 - * in non-PAE compilation mode, since the middle layer is folded.
21300 - */
21301 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21302 -{
21303 - pud_t *pud;
21304 - pmd_t *pmd_table;
21305 -
21306 -#ifdef CONFIG_X86_PAE
21307 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21308 - if (after_bootmem)
21309 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21310 - else
21311 - pmd_table = (pmd_t *)alloc_low_page();
21312 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21313 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21314 - pud = pud_offset(pgd, 0);
21315 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21316 -
21317 - return pmd_table;
21318 - }
21319 -#endif
21320 - pud = pud_offset(pgd, 0);
21321 - pmd_table = pmd_offset(pud, 0);
21322 -
21323 - return pmd_table;
21324 -}
21325 -
21326 -/*
21327 * Create a page table and place a pointer to it in a middle page
21328 * directory entry:
21329 */
21330 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21331 page_table = (pte_t *)alloc_low_page();
21332
21333 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21334 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21335 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21336 +#else
21337 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21338 +#endif
21339 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21340 }
21341
21342 return pte_offset_kernel(pmd, 0);
21343 }
21344
21345 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21346 +{
21347 + pud_t *pud;
21348 + pmd_t *pmd_table;
21349 +
21350 + pud = pud_offset(pgd, 0);
21351 + pmd_table = pmd_offset(pud, 0);
21352 +
21353 + return pmd_table;
21354 +}
21355 +
21356 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21357 {
21358 int pgd_idx = pgd_index(vaddr);
21359 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21360 int pgd_idx, pmd_idx;
21361 unsigned long vaddr;
21362 pgd_t *pgd;
21363 + pud_t *pud;
21364 pmd_t *pmd;
21365 pte_t *pte = NULL;
21366
21367 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21368 pgd = pgd_base + pgd_idx;
21369
21370 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21371 - pmd = one_md_table_init(pgd);
21372 - pmd = pmd + pmd_index(vaddr);
21373 + pud = pud_offset(pgd, vaddr);
21374 + pmd = pmd_offset(pud, vaddr);
21375 +
21376 +#ifdef CONFIG_X86_PAE
21377 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21378 +#endif
21379 +
21380 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21381 pmd++, pmd_idx++) {
21382 pte = page_table_kmap_check(one_page_table_init(pmd),
21383 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21384 }
21385 }
21386
21387 -static inline int is_kernel_text(unsigned long addr)
21388 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21389 {
21390 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21391 - return 1;
21392 - return 0;
21393 + if ((start > ktla_ktva((unsigned long)_etext) ||
21394 + end <= ktla_ktva((unsigned long)_stext)) &&
21395 + (start > ktla_ktva((unsigned long)_einittext) ||
21396 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21397 +
21398 +#ifdef CONFIG_ACPI_SLEEP
21399 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21400 +#endif
21401 +
21402 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21403 + return 0;
21404 + return 1;
21405 }
21406
21407 /*
21408 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21409 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21410 unsigned long start_pfn, end_pfn;
21411 pgd_t *pgd_base = swapper_pg_dir;
21412 - int pgd_idx, pmd_idx, pte_ofs;
21413 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21414 unsigned long pfn;
21415 pgd_t *pgd;
21416 + pud_t *pud;
21417 pmd_t *pmd;
21418 pte_t *pte;
21419 unsigned pages_2m, pages_4k;
21420 @@ -278,8 +279,13 @@ repeat:
21421 pfn = start_pfn;
21422 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21423 pgd = pgd_base + pgd_idx;
21424 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21425 - pmd = one_md_table_init(pgd);
21426 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21427 + pud = pud_offset(pgd, 0);
21428 + pmd = pmd_offset(pud, 0);
21429 +
21430 +#ifdef CONFIG_X86_PAE
21431 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21432 +#endif
21433
21434 if (pfn >= end_pfn)
21435 continue;
21436 @@ -291,14 +297,13 @@ repeat:
21437 #endif
21438 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21439 pmd++, pmd_idx++) {
21440 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21441 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21442
21443 /*
21444 * Map with big pages if possible, otherwise
21445 * create normal page tables:
21446 */
21447 if (use_pse) {
21448 - unsigned int addr2;
21449 pgprot_t prot = PAGE_KERNEL_LARGE;
21450 /*
21451 * first pass will use the same initial
21452 @@ -308,11 +313,7 @@ repeat:
21453 __pgprot(PTE_IDENT_ATTR |
21454 _PAGE_PSE);
21455
21456 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21457 - PAGE_OFFSET + PAGE_SIZE-1;
21458 -
21459 - if (is_kernel_text(addr) ||
21460 - is_kernel_text(addr2))
21461 + if (is_kernel_text(address, address + PMD_SIZE))
21462 prot = PAGE_KERNEL_LARGE_EXEC;
21463
21464 pages_2m++;
21465 @@ -329,7 +330,7 @@ repeat:
21466 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21467 pte += pte_ofs;
21468 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21469 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21470 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21471 pgprot_t prot = PAGE_KERNEL;
21472 /*
21473 * first pass will use the same initial
21474 @@ -337,7 +338,7 @@ repeat:
21475 */
21476 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21477
21478 - if (is_kernel_text(addr))
21479 + if (is_kernel_text(address, address + PAGE_SIZE))
21480 prot = PAGE_KERNEL_EXEC;
21481
21482 pages_4k++;
21483 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21484
21485 pud = pud_offset(pgd, va);
21486 pmd = pmd_offset(pud, va);
21487 - if (!pmd_present(*pmd))
21488 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21489 break;
21490
21491 pte = pte_offset_kernel(pmd, va);
21492 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21493
21494 static void __init pagetable_init(void)
21495 {
21496 - pgd_t *pgd_base = swapper_pg_dir;
21497 -
21498 - permanent_kmaps_init(pgd_base);
21499 + permanent_kmaps_init(swapper_pg_dir);
21500 }
21501
21502 #ifdef CONFIG_ACPI_SLEEP
21503 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21504 * ACPI suspend needs this for resume, because things like the intel-agp
21505 * driver might have split up a kernel 4MB mapping.
21506 */
21507 -char swsusp_pg_dir[PAGE_SIZE]
21508 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21509 __attribute__ ((aligned(PAGE_SIZE)));
21510
21511 static inline void save_pg_dir(void)
21512 {
21513 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21514 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21515 }
21516 #else /* !CONFIG_ACPI_SLEEP */
21517 static inline void save_pg_dir(void)
21518 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21519 flush_tlb_all();
21520 }
21521
21522 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21523 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21524 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21525
21526 /* user-defined highmem size */
21527 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21528 * Initialize the boot-time allocator (with low memory only):
21529 */
21530 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21531 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21532 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21533 PAGE_SIZE);
21534 if (bootmap == -1L)
21535 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21536 @@ -864,6 +863,12 @@ void __init mem_init(void)
21537
21538 pci_iommu_alloc();
21539
21540 +#ifdef CONFIG_PAX_PER_CPU_PGD
21541 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21542 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21543 + KERNEL_PGD_PTRS);
21544 +#endif
21545 +
21546 #ifdef CONFIG_FLATMEM
21547 BUG_ON(!mem_map);
21548 #endif
21549 @@ -881,7 +886,7 @@ void __init mem_init(void)
21550 set_highmem_pages_init();
21551
21552 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21553 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21554 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21555 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21556
21557 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21558 @@ -923,10 +928,10 @@ void __init mem_init(void)
21559 ((unsigned long)&__init_end -
21560 (unsigned long)&__init_begin) >> 10,
21561
21562 - (unsigned long)&_etext, (unsigned long)&_edata,
21563 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21564 + (unsigned long)&_sdata, (unsigned long)&_edata,
21565 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21566
21567 - (unsigned long)&_text, (unsigned long)&_etext,
21568 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21569 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21570
21571 /*
21572 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21573 if (!kernel_set_to_readonly)
21574 return;
21575
21576 + start = ktla_ktva(start);
21577 pr_debug("Set kernel text: %lx - %lx for read write\n",
21578 start, start+size);
21579
21580 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21581 if (!kernel_set_to_readonly)
21582 return;
21583
21584 + start = ktla_ktva(start);
21585 pr_debug("Set kernel text: %lx - %lx for read only\n",
21586 start, start+size);
21587
21588 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21589 unsigned long start = PFN_ALIGN(_text);
21590 unsigned long size = PFN_ALIGN(_etext) - start;
21591
21592 + start = ktla_ktva(start);
21593 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21594 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21595 size >> 10);
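
The rewritten is_kernel_text() above takes a [start, end) range and reports whether it overlaps any region that must remain executable (kernel text, init text, the ACPI wakeup trampoline, the low BIOS/video area); the patch writes it as the negation of the usual "no overlap" test. A tiny demonstration of that interval logic with made-up addresses:

    /* Interval overlap, the predicate behind is_kernel_text(start, end):
     * two half-open ranges [a0,a1) and [b0,b1) overlap iff a0 < b1 && b0 < a1. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool overlaps(unsigned long a0, unsigned long a1,
                         unsigned long b0, unsigned long b1)
    {
        return a0 < b1 && b0 < a1;
    }

    int main(void)
    {
        /* illustrative layout: kernel text at [0xc1000000, 0xc1400000) */
        unsigned long text_s = 0xc1000000UL, text_e = 0xc1400000UL;

        printf("%d\n", overlaps(0xc1200000UL, 0xc1200000UL + (4UL << 20), text_s, text_e)); /* 1 */
        printf("%d\n", overlaps(0xc2000000UL, 0xc2000000UL + (4UL << 20), text_s, text_e)); /* 0 */
        return 0;
    }
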
21596 diff -urNp linux-2.6.32.42/arch/x86/mm/init_64.c linux-2.6.32.42/arch/x86/mm/init_64.c
21597 --- linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21598 +++ linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21599 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21600 pmd = fill_pmd(pud, vaddr);
21601 pte = fill_pte(pmd, vaddr);
21602
21603 + pax_open_kernel();
21604 set_pte(pte, new_pte);
21605 + pax_close_kernel();
21606
21607 /*
21608 * It's enough to flush this one mapping.
21609 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21610 pgd = pgd_offset_k((unsigned long)__va(phys));
21611 if (pgd_none(*pgd)) {
21612 pud = (pud_t *) spp_getpage();
21613 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21614 - _PAGE_USER));
21615 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21616 }
21617 pud = pud_offset(pgd, (unsigned long)__va(phys));
21618 if (pud_none(*pud)) {
21619 pmd = (pmd_t *) spp_getpage();
21620 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21621 - _PAGE_USER));
21622 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21623 }
21624 pmd = pmd_offset(pud, phys);
21625 BUG_ON(!pmd_none(*pmd));
21626 @@ -675,6 +675,12 @@ void __init mem_init(void)
21627
21628 pci_iommu_alloc();
21629
21630 +#ifdef CONFIG_PAX_PER_CPU_PGD
21631 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21632 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21633 + KERNEL_PGD_PTRS);
21634 +#endif
21635 +
21636 /* clear_bss() already clear the empty_zero_page */
21637
21638 reservedpages = 0;
21639 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21640 static struct vm_area_struct gate_vma = {
21641 .vm_start = VSYSCALL_START,
21642 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21643 - .vm_page_prot = PAGE_READONLY_EXEC,
21644 - .vm_flags = VM_READ | VM_EXEC
21645 + .vm_page_prot = PAGE_READONLY,
21646 + .vm_flags = VM_READ
21647 };
21648
21649 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21650 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21651
21652 const char *arch_vma_name(struct vm_area_struct *vma)
21653 {
21654 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21655 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21656 return "[vdso]";
21657 if (vma == &gate_vma)
21658 return "[vsyscall]";
21659 diff -urNp linux-2.6.32.42/arch/x86/mm/init.c linux-2.6.32.42/arch/x86/mm/init.c
21660 --- linux-2.6.32.42/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21661 +++ linux-2.6.32.42/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21662 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21663 * cause a hotspot and fill up ZONE_DMA. The page tables
21664 * need roughly 0.5KB per GB.
21665 */
21666 -#ifdef CONFIG_X86_32
21667 - start = 0x7000;
21668 -#else
21669 - start = 0x8000;
21670 -#endif
21671 + start = 0x100000;
21672 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21673 tables, PAGE_SIZE);
21674 if (e820_table_start == -1UL)
21675 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21676 #endif
21677
21678 set_nx();
21679 - if (nx_enabled)
21680 + if (nx_enabled && cpu_has_nx)
21681 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21682
21683 /* Enable PSE if available */
21684 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21685 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21686 * mmio resources as well as potential bios/acpi data regions.
21687 */
21688 +
21689 int devmem_is_allowed(unsigned long pagenr)
21690 {
21691 +#ifdef CONFIG_GRKERNSEC_KMEM
21692 + /* allow BDA */
21693 + if (!pagenr)
21694 + return 1;
21695 + /* allow EBDA */
21696 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21697 + return 1;
21698 + /* allow ISA/video mem */
21699 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21700 + return 1;
21701 + /* throw out everything else below 1MB */
21702 + if (pagenr <= 256)
21703 + return 0;
21704 +#else
21705 if (pagenr <= 256)
21706 return 1;
21707 +#endif
21708 +
21709 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21710 return 0;
21711 if (!page_is_ram(pagenr))
21712 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21713
21714 void free_initmem(void)
21715 {
21716 +
21717 +#ifdef CONFIG_PAX_KERNEXEC
21718 +#ifdef CONFIG_X86_32
21719 + /* PaX: limit KERNEL_CS to actual size */
21720 + unsigned long addr, limit;
21721 + struct desc_struct d;
21722 + int cpu;
21723 +
21724 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21725 + limit = (limit - 1UL) >> PAGE_SHIFT;
21726 +
21727 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21728 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21729 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21730 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21731 + }
21732 +
21733 + /* PaX: make KERNEL_CS read-only */
21734 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21735 + if (!paravirt_enabled())
21736 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21737 +/*
21738 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21739 + pgd = pgd_offset_k(addr);
21740 + pud = pud_offset(pgd, addr);
21741 + pmd = pmd_offset(pud, addr);
21742 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21743 + }
21744 +*/
21745 +#ifdef CONFIG_X86_PAE
21746 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21747 +/*
21748 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21749 + pgd = pgd_offset_k(addr);
21750 + pud = pud_offset(pgd, addr);
21751 + pmd = pmd_offset(pud, addr);
21752 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21753 + }
21754 +*/
21755 +#endif
21756 +
21757 +#ifdef CONFIG_MODULES
21758 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21759 +#endif
21760 +
21761 +#else
21762 + pgd_t *pgd;
21763 + pud_t *pud;
21764 + pmd_t *pmd;
21765 + unsigned long addr, end;
21766 +
21767 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21768 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21769 + pgd = pgd_offset_k(addr);
21770 + pud = pud_offset(pgd, addr);
21771 + pmd = pmd_offset(pud, addr);
21772 + if (!pmd_present(*pmd))
21773 + continue;
21774 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21775 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21776 + else
21777 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21778 + }
21779 +
21780 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21781 + end = addr + KERNEL_IMAGE_SIZE;
21782 + for (; addr < end; addr += PMD_SIZE) {
21783 + pgd = pgd_offset_k(addr);
21784 + pud = pud_offset(pgd, addr);
21785 + pmd = pmd_offset(pud, addr);
21786 + if (!pmd_present(*pmd))
21787 + continue;
21788 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21789 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21790 + }
21791 +#endif
21792 +
21793 + flush_tlb_all();
21794 +#endif
21795 +
21796 free_init_pages("unused kernel memory",
21797 (unsigned long)(&__init_begin),
21798 (unsigned long)(&__init_end));
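
Under CONFIG_GRKERNSEC_KMEM the devmem_is_allowed() hunk above whitelists only the BIOS data area (page 0), the EBDA page and the ISA/video hole for /dev/mem, and rejects the rest of the first megabyte instead of allowing it wholesale. A standalone restatement of that low-memory policy; ISA_START_ADDRESS and ISA_END_ADDRESS are given their usual values of 0xa0000 and 0x100000:

    /* /dev/mem policy for the first megabyte as in the GRKERNSEC_KMEM hunk:
     * allow BDA, EBDA and the ISA/video hole, reject everything else below 1MB. */
    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define ISA_START_ADDRESS 0xa0000UL
    #define ISA_END_ADDRESS   0x100000UL

    static int low_mem_allowed(unsigned long pagenr)
    {
        if (pagenr == 0)                                   /* BIOS data area   */
            return 1;
        if (pagenr == (0x9f000UL >> PAGE_SHIFT))           /* EBDA             */
            return 1;
        if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
            pagenr <  (ISA_END_ADDRESS   >> PAGE_SHIFT))   /* ISA/video hole   */
            return 1;
        return 0;                                          /* rest of low 1MB  */
    }

    int main(void)
    {
        printf("page 0x00: %d\n", low_mem_allowed(0x00));  /* 1 */
        printf("page 0x10: %d\n", low_mem_allowed(0x10));  /* 0 */
        printf("page 0xa0: %d\n", low_mem_allowed(0xa0));  /* 1 */
        return 0;
    }
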
21799 diff -urNp linux-2.6.32.42/arch/x86/mm/iomap_32.c linux-2.6.32.42/arch/x86/mm/iomap_32.c
21800 --- linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21801 +++ linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21802 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21803 debug_kmap_atomic(type);
21804 idx = type + KM_TYPE_NR * smp_processor_id();
21805 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21806 +
21807 + pax_open_kernel();
21808 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21809 + pax_close_kernel();
21810 +
21811 arch_flush_lazy_mmu_mode();
21812
21813 return (void *)vaddr;
21814 diff -urNp linux-2.6.32.42/arch/x86/mm/ioremap.c linux-2.6.32.42/arch/x86/mm/ioremap.c
21815 --- linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21816 +++ linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21817 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21818 * Second special case: Some BIOSen report the PC BIOS
21819 * area (640->1Mb) as ram even though it is not.
21820 */
21821 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21822 - pagenr < (BIOS_END >> PAGE_SHIFT))
21823 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21824 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21825 return 0;
21826
21827 for (i = 0; i < e820.nr_map; i++) {
21828 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21829 /*
21830 * Don't allow anybody to remap normal RAM that we're using..
21831 */
21832 - for (pfn = phys_addr >> PAGE_SHIFT;
21833 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21834 - pfn++) {
21835 -
21836 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21837 int is_ram = page_is_ram(pfn);
21838
21839 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21840 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21841 return NULL;
21842 WARN_ON_ONCE(is_ram);
21843 }
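
The __ioremap_caller() loop above is rewritten so the pfn is widened to resource_size_t before shifting: with PAE a physical address above 4GB has a pfn whose value no longer fits in 32 bits once shifted left by PAGE_SHIFT, so the old unsigned long "pfn << PAGE_SHIFT" comparison wrapped and the loop misjudged its end. A two-line illustration of the wrap:

    /* Why the ioremap loop casts to a 64-bit type before shifting:
     * the first pfn at 4GB (0x100000) overflows a 32-bit shift. */
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pfn = 0x100000;                 /* first pfn at/above 4GB               */
        uint64_t full = pfn << 12;               /* 0x100000000, the intended address    */
        uint32_t truncated = (uint32_t)full;     /* what a 32-bit shift leaves behind: 0 */

        printf("64-bit: %#" PRIx64 "\n32-bit: %#" PRIx32 "\n", full, truncated);
        return 0;
    }
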
21844 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21845 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21846
21847 static __initdata int after_paging_init;
21848 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21849 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21850
21851 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21852 {
21853 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21854 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21855
21856 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21857 - memset(bm_pte, 0, sizeof(bm_pte));
21858 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21859 + pmd_populate_user(&init_mm, pmd, bm_pte);
21860
21861 /*
21862 * The boot-ioremap range spans multiple pmds, for which
21863 diff -urNp linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c
21864 --- linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21865 +++ linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21866 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21867 * memory (e.g. tracked pages)? For now, we need this to avoid
21868 * invoking kmemcheck for PnP BIOS calls.
21869 */
21870 - if (regs->flags & X86_VM_MASK)
21871 + if (v8086_mode(regs))
21872 return false;
21873 - if (regs->cs != __KERNEL_CS)
21874 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21875 return false;
21876
21877 pte = kmemcheck_pte_lookup(address);
21878 diff -urNp linux-2.6.32.42/arch/x86/mm/mmap.c linux-2.6.32.42/arch/x86/mm/mmap.c
21879 --- linux-2.6.32.42/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21880 +++ linux-2.6.32.42/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21881 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21882 * Leave an at least ~128 MB hole with possible stack randomization.
21883 */
21884 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21885 -#define MAX_GAP (TASK_SIZE/6*5)
21886 +#define MAX_GAP (pax_task_size/6*5)
21887
21888 /*
21889 * True on X86_32 or when emulating IA32 on X86_64
21890 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21891 return rnd << PAGE_SHIFT;
21892 }
21893
21894 -static unsigned long mmap_base(void)
21895 +static unsigned long mmap_base(struct mm_struct *mm)
21896 {
21897 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21898 + unsigned long pax_task_size = TASK_SIZE;
21899 +
21900 +#ifdef CONFIG_PAX_SEGMEXEC
21901 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21902 + pax_task_size = SEGMEXEC_TASK_SIZE;
21903 +#endif
21904
21905 if (gap < MIN_GAP)
21906 gap = MIN_GAP;
21907 else if (gap > MAX_GAP)
21908 gap = MAX_GAP;
21909
21910 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21911 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21912 }
21913
21914 /*
21915 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21916 * does, but not when emulating X86_32
21917 */
21918 -static unsigned long mmap_legacy_base(void)
21919 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21920 {
21921 - if (mmap_is_ia32())
21922 + if (mmap_is_ia32()) {
21923 +
21924 +#ifdef CONFIG_PAX_SEGMEXEC
21925 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21926 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21927 + else
21928 +#endif
21929 +
21930 return TASK_UNMAPPED_BASE;
21931 - else
21932 + } else
21933 return TASK_UNMAPPED_BASE + mmap_rnd();
21934 }
21935
21936 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21937 void arch_pick_mmap_layout(struct mm_struct *mm)
21938 {
21939 if (mmap_is_legacy()) {
21940 - mm->mmap_base = mmap_legacy_base();
21941 + mm->mmap_base = mmap_legacy_base(mm);
21942 +
21943 +#ifdef CONFIG_PAX_RANDMMAP
21944 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21945 + mm->mmap_base += mm->delta_mmap;
21946 +#endif
21947 +
21948 mm->get_unmapped_area = arch_get_unmapped_area;
21949 mm->unmap_area = arch_unmap_area;
21950 } else {
21951 - mm->mmap_base = mmap_base();
21952 + mm->mmap_base = mmap_base(mm);
21953 +
21954 +#ifdef CONFIG_PAX_RANDMMAP
21955 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21956 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21957 +#endif
21958 +
21959 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21960 mm->unmap_area = arch_unmap_area_topdown;
21961 }
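
mmap_base() above clamps the stack rlimit into [MIN_GAP, MAX_GAP] and subtracts it, plus the mmap randomisation, from the task size to place the top-down mmap base; the PaX change only swaps TASK_SIZE for the SEGMEXEC-aware task size. A compact userspace restatement of that arithmetic with illustrative inputs (the kernel's MIN_GAP also adds stack_maxrandom_size(), omitted here):

    /* Top-down mmap base as computed above: clamp the stack gap, then
     * place the base gap+random below the top of the user address space. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))   /* round up */

    static unsigned long mmap_base(unsigned long task_size, unsigned long stack_rlimit,
                                   unsigned long rnd)
    {
        const unsigned long min_gap = 128UL << 20;
        const unsigned long max_gap = task_size / 6 * 5;
        unsigned long gap = stack_rlimit;

        if (gap < min_gap)
            gap = min_gap;
        else if (gap > max_gap)
            gap = max_gap;

        return PAGE_ALIGN(task_size - gap - rnd);
    }

    int main(void)
    {
        /* 3GB user space, 8MB stack rlimit, 4MB of mmap randomisation */
        printf("%#lx\n", mmap_base(0xc0000000UL, 8UL << 20, 4UL << 20)); /* 0xb7c00000 */
        return 0;
    }
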
21962 diff -urNp linux-2.6.32.42/arch/x86/mm/mmio-mod.c linux-2.6.32.42/arch/x86/mm/mmio-mod.c
21963 --- linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21964 +++ linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21965 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21966 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21967 void __iomem *addr)
21968 {
21969 - static atomic_t next_id;
21970 + static atomic_unchecked_t next_id;
21971 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21972 /* These are page-unaligned. */
21973 struct mmiotrace_map map = {
21974 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21975 .private = trace
21976 },
21977 .phys = offset,
21978 - .id = atomic_inc_return(&next_id)
21979 + .id = atomic_inc_return_unchecked(&next_id)
21980 };
21981 map.map_id = trace->id;
21982
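
ioremap_trace_core() above switches its next_id counter from atomic_t to atomic_unchecked_t: with PaX's reference-count overflow protection, plain atomic_t operations are overflow-checked, and counters that merely hand out IDs, where wrapping is harmless, are annotated as "unchecked" to opt out. A userspace sketch of such an ID generator using C11 atomics; the type and function names here are illustrative:

    /* An ID counter in the spirit of atomic_unchecked_t: increments may wrap,
     * which is fine because the value is only a label, not a reference count. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint next_id = 0;

    static unsigned int new_trace_id(void)
    {
        /* like atomic_inc_return_unchecked(): wrap-around is not an error here */
        return atomic_fetch_add_explicit(&next_id, 1, memory_order_relaxed) + 1;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("trace id %u\n", new_trace_id());
        return 0;
    }
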
21983 diff -urNp linux-2.6.32.42/arch/x86/mm/numa_32.c linux-2.6.32.42/arch/x86/mm/numa_32.c
21984 --- linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21985 +++ linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21986 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21987 }
21988 #endif
21989
21990 -extern unsigned long find_max_low_pfn(void);
21991 extern unsigned long highend_pfn, highstart_pfn;
21992
21993 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21994 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr.c linux-2.6.32.42/arch/x86/mm/pageattr.c
21995 --- linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21996 +++ linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21997 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21998 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21999 */
22000 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22001 - pgprot_val(forbidden) |= _PAGE_NX;
22002 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22003
22004 /*
22005 * The kernel text needs to be executable for obvious reasons
22006 * Does not cover __inittext since that is gone later on. On
22007 * 64bit we do not enforce !NX on the low mapping
22008 */
22009 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22010 - pgprot_val(forbidden) |= _PAGE_NX;
22011 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22012 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22013
22014 +#ifdef CONFIG_DEBUG_RODATA
22015 /*
22016 * The .rodata section needs to be read-only. Using the pfn
22017 * catches all aliases.
22018 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22019 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22020 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22021 pgprot_val(forbidden) |= _PAGE_RW;
22022 +#endif
22023 +
22024 +#ifdef CONFIG_PAX_KERNEXEC
22025 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22026 + pgprot_val(forbidden) |= _PAGE_RW;
22027 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22028 + }
22029 +#endif
22030
22031 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22032
22033 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22034 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22035 {
22036 /* change init_mm */
22037 + pax_open_kernel();
22038 set_pte_atomic(kpte, pte);
22039 +
22040 #ifdef CONFIG_X86_32
22041 if (!SHARED_KERNEL_PMD) {
22042 +
22043 +#ifdef CONFIG_PAX_PER_CPU_PGD
22044 + unsigned long cpu;
22045 +#else
22046 struct page *page;
22047 +#endif
22048
22049 +#ifdef CONFIG_PAX_PER_CPU_PGD
22050 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22051 + pgd_t *pgd = get_cpu_pgd(cpu);
22052 +#else
22053 list_for_each_entry(page, &pgd_list, lru) {
22054 - pgd_t *pgd;
22055 + pgd_t *pgd = (pgd_t *)page_address(page);
22056 +#endif
22057 +
22058 pud_t *pud;
22059 pmd_t *pmd;
22060
22061 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22062 + pgd += pgd_index(address);
22063 pud = pud_offset(pgd, address);
22064 pmd = pmd_offset(pud, address);
22065 set_pte_atomic((pte_t *)pmd, pte);
22066 }
22067 }
22068 #endif
22069 + pax_close_kernel();
22070 }
22071
22072 static int
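
static_protections() above collects a "forbidden" bit set for each address/pfn (NX for the BIOS range, RW for .rodata, and with KERNEXEC both RW and NX for the kernel text/rodata image) and strips those bits from the requested protection; every _PAGE_NX is additionally masked with __supported_pte_mask so NX is only applied when the CPU has it. A small sketch of that mask-and-strip pattern with the standard x86 PTE bit positions; supported_pte_mask and apply_protections() stand in for the kernel names:

    /* The static_protections() pattern: collect forbidden bits for a mapping,
     * then clear them from the caller's requested protection. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_RW   (1ULL << 1)
    #define _PAGE_NX   (1ULL << 63)

    static uint64_t apply_protections(uint64_t prot, bool in_kernel_text, bool nx_supported)
    {
        uint64_t supported_pte_mask = nx_supported ? ~0ULL : ~_PAGE_NX;
        uint64_t forbidden = 0;

        if (in_kernel_text) {
            forbidden |= _PAGE_RW;                        /* text must stay read-only */
            forbidden |= _PAGE_NX & supported_pte_mask;   /* ...and executable        */
        }
        return prot & ~forbidden;
    }

    int main(void)
    {
        uint64_t req = _PAGE_RW | _PAGE_NX;   /* caller asks for writable, no-exec */

        printf("%#llx\n", (unsigned long long)apply_protections(req, true,  true));  /* 0     */
        printf("%#llx\n", (unsigned long long)apply_protections(req, false, true));  /* RW|NX */
        return 0;
    }
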
22073 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr-test.c linux-2.6.32.42/arch/x86/mm/pageattr-test.c
22074 --- linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22075 +++ linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22076 @@ -36,7 +36,7 @@ enum {
22077
22078 static int pte_testbit(pte_t pte)
22079 {
22080 - return pte_flags(pte) & _PAGE_UNUSED1;
22081 + return pte_flags(pte) & _PAGE_CPA_TEST;
22082 }
22083
22084 struct split_state {
22085 diff -urNp linux-2.6.32.42/arch/x86/mm/pat.c linux-2.6.32.42/arch/x86/mm/pat.c
22086 --- linux-2.6.32.42/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22087 +++ linux-2.6.32.42/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22088 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22089
22090 conflict:
22091 printk(KERN_INFO "%s:%d conflicting memory types "
22092 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22093 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22094 new->end, cattr_name(new->type), cattr_name(entry->type));
22095 return -EBUSY;
22096 }
22097 @@ -559,7 +559,7 @@ unlock_ret:
22098
22099 if (err) {
22100 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22101 - current->comm, current->pid, start, end);
22102 + current->comm, task_pid_nr(current), start, end);
22103 }
22104
22105 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22106 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22107 while (cursor < to) {
22108 if (!devmem_is_allowed(pfn)) {
22109 printk(KERN_INFO
22110 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22111 - current->comm, from, to);
22112 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22113 + current->comm, from, to, cursor);
22114 return 0;
22115 }
22116 cursor += PAGE_SIZE;
22117 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22118 printk(KERN_INFO
22119 "%s:%d ioremap_change_attr failed %s "
22120 "for %Lx-%Lx\n",
22121 - current->comm, current->pid,
22122 + current->comm, task_pid_nr(current),
22123 cattr_name(flags),
22124 base, (unsigned long long)(base + size));
22125 return -EINVAL;
22126 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22127 free_memtype(paddr, paddr + size);
22128 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22129 " for %Lx-%Lx, got %s\n",
22130 - current->comm, current->pid,
22131 + current->comm, task_pid_nr(current),
22132 cattr_name(want_flags),
22133 (unsigned long long)paddr,
22134 (unsigned long long)(paddr + size),
22135 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable_32.c linux-2.6.32.42/arch/x86/mm/pgtable_32.c
22136 --- linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22137 +++ linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22138 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22139 return;
22140 }
22141 pte = pte_offset_kernel(pmd, vaddr);
22142 +
22143 + pax_open_kernel();
22144 if (pte_val(pteval))
22145 set_pte_at(&init_mm, vaddr, pte, pteval);
22146 else
22147 pte_clear(&init_mm, vaddr, pte);
22148 + pax_close_kernel();
22149
22150 /*
22151 * It's enough to flush this one mapping.
22152 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable.c linux-2.6.32.42/arch/x86/mm/pgtable.c
22153 --- linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22154 +++ linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22155 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22156 list_del(&page->lru);
22157 }
22158
22159 -#define UNSHARED_PTRS_PER_PGD \
22160 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22161 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22162 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22163
22164 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22165 +{
22166 + while (count--)
22167 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22168 +}
22169 +#endif
22170 +
22171 +#ifdef CONFIG_PAX_PER_CPU_PGD
22172 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22173 +{
22174 + while (count--)
22175 +
22176 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22177 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22178 +#else
22179 + *dst++ = *src++;
22180 +#endif
22181 +
22182 +}
22183 +#endif
22184 +
22185 +#ifdef CONFIG_X86_64
22186 +#define pxd_t pud_t
22187 +#define pyd_t pgd_t
22188 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22189 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22190 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22191 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22192 +#define PYD_SIZE PGDIR_SIZE
22193 +#else
22194 +#define pxd_t pmd_t
22195 +#define pyd_t pud_t
22196 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22197 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22198 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22199 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22200 +#define PYD_SIZE PUD_SIZE
22201 +#endif
22202 +
22203 +#ifdef CONFIG_PAX_PER_CPU_PGD
22204 +static inline void pgd_ctor(pgd_t *pgd) {}
22205 +static inline void pgd_dtor(pgd_t *pgd) {}
22206 +#else
22207 static void pgd_ctor(pgd_t *pgd)
22208 {
22209 /* If the pgd points to a shared pagetable level (either the
22210 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22211 pgd_list_del(pgd);
22212 spin_unlock_irqrestore(&pgd_lock, flags);
22213 }
22214 +#endif
22215
22216 /*
22217 * List of all pgd's needed for non-PAE so it can invalidate entries
22218 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22219 * -- wli
22220 */
22221
22222 -#ifdef CONFIG_X86_PAE
22223 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22224 /*
22225 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22226 * updating the top-level pagetable entries to guarantee the
22227 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22228 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22229 * and initialize the kernel pmds here.
22230 */
22231 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22232 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22233
22234 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22235 {
22236 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22237 */
22238 flush_tlb_mm(mm);
22239 }
22240 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22241 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22242 #else /* !CONFIG_X86_PAE */
22243
22244 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22245 -#define PREALLOCATED_PMDS 0
22246 +#define PREALLOCATED_PXDS 0
22247
22248 #endif /* CONFIG_X86_PAE */
22249
22250 -static void free_pmds(pmd_t *pmds[])
22251 +static void free_pxds(pxd_t *pxds[])
22252 {
22253 int i;
22254
22255 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22256 - if (pmds[i])
22257 - free_page((unsigned long)pmds[i]);
22258 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22259 + if (pxds[i])
22260 + free_page((unsigned long)pxds[i]);
22261 }
22262
22263 -static int preallocate_pmds(pmd_t *pmds[])
22264 +static int preallocate_pxds(pxd_t *pxds[])
22265 {
22266 int i;
22267 bool failed = false;
22268
22269 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22270 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22271 - if (pmd == NULL)
22272 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22273 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22274 + if (pxd == NULL)
22275 failed = true;
22276 - pmds[i] = pmd;
22277 + pxds[i] = pxd;
22278 }
22279
22280 if (failed) {
22281 - free_pmds(pmds);
22282 + free_pxds(pxds);
22283 return -ENOMEM;
22284 }
22285
22286 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22287 * preallocate which never got a corresponding vma will need to be
22288 * freed manually.
22289 */
22290 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22291 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22292 {
22293 int i;
22294
22295 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22296 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22297 pgd_t pgd = pgdp[i];
22298
22299 if (pgd_val(pgd) != 0) {
22300 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22301 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22302
22303 - pgdp[i] = native_make_pgd(0);
22304 + set_pgd(pgdp + i, native_make_pgd(0));
22305
22306 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22307 - pmd_free(mm, pmd);
22308 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22309 + pxd_free(mm, pxd);
22310 }
22311 }
22312 }
22313
22314 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22315 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22316 {
22317 - pud_t *pud;
22318 + pyd_t *pyd;
22319 unsigned long addr;
22320 int i;
22321
22322 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22323 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22324 return;
22325
22326 - pud = pud_offset(pgd, 0);
22327 +#ifdef CONFIG_X86_64
22328 + pyd = pyd_offset(mm, 0L);
22329 +#else
22330 + pyd = pyd_offset(pgd, 0L);
22331 +#endif
22332
22333 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22334 - i++, pud++, addr += PUD_SIZE) {
22335 - pmd_t *pmd = pmds[i];
22336 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22337 + i++, pyd++, addr += PYD_SIZE) {
22338 + pxd_t *pxd = pxds[i];
22339
22340 if (i >= KERNEL_PGD_BOUNDARY)
22341 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22342 - sizeof(pmd_t) * PTRS_PER_PMD);
22343 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22344 + sizeof(pxd_t) * PTRS_PER_PMD);
22345
22346 - pud_populate(mm, pud, pmd);
22347 + pyd_populate(mm, pyd, pxd);
22348 }
22349 }
22350
22351 pgd_t *pgd_alloc(struct mm_struct *mm)
22352 {
22353 pgd_t *pgd;
22354 - pmd_t *pmds[PREALLOCATED_PMDS];
22355 + pxd_t *pxds[PREALLOCATED_PXDS];
22356 +
22357 unsigned long flags;
22358
22359 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22360 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22361
22362 mm->pgd = pgd;
22363
22364 - if (preallocate_pmds(pmds) != 0)
22365 + if (preallocate_pxds(pxds) != 0)
22366 goto out_free_pgd;
22367
22368 if (paravirt_pgd_alloc(mm) != 0)
22369 - goto out_free_pmds;
22370 + goto out_free_pxds;
22371
22372 /*
22373 * Make sure that pre-populating the pmds is atomic with
22374 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22375 spin_lock_irqsave(&pgd_lock, flags);
22376
22377 pgd_ctor(pgd);
22378 - pgd_prepopulate_pmd(mm, pgd, pmds);
22379 + pgd_prepopulate_pxd(mm, pgd, pxds);
22380
22381 spin_unlock_irqrestore(&pgd_lock, flags);
22382
22383 return pgd;
22384
22385 -out_free_pmds:
22386 - free_pmds(pmds);
22387 +out_free_pxds:
22388 + free_pxds(pxds);
22389 out_free_pgd:
22390 free_page((unsigned long)pgd);
22391 out:
22392 @@ -287,7 +338,7 @@ out:
22393
22394 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22395 {
22396 - pgd_mop_up_pmds(mm, pgd);
22397 + pgd_mop_up_pxds(mm, pgd);
22398 pgd_dtor(pgd);
22399 paravirt_pgd_free(mm, pgd);
22400 free_page((unsigned long)pgd);
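A minimal standalone sketch, not part of the patch, of the aliasing technique the pgtable.c hunk above relies on: by redirecting a type name and its allocator through macros, one helper body compiles against either page-table level (pmd on 32-bit, pud on 64-bit). The names level_a_t, level_b_t, pxd_alloc and preallocate_one are invented for this illustration.

/*
 * Illustration only -- not the patch's code.  One function body is
 * instantiated against either of two "levels" purely through macros,
 * mirroring the pxd_t/pyd_t defines above.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned long val; } level_a_t;       /* stand-in for pmd_t */
typedef struct { unsigned long val; } level_b_t;       /* stand-in for pud_t */

#ifdef USE_UPPER_LEVEL
#define pxd_t           level_b_t
#define pxd_alloc()     calloc(1, sizeof(level_b_t))
#else
#define pxd_t           level_a_t
#define pxd_alloc()     calloc(1, sizeof(level_a_t))
#endif

/* One body; the level it operates on is chosen at compile time. */
static pxd_t *preallocate_one(void)
{
        pxd_t *p = pxd_alloc();

        if (!p)
                return NULL;
        p->val = 0;
        return p;
}

int main(void)
{
        pxd_t *p = preallocate_one();

        if (p)
                printf("allocated a %zu-byte entry\n", sizeof(*p));
        free(p);
        return 0;
}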
22401 diff -urNp linux-2.6.32.42/arch/x86/mm/setup_nx.c linux-2.6.32.42/arch/x86/mm/setup_nx.c
22402 --- linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22403 +++ linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22404 @@ -4,11 +4,10 @@
22405
22406 #include <asm/pgtable.h>
22407
22408 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22409 int nx_enabled;
22410
22411 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22412 -static int disable_nx __cpuinitdata;
22413 -
22414 +#ifndef CONFIG_PAX_PAGEEXEC
22415 /*
22416 * noexec = on|off
22417 *
22418 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22419 if (!str)
22420 return -EINVAL;
22421 if (!strncmp(str, "on", 2)) {
22422 - __supported_pte_mask |= _PAGE_NX;
22423 - disable_nx = 0;
22424 + nx_enabled = 1;
22425 } else if (!strncmp(str, "off", 3)) {
22426 - disable_nx = 1;
22427 - __supported_pte_mask &= ~_PAGE_NX;
22428 + nx_enabled = 0;
22429 }
22430 return 0;
22431 }
22432 early_param("noexec", noexec_setup);
22433 #endif
22434 +#endif
22435
22436 #ifdef CONFIG_X86_PAE
22437 void __init set_nx(void)
22438 {
22439 - unsigned int v[4], l, h;
22440 + if (!nx_enabled && cpu_has_nx) {
22441 + unsigned l, h;
22442
22443 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22444 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22445 -
22446 - if ((v[3] & (1 << 20)) && !disable_nx) {
22447 - rdmsr(MSR_EFER, l, h);
22448 - l |= EFER_NX;
22449 - wrmsr(MSR_EFER, l, h);
22450 - nx_enabled = 1;
22451 - __supported_pte_mask |= _PAGE_NX;
22452 - }
22453 + __supported_pte_mask &= ~_PAGE_NX;
22454 + rdmsr(MSR_EFER, l, h);
22455 + l &= ~EFER_NX;
22456 + wrmsr(MSR_EFER, l, h);
22457 }
22458 }
22459 #else
22460 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22461 unsigned long efer;
22462
22463 rdmsrl(MSR_EFER, efer);
22464 - if (!(efer & EFER_NX) || disable_nx)
22465 + if (!(efer & EFER_NX) || !nx_enabled)
22466 __supported_pte_mask &= ~_PAGE_NX;
22467 }
22468 #endif
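An illustrative sketch, not kernel code, of the EFER.NX manipulation the setup_nx.c hunk above performs; fake_efer, rd_efer and wr_efer are stand-ins invented here for the privileged rdmsr/wrmsr accessors.

/*
 * Sketch only: models clearing the NX-enable bit in an EFER image, the
 * same direction the PAE path above takes when nx_enabled is not set.
 */
#include <stdint.h>
#include <stdio.h>

#define EFER_NX (1ULL << 11)            /* NXE bit of IA32_EFER */

static uint64_t fake_efer = EFER_NX;    /* pretend firmware left NX on */

static uint64_t rd_efer(void)       { return fake_efer; }
static void     wr_efer(uint64_t v) { fake_efer = v; }

int main(void)
{
        int nx_enabled = 0;
        uint64_t efer = rd_efer();

        if (!nx_enabled)
                efer &= ~EFER_NX;
        wr_efer(efer);

        printf("EFER image %#llx, NX %s\n",
               (unsigned long long)rd_efer(),
               (rd_efer() & EFER_NX) ? "on" : "off");
        return 0;
}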
22469 diff -urNp linux-2.6.32.42/arch/x86/mm/tlb.c linux-2.6.32.42/arch/x86/mm/tlb.c
22470 --- linux-2.6.32.42/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22471 +++ linux-2.6.32.42/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22472 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
22473 BUG();
22474 cpumask_clear_cpu(cpu,
22475 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22476 +
22477 +#ifndef CONFIG_PAX_PER_CPU_PGD
22478 load_cr3(swapper_pg_dir);
22479 +#endif
22480 +
22481 }
22482 EXPORT_SYMBOL_GPL(leave_mm);
22483
22484 diff -urNp linux-2.6.32.42/arch/x86/oprofile/backtrace.c linux-2.6.32.42/arch/x86/oprofile/backtrace.c
22485 --- linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22486 +++ linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22487 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22488 struct frame_head bufhead[2];
22489
22490 /* Also check accessibility of one struct frame_head beyond */
22491 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22492 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22493 return NULL;
22494 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22495 return NULL;
22496 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22497 {
22498 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22499
22500 - if (!user_mode_vm(regs)) {
22501 + if (!user_mode(regs)) {
22502 unsigned long stack = kernel_stack_pointer(regs);
22503 if (depth)
22504 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22505 diff -urNp linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c
22506 --- linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22507 +++ linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22508 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22509 #endif
22510 }
22511
22512 -static int inline addr_increment(void)
22513 +static inline int addr_increment(void)
22514 {
22515 #ifdef CONFIG_SMP
22516 return smp_num_siblings == 2 ? 2 : 1;
22517 diff -urNp linux-2.6.32.42/arch/x86/pci/common.c linux-2.6.32.42/arch/x86/pci/common.c
22518 --- linux-2.6.32.42/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22519 +++ linux-2.6.32.42/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22520 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
22521 int pcibios_last_bus = -1;
22522 unsigned long pirq_table_addr;
22523 struct pci_bus *pci_root_bus;
22524 -struct pci_raw_ops *raw_pci_ops;
22525 -struct pci_raw_ops *raw_pci_ext_ops;
22526 +const struct pci_raw_ops *raw_pci_ops;
22527 +const struct pci_raw_ops *raw_pci_ext_ops;
22528
22529 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22530 int reg, int len, u32 *val)
22531 diff -urNp linux-2.6.32.42/arch/x86/pci/direct.c linux-2.6.32.42/arch/x86/pci/direct.c
22532 --- linux-2.6.32.42/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22533 +++ linux-2.6.32.42/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22534 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22535
22536 #undef PCI_CONF1_ADDRESS
22537
22538 -struct pci_raw_ops pci_direct_conf1 = {
22539 +const struct pci_raw_ops pci_direct_conf1 = {
22540 .read = pci_conf1_read,
22541 .write = pci_conf1_write,
22542 };
22543 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22544
22545 #undef PCI_CONF2_ADDRESS
22546
22547 -struct pci_raw_ops pci_direct_conf2 = {
22548 +const struct pci_raw_ops pci_direct_conf2 = {
22549 .read = pci_conf2_read,
22550 .write = pci_conf2_write,
22551 };
22552 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22553 * This should be close to trivial, but it isn't, because there are buggy
22554 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22555 */
22556 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22557 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22558 {
22559 u32 x = 0;
22560 int year, devfn;
22561 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_32.c linux-2.6.32.42/arch/x86/pci/mmconfig_32.c
22562 --- linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22563 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22564 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22565 return 0;
22566 }
22567
22568 -static struct pci_raw_ops pci_mmcfg = {
22569 +static const struct pci_raw_ops pci_mmcfg = {
22570 .read = pci_mmcfg_read,
22571 .write = pci_mmcfg_write,
22572 };
22573 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_64.c linux-2.6.32.42/arch/x86/pci/mmconfig_64.c
22574 --- linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22575 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22576 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22577 return 0;
22578 }
22579
22580 -static struct pci_raw_ops pci_mmcfg = {
22581 +static const struct pci_raw_ops pci_mmcfg = {
22582 .read = pci_mmcfg_read,
22583 .write = pci_mmcfg_write,
22584 };
22585 diff -urNp linux-2.6.32.42/arch/x86/pci/numaq_32.c linux-2.6.32.42/arch/x86/pci/numaq_32.c
22586 --- linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22587 +++ linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22588 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22589
22590 #undef PCI_CONF1_MQ_ADDRESS
22591
22592 -static struct pci_raw_ops pci_direct_conf1_mq = {
22593 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22594 .read = pci_conf1_mq_read,
22595 .write = pci_conf1_mq_write
22596 };
22597 diff -urNp linux-2.6.32.42/arch/x86/pci/olpc.c linux-2.6.32.42/arch/x86/pci/olpc.c
22598 --- linux-2.6.32.42/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22599 +++ linux-2.6.32.42/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22600 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22601 return 0;
22602 }
22603
22604 -static struct pci_raw_ops pci_olpc_conf = {
22605 +static const struct pci_raw_ops pci_olpc_conf = {
22606 .read = pci_olpc_read,
22607 .write = pci_olpc_write,
22608 };
22609 diff -urNp linux-2.6.32.42/arch/x86/pci/pcbios.c linux-2.6.32.42/arch/x86/pci/pcbios.c
22610 --- linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22611 +++ linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22612 @@ -56,50 +56,93 @@ union bios32 {
22613 static struct {
22614 unsigned long address;
22615 unsigned short segment;
22616 -} bios32_indirect = { 0, __KERNEL_CS };
22617 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22618
22619 /*
22620 * Returns the entry point for the given service, NULL on error
22621 */
22622
22623 -static unsigned long bios32_service(unsigned long service)
22624 +static unsigned long __devinit bios32_service(unsigned long service)
22625 {
22626 unsigned char return_code; /* %al */
22627 unsigned long address; /* %ebx */
22628 unsigned long length; /* %ecx */
22629 unsigned long entry; /* %edx */
22630 unsigned long flags;
22631 + struct desc_struct d, *gdt;
22632
22633 local_irq_save(flags);
22634 - __asm__("lcall *(%%edi); cld"
22635 +
22636 + gdt = get_cpu_gdt_table(smp_processor_id());
22637 +
22638 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22639 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22640 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22641 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22642 +
22643 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22644 : "=a" (return_code),
22645 "=b" (address),
22646 "=c" (length),
22647 "=d" (entry)
22648 : "0" (service),
22649 "1" (0),
22650 - "D" (&bios32_indirect));
22651 + "D" (&bios32_indirect),
22652 + "r"(__PCIBIOS_DS)
22653 + : "memory");
22654 +
22655 + pax_open_kernel();
22656 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22657 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22658 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22659 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22660 + pax_close_kernel();
22661 +
22662 local_irq_restore(flags);
22663
22664 switch (return_code) {
22665 - case 0:
22666 - return address + entry;
22667 - case 0x80: /* Not present */
22668 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22669 - return 0;
22670 - default: /* Shouldn't happen */
22671 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22672 - service, return_code);
22673 + case 0: {
22674 + int cpu;
22675 + unsigned char flags;
22676 +
22677 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22678 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22679 + printk(KERN_WARNING "bios32_service: not valid\n");
22680 return 0;
22681 + }
22682 + address = address + PAGE_OFFSET;
22683 + length += 16UL; /* some BIOSs underreport this... */
22684 + flags = 4;
22685 + if (length >= 64*1024*1024) {
22686 + length >>= PAGE_SHIFT;
22687 + flags |= 8;
22688 + }
22689 +
22690 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22691 + gdt = get_cpu_gdt_table(cpu);
22692 + pack_descriptor(&d, address, length, 0x9b, flags);
22693 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22694 + pack_descriptor(&d, address, length, 0x93, flags);
22695 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22696 + }
22697 + return entry;
22698 + }
22699 + case 0x80: /* Not present */
22700 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22701 + return 0;
22702 + default: /* Shouldn't happen */
22703 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22704 + service, return_code);
22705 + return 0;
22706 }
22707 }
22708
22709 static struct {
22710 unsigned long address;
22711 unsigned short segment;
22712 -} pci_indirect = { 0, __KERNEL_CS };
22713 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22714
22715 -static int pci_bios_present;
22716 +static int pci_bios_present __read_only;
22717
22718 static int __devinit check_pcibios(void)
22719 {
22720 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22721 unsigned long flags, pcibios_entry;
22722
22723 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22724 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22725 + pci_indirect.address = pcibios_entry;
22726
22727 local_irq_save(flags);
22728 - __asm__(
22729 - "lcall *(%%edi); cld\n\t"
22730 + __asm__("movw %w6, %%ds\n\t"
22731 + "lcall *%%ss:(%%edi); cld\n\t"
22732 + "push %%ss\n\t"
22733 + "pop %%ds\n\t"
22734 "jc 1f\n\t"
22735 "xor %%ah, %%ah\n"
22736 "1:"
22737 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22738 "=b" (ebx),
22739 "=c" (ecx)
22740 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22741 - "D" (&pci_indirect)
22742 + "D" (&pci_indirect),
22743 + "r" (__PCIBIOS_DS)
22744 : "memory");
22745 local_irq_restore(flags);
22746
22747 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22748
22749 switch (len) {
22750 case 1:
22751 - __asm__("lcall *(%%esi); cld\n\t"
22752 + __asm__("movw %w6, %%ds\n\t"
22753 + "lcall *%%ss:(%%esi); cld\n\t"
22754 + "push %%ss\n\t"
22755 + "pop %%ds\n\t"
22756 "jc 1f\n\t"
22757 "xor %%ah, %%ah\n"
22758 "1:"
22759 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22760 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22761 "b" (bx),
22762 "D" ((long)reg),
22763 - "S" (&pci_indirect));
22764 + "S" (&pci_indirect),
22765 + "r" (__PCIBIOS_DS));
22766 /*
22767 * Zero-extend the result beyond 8 bits, do not trust the
22768 * BIOS having done it:
22769 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22770 *value &= 0xff;
22771 break;
22772 case 2:
22773 - __asm__("lcall *(%%esi); cld\n\t"
22774 + __asm__("movw %w6, %%ds\n\t"
22775 + "lcall *%%ss:(%%esi); cld\n\t"
22776 + "push %%ss\n\t"
22777 + "pop %%ds\n\t"
22778 "jc 1f\n\t"
22779 "xor %%ah, %%ah\n"
22780 "1:"
22781 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22782 : "1" (PCIBIOS_READ_CONFIG_WORD),
22783 "b" (bx),
22784 "D" ((long)reg),
22785 - "S" (&pci_indirect));
22786 + "S" (&pci_indirect),
22787 + "r" (__PCIBIOS_DS));
22788 /*
22789 * Zero-extend the result beyond 16 bits, do not trust the
22790 * BIOS having done it:
22791 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22792 *value &= 0xffff;
22793 break;
22794 case 4:
22795 - __asm__("lcall *(%%esi); cld\n\t"
22796 + __asm__("movw %w6, %%ds\n\t"
22797 + "lcall *%%ss:(%%esi); cld\n\t"
22798 + "push %%ss\n\t"
22799 + "pop %%ds\n\t"
22800 "jc 1f\n\t"
22801 "xor %%ah, %%ah\n"
22802 "1:"
22803 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22804 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22805 "b" (bx),
22806 "D" ((long)reg),
22807 - "S" (&pci_indirect));
22808 + "S" (&pci_indirect),
22809 + "r" (__PCIBIOS_DS));
22810 break;
22811 }
22812
22813 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22814
22815 switch (len) {
22816 case 1:
22817 - __asm__("lcall *(%%esi); cld\n\t"
22818 + __asm__("movw %w6, %%ds\n\t"
22819 + "lcall *%%ss:(%%esi); cld\n\t"
22820 + "push %%ss\n\t"
22821 + "pop %%ds\n\t"
22822 "jc 1f\n\t"
22823 "xor %%ah, %%ah\n"
22824 "1:"
22825 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22826 "c" (value),
22827 "b" (bx),
22828 "D" ((long)reg),
22829 - "S" (&pci_indirect));
22830 + "S" (&pci_indirect),
22831 + "r" (__PCIBIOS_DS));
22832 break;
22833 case 2:
22834 - __asm__("lcall *(%%esi); cld\n\t"
22835 + __asm__("movw %w6, %%ds\n\t"
22836 + "lcall *%%ss:(%%esi); cld\n\t"
22837 + "push %%ss\n\t"
22838 + "pop %%ds\n\t"
22839 "jc 1f\n\t"
22840 "xor %%ah, %%ah\n"
22841 "1:"
22842 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22843 "c" (value),
22844 "b" (bx),
22845 "D" ((long)reg),
22846 - "S" (&pci_indirect));
22847 + "S" (&pci_indirect),
22848 + "r" (__PCIBIOS_DS));
22849 break;
22850 case 4:
22851 - __asm__("lcall *(%%esi); cld\n\t"
22852 + __asm__("movw %w6, %%ds\n\t"
22853 + "lcall *%%ss:(%%esi); cld\n\t"
22854 + "push %%ss\n\t"
22855 + "pop %%ds\n\t"
22856 "jc 1f\n\t"
22857 "xor %%ah, %%ah\n"
22858 "1:"
22859 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22860 "c" (value),
22861 "b" (bx),
22862 "D" ((long)reg),
22863 - "S" (&pci_indirect));
22864 + "S" (&pci_indirect),
22865 + "r" (__PCIBIOS_DS));
22866 break;
22867 }
22868
22869 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22870 * Function table for BIOS32 access
22871 */
22872
22873 -static struct pci_raw_ops pci_bios_access = {
22874 +static const struct pci_raw_ops pci_bios_access = {
22875 .read = pci_bios_read,
22876 .write = pci_bios_write
22877 };
22878 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22879 * Try to find PCI BIOS.
22880 */
22881
22882 -static struct pci_raw_ops * __devinit pci_find_bios(void)
22883 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
22884 {
22885 union bios32 *check;
22886 unsigned char sum;
22887 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22888
22889 DBG("PCI: Fetching IRQ routing table... ");
22890 __asm__("push %%es\n\t"
22891 + "movw %w8, %%ds\n\t"
22892 "push %%ds\n\t"
22893 "pop %%es\n\t"
22894 - "lcall *(%%esi); cld\n\t"
22895 + "lcall *%%ss:(%%esi); cld\n\t"
22896 "pop %%es\n\t"
22897 + "push %%ss\n\t"
22898 + "pop %%ds\n"
22899 "jc 1f\n\t"
22900 "xor %%ah, %%ah\n"
22901 "1:"
22902 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22903 "1" (0),
22904 "D" ((long) &opt),
22905 "S" (&pci_indirect),
22906 - "m" (opt)
22907 + "m" (opt),
22908 + "r" (__PCIBIOS_DS)
22909 : "memory");
22910 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22911 if (ret & 0xff00)
22912 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22913 {
22914 int ret;
22915
22916 - __asm__("lcall *(%%esi); cld\n\t"
22917 + __asm__("movw %w5, %%ds\n\t"
22918 + "lcall *%%ss:(%%esi); cld\n\t"
22919 + "push %%ss\n\t"
22920 + "pop %%ds\n"
22921 "jc 1f\n\t"
22922 "xor %%ah, %%ah\n"
22923 "1:"
22924 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22925 : "0" (PCIBIOS_SET_PCI_HW_INT),
22926 "b" ((dev->bus->number << 8) | dev->devfn),
22927 "c" ((irq << 8) | (pin + 10)),
22928 - "S" (&pci_indirect));
22929 + "S" (&pci_indirect),
22930 + "r" (__PCIBIOS_DS));
22931 return !(ret & 0xff00);
22932 }
22933 EXPORT_SYMBOL(pcibios_set_irq_routing);
22934 diff -urNp linux-2.6.32.42/arch/x86/power/cpu.c linux-2.6.32.42/arch/x86/power/cpu.c
22935 --- linux-2.6.32.42/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22936 +++ linux-2.6.32.42/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22937 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
22938 static void fix_processor_context(void)
22939 {
22940 int cpu = smp_processor_id();
22941 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22942 + struct tss_struct *t = init_tss + cpu;
22943
22944 set_tss_desc(cpu, t); /*
22945 * This just modifies memory; should not be
22946 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
22947 */
22948
22949 #ifdef CONFIG_X86_64
22950 + pax_open_kernel();
22951 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22952 + pax_close_kernel();
22953
22954 syscall_init(); /* This sets MSR_*STAR and related */
22955 #endif
22956 diff -urNp linux-2.6.32.42/arch/x86/vdso/Makefile linux-2.6.32.42/arch/x86/vdso/Makefile
22957 --- linux-2.6.32.42/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22958 +++ linux-2.6.32.42/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22959 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22960 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22961 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22962
22963 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22964 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22965 GCOV_PROFILE := n
22966
22967 #
22968 diff -urNp linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c
22969 --- linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22970 +++ linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22971 @@ -22,24 +22,48 @@
22972 #include <asm/hpet.h>
22973 #include <asm/unistd.h>
22974 #include <asm/io.h>
22975 +#include <asm/fixmap.h>
22976 #include "vextern.h"
22977
22978 #define gtod vdso_vsyscall_gtod_data
22979
22980 +notrace noinline long __vdso_fallback_time(long *t)
22981 +{
22982 + long secs;
22983 + asm volatile("syscall"
22984 + : "=a" (secs)
22985 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22986 + return secs;
22987 +}
22988 +
22989 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22990 {
22991 long ret;
22992 asm("syscall" : "=a" (ret) :
22993 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22994 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22995 return ret;
22996 }
22997
22998 +notrace static inline cycle_t __vdso_vread_hpet(void)
22999 +{
23000 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23001 +}
23002 +
23003 +notrace static inline cycle_t __vdso_vread_tsc(void)
23004 +{
23005 + cycle_t ret = (cycle_t)vget_cycles();
23006 +
23007 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23008 +}
23009 +
23010 notrace static inline long vgetns(void)
23011 {
23012 long v;
23013 - cycles_t (*vread)(void);
23014 - vread = gtod->clock.vread;
23015 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23016 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23017 + v = __vdso_vread_tsc();
23018 + else
23019 + v = __vdso_vread_hpet();
23020 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23021 return (v * gtod->clock.mult) >> gtod->clock.shift;
23022 }
23023
23024 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23025
23026 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23027 {
23028 - if (likely(gtod->sysctl_enabled))
23029 + if (likely(gtod->sysctl_enabled &&
23030 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23031 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23032 switch (clock) {
23033 case CLOCK_REALTIME:
23034 if (likely(gtod->clock.vread))
23035 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23036 int clock_gettime(clockid_t, struct timespec *)
23037 __attribute__((weak, alias("__vdso_clock_gettime")));
23038
23039 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23040 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23041 {
23042 long ret;
23043 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23044 + asm("syscall" : "=a" (ret) :
23045 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23046 + return ret;
23047 +}
23048 +
23049 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23050 +{
23051 + if (likely(gtod->sysctl_enabled &&
23052 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23053 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23054 + {
23055 if (likely(tv != NULL)) {
23056 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23057 offsetof(struct timespec, tv_nsec) ||
23058 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23059 }
23060 return 0;
23061 }
23062 - asm("syscall" : "=a" (ret) :
23063 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23064 - return ret;
23065 + return __vdso_fallback_gettimeofday(tv, tz);
23066 }
23067 int gettimeofday(struct timeval *, struct timezone *)
23068 __attribute__((weak, alias("__vdso_gettimeofday")));
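The vclock_gettime.c hunk above adds "cx" and "r11" to the syscall clobber lists. A standalone x86-64 user-space sketch of the same fallback-syscall pattern follows; raw_clock_gettime is an invented name and this is not the vDSO source.

/*
 * Sketch: the key detail is the clobber list.  The SYSCALL instruction
 * overwrites %rcx and %r11, so both must be declared or the compiler may
 * keep live values in them across the call.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>

static long raw_clock_gettime(long clock_id, struct timespec *ts)
{
        long ret;

        asm volatile ("syscall"
                      : "=a" (ret)
                      : "0" (SYS_clock_gettime), "D" (clock_id), "S" (ts)
                      : "rcx", "r11", "memory");
        return ret;
}

int main(void)
{
        struct timespec ts;

        if (raw_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}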
23069 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c
23070 --- linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23071 +++ linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23072 @@ -25,6 +25,7 @@
23073 #include <asm/tlbflush.h>
23074 #include <asm/vdso.h>
23075 #include <asm/proto.h>
23076 +#include <asm/mman.h>
23077
23078 enum {
23079 VDSO_DISABLED = 0,
23080 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23081 void enable_sep_cpu(void)
23082 {
23083 int cpu = get_cpu();
23084 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23085 + struct tss_struct *tss = init_tss + cpu;
23086
23087 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23088 put_cpu();
23089 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23090 gate_vma.vm_start = FIXADDR_USER_START;
23091 gate_vma.vm_end = FIXADDR_USER_END;
23092 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23093 - gate_vma.vm_page_prot = __P101;
23094 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23095 /*
23096 * Make sure the vDSO gets into every core dump.
23097 * Dumping its contents makes post-mortem fully interpretable later
23098 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23099 if (compat)
23100 addr = VDSO_HIGH_BASE;
23101 else {
23102 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23103 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23104 if (IS_ERR_VALUE(addr)) {
23105 ret = addr;
23106 goto up_fail;
23107 }
23108 }
23109
23110 - current->mm->context.vdso = (void *)addr;
23111 + current->mm->context.vdso = addr;
23112
23113 if (compat_uses_vma || !compat) {
23114 /*
23115 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23116 }
23117
23118 current_thread_info()->sysenter_return =
23119 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23120 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23121
23122 up_fail:
23123 if (ret)
23124 - current->mm->context.vdso = NULL;
23125 + current->mm->context.vdso = 0;
23126
23127 up_write(&mm->mmap_sem);
23128
23129 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23130
23131 const char *arch_vma_name(struct vm_area_struct *vma)
23132 {
23133 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23134 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23135 return "[vdso]";
23136 +
23137 +#ifdef CONFIG_PAX_SEGMEXEC
23138 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23139 + return "[vdso]";
23140 +#endif
23141 +
23142 return NULL;
23143 }
23144
23145 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23146 struct mm_struct *mm = tsk->mm;
23147
23148 /* Check to see if this task was created in compat vdso mode */
23149 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23150 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23151 return &gate_vma;
23152 return NULL;
23153 }
23154 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso.lds.S linux-2.6.32.42/arch/x86/vdso/vdso.lds.S
23155 --- linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23156 +++ linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23157 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23158 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23159 #include "vextern.h"
23160 #undef VEXTERN
23161 +
23162 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23163 +VEXTERN(fallback_gettimeofday)
23164 +VEXTERN(fallback_time)
23165 +VEXTERN(getcpu)
23166 +#undef VEXTERN
23167 diff -urNp linux-2.6.32.42/arch/x86/vdso/vextern.h linux-2.6.32.42/arch/x86/vdso/vextern.h
23168 --- linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23169 +++ linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23170 @@ -11,6 +11,5 @@
23171 put into vextern.h and be referenced as a pointer with vdso prefix.
23172 The main kernel later fills in the values. */
23173
23174 -VEXTERN(jiffies)
23175 VEXTERN(vgetcpu_mode)
23176 VEXTERN(vsyscall_gtod_data)
23177 diff -urNp linux-2.6.32.42/arch/x86/vdso/vma.c linux-2.6.32.42/arch/x86/vdso/vma.c
23178 --- linux-2.6.32.42/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23179 +++ linux-2.6.32.42/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23180 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23181 if (!vbase)
23182 goto oom;
23183
23184 - if (memcmp(vbase, "\177ELF", 4)) {
23185 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
23186 printk("VDSO: I'm broken; not ELF\n");
23187 vdso_enabled = 0;
23188 }
23189 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23190 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23191 #include "vextern.h"
23192 #undef VEXTERN
23193 + vunmap(vbase);
23194 return 0;
23195
23196 oom:
23197 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23198 goto up_fail;
23199 }
23200
23201 - current->mm->context.vdso = (void *)addr;
23202 + current->mm->context.vdso = addr;
23203
23204 ret = install_special_mapping(mm, addr, vdso_size,
23205 VM_READ|VM_EXEC|
23206 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23207 VM_ALWAYSDUMP,
23208 vdso_pages);
23209 if (ret) {
23210 - current->mm->context.vdso = NULL;
23211 + current->mm->context.vdso = 0;
23212 goto up_fail;
23213 }
23214
23215 @@ -132,10 +133,3 @@ up_fail:
23216 up_write(&mm->mmap_sem);
23217 return ret;
23218 }
23219 -
23220 -static __init int vdso_setup(char *s)
23221 -{
23222 - vdso_enabled = simple_strtoul(s, NULL, 0);
23223 - return 0;
23224 -}
23225 -__setup("vdso=", vdso_setup);
23226 diff -urNp linux-2.6.32.42/arch/x86/xen/enlighten.c linux-2.6.32.42/arch/x86/xen/enlighten.c
23227 --- linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23228 +++ linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23229 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23230
23231 struct shared_info xen_dummy_shared_info;
23232
23233 -void *xen_initial_gdt;
23234 -
23235 /*
23236 * Point at some empty memory to start with. We map the real shared_info
23237 * page as soon as fixmap is up and running.
23238 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23239
23240 preempt_disable();
23241
23242 - start = __get_cpu_var(idt_desc).address;
23243 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23244 end = start + __get_cpu_var(idt_desc).size + 1;
23245
23246 xen_mc_flush();
23247 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23248 #endif
23249 };
23250
23251 -static void xen_reboot(int reason)
23252 +static __noreturn void xen_reboot(int reason)
23253 {
23254 struct sched_shutdown r = { .reason = reason };
23255
23256 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23257 BUG();
23258 }
23259
23260 -static void xen_restart(char *msg)
23261 +static __noreturn void xen_restart(char *msg)
23262 {
23263 xen_reboot(SHUTDOWN_reboot);
23264 }
23265
23266 -static void xen_emergency_restart(void)
23267 +static __noreturn void xen_emergency_restart(void)
23268 {
23269 xen_reboot(SHUTDOWN_reboot);
23270 }
23271
23272 -static void xen_machine_halt(void)
23273 +static __noreturn void xen_machine_halt(void)
23274 {
23275 xen_reboot(SHUTDOWN_poweroff);
23276 }
23277 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23278 */
23279 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23280
23281 -#ifdef CONFIG_X86_64
23282 /* Work out if we support NX */
23283 - check_efer();
23284 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23285 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23286 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23287 + unsigned l, h;
23288 +
23289 +#ifdef CONFIG_X86_PAE
23290 + nx_enabled = 1;
23291 +#endif
23292 + __supported_pte_mask |= _PAGE_NX;
23293 + rdmsr(MSR_EFER, l, h);
23294 + l |= EFER_NX;
23295 + wrmsr(MSR_EFER, l, h);
23296 + }
23297 #endif
23298
23299 xen_setup_features();
23300 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23301
23302 machine_ops = xen_machine_ops;
23303
23304 - /*
23305 - * The only reliable way to retain the initial address of the
23306 - * percpu gdt_page is to remember it here, so we can go and
23307 - * mark it RW later, when the initial percpu area is freed.
23308 - */
23309 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23310 -
23311 xen_smp_init();
23312
23313 pgd = (pgd_t *)xen_start_info->pt_base;
23314 diff -urNp linux-2.6.32.42/arch/x86/xen/mmu.c linux-2.6.32.42/arch/x86/xen/mmu.c
23315 --- linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:55:34.000000000 -0400
23316 +++ linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:56:37.000000000 -0400
23317 @@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23318 convert_pfn_mfn(init_level4_pgt);
23319 convert_pfn_mfn(level3_ident_pgt);
23320 convert_pfn_mfn(level3_kernel_pgt);
23321 + convert_pfn_mfn(level3_vmalloc_pgt);
23322 + convert_pfn_mfn(level3_vmemmap_pgt);
23323
23324 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23325 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23326 @@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23327 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23328 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23329 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23330 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23331 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23332 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23333 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23334 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23335 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23336
23337 diff -urNp linux-2.6.32.42/arch/x86/xen/smp.c linux-2.6.32.42/arch/x86/xen/smp.c
23338 --- linux-2.6.32.42/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23339 +++ linux-2.6.32.42/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23340 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23341 {
23342 BUG_ON(smp_processor_id() != 0);
23343 native_smp_prepare_boot_cpu();
23344 -
23345 - /* We've switched to the "real" per-cpu gdt, so make sure the
23346 - old memory can be recycled */
23347 - make_lowmem_page_readwrite(xen_initial_gdt);
23348 -
23349 xen_setup_vcpu_info_placement();
23350 }
23351
23352 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23353 gdt = get_cpu_gdt_table(cpu);
23354
23355 ctxt->flags = VGCF_IN_KERNEL;
23356 - ctxt->user_regs.ds = __USER_DS;
23357 - ctxt->user_regs.es = __USER_DS;
23358 + ctxt->user_regs.ds = __KERNEL_DS;
23359 + ctxt->user_regs.es = __KERNEL_DS;
23360 ctxt->user_regs.ss = __KERNEL_DS;
23361 #ifdef CONFIG_X86_32
23362 ctxt->user_regs.fs = __KERNEL_PERCPU;
23363 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23364 + savesegment(gs, ctxt->user_regs.gs);
23365 #else
23366 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23367 #endif
23368 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23369 int rc;
23370
23371 per_cpu(current_task, cpu) = idle;
23372 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23373 #ifdef CONFIG_X86_32
23374 irq_ctx_init(cpu);
23375 #else
23376 clear_tsk_thread_flag(idle, TIF_FORK);
23377 - per_cpu(kernel_stack, cpu) =
23378 - (unsigned long)task_stack_page(idle) -
23379 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23380 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23381 #endif
23382 xen_setup_runstate_info(cpu);
23383 xen_setup_timer(cpu);
23384 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-asm_32.S linux-2.6.32.42/arch/x86/xen/xen-asm_32.S
23385 --- linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23386 +++ linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23387 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23388 ESP_OFFSET=4 # bytes pushed onto stack
23389
23390 /*
23391 - * Store vcpu_info pointer for easy access. Do it this way to
23392 - * avoid having to reload %fs
23393 + * Store vcpu_info pointer for easy access.
23394 */
23395 #ifdef CONFIG_SMP
23396 - GET_THREAD_INFO(%eax)
23397 - movl TI_cpu(%eax), %eax
23398 - movl __per_cpu_offset(,%eax,4), %eax
23399 - mov per_cpu__xen_vcpu(%eax), %eax
23400 + push %fs
23401 + mov $(__KERNEL_PERCPU), %eax
23402 + mov %eax, %fs
23403 + mov PER_CPU_VAR(xen_vcpu), %eax
23404 + pop %fs
23405 #else
23406 movl per_cpu__xen_vcpu, %eax
23407 #endif
23408 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-head.S linux-2.6.32.42/arch/x86/xen/xen-head.S
23409 --- linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23410 +++ linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23411 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23412 #ifdef CONFIG_X86_32
23413 mov %esi,xen_start_info
23414 mov $init_thread_union+THREAD_SIZE,%esp
23415 +#ifdef CONFIG_SMP
23416 + movl $cpu_gdt_table,%edi
23417 + movl $__per_cpu_load,%eax
23418 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23419 + rorl $16,%eax
23420 + movb %al,__KERNEL_PERCPU + 4(%edi)
23421 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23422 + movl $__per_cpu_end - 1,%eax
23423 + subl $__per_cpu_start,%eax
23424 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23425 +#endif
23426 #else
23427 mov %rsi,xen_start_info
23428 mov $init_thread_union+THREAD_SIZE,%rsp
23429 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-ops.h linux-2.6.32.42/arch/x86/xen/xen-ops.h
23430 --- linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23431 +++ linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23432 @@ -10,8 +10,6 @@
23433 extern const char xen_hypervisor_callback[];
23434 extern const char xen_failsafe_callback[];
23435
23436 -extern void *xen_initial_gdt;
23437 -
23438 struct trap_info;
23439 void xen_copy_trap_info(struct trap_info *traps);
23440
23441 diff -urNp linux-2.6.32.42/block/blk-integrity.c linux-2.6.32.42/block/blk-integrity.c
23442 --- linux-2.6.32.42/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23443 +++ linux-2.6.32.42/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23444 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23445 NULL,
23446 };
23447
23448 -static struct sysfs_ops integrity_ops = {
23449 +static const struct sysfs_ops integrity_ops = {
23450 .show = &integrity_attr_show,
23451 .store = &integrity_attr_store,
23452 };
23453 diff -urNp linux-2.6.32.42/block/blk-iopoll.c linux-2.6.32.42/block/blk-iopoll.c
23454 --- linux-2.6.32.42/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23455 +++ linux-2.6.32.42/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23456 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23457 }
23458 EXPORT_SYMBOL(blk_iopoll_complete);
23459
23460 -static void blk_iopoll_softirq(struct softirq_action *h)
23461 +static void blk_iopoll_softirq(void)
23462 {
23463 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23464 int rearm = 0, budget = blk_iopoll_budget;
23465 diff -urNp linux-2.6.32.42/block/blk-map.c linux-2.6.32.42/block/blk-map.c
23466 --- linux-2.6.32.42/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23467 +++ linux-2.6.32.42/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23468 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23469 * direct dma. else, set up kernel bounce buffers
23470 */
23471 uaddr = (unsigned long) ubuf;
23472 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
23473 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23474 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23475 else
23476 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23477 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23478 for (i = 0; i < iov_count; i++) {
23479 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23480
23481 + if (!iov[i].iov_len)
23482 + return -EINVAL;
23483 +
23484 if (uaddr & queue_dma_alignment(q)) {
23485 unaligned = 1;
23486 break;
23487 }
23488 - if (!iov[i].iov_len)
23489 - return -EINVAL;
23490 }
23491
23492 if (unaligned || (q->dma_pad_mask & len) || map_data)
23493 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23494 if (!len || !kbuf)
23495 return -EINVAL;
23496
23497 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23498 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23499 if (do_copy)
23500 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23501 else
23502 diff -urNp linux-2.6.32.42/block/blk-softirq.c linux-2.6.32.42/block/blk-softirq.c
23503 --- linux-2.6.32.42/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23504 +++ linux-2.6.32.42/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23505 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23506 * Softirq action handler - move entries to local list and loop over them
23507 * while passing them to the queue registered handler.
23508 */
23509 -static void blk_done_softirq(struct softirq_action *h)
23510 +static void blk_done_softirq(void)
23511 {
23512 struct list_head *cpu_list, local_list;
23513
23514 diff -urNp linux-2.6.32.42/block/blk-sysfs.c linux-2.6.32.42/block/blk-sysfs.c
23515 --- linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23516 +++ linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23517 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23518 kmem_cache_free(blk_requestq_cachep, q);
23519 }
23520
23521 -static struct sysfs_ops queue_sysfs_ops = {
23522 +static const struct sysfs_ops queue_sysfs_ops = {
23523 .show = queue_attr_show,
23524 .store = queue_attr_store,
23525 };
23526 diff -urNp linux-2.6.32.42/block/bsg.c linux-2.6.32.42/block/bsg.c
23527 --- linux-2.6.32.42/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23528 +++ linux-2.6.32.42/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23529 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23530 struct sg_io_v4 *hdr, struct bsg_device *bd,
23531 fmode_t has_write_perm)
23532 {
23533 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23534 + unsigned char *cmdptr;
23535 +
23536 if (hdr->request_len > BLK_MAX_CDB) {
23537 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23538 if (!rq->cmd)
23539 return -ENOMEM;
23540 - }
23541 + cmdptr = rq->cmd;
23542 + } else
23543 + cmdptr = tmpcmd;
23544
23545 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23546 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23547 hdr->request_len))
23548 return -EFAULT;
23549
23550 + if (cmdptr != rq->cmd)
23551 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23552 +
23553 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23554 if (blk_verify_command(rq->cmd, has_write_perm))
23555 return -EPERM;
23556 diff -urNp linux-2.6.32.42/block/elevator.c linux-2.6.32.42/block/elevator.c
23557 --- linux-2.6.32.42/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23558 +++ linux-2.6.32.42/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23559 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23560 return error;
23561 }
23562
23563 -static struct sysfs_ops elv_sysfs_ops = {
23564 +static const struct sysfs_ops elv_sysfs_ops = {
23565 .show = elv_attr_show,
23566 .store = elv_attr_store,
23567 };
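The block-layer hunks above constify several sysfs_ops tables. A small illustrative sketch of the underlying idea follows, using an invented demo_ops table rather than the kernel's sysfs code: declaring a method table const moves it into read-only data, so its function pointers cannot be silently retargeted at run time.

/*
 * Illustration only; demo_ops, demo_show and demo_store are invented.
 */
#include <stdio.h>

struct demo_ops {
        int (*show)(char *buf);
        int (*store)(const char *buf, int len);
};

static int demo_show(char *buf)
{
        return sprintf(buf, "42\n");
}

static int demo_store(const char *buf, int len)
{
        (void)buf;
        return len;
}

/* const: the function-pointer table ends up in .rodata. */
static const struct demo_ops demo_sysfs_ops = {
        .show  = demo_show,
        .store = demo_store,
};

int main(void)
{
        char buf[16];

        demo_sysfs_ops.show(buf);
        printf("%s", buf);
        return 0;
}

In the patch this hardening is applied broadly: any ops table that is only ever read after initialization is a candidate for the same treatment.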
23568 diff -urNp linux-2.6.32.42/block/scsi_ioctl.c linux-2.6.32.42/block/scsi_ioctl.c
23569 --- linux-2.6.32.42/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23570 +++ linux-2.6.32.42/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23571 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23572 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23573 struct sg_io_hdr *hdr, fmode_t mode)
23574 {
23575 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23576 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23577 + unsigned char *cmdptr;
23578 +
23579 + if (rq->cmd != rq->__cmd)
23580 + cmdptr = rq->cmd;
23581 + else
23582 + cmdptr = tmpcmd;
23583 +
23584 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23585 return -EFAULT;
23586 +
23587 + if (cmdptr != rq->cmd)
23588 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23589 +
23590 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23591 return -EPERM;
23592
23593 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23594 int err;
23595 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23596 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23597 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23598 + unsigned char *cmdptr;
23599
23600 if (!sic)
23601 return -EINVAL;
23602 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23603 */
23604 err = -EFAULT;
23605 rq->cmd_len = cmdlen;
23606 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23607 +
23608 + if (rq->cmd != rq->__cmd)
23609 + cmdptr = rq->cmd;
23610 + else
23611 + cmdptr = tmpcmd;
23612 +
23613 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23614 goto error;
23615
23616 + if (rq->cmd != cmdptr)
23617 + memcpy(rq->cmd, cmdptr, cmdlen);
23618 +
23619 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23620 goto error;
23621
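The bsg.c and scsi_ioctl.c hunks above share one pattern: user-supplied command bytes are staged in a fixed-size temporary before being committed to rq->cmd. A hedged user-space analog follows; fill_cmd, FIXED_CMD_LEN and the plain memcpy standing in for copy_from_user are assumptions of this sketch, not kernel interfaces.

/*
 * User-space analog only.  The point is the staging buffer: bytes land in
 * a bounded temporary and are copied onward only after the length check.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define FIXED_CMD_LEN 16                /* plays the role of sizeof(rq->__cmd) */

static int fill_cmd(unsigned char *dst, size_t dst_len,
                    const unsigned char *src, size_t src_len)
{
        unsigned char tmpcmd[FIXED_CMD_LEN];
        unsigned char *cmdptr = tmpcmd;

        if (src_len > sizeof(tmpcmd) || src_len > dst_len)
                return -EINVAL;         /* the real code allocates a larger buffer instead */

        memcpy(cmdptr, src, src_len);   /* copy_from_user() in the kernel */

        if (cmdptr != dst)              /* mirrors the cmdptr != rq->cmd check */
                memcpy(dst, cmdptr, src_len);
        return 0;
}

int main(void)
{
        unsigned char cmd[FIXED_CMD_LEN];
        const unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 };  /* INQUIRY */

        if (fill_cmd(cmd, sizeof(cmd), cdb, sizeof(cdb)) == 0)
                printf("opcode %#x staged safely\n", cmd[0]);
        return 0;
}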
23622 diff -urNp linux-2.6.32.42/crypto/serpent.c linux-2.6.32.42/crypto/serpent.c
23623 --- linux-2.6.32.42/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23624 +++ linux-2.6.32.42/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23625 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23626 u32 r0,r1,r2,r3,r4;
23627 int i;
23628
23629 + pax_track_stack();
23630 +
23631 /* Copy key, add padding */
23632
23633 for (i = 0; i < keylen; ++i)
23634 diff -urNp linux-2.6.32.42/Documentation/dontdiff linux-2.6.32.42/Documentation/dontdiff
23635 --- linux-2.6.32.42/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23636 +++ linux-2.6.32.42/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23637 @@ -1,13 +1,16 @@
23638 *.a
23639 *.aux
23640 *.bin
23641 +*.cis
23642 *.cpio
23643 *.csp
23644 +*.dbg
23645 *.dsp
23646 *.dvi
23647 *.elf
23648 *.eps
23649 *.fw
23650 +*.gcno
23651 *.gen.S
23652 *.gif
23653 *.grep
23654 @@ -38,8 +41,10 @@
23655 *.tab.h
23656 *.tex
23657 *.ver
23658 +*.vim
23659 *.xml
23660 *_MODULES
23661 +*_reg_safe.h
23662 *_vga16.c
23663 *~
23664 *.9
23665 @@ -49,11 +54,16 @@
23666 53c700_d.h
23667 CVS
23668 ChangeSet
23669 +GPATH
23670 +GRTAGS
23671 +GSYMS
23672 +GTAGS
23673 Image
23674 Kerntypes
23675 Module.markers
23676 Module.symvers
23677 PENDING
23678 +PERF*
23679 SCCS
23680 System.map*
23681 TAGS
23682 @@ -76,7 +86,11 @@ btfixupprep
23683 build
23684 bvmlinux
23685 bzImage*
23686 +capability_names.h
23687 +capflags.c
23688 classlist.h*
23689 +clut_vga16.c
23690 +common-cmds.h
23691 comp*.log
23692 compile.h*
23693 conf
23694 @@ -103,13 +117,14 @@ gen_crc32table
23695 gen_init_cpio
23696 genksyms
23697 *_gray256.c
23698 +hash
23699 ihex2fw
23700 ikconfig.h*
23701 initramfs_data.cpio
23702 +initramfs_data.cpio.bz2
23703 initramfs_data.cpio.gz
23704 initramfs_list
23705 kallsyms
23706 -kconfig
23707 keywords.c
23708 ksym.c*
23709 ksym.h*
23710 @@ -133,7 +148,9 @@ mkboot
23711 mkbugboot
23712 mkcpustr
23713 mkdep
23714 +mkpiggy
23715 mkprep
23716 +mkregtable
23717 mktables
23718 mktree
23719 modpost
23720 @@ -149,6 +166,7 @@ patches*
23721 pca200e.bin
23722 pca200e_ecd.bin2
23723 piggy.gz
23724 +piggy.S
23725 piggyback
23726 pnmtologo
23727 ppc_defs.h*
23728 @@ -157,12 +175,15 @@ qconf
23729 raid6altivec*.c
23730 raid6int*.c
23731 raid6tables.c
23732 +regdb.c
23733 relocs
23734 +rlim_names.h
23735 series
23736 setup
23737 setup.bin
23738 setup.elf
23739 sImage
23740 +slabinfo
23741 sm_tbl*
23742 split-include
23743 syscalltab.h
23744 @@ -186,14 +207,20 @@ version.h*
23745 vmlinux
23746 vmlinux-*
23747 vmlinux.aout
23748 +vmlinux.bin.all
23749 +vmlinux.bin.bz2
23750 vmlinux.lds
23751 +vmlinux.relocs
23752 +voffset.h
23753 vsyscall.lds
23754 vsyscall_32.lds
23755 wanxlfw.inc
23756 uImage
23757 unifdef
23758 +utsrelease.h
23759 wakeup.bin
23760 wakeup.elf
23761 wakeup.lds
23762 zImage*
23763 zconf.hash.c
23764 +zoffset.h
23765 diff -urNp linux-2.6.32.42/Documentation/kernel-parameters.txt linux-2.6.32.42/Documentation/kernel-parameters.txt
23766 --- linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23767 +++ linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23768 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23769 the specified number of seconds. This is to be used if
23770 your oopses keep scrolling off the screen.
23771
23772 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23773 + virtualization environments that don't cope well with the
23774 + expand down segment used by UDEREF on X86-32 or the frequent
23775 + page table updates on X86-64.
23776 +
23777 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23778 +
23779 pcbit= [HW,ISDN]
23780
23781 pcd. [PARIDE]
23782 diff -urNp linux-2.6.32.42/drivers/acpi/acpi_pad.c linux-2.6.32.42/drivers/acpi/acpi_pad.c
23783 --- linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23784 +++ linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23785 @@ -30,7 +30,7 @@
23786 #include <acpi/acpi_bus.h>
23787 #include <acpi/acpi_drivers.h>
23788
23789 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23790 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23791 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23792 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23793 static DEFINE_MUTEX(isolated_cpus_lock);
23794 diff -urNp linux-2.6.32.42/drivers/acpi/battery.c linux-2.6.32.42/drivers/acpi/battery.c
23795 --- linux-2.6.32.42/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23796 +++ linux-2.6.32.42/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23797 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23798 }
23799
23800 static struct battery_file {
23801 - struct file_operations ops;
23802 + const struct file_operations ops;
23803 mode_t mode;
23804 const char *name;
23805 } acpi_battery_file[] = {
23806 diff -urNp linux-2.6.32.42/drivers/acpi/dock.c linux-2.6.32.42/drivers/acpi/dock.c
23807 --- linux-2.6.32.42/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23808 +++ linux-2.6.32.42/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23809 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23810 struct list_head list;
23811 struct list_head hotplug_list;
23812 acpi_handle handle;
23813 - struct acpi_dock_ops *ops;
23814 + const struct acpi_dock_ops *ops;
23815 void *context;
23816 };
23817
23818 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23819 * the dock driver after _DCK is executed.
23820 */
23821 int
23822 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23823 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23824 void *context)
23825 {
23826 struct dock_dependent_device *dd;
23827 diff -urNp linux-2.6.32.42/drivers/acpi/osl.c linux-2.6.32.42/drivers/acpi/osl.c
23828 --- linux-2.6.32.42/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23829 +++ linux-2.6.32.42/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23830 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23831 void __iomem *virt_addr;
23832
23833 virt_addr = ioremap(phys_addr, width);
23834 + if (!virt_addr)
23835 + return AE_NO_MEMORY;
23836 if (!value)
23837 value = &dummy;
23838
23839 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23840 void __iomem *virt_addr;
23841
23842 virt_addr = ioremap(phys_addr, width);
23843 + if (!virt_addr)
23844 + return AE_NO_MEMORY;
23845
23846 switch (width) {
23847 case 8:
23848 diff -urNp linux-2.6.32.42/drivers/acpi/power_meter.c linux-2.6.32.42/drivers/acpi/power_meter.c
23849 --- linux-2.6.32.42/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23850 +++ linux-2.6.32.42/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23851 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23852 return res;
23853
23854 temp /= 1000;
23855 - if (temp < 0)
23856 - return -EINVAL;
23857
23858 mutex_lock(&resource->lock);
23859 resource->trip[attr->index - 7] = temp;
23860 diff -urNp linux-2.6.32.42/drivers/acpi/proc.c linux-2.6.32.42/drivers/acpi/proc.c
23861 --- linux-2.6.32.42/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23862 +++ linux-2.6.32.42/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23863 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23864 size_t count, loff_t * ppos)
23865 {
23866 struct list_head *node, *next;
23867 - char strbuf[5];
23868 - char str[5] = "";
23869 - unsigned int len = count;
23870 + char strbuf[5] = {0};
23871 struct acpi_device *found_dev = NULL;
23872
23873 - if (len > 4)
23874 - len = 4;
23875 - if (len < 0)
23876 - return -EFAULT;
23877 + if (count > 4)
23878 + count = 4;
23879
23880 - if (copy_from_user(strbuf, buffer, len))
23881 + if (copy_from_user(strbuf, buffer, count))
23882 return -EFAULT;
23883 - strbuf[len] = '\0';
23884 - sscanf(strbuf, "%s", str);
23885 + strbuf[count] = '\0';
23886
23887 mutex_lock(&acpi_device_lock);
23888 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23889 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23890 if (!dev->wakeup.flags.valid)
23891 continue;
23892
23893 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23894 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23895 dev->wakeup.state.enabled =
23896 dev->wakeup.state.enabled ? 0 : 1;
23897 found_dev = dev;
23898 diff -urNp linux-2.6.32.42/drivers/acpi/processor_core.c linux-2.6.32.42/drivers/acpi/processor_core.c
23899 --- linux-2.6.32.42/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23900 +++ linux-2.6.32.42/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23901 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23902 return 0;
23903 }
23904
23905 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23906 + BUG_ON(pr->id >= nr_cpu_ids);
23907
23908 /*
23909 * Buggy BIOS check
23910 diff -urNp linux-2.6.32.42/drivers/acpi/sbshc.c linux-2.6.32.42/drivers/acpi/sbshc.c
23911 --- linux-2.6.32.42/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23912 +++ linux-2.6.32.42/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23913 @@ -17,7 +17,7 @@
23914
23915 #define PREFIX "ACPI: "
23916
23917 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23918 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23919 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23920
23921 struct acpi_smb_hc {
23922 diff -urNp linux-2.6.32.42/drivers/acpi/sleep.c linux-2.6.32.42/drivers/acpi/sleep.c
23923 --- linux-2.6.32.42/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23924 +++ linux-2.6.32.42/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23925 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23926 }
23927 }
23928
23929 -static struct platform_suspend_ops acpi_suspend_ops = {
23930 +static const struct platform_suspend_ops acpi_suspend_ops = {
23931 .valid = acpi_suspend_state_valid,
23932 .begin = acpi_suspend_begin,
23933 .prepare_late = acpi_pm_prepare,
23934 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23935 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23936 * been requested.
23937 */
23938 -static struct platform_suspend_ops acpi_suspend_ops_old = {
23939 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
23940 .valid = acpi_suspend_state_valid,
23941 .begin = acpi_suspend_begin_old,
23942 .prepare_late = acpi_pm_disable_gpes,
23943 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23944 acpi_enable_all_runtime_gpes();
23945 }
23946
23947 -static struct platform_hibernation_ops acpi_hibernation_ops = {
23948 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
23949 .begin = acpi_hibernation_begin,
23950 .end = acpi_pm_end,
23951 .pre_snapshot = acpi_hibernation_pre_snapshot,
23952 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23953 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23954 * been requested.
23955 */
23956 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23957 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23958 .begin = acpi_hibernation_begin_old,
23959 .end = acpi_pm_end,
23960 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23961 diff -urNp linux-2.6.32.42/drivers/acpi/video.c linux-2.6.32.42/drivers/acpi/video.c
23962 --- linux-2.6.32.42/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23963 +++ linux-2.6.32.42/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23964 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23965 vd->brightness->levels[request_level]);
23966 }
23967
23968 -static struct backlight_ops acpi_backlight_ops = {
23969 +static const struct backlight_ops acpi_backlight_ops = {
23970 .get_brightness = acpi_video_get_brightness,
23971 .update_status = acpi_video_set_brightness,
23972 };
23973 diff -urNp linux-2.6.32.42/drivers/ata/ahci.c linux-2.6.32.42/drivers/ata/ahci.c
23974 --- linux-2.6.32.42/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23975 +++ linux-2.6.32.42/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23976 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23977 .sdev_attrs = ahci_sdev_attrs,
23978 };
23979
23980 -static struct ata_port_operations ahci_ops = {
23981 +static const struct ata_port_operations ahci_ops = {
23982 .inherits = &sata_pmp_port_ops,
23983
23984 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23985 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23986 .port_stop = ahci_port_stop,
23987 };
23988
23989 -static struct ata_port_operations ahci_vt8251_ops = {
23990 +static const struct ata_port_operations ahci_vt8251_ops = {
23991 .inherits = &ahci_ops,
23992 .hardreset = ahci_vt8251_hardreset,
23993 };
23994
23995 -static struct ata_port_operations ahci_p5wdh_ops = {
23996 +static const struct ata_port_operations ahci_p5wdh_ops = {
23997 .inherits = &ahci_ops,
23998 .hardreset = ahci_p5wdh_hardreset,
23999 };
24000
24001 -static struct ata_port_operations ahci_sb600_ops = {
24002 +static const struct ata_port_operations ahci_sb600_ops = {
24003 .inherits = &ahci_ops,
24004 .softreset = ahci_sb600_softreset,
24005 .pmp_softreset = ahci_sb600_softreset,
24006 diff -urNp linux-2.6.32.42/drivers/ata/ata_generic.c linux-2.6.32.42/drivers/ata/ata_generic.c
24007 --- linux-2.6.32.42/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24008 +++ linux-2.6.32.42/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24009 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24010 ATA_BMDMA_SHT(DRV_NAME),
24011 };
24012
24013 -static struct ata_port_operations generic_port_ops = {
24014 +static const struct ata_port_operations generic_port_ops = {
24015 .inherits = &ata_bmdma_port_ops,
24016 .cable_detect = ata_cable_unknown,
24017 .set_mode = generic_set_mode,
24018 diff -urNp linux-2.6.32.42/drivers/ata/ata_piix.c linux-2.6.32.42/drivers/ata/ata_piix.c
24019 --- linux-2.6.32.42/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24020 +++ linux-2.6.32.42/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24021 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24022 ATA_BMDMA_SHT(DRV_NAME),
24023 };
24024
24025 -static struct ata_port_operations piix_pata_ops = {
24026 +static const struct ata_port_operations piix_pata_ops = {
24027 .inherits = &ata_bmdma32_port_ops,
24028 .cable_detect = ata_cable_40wire,
24029 .set_piomode = piix_set_piomode,
24030 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24031 .prereset = piix_pata_prereset,
24032 };
24033
24034 -static struct ata_port_operations piix_vmw_ops = {
24035 +static const struct ata_port_operations piix_vmw_ops = {
24036 .inherits = &piix_pata_ops,
24037 .bmdma_status = piix_vmw_bmdma_status,
24038 };
24039
24040 -static struct ata_port_operations ich_pata_ops = {
24041 +static const struct ata_port_operations ich_pata_ops = {
24042 .inherits = &piix_pata_ops,
24043 .cable_detect = ich_pata_cable_detect,
24044 .set_dmamode = ich_set_dmamode,
24045 };
24046
24047 -static struct ata_port_operations piix_sata_ops = {
24048 +static const struct ata_port_operations piix_sata_ops = {
24049 .inherits = &ata_bmdma_port_ops,
24050 };
24051
24052 -static struct ata_port_operations piix_sidpr_sata_ops = {
24053 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24054 .inherits = &piix_sata_ops,
24055 .hardreset = sata_std_hardreset,
24056 .scr_read = piix_sidpr_scr_read,
24057 diff -urNp linux-2.6.32.42/drivers/ata/libata-acpi.c linux-2.6.32.42/drivers/ata/libata-acpi.c
24058 --- linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24059 +++ linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24060 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24061 ata_acpi_uevent(dev->link->ap, dev, event);
24062 }
24063
24064 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24065 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24066 .handler = ata_acpi_dev_notify_dock,
24067 .uevent = ata_acpi_dev_uevent,
24068 };
24069
24070 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24071 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24072 .handler = ata_acpi_ap_notify_dock,
24073 .uevent = ata_acpi_ap_uevent,
24074 };
24075 diff -urNp linux-2.6.32.42/drivers/ata/libata-core.c linux-2.6.32.42/drivers/ata/libata-core.c
24076 --- linux-2.6.32.42/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24077 +++ linux-2.6.32.42/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
24078 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24079 struct ata_port *ap;
24080 unsigned int tag;
24081
24082 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24083 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24084 ap = qc->ap;
24085
24086 qc->flags = 0;
24087 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24088 struct ata_port *ap;
24089 struct ata_link *link;
24090
24091 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24092 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24093 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24094 ap = qc->ap;
24095 link = qc->dev->link;
24096 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24097 * LOCKING:
24098 * None.
24099 */
24100 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24101 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24102 {
24103 static DEFINE_SPINLOCK(lock);
24104 const struct ata_port_operations *cur;
24105 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24106 return;
24107
24108 spin_lock(&lock);
24109 + pax_open_kernel();
24110
24111 for (cur = ops->inherits; cur; cur = cur->inherits) {
24112 void **inherit = (void **)cur;
24113 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24114 if (IS_ERR(*pp))
24115 *pp = NULL;
24116
24117 - ops->inherits = NULL;
24118 + ((struct ata_port_operations *)ops)->inherits = NULL;
24119
24120 + pax_close_kernel();
24121 spin_unlock(&lock);
24122 }
24123
24124 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24125 */
24126 /* KILLME - the only user left is ipr */
24127 void ata_host_init(struct ata_host *host, struct device *dev,
24128 - unsigned long flags, struct ata_port_operations *ops)
24129 + unsigned long flags, const struct ata_port_operations *ops)
24130 {
24131 spin_lock_init(&host->lock);
24132 host->dev = dev;
24133 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24134 /* truly dummy */
24135 }
24136
24137 -struct ata_port_operations ata_dummy_port_ops = {
24138 +const struct ata_port_operations ata_dummy_port_ops = {
24139 .qc_prep = ata_noop_qc_prep,
24140 .qc_issue = ata_dummy_qc_issue,
24141 .error_handler = ata_dummy_error_handler,
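Because the ata_port_operations tables are now const (and, with the rest of this patch, are meant to live in read-only memory), the single legitimate writer, ata_finalize_port_ops(), has to open a temporary write window and cast the constness away, as the hunk above shows. A minimal sketch of that pattern (pax_open_kernel()/pax_close_kernel() are provided elsewhere in this patch; the function name here is illustrative):

	static void finalize_ops_once(const struct ata_port_operations *ops)
	{
		pax_open_kernel();	/* temporarily allow writes to read-only data */
		/* the table is declared const, so the one-time fixup casts that
		   away just for the duration of the store */
		((struct ata_port_operations *)ops)->inherits = NULL;
		pax_close_kernel();	/* restore the read-only mapping */
	}

The same cast-plus-open/close idiom shows up wherever the patch constifies a structure that still needs a one-time runtime initialization.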
24142 diff -urNp linux-2.6.32.42/drivers/ata/libata-eh.c linux-2.6.32.42/drivers/ata/libata-eh.c
24143 --- linux-2.6.32.42/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
24144 +++ linux-2.6.32.42/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
24145 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24146 {
24147 struct ata_link *link;
24148
24149 + pax_track_stack();
24150 +
24151 ata_for_each_link(link, ap, HOST_FIRST)
24152 ata_eh_link_report(link);
24153 }
24154 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24155 */
24156 void ata_std_error_handler(struct ata_port *ap)
24157 {
24158 - struct ata_port_operations *ops = ap->ops;
24159 + const struct ata_port_operations *ops = ap->ops;
24160 ata_reset_fn_t hardreset = ops->hardreset;
24161
24162 /* ignore built-in hardreset if SCR access is not available */
24163 diff -urNp linux-2.6.32.42/drivers/ata/libata-pmp.c linux-2.6.32.42/drivers/ata/libata-pmp.c
24164 --- linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24165 +++ linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24166 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24167 */
24168 static int sata_pmp_eh_recover(struct ata_port *ap)
24169 {
24170 - struct ata_port_operations *ops = ap->ops;
24171 + const struct ata_port_operations *ops = ap->ops;
24172 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24173 struct ata_link *pmp_link = &ap->link;
24174 struct ata_device *pmp_dev = pmp_link->device;
24175 diff -urNp linux-2.6.32.42/drivers/ata/pata_acpi.c linux-2.6.32.42/drivers/ata/pata_acpi.c
24176 --- linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24177 +++ linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24178 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24179 ATA_BMDMA_SHT(DRV_NAME),
24180 };
24181
24182 -static struct ata_port_operations pacpi_ops = {
24183 +static const struct ata_port_operations pacpi_ops = {
24184 .inherits = &ata_bmdma_port_ops,
24185 .qc_issue = pacpi_qc_issue,
24186 .cable_detect = pacpi_cable_detect,
24187 diff -urNp linux-2.6.32.42/drivers/ata/pata_ali.c linux-2.6.32.42/drivers/ata/pata_ali.c
24188 --- linux-2.6.32.42/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24189 +++ linux-2.6.32.42/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24190 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24191 * Port operations for PIO only ALi
24192 */
24193
24194 -static struct ata_port_operations ali_early_port_ops = {
24195 +static const struct ata_port_operations ali_early_port_ops = {
24196 .inherits = &ata_sff_port_ops,
24197 .cable_detect = ata_cable_40wire,
24198 .set_piomode = ali_set_piomode,
24199 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24200 * Port operations for DMA capable ALi without cable
24201 * detect
24202 */
24203 -static struct ata_port_operations ali_20_port_ops = {
24204 +static const struct ata_port_operations ali_20_port_ops = {
24205 .inherits = &ali_dma_base_ops,
24206 .cable_detect = ata_cable_40wire,
24207 .mode_filter = ali_20_filter,
24208 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24209 /*
24210 * Port operations for DMA capable ALi with cable detect
24211 */
24212 -static struct ata_port_operations ali_c2_port_ops = {
24213 +static const struct ata_port_operations ali_c2_port_ops = {
24214 .inherits = &ali_dma_base_ops,
24215 .check_atapi_dma = ali_check_atapi_dma,
24216 .cable_detect = ali_c2_cable_detect,
24217 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24218 /*
24219 * Port operations for DMA capable ALi with cable detect
24220 */
24221 -static struct ata_port_operations ali_c4_port_ops = {
24222 +static const struct ata_port_operations ali_c4_port_ops = {
24223 .inherits = &ali_dma_base_ops,
24224 .check_atapi_dma = ali_check_atapi_dma,
24225 .cable_detect = ali_c2_cable_detect,
24226 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24227 /*
24228 * Port operations for DMA capable ALi with cable detect and LBA48
24229 */
24230 -static struct ata_port_operations ali_c5_port_ops = {
24231 +static const struct ata_port_operations ali_c5_port_ops = {
24232 .inherits = &ali_dma_base_ops,
24233 .check_atapi_dma = ali_check_atapi_dma,
24234 .dev_config = ali_warn_atapi_dma,
24235 diff -urNp linux-2.6.32.42/drivers/ata/pata_amd.c linux-2.6.32.42/drivers/ata/pata_amd.c
24236 --- linux-2.6.32.42/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24237 +++ linux-2.6.32.42/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24238 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24239 .prereset = amd_pre_reset,
24240 };
24241
24242 -static struct ata_port_operations amd33_port_ops = {
24243 +static const struct ata_port_operations amd33_port_ops = {
24244 .inherits = &amd_base_port_ops,
24245 .cable_detect = ata_cable_40wire,
24246 .set_piomode = amd33_set_piomode,
24247 .set_dmamode = amd33_set_dmamode,
24248 };
24249
24250 -static struct ata_port_operations amd66_port_ops = {
24251 +static const struct ata_port_operations amd66_port_ops = {
24252 .inherits = &amd_base_port_ops,
24253 .cable_detect = ata_cable_unknown,
24254 .set_piomode = amd66_set_piomode,
24255 .set_dmamode = amd66_set_dmamode,
24256 };
24257
24258 -static struct ata_port_operations amd100_port_ops = {
24259 +static const struct ata_port_operations amd100_port_ops = {
24260 .inherits = &amd_base_port_ops,
24261 .cable_detect = ata_cable_unknown,
24262 .set_piomode = amd100_set_piomode,
24263 .set_dmamode = amd100_set_dmamode,
24264 };
24265
24266 -static struct ata_port_operations amd133_port_ops = {
24267 +static const struct ata_port_operations amd133_port_ops = {
24268 .inherits = &amd_base_port_ops,
24269 .cable_detect = amd_cable_detect,
24270 .set_piomode = amd133_set_piomode,
24271 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24272 .host_stop = nv_host_stop,
24273 };
24274
24275 -static struct ata_port_operations nv100_port_ops = {
24276 +static const struct ata_port_operations nv100_port_ops = {
24277 .inherits = &nv_base_port_ops,
24278 .set_piomode = nv100_set_piomode,
24279 .set_dmamode = nv100_set_dmamode,
24280 };
24281
24282 -static struct ata_port_operations nv133_port_ops = {
24283 +static const struct ata_port_operations nv133_port_ops = {
24284 .inherits = &nv_base_port_ops,
24285 .set_piomode = nv133_set_piomode,
24286 .set_dmamode = nv133_set_dmamode,
24287 diff -urNp linux-2.6.32.42/drivers/ata/pata_artop.c linux-2.6.32.42/drivers/ata/pata_artop.c
24288 --- linux-2.6.32.42/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24289 +++ linux-2.6.32.42/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24290 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24291 ATA_BMDMA_SHT(DRV_NAME),
24292 };
24293
24294 -static struct ata_port_operations artop6210_ops = {
24295 +static const struct ata_port_operations artop6210_ops = {
24296 .inherits = &ata_bmdma_port_ops,
24297 .cable_detect = ata_cable_40wire,
24298 .set_piomode = artop6210_set_piomode,
24299 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24300 .qc_defer = artop6210_qc_defer,
24301 };
24302
24303 -static struct ata_port_operations artop6260_ops = {
24304 +static const struct ata_port_operations artop6260_ops = {
24305 .inherits = &ata_bmdma_port_ops,
24306 .cable_detect = artop6260_cable_detect,
24307 .set_piomode = artop6260_set_piomode,
24308 diff -urNp linux-2.6.32.42/drivers/ata/pata_at32.c linux-2.6.32.42/drivers/ata/pata_at32.c
24309 --- linux-2.6.32.42/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24310 +++ linux-2.6.32.42/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24311 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24312 ATA_PIO_SHT(DRV_NAME),
24313 };
24314
24315 -static struct ata_port_operations at32_port_ops = {
24316 +static const struct ata_port_operations at32_port_ops = {
24317 .inherits = &ata_sff_port_ops,
24318 .cable_detect = ata_cable_40wire,
24319 .set_piomode = pata_at32_set_piomode,
24320 diff -urNp linux-2.6.32.42/drivers/ata/pata_at91.c linux-2.6.32.42/drivers/ata/pata_at91.c
24321 --- linux-2.6.32.42/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24322 +++ linux-2.6.32.42/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24323 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24324 ATA_PIO_SHT(DRV_NAME),
24325 };
24326
24327 -static struct ata_port_operations pata_at91_port_ops = {
24328 +static const struct ata_port_operations pata_at91_port_ops = {
24329 .inherits = &ata_sff_port_ops,
24330
24331 .sff_data_xfer = pata_at91_data_xfer_noirq,
24332 diff -urNp linux-2.6.32.42/drivers/ata/pata_atiixp.c linux-2.6.32.42/drivers/ata/pata_atiixp.c
24333 --- linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24334 +++ linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24335 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24336 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24337 };
24338
24339 -static struct ata_port_operations atiixp_port_ops = {
24340 +static const struct ata_port_operations atiixp_port_ops = {
24341 .inherits = &ata_bmdma_port_ops,
24342
24343 .qc_prep = ata_sff_dumb_qc_prep,
24344 diff -urNp linux-2.6.32.42/drivers/ata/pata_atp867x.c linux-2.6.32.42/drivers/ata/pata_atp867x.c
24345 --- linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24346 +++ linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24347 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24348 ATA_BMDMA_SHT(DRV_NAME),
24349 };
24350
24351 -static struct ata_port_operations atp867x_ops = {
24352 +static const struct ata_port_operations atp867x_ops = {
24353 .inherits = &ata_bmdma_port_ops,
24354 .cable_detect = atp867x_cable_detect,
24355 .set_piomode = atp867x_set_piomode,
24356 diff -urNp linux-2.6.32.42/drivers/ata/pata_bf54x.c linux-2.6.32.42/drivers/ata/pata_bf54x.c
24357 --- linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24358 +++ linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24359 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24360 .dma_boundary = ATA_DMA_BOUNDARY,
24361 };
24362
24363 -static struct ata_port_operations bfin_pata_ops = {
24364 +static const struct ata_port_operations bfin_pata_ops = {
24365 .inherits = &ata_sff_port_ops,
24366
24367 .set_piomode = bfin_set_piomode,
24368 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd640.c linux-2.6.32.42/drivers/ata/pata_cmd640.c
24369 --- linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24370 +++ linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24371 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24372 ATA_BMDMA_SHT(DRV_NAME),
24373 };
24374
24375 -static struct ata_port_operations cmd640_port_ops = {
24376 +static const struct ata_port_operations cmd640_port_ops = {
24377 .inherits = &ata_bmdma_port_ops,
24378 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24379 .sff_data_xfer = ata_sff_data_xfer_noirq,
24380 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd64x.c linux-2.6.32.42/drivers/ata/pata_cmd64x.c
24381 --- linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24382 +++ linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24383 @@ -271,18 +271,18 @@ static const struct ata_port_operations
24384 .set_dmamode = cmd64x_set_dmamode,
24385 };
24386
24387 -static struct ata_port_operations cmd64x_port_ops = {
24388 +static const struct ata_port_operations cmd64x_port_ops = {
24389 .inherits = &cmd64x_base_ops,
24390 .cable_detect = ata_cable_40wire,
24391 };
24392
24393 -static struct ata_port_operations cmd646r1_port_ops = {
24394 +static const struct ata_port_operations cmd646r1_port_ops = {
24395 .inherits = &cmd64x_base_ops,
24396 .bmdma_stop = cmd646r1_bmdma_stop,
24397 .cable_detect = ata_cable_40wire,
24398 };
24399
24400 -static struct ata_port_operations cmd648_port_ops = {
24401 +static const struct ata_port_operations cmd648_port_ops = {
24402 .inherits = &cmd64x_base_ops,
24403 .bmdma_stop = cmd648_bmdma_stop,
24404 .cable_detect = cmd648_cable_detect,
24405 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5520.c linux-2.6.32.42/drivers/ata/pata_cs5520.c
24406 --- linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24407 +++ linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24408 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24409 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24410 };
24411
24412 -static struct ata_port_operations cs5520_port_ops = {
24413 +static const struct ata_port_operations cs5520_port_ops = {
24414 .inherits = &ata_bmdma_port_ops,
24415 .qc_prep = ata_sff_dumb_qc_prep,
24416 .cable_detect = ata_cable_40wire,
24417 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5530.c linux-2.6.32.42/drivers/ata/pata_cs5530.c
24418 --- linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24419 +++ linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24420 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24421 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24422 };
24423
24424 -static struct ata_port_operations cs5530_port_ops = {
24425 +static const struct ata_port_operations cs5530_port_ops = {
24426 .inherits = &ata_bmdma_port_ops,
24427
24428 .qc_prep = ata_sff_dumb_qc_prep,
24429 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5535.c linux-2.6.32.42/drivers/ata/pata_cs5535.c
24430 --- linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24431 +++ linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24432 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24433 ATA_BMDMA_SHT(DRV_NAME),
24434 };
24435
24436 -static struct ata_port_operations cs5535_port_ops = {
24437 +static const struct ata_port_operations cs5535_port_ops = {
24438 .inherits = &ata_bmdma_port_ops,
24439 .cable_detect = cs5535_cable_detect,
24440 .set_piomode = cs5535_set_piomode,
24441 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5536.c linux-2.6.32.42/drivers/ata/pata_cs5536.c
24442 --- linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24443 +++ linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24444 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24445 ATA_BMDMA_SHT(DRV_NAME),
24446 };
24447
24448 -static struct ata_port_operations cs5536_port_ops = {
24449 +static const struct ata_port_operations cs5536_port_ops = {
24450 .inherits = &ata_bmdma_port_ops,
24451 .cable_detect = cs5536_cable_detect,
24452 .set_piomode = cs5536_set_piomode,
24453 diff -urNp linux-2.6.32.42/drivers/ata/pata_cypress.c linux-2.6.32.42/drivers/ata/pata_cypress.c
24454 --- linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24455 +++ linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24456 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24457 ATA_BMDMA_SHT(DRV_NAME),
24458 };
24459
24460 -static struct ata_port_operations cy82c693_port_ops = {
24461 +static const struct ata_port_operations cy82c693_port_ops = {
24462 .inherits = &ata_bmdma_port_ops,
24463 .cable_detect = ata_cable_40wire,
24464 .set_piomode = cy82c693_set_piomode,
24465 diff -urNp linux-2.6.32.42/drivers/ata/pata_efar.c linux-2.6.32.42/drivers/ata/pata_efar.c
24466 --- linux-2.6.32.42/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24467 +++ linux-2.6.32.42/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24468 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24469 ATA_BMDMA_SHT(DRV_NAME),
24470 };
24471
24472 -static struct ata_port_operations efar_ops = {
24473 +static const struct ata_port_operations efar_ops = {
24474 .inherits = &ata_bmdma_port_ops,
24475 .cable_detect = efar_cable_detect,
24476 .set_piomode = efar_set_piomode,
24477 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt366.c linux-2.6.32.42/drivers/ata/pata_hpt366.c
24478 --- linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24479 +++ linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24480 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24481 * Configuration for HPT366/68
24482 */
24483
24484 -static struct ata_port_operations hpt366_port_ops = {
24485 +static const struct ata_port_operations hpt366_port_ops = {
24486 .inherits = &ata_bmdma_port_ops,
24487 .cable_detect = hpt36x_cable_detect,
24488 .mode_filter = hpt366_filter,
24489 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt37x.c linux-2.6.32.42/drivers/ata/pata_hpt37x.c
24490 --- linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24491 +++ linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24492 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24493 * Configuration for HPT370
24494 */
24495
24496 -static struct ata_port_operations hpt370_port_ops = {
24497 +static const struct ata_port_operations hpt370_port_ops = {
24498 .inherits = &ata_bmdma_port_ops,
24499
24500 .bmdma_stop = hpt370_bmdma_stop,
24501 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24502 * Configuration for HPT370A. Close to 370 but less filters
24503 */
24504
24505 -static struct ata_port_operations hpt370a_port_ops = {
24506 +static const struct ata_port_operations hpt370a_port_ops = {
24507 .inherits = &hpt370_port_ops,
24508 .mode_filter = hpt370a_filter,
24509 };
24510 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24511 * and DMA mode setting functionality.
24512 */
24513
24514 -static struct ata_port_operations hpt372_port_ops = {
24515 +static const struct ata_port_operations hpt372_port_ops = {
24516 .inherits = &ata_bmdma_port_ops,
24517
24518 .bmdma_stop = hpt37x_bmdma_stop,
24519 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24520 * but we have a different cable detection procedure for function 1.
24521 */
24522
24523 -static struct ata_port_operations hpt374_fn1_port_ops = {
24524 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24525 .inherits = &hpt372_port_ops,
24526 .prereset = hpt374_fn1_pre_reset,
24527 };
24528 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c
24529 --- linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24530 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24531 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24532 * Configuration for HPT3x2n.
24533 */
24534
24535 -static struct ata_port_operations hpt3x2n_port_ops = {
24536 +static const struct ata_port_operations hpt3x2n_port_ops = {
24537 .inherits = &ata_bmdma_port_ops,
24538
24539 .bmdma_stop = hpt3x2n_bmdma_stop,
24540 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x3.c linux-2.6.32.42/drivers/ata/pata_hpt3x3.c
24541 --- linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24542 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24543 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24544 ATA_BMDMA_SHT(DRV_NAME),
24545 };
24546
24547 -static struct ata_port_operations hpt3x3_port_ops = {
24548 +static const struct ata_port_operations hpt3x3_port_ops = {
24549 .inherits = &ata_bmdma_port_ops,
24550 .cable_detect = ata_cable_40wire,
24551 .set_piomode = hpt3x3_set_piomode,
24552 diff -urNp linux-2.6.32.42/drivers/ata/pata_icside.c linux-2.6.32.42/drivers/ata/pata_icside.c
24553 --- linux-2.6.32.42/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24554 +++ linux-2.6.32.42/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24555 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24556 }
24557 }
24558
24559 -static struct ata_port_operations pata_icside_port_ops = {
24560 +static const struct ata_port_operations pata_icside_port_ops = {
24561 .inherits = &ata_sff_port_ops,
24562 /* no need to build any PRD tables for DMA */
24563 .qc_prep = ata_noop_qc_prep,
24564 diff -urNp linux-2.6.32.42/drivers/ata/pata_isapnp.c linux-2.6.32.42/drivers/ata/pata_isapnp.c
24565 --- linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24566 +++ linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24567 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24568 ATA_PIO_SHT(DRV_NAME),
24569 };
24570
24571 -static struct ata_port_operations isapnp_port_ops = {
24572 +static const struct ata_port_operations isapnp_port_ops = {
24573 .inherits = &ata_sff_port_ops,
24574 .cable_detect = ata_cable_40wire,
24575 };
24576
24577 -static struct ata_port_operations isapnp_noalt_port_ops = {
24578 +static const struct ata_port_operations isapnp_noalt_port_ops = {
24579 .inherits = &ata_sff_port_ops,
24580 .cable_detect = ata_cable_40wire,
24581 /* No altstatus so we don't want to use the lost interrupt poll */
24582 diff -urNp linux-2.6.32.42/drivers/ata/pata_it8213.c linux-2.6.32.42/drivers/ata/pata_it8213.c
24583 --- linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24584 +++ linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24585 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24586 };
24587
24588
24589 -static struct ata_port_operations it8213_ops = {
24590 +static const struct ata_port_operations it8213_ops = {
24591 .inherits = &ata_bmdma_port_ops,
24592 .cable_detect = it8213_cable_detect,
24593 .set_piomode = it8213_set_piomode,
24594 diff -urNp linux-2.6.32.42/drivers/ata/pata_it821x.c linux-2.6.32.42/drivers/ata/pata_it821x.c
24595 --- linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24596 +++ linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24597 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24598 ATA_BMDMA_SHT(DRV_NAME),
24599 };
24600
24601 -static struct ata_port_operations it821x_smart_port_ops = {
24602 +static const struct ata_port_operations it821x_smart_port_ops = {
24603 .inherits = &ata_bmdma_port_ops,
24604
24605 .check_atapi_dma= it821x_check_atapi_dma,
24606 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24607 .port_start = it821x_port_start,
24608 };
24609
24610 -static struct ata_port_operations it821x_passthru_port_ops = {
24611 +static const struct ata_port_operations it821x_passthru_port_ops = {
24612 .inherits = &ata_bmdma_port_ops,
24613
24614 .check_atapi_dma= it821x_check_atapi_dma,
24615 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24616 .port_start = it821x_port_start,
24617 };
24618
24619 -static struct ata_port_operations it821x_rdc_port_ops = {
24620 +static const struct ata_port_operations it821x_rdc_port_ops = {
24621 .inherits = &ata_bmdma_port_ops,
24622
24623 .check_atapi_dma= it821x_check_atapi_dma,
24624 diff -urNp linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c
24625 --- linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24626 +++ linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24627 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24628 ATA_PIO_SHT(DRV_NAME),
24629 };
24630
24631 -static struct ata_port_operations ixp4xx_port_ops = {
24632 +static const struct ata_port_operations ixp4xx_port_ops = {
24633 .inherits = &ata_sff_port_ops,
24634 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24635 .cable_detect = ata_cable_40wire,
24636 diff -urNp linux-2.6.32.42/drivers/ata/pata_jmicron.c linux-2.6.32.42/drivers/ata/pata_jmicron.c
24637 --- linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24638 +++ linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24639 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24640 ATA_BMDMA_SHT(DRV_NAME),
24641 };
24642
24643 -static struct ata_port_operations jmicron_ops = {
24644 +static const struct ata_port_operations jmicron_ops = {
24645 .inherits = &ata_bmdma_port_ops,
24646 .prereset = jmicron_pre_reset,
24647 };
24648 diff -urNp linux-2.6.32.42/drivers/ata/pata_legacy.c linux-2.6.32.42/drivers/ata/pata_legacy.c
24649 --- linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24650 +++ linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24651 @@ -106,7 +106,7 @@ struct legacy_probe {
24652
24653 struct legacy_controller {
24654 const char *name;
24655 - struct ata_port_operations *ops;
24656 + const struct ata_port_operations *ops;
24657 unsigned int pio_mask;
24658 unsigned int flags;
24659 unsigned int pflags;
24660 @@ -223,12 +223,12 @@ static const struct ata_port_operations
24661 * pio_mask as well.
24662 */
24663
24664 -static struct ata_port_operations simple_port_ops = {
24665 +static const struct ata_port_operations simple_port_ops = {
24666 .inherits = &legacy_base_port_ops,
24667 .sff_data_xfer = ata_sff_data_xfer_noirq,
24668 };
24669
24670 -static struct ata_port_operations legacy_port_ops = {
24671 +static const struct ata_port_operations legacy_port_ops = {
24672 .inherits = &legacy_base_port_ops,
24673 .sff_data_xfer = ata_sff_data_xfer_noirq,
24674 .set_mode = legacy_set_mode,
24675 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24676 return buflen;
24677 }
24678
24679 -static struct ata_port_operations pdc20230_port_ops = {
24680 +static const struct ata_port_operations pdc20230_port_ops = {
24681 .inherits = &legacy_base_port_ops,
24682 .set_piomode = pdc20230_set_piomode,
24683 .sff_data_xfer = pdc_data_xfer_vlb,
24684 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24685 ioread8(ap->ioaddr.status_addr);
24686 }
24687
24688 -static struct ata_port_operations ht6560a_port_ops = {
24689 +static const struct ata_port_operations ht6560a_port_ops = {
24690 .inherits = &legacy_base_port_ops,
24691 .set_piomode = ht6560a_set_piomode,
24692 };
24693 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24694 ioread8(ap->ioaddr.status_addr);
24695 }
24696
24697 -static struct ata_port_operations ht6560b_port_ops = {
24698 +static const struct ata_port_operations ht6560b_port_ops = {
24699 .inherits = &legacy_base_port_ops,
24700 .set_piomode = ht6560b_set_piomode,
24701 };
24702 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24703 }
24704
24705
24706 -static struct ata_port_operations opti82c611a_port_ops = {
24707 +static const struct ata_port_operations opti82c611a_port_ops = {
24708 .inherits = &legacy_base_port_ops,
24709 .set_piomode = opti82c611a_set_piomode,
24710 };
24711 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24712 return ata_sff_qc_issue(qc);
24713 }
24714
24715 -static struct ata_port_operations opti82c46x_port_ops = {
24716 +static const struct ata_port_operations opti82c46x_port_ops = {
24717 .inherits = &legacy_base_port_ops,
24718 .set_piomode = opti82c46x_set_piomode,
24719 .qc_issue = opti82c46x_qc_issue,
24720 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24721 return 0;
24722 }
24723
24724 -static struct ata_port_operations qdi6500_port_ops = {
24725 +static const struct ata_port_operations qdi6500_port_ops = {
24726 .inherits = &legacy_base_port_ops,
24727 .set_piomode = qdi6500_set_piomode,
24728 .qc_issue = qdi_qc_issue,
24729 .sff_data_xfer = vlb32_data_xfer,
24730 };
24731
24732 -static struct ata_port_operations qdi6580_port_ops = {
24733 +static const struct ata_port_operations qdi6580_port_ops = {
24734 .inherits = &legacy_base_port_ops,
24735 .set_piomode = qdi6580_set_piomode,
24736 .sff_data_xfer = vlb32_data_xfer,
24737 };
24738
24739 -static struct ata_port_operations qdi6580dp_port_ops = {
24740 +static const struct ata_port_operations qdi6580dp_port_ops = {
24741 .inherits = &legacy_base_port_ops,
24742 .set_piomode = qdi6580dp_set_piomode,
24743 .sff_data_xfer = vlb32_data_xfer,
24744 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24745 return 0;
24746 }
24747
24748 -static struct ata_port_operations winbond_port_ops = {
24749 +static const struct ata_port_operations winbond_port_ops = {
24750 .inherits = &legacy_base_port_ops,
24751 .set_piomode = winbond_set_piomode,
24752 .sff_data_xfer = vlb32_data_xfer,
24753 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24754 int pio_modes = controller->pio_mask;
24755 unsigned long io = probe->port;
24756 u32 mask = (1 << probe->slot);
24757 - struct ata_port_operations *ops = controller->ops;
24758 + const struct ata_port_operations *ops = controller->ops;
24759 struct legacy_data *ld = &legacy_data[probe->slot];
24760 struct ata_host *host = NULL;
24761 struct ata_port *ap;
24762 diff -urNp linux-2.6.32.42/drivers/ata/pata_marvell.c linux-2.6.32.42/drivers/ata/pata_marvell.c
24763 --- linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24764 +++ linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24765 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24766 ATA_BMDMA_SHT(DRV_NAME),
24767 };
24768
24769 -static struct ata_port_operations marvell_ops = {
24770 +static const struct ata_port_operations marvell_ops = {
24771 .inherits = &ata_bmdma_port_ops,
24772 .cable_detect = marvell_cable_detect,
24773 .prereset = marvell_pre_reset,
24774 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpc52xx.c linux-2.6.32.42/drivers/ata/pata_mpc52xx.c
24775 --- linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24776 +++ linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24777 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24778 ATA_PIO_SHT(DRV_NAME),
24779 };
24780
24781 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24782 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24783 .inherits = &ata_bmdma_port_ops,
24784 .sff_dev_select = mpc52xx_ata_dev_select,
24785 .set_piomode = mpc52xx_ata_set_piomode,
24786 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpiix.c linux-2.6.32.42/drivers/ata/pata_mpiix.c
24787 --- linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24788 +++ linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24789 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24790 ATA_PIO_SHT(DRV_NAME),
24791 };
24792
24793 -static struct ata_port_operations mpiix_port_ops = {
24794 +static const struct ata_port_operations mpiix_port_ops = {
24795 .inherits = &ata_sff_port_ops,
24796 .qc_issue = mpiix_qc_issue,
24797 .cable_detect = ata_cable_40wire,
24798 diff -urNp linux-2.6.32.42/drivers/ata/pata_netcell.c linux-2.6.32.42/drivers/ata/pata_netcell.c
24799 --- linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24800 +++ linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24801 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24802 ATA_BMDMA_SHT(DRV_NAME),
24803 };
24804
24805 -static struct ata_port_operations netcell_ops = {
24806 +static const struct ata_port_operations netcell_ops = {
24807 .inherits = &ata_bmdma_port_ops,
24808 .cable_detect = ata_cable_80wire,
24809 .read_id = netcell_read_id,
24810 diff -urNp linux-2.6.32.42/drivers/ata/pata_ninja32.c linux-2.6.32.42/drivers/ata/pata_ninja32.c
24811 --- linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24812 +++ linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24813 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24814 ATA_BMDMA_SHT(DRV_NAME),
24815 };
24816
24817 -static struct ata_port_operations ninja32_port_ops = {
24818 +static const struct ata_port_operations ninja32_port_ops = {
24819 .inherits = &ata_bmdma_port_ops,
24820 .sff_dev_select = ninja32_dev_select,
24821 .cable_detect = ata_cable_40wire,
24822 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87410.c linux-2.6.32.42/drivers/ata/pata_ns87410.c
24823 --- linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24824 +++ linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24825 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24826 ATA_PIO_SHT(DRV_NAME),
24827 };
24828
24829 -static struct ata_port_operations ns87410_port_ops = {
24830 +static const struct ata_port_operations ns87410_port_ops = {
24831 .inherits = &ata_sff_port_ops,
24832 .qc_issue = ns87410_qc_issue,
24833 .cable_detect = ata_cable_40wire,
24834 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87415.c linux-2.6.32.42/drivers/ata/pata_ns87415.c
24835 --- linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24836 +++ linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24837 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24838 }
24839 #endif /* 87560 SuperIO Support */
24840
24841 -static struct ata_port_operations ns87415_pata_ops = {
24842 +static const struct ata_port_operations ns87415_pata_ops = {
24843 .inherits = &ata_bmdma_port_ops,
24844
24845 .check_atapi_dma = ns87415_check_atapi_dma,
24846 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24847 };
24848
24849 #if defined(CONFIG_SUPERIO)
24850 -static struct ata_port_operations ns87560_pata_ops = {
24851 +static const struct ata_port_operations ns87560_pata_ops = {
24852 .inherits = &ns87415_pata_ops,
24853 .sff_tf_read = ns87560_tf_read,
24854 .sff_check_status = ns87560_check_status,
24855 diff -urNp linux-2.6.32.42/drivers/ata/pata_octeon_cf.c linux-2.6.32.42/drivers/ata/pata_octeon_cf.c
24856 --- linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24857 +++ linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24858 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24859 return 0;
24860 }
24861
24862 +/* cannot be const */
24863 static struct ata_port_operations octeon_cf_ops = {
24864 .inherits = &ata_sff_port_ops,
24865 .check_atapi_dma = octeon_cf_check_atapi_dma,
24866 diff -urNp linux-2.6.32.42/drivers/ata/pata_oldpiix.c linux-2.6.32.42/drivers/ata/pata_oldpiix.c
24867 --- linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24868 +++ linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24869 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24870 ATA_BMDMA_SHT(DRV_NAME),
24871 };
24872
24873 -static struct ata_port_operations oldpiix_pata_ops = {
24874 +static const struct ata_port_operations oldpiix_pata_ops = {
24875 .inherits = &ata_bmdma_port_ops,
24876 .qc_issue = oldpiix_qc_issue,
24877 .cable_detect = ata_cable_40wire,
24878 diff -urNp linux-2.6.32.42/drivers/ata/pata_opti.c linux-2.6.32.42/drivers/ata/pata_opti.c
24879 --- linux-2.6.32.42/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24880 +++ linux-2.6.32.42/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24881 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24882 ATA_PIO_SHT(DRV_NAME),
24883 };
24884
24885 -static struct ata_port_operations opti_port_ops = {
24886 +static const struct ata_port_operations opti_port_ops = {
24887 .inherits = &ata_sff_port_ops,
24888 .cable_detect = ata_cable_40wire,
24889 .set_piomode = opti_set_piomode,
24890 diff -urNp linux-2.6.32.42/drivers/ata/pata_optidma.c linux-2.6.32.42/drivers/ata/pata_optidma.c
24891 --- linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24892 +++ linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24893 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24894 ATA_BMDMA_SHT(DRV_NAME),
24895 };
24896
24897 -static struct ata_port_operations optidma_port_ops = {
24898 +static const struct ata_port_operations optidma_port_ops = {
24899 .inherits = &ata_bmdma_port_ops,
24900 .cable_detect = ata_cable_40wire,
24901 .set_piomode = optidma_set_pio_mode,
24902 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24903 .prereset = optidma_pre_reset,
24904 };
24905
24906 -static struct ata_port_operations optiplus_port_ops = {
24907 +static const struct ata_port_operations optiplus_port_ops = {
24908 .inherits = &optidma_port_ops,
24909 .set_piomode = optiplus_set_pio_mode,
24910 .set_dmamode = optiplus_set_dma_mode,
24911 diff -urNp linux-2.6.32.42/drivers/ata/pata_palmld.c linux-2.6.32.42/drivers/ata/pata_palmld.c
24912 --- linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24913 +++ linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24914 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24915 ATA_PIO_SHT(DRV_NAME),
24916 };
24917
24918 -static struct ata_port_operations palmld_port_ops = {
24919 +static const struct ata_port_operations palmld_port_ops = {
24920 .inherits = &ata_sff_port_ops,
24921 .sff_data_xfer = ata_sff_data_xfer_noirq,
24922 .cable_detect = ata_cable_40wire,
24923 diff -urNp linux-2.6.32.42/drivers/ata/pata_pcmcia.c linux-2.6.32.42/drivers/ata/pata_pcmcia.c
24924 --- linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24925 +++ linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24926 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24927 ATA_PIO_SHT(DRV_NAME),
24928 };
24929
24930 -static struct ata_port_operations pcmcia_port_ops = {
24931 +static const struct ata_port_operations pcmcia_port_ops = {
24932 .inherits = &ata_sff_port_ops,
24933 .sff_data_xfer = ata_sff_data_xfer_noirq,
24934 .cable_detect = ata_cable_40wire,
24935 .set_mode = pcmcia_set_mode,
24936 };
24937
24938 -static struct ata_port_operations pcmcia_8bit_port_ops = {
24939 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
24940 .inherits = &ata_sff_port_ops,
24941 .sff_data_xfer = ata_data_xfer_8bit,
24942 .cable_detect = ata_cable_40wire,
24943 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24944 unsigned long io_base, ctl_base;
24945 void __iomem *io_addr, *ctl_addr;
24946 int n_ports = 1;
24947 - struct ata_port_operations *ops = &pcmcia_port_ops;
24948 + const struct ata_port_operations *ops = &pcmcia_port_ops;
24949
24950 info = kzalloc(sizeof(*info), GFP_KERNEL);
24951 if (info == NULL)
24952 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc2027x.c linux-2.6.32.42/drivers/ata/pata_pdc2027x.c
24953 --- linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24954 +++ linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24955 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24956 ATA_BMDMA_SHT(DRV_NAME),
24957 };
24958
24959 -static struct ata_port_operations pdc2027x_pata100_ops = {
24960 +static const struct ata_port_operations pdc2027x_pata100_ops = {
24961 .inherits = &ata_bmdma_port_ops,
24962 .check_atapi_dma = pdc2027x_check_atapi_dma,
24963 .cable_detect = pdc2027x_cable_detect,
24964 .prereset = pdc2027x_prereset,
24965 };
24966
24967 -static struct ata_port_operations pdc2027x_pata133_ops = {
24968 +static const struct ata_port_operations pdc2027x_pata133_ops = {
24969 .inherits = &pdc2027x_pata100_ops,
24970 .mode_filter = pdc2027x_mode_filter,
24971 .set_piomode = pdc2027x_set_piomode,
24972 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c
24973 --- linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24974 +++ linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24975 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24976 ATA_BMDMA_SHT(DRV_NAME),
24977 };
24978
24979 -static struct ata_port_operations pdc2024x_port_ops = {
24980 +static const struct ata_port_operations pdc2024x_port_ops = {
24981 .inherits = &ata_bmdma_port_ops,
24982
24983 .cable_detect = ata_cable_40wire,
24984 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24985 .sff_exec_command = pdc202xx_exec_command,
24986 };
24987
24988 -static struct ata_port_operations pdc2026x_port_ops = {
24989 +static const struct ata_port_operations pdc2026x_port_ops = {
24990 .inherits = &pdc2024x_port_ops,
24991
24992 .check_atapi_dma = pdc2026x_check_atapi_dma,
24993 diff -urNp linux-2.6.32.42/drivers/ata/pata_platform.c linux-2.6.32.42/drivers/ata/pata_platform.c
24994 --- linux-2.6.32.42/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24995 +++ linux-2.6.32.42/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24996 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24997 ATA_PIO_SHT(DRV_NAME),
24998 };
24999
25000 -static struct ata_port_operations pata_platform_port_ops = {
25001 +static const struct ata_port_operations pata_platform_port_ops = {
25002 .inherits = &ata_sff_port_ops,
25003 .sff_data_xfer = ata_sff_data_xfer_noirq,
25004 .cable_detect = ata_cable_unknown,
25005 diff -urNp linux-2.6.32.42/drivers/ata/pata_qdi.c linux-2.6.32.42/drivers/ata/pata_qdi.c
25006 --- linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25007 +++ linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25008 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25009 ATA_PIO_SHT(DRV_NAME),
25010 };
25011
25012 -static struct ata_port_operations qdi6500_port_ops = {
25013 +static const struct ata_port_operations qdi6500_port_ops = {
25014 .inherits = &ata_sff_port_ops,
25015 .qc_issue = qdi_qc_issue,
25016 .sff_data_xfer = qdi_data_xfer,
25017 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25018 .set_piomode = qdi6500_set_piomode,
25019 };
25020
25021 -static struct ata_port_operations qdi6580_port_ops = {
25022 +static const struct ata_port_operations qdi6580_port_ops = {
25023 .inherits = &qdi6500_port_ops,
25024 .set_piomode = qdi6580_set_piomode,
25025 };
25026 diff -urNp linux-2.6.32.42/drivers/ata/pata_radisys.c linux-2.6.32.42/drivers/ata/pata_radisys.c
25027 --- linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25028 +++ linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25029 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25030 ATA_BMDMA_SHT(DRV_NAME),
25031 };
25032
25033 -static struct ata_port_operations radisys_pata_ops = {
25034 +static const struct ata_port_operations radisys_pata_ops = {
25035 .inherits = &ata_bmdma_port_ops,
25036 .qc_issue = radisys_qc_issue,
25037 .cable_detect = ata_cable_unknown,
25038 diff -urNp linux-2.6.32.42/drivers/ata/pata_rb532_cf.c linux-2.6.32.42/drivers/ata/pata_rb532_cf.c
25039 --- linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25040 +++ linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25041 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25042 return IRQ_HANDLED;
25043 }
25044
25045 -static struct ata_port_operations rb532_pata_port_ops = {
25046 +static const struct ata_port_operations rb532_pata_port_ops = {
25047 .inherits = &ata_sff_port_ops,
25048 .sff_data_xfer = ata_sff_data_xfer32,
25049 };
25050 diff -urNp linux-2.6.32.42/drivers/ata/pata_rdc.c linux-2.6.32.42/drivers/ata/pata_rdc.c
25051 --- linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25052 +++ linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25053 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25054 pci_write_config_byte(dev, 0x48, udma_enable);
25055 }
25056
25057 -static struct ata_port_operations rdc_pata_ops = {
25058 +static const struct ata_port_operations rdc_pata_ops = {
25059 .inherits = &ata_bmdma32_port_ops,
25060 .cable_detect = rdc_pata_cable_detect,
25061 .set_piomode = rdc_set_piomode,
25062 diff -urNp linux-2.6.32.42/drivers/ata/pata_rz1000.c linux-2.6.32.42/drivers/ata/pata_rz1000.c
25063 --- linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25064 +++ linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25065 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25066 ATA_PIO_SHT(DRV_NAME),
25067 };
25068
25069 -static struct ata_port_operations rz1000_port_ops = {
25070 +static const struct ata_port_operations rz1000_port_ops = {
25071 .inherits = &ata_sff_port_ops,
25072 .cable_detect = ata_cable_40wire,
25073 .set_mode = rz1000_set_mode,
25074 diff -urNp linux-2.6.32.42/drivers/ata/pata_sc1200.c linux-2.6.32.42/drivers/ata/pata_sc1200.c
25075 --- linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25076 +++ linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25077 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25078 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25079 };
25080
25081 -static struct ata_port_operations sc1200_port_ops = {
25082 +static const struct ata_port_operations sc1200_port_ops = {
25083 .inherits = &ata_bmdma_port_ops,
25084 .qc_prep = ata_sff_dumb_qc_prep,
25085 .qc_issue = sc1200_qc_issue,
25086 diff -urNp linux-2.6.32.42/drivers/ata/pata_scc.c linux-2.6.32.42/drivers/ata/pata_scc.c
25087 --- linux-2.6.32.42/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25088 +++ linux-2.6.32.42/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25089 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25090 ATA_BMDMA_SHT(DRV_NAME),
25091 };
25092
25093 -static struct ata_port_operations scc_pata_ops = {
25094 +static const struct ata_port_operations scc_pata_ops = {
25095 .inherits = &ata_bmdma_port_ops,
25096
25097 .set_piomode = scc_set_piomode,
25098 diff -urNp linux-2.6.32.42/drivers/ata/pata_sch.c linux-2.6.32.42/drivers/ata/pata_sch.c
25099 --- linux-2.6.32.42/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25100 +++ linux-2.6.32.42/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25101 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25102 ATA_BMDMA_SHT(DRV_NAME),
25103 };
25104
25105 -static struct ata_port_operations sch_pata_ops = {
25106 +static const struct ata_port_operations sch_pata_ops = {
25107 .inherits = &ata_bmdma_port_ops,
25108 .cable_detect = ata_cable_unknown,
25109 .set_piomode = sch_set_piomode,
25110 diff -urNp linux-2.6.32.42/drivers/ata/pata_serverworks.c linux-2.6.32.42/drivers/ata/pata_serverworks.c
25111 --- linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25112 +++ linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25113 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25114 ATA_BMDMA_SHT(DRV_NAME),
25115 };
25116
25117 -static struct ata_port_operations serverworks_osb4_port_ops = {
25118 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25119 .inherits = &ata_bmdma_port_ops,
25120 .cable_detect = serverworks_cable_detect,
25121 .mode_filter = serverworks_osb4_filter,
25122 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25123 .set_dmamode = serverworks_set_dmamode,
25124 };
25125
25126 -static struct ata_port_operations serverworks_csb_port_ops = {
25127 +static const struct ata_port_operations serverworks_csb_port_ops = {
25128 .inherits = &serverworks_osb4_port_ops,
25129 .mode_filter = serverworks_csb_filter,
25130 };
25131 diff -urNp linux-2.6.32.42/drivers/ata/pata_sil680.c linux-2.6.32.42/drivers/ata/pata_sil680.c
25132 --- linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25133 +++ linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25134 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25135 ATA_BMDMA_SHT(DRV_NAME),
25136 };
25137
25138 -static struct ata_port_operations sil680_port_ops = {
25139 +static const struct ata_port_operations sil680_port_ops = {
25140 .inherits = &ata_bmdma32_port_ops,
25141 .cable_detect = sil680_cable_detect,
25142 .set_piomode = sil680_set_piomode,
25143 diff -urNp linux-2.6.32.42/drivers/ata/pata_sis.c linux-2.6.32.42/drivers/ata/pata_sis.c
25144 --- linux-2.6.32.42/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25145 +++ linux-2.6.32.42/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25146 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25147 ATA_BMDMA_SHT(DRV_NAME),
25148 };
25149
25150 -static struct ata_port_operations sis_133_for_sata_ops = {
25151 +static const struct ata_port_operations sis_133_for_sata_ops = {
25152 .inherits = &ata_bmdma_port_ops,
25153 .set_piomode = sis_133_set_piomode,
25154 .set_dmamode = sis_133_set_dmamode,
25155 .cable_detect = sis_133_cable_detect,
25156 };
25157
25158 -static struct ata_port_operations sis_base_ops = {
25159 +static const struct ata_port_operations sis_base_ops = {
25160 .inherits = &ata_bmdma_port_ops,
25161 .prereset = sis_pre_reset,
25162 };
25163
25164 -static struct ata_port_operations sis_133_ops = {
25165 +static const struct ata_port_operations sis_133_ops = {
25166 .inherits = &sis_base_ops,
25167 .set_piomode = sis_133_set_piomode,
25168 .set_dmamode = sis_133_set_dmamode,
25169 .cable_detect = sis_133_cable_detect,
25170 };
25171
25172 -static struct ata_port_operations sis_133_early_ops = {
25173 +static const struct ata_port_operations sis_133_early_ops = {
25174 .inherits = &sis_base_ops,
25175 .set_piomode = sis_100_set_piomode,
25176 .set_dmamode = sis_133_early_set_dmamode,
25177 .cable_detect = sis_66_cable_detect,
25178 };
25179
25180 -static struct ata_port_operations sis_100_ops = {
25181 +static const struct ata_port_operations sis_100_ops = {
25182 .inherits = &sis_base_ops,
25183 .set_piomode = sis_100_set_piomode,
25184 .set_dmamode = sis_100_set_dmamode,
25185 .cable_detect = sis_66_cable_detect,
25186 };
25187
25188 -static struct ata_port_operations sis_66_ops = {
25189 +static const struct ata_port_operations sis_66_ops = {
25190 .inherits = &sis_base_ops,
25191 .set_piomode = sis_old_set_piomode,
25192 .set_dmamode = sis_66_set_dmamode,
25193 .cable_detect = sis_66_cable_detect,
25194 };
25195
25196 -static struct ata_port_operations sis_old_ops = {
25197 +static const struct ata_port_operations sis_old_ops = {
25198 .inherits = &sis_base_ops,
25199 .set_piomode = sis_old_set_piomode,
25200 .set_dmamode = sis_old_set_dmamode,
25201 diff -urNp linux-2.6.32.42/drivers/ata/pata_sl82c105.c linux-2.6.32.42/drivers/ata/pata_sl82c105.c
25202 --- linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25203 +++ linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25204 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25205 ATA_BMDMA_SHT(DRV_NAME),
25206 };
25207
25208 -static struct ata_port_operations sl82c105_port_ops = {
25209 +static const struct ata_port_operations sl82c105_port_ops = {
25210 .inherits = &ata_bmdma_port_ops,
25211 .qc_defer = sl82c105_qc_defer,
25212 .bmdma_start = sl82c105_bmdma_start,
25213 diff -urNp linux-2.6.32.42/drivers/ata/pata_triflex.c linux-2.6.32.42/drivers/ata/pata_triflex.c
25214 --- linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25215 +++ linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25216 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25217 ATA_BMDMA_SHT(DRV_NAME),
25218 };
25219
25220 -static struct ata_port_operations triflex_port_ops = {
25221 +static const struct ata_port_operations triflex_port_ops = {
25222 .inherits = &ata_bmdma_port_ops,
25223 .bmdma_start = triflex_bmdma_start,
25224 .bmdma_stop = triflex_bmdma_stop,
25225 diff -urNp linux-2.6.32.42/drivers/ata/pata_via.c linux-2.6.32.42/drivers/ata/pata_via.c
25226 --- linux-2.6.32.42/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25227 +++ linux-2.6.32.42/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25228 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25229 ATA_BMDMA_SHT(DRV_NAME),
25230 };
25231
25232 -static struct ata_port_operations via_port_ops = {
25233 +static const struct ata_port_operations via_port_ops = {
25234 .inherits = &ata_bmdma_port_ops,
25235 .cable_detect = via_cable_detect,
25236 .set_piomode = via_set_piomode,
25237 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25238 .port_start = via_port_start,
25239 };
25240
25241 -static struct ata_port_operations via_port_ops_noirq = {
25242 +static const struct ata_port_operations via_port_ops_noirq = {
25243 .inherits = &via_port_ops,
25244 .sff_data_xfer = ata_sff_data_xfer_noirq,
25245 };
25246 diff -urNp linux-2.6.32.42/drivers/ata/pata_winbond.c linux-2.6.32.42/drivers/ata/pata_winbond.c
25247 --- linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25248 +++ linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25249 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25250 ATA_PIO_SHT(DRV_NAME),
25251 };
25252
25253 -static struct ata_port_operations winbond_port_ops = {
25254 +static const struct ata_port_operations winbond_port_ops = {
25255 .inherits = &ata_sff_port_ops,
25256 .sff_data_xfer = winbond_data_xfer,
25257 .cable_detect = ata_cable_40wire,
25258 diff -urNp linux-2.6.32.42/drivers/ata/pdc_adma.c linux-2.6.32.42/drivers/ata/pdc_adma.c
25259 --- linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25260 +++ linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25261 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25262 .dma_boundary = ADMA_DMA_BOUNDARY,
25263 };
25264
25265 -static struct ata_port_operations adma_ata_ops = {
25266 +static const struct ata_port_operations adma_ata_ops = {
25267 .inherits = &ata_sff_port_ops,
25268
25269 .lost_interrupt = ATA_OP_NULL,
25270 diff -urNp linux-2.6.32.42/drivers/ata/sata_fsl.c linux-2.6.32.42/drivers/ata/sata_fsl.c
25271 --- linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25272 +++ linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25273 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25274 .dma_boundary = ATA_DMA_BOUNDARY,
25275 };
25276
25277 -static struct ata_port_operations sata_fsl_ops = {
25278 +static const struct ata_port_operations sata_fsl_ops = {
25279 .inherits = &sata_pmp_port_ops,
25280
25281 .qc_defer = ata_std_qc_defer,
25282 diff -urNp linux-2.6.32.42/drivers/ata/sata_inic162x.c linux-2.6.32.42/drivers/ata/sata_inic162x.c
25283 --- linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25284 +++ linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25285 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25286 return 0;
25287 }
25288
25289 -static struct ata_port_operations inic_port_ops = {
25290 +static const struct ata_port_operations inic_port_ops = {
25291 .inherits = &sata_port_ops,
25292
25293 .check_atapi_dma = inic_check_atapi_dma,
25294 diff -urNp linux-2.6.32.42/drivers/ata/sata_mv.c linux-2.6.32.42/drivers/ata/sata_mv.c
25295 --- linux-2.6.32.42/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25296 +++ linux-2.6.32.42/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25297 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25298 .dma_boundary = MV_DMA_BOUNDARY,
25299 };
25300
25301 -static struct ata_port_operations mv5_ops = {
25302 +static const struct ata_port_operations mv5_ops = {
25303 .inherits = &ata_sff_port_ops,
25304
25305 .lost_interrupt = ATA_OP_NULL,
25306 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25307 .port_stop = mv_port_stop,
25308 };
25309
25310 -static struct ata_port_operations mv6_ops = {
25311 +static const struct ata_port_operations mv6_ops = {
25312 .inherits = &mv5_ops,
25313 .dev_config = mv6_dev_config,
25314 .scr_read = mv_scr_read,
25315 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25316 .bmdma_status = mv_bmdma_status,
25317 };
25318
25319 -static struct ata_port_operations mv_iie_ops = {
25320 +static const struct ata_port_operations mv_iie_ops = {
25321 .inherits = &mv6_ops,
25322 .dev_config = ATA_OP_NULL,
25323 .qc_prep = mv_qc_prep_iie,
25324 diff -urNp linux-2.6.32.42/drivers/ata/sata_nv.c linux-2.6.32.42/drivers/ata/sata_nv.c
25325 --- linux-2.6.32.42/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25326 +++ linux-2.6.32.42/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25327 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25328 * cases. Define nv_hardreset() which only kicks in for post-boot
25329 * probing and use it for all variants.
25330 */
25331 -static struct ata_port_operations nv_generic_ops = {
25332 +static const struct ata_port_operations nv_generic_ops = {
25333 .inherits = &ata_bmdma_port_ops,
25334 .lost_interrupt = ATA_OP_NULL,
25335 .scr_read = nv_scr_read,
25336 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25337 .hardreset = nv_hardreset,
25338 };
25339
25340 -static struct ata_port_operations nv_nf2_ops = {
25341 +static const struct ata_port_operations nv_nf2_ops = {
25342 .inherits = &nv_generic_ops,
25343 .freeze = nv_nf2_freeze,
25344 .thaw = nv_nf2_thaw,
25345 };
25346
25347 -static struct ata_port_operations nv_ck804_ops = {
25348 +static const struct ata_port_operations nv_ck804_ops = {
25349 .inherits = &nv_generic_ops,
25350 .freeze = nv_ck804_freeze,
25351 .thaw = nv_ck804_thaw,
25352 .host_stop = nv_ck804_host_stop,
25353 };
25354
25355 -static struct ata_port_operations nv_adma_ops = {
25356 +static const struct ata_port_operations nv_adma_ops = {
25357 .inherits = &nv_ck804_ops,
25358
25359 .check_atapi_dma = nv_adma_check_atapi_dma,
25360 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25361 .host_stop = nv_adma_host_stop,
25362 };
25363
25364 -static struct ata_port_operations nv_swncq_ops = {
25365 +static const struct ata_port_operations nv_swncq_ops = {
25366 .inherits = &nv_generic_ops,
25367
25368 .qc_defer = ata_std_qc_defer,
25369 diff -urNp linux-2.6.32.42/drivers/ata/sata_promise.c linux-2.6.32.42/drivers/ata/sata_promise.c
25370 --- linux-2.6.32.42/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25371 +++ linux-2.6.32.42/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25372 @@ -195,7 +195,7 @@ static const struct ata_port_operations
25373 .error_handler = pdc_error_handler,
25374 };
25375
25376 -static struct ata_port_operations pdc_sata_ops = {
25377 +static const struct ata_port_operations pdc_sata_ops = {
25378 .inherits = &pdc_common_ops,
25379 .cable_detect = pdc_sata_cable_detect,
25380 .freeze = pdc_sata_freeze,
25381 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25382
25383 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25384 and ->freeze/thaw that ignore the hotplug controls. */
25385 -static struct ata_port_operations pdc_old_sata_ops = {
25386 +static const struct ata_port_operations pdc_old_sata_ops = {
25387 .inherits = &pdc_sata_ops,
25388 .freeze = pdc_freeze,
25389 .thaw = pdc_thaw,
25390 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25391 };
25392
25393 -static struct ata_port_operations pdc_pata_ops = {
25394 +static const struct ata_port_operations pdc_pata_ops = {
25395 .inherits = &pdc_common_ops,
25396 .cable_detect = pdc_pata_cable_detect,
25397 .freeze = pdc_freeze,
25398 diff -urNp linux-2.6.32.42/drivers/ata/sata_qstor.c linux-2.6.32.42/drivers/ata/sata_qstor.c
25399 --- linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25400 +++ linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25401 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25402 .dma_boundary = QS_DMA_BOUNDARY,
25403 };
25404
25405 -static struct ata_port_operations qs_ata_ops = {
25406 +static const struct ata_port_operations qs_ata_ops = {
25407 .inherits = &ata_sff_port_ops,
25408
25409 .check_atapi_dma = qs_check_atapi_dma,
25410 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil24.c linux-2.6.32.42/drivers/ata/sata_sil24.c
25411 --- linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25412 +++ linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25413 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25414 .dma_boundary = ATA_DMA_BOUNDARY,
25415 };
25416
25417 -static struct ata_port_operations sil24_ops = {
25418 +static const struct ata_port_operations sil24_ops = {
25419 .inherits = &sata_pmp_port_ops,
25420
25421 .qc_defer = sil24_qc_defer,
25422 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil.c linux-2.6.32.42/drivers/ata/sata_sil.c
25423 --- linux-2.6.32.42/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25424 +++ linux-2.6.32.42/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25425 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25426 .sg_tablesize = ATA_MAX_PRD
25427 };
25428
25429 -static struct ata_port_operations sil_ops = {
25430 +static const struct ata_port_operations sil_ops = {
25431 .inherits = &ata_bmdma32_port_ops,
25432 .dev_config = sil_dev_config,
25433 .set_mode = sil_set_mode,
25434 diff -urNp linux-2.6.32.42/drivers/ata/sata_sis.c linux-2.6.32.42/drivers/ata/sata_sis.c
25435 --- linux-2.6.32.42/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25436 +++ linux-2.6.32.42/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25437 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25438 ATA_BMDMA_SHT(DRV_NAME),
25439 };
25440
25441 -static struct ata_port_operations sis_ops = {
25442 +static const struct ata_port_operations sis_ops = {
25443 .inherits = &ata_bmdma_port_ops,
25444 .scr_read = sis_scr_read,
25445 .scr_write = sis_scr_write,
25446 diff -urNp linux-2.6.32.42/drivers/ata/sata_svw.c linux-2.6.32.42/drivers/ata/sata_svw.c
25447 --- linux-2.6.32.42/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25448 +++ linux-2.6.32.42/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25449 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25450 };
25451
25452
25453 -static struct ata_port_operations k2_sata_ops = {
25454 +static const struct ata_port_operations k2_sata_ops = {
25455 .inherits = &ata_bmdma_port_ops,
25456 .sff_tf_load = k2_sata_tf_load,
25457 .sff_tf_read = k2_sata_tf_read,
25458 diff -urNp linux-2.6.32.42/drivers/ata/sata_sx4.c linux-2.6.32.42/drivers/ata/sata_sx4.c
25459 --- linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25460 +++ linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25461 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25462 };
25463
25464 /* TODO: inherit from base port_ops after converting to new EH */
25465 -static struct ata_port_operations pdc_20621_ops = {
25466 +static const struct ata_port_operations pdc_20621_ops = {
25467 .inherits = &ata_sff_port_ops,
25468
25469 .check_atapi_dma = pdc_check_atapi_dma,
25470 diff -urNp linux-2.6.32.42/drivers/ata/sata_uli.c linux-2.6.32.42/drivers/ata/sata_uli.c
25471 --- linux-2.6.32.42/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25472 +++ linux-2.6.32.42/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25473 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25474 ATA_BMDMA_SHT(DRV_NAME),
25475 };
25476
25477 -static struct ata_port_operations uli_ops = {
25478 +static const struct ata_port_operations uli_ops = {
25479 .inherits = &ata_bmdma_port_ops,
25480 .scr_read = uli_scr_read,
25481 .scr_write = uli_scr_write,
25482 diff -urNp linux-2.6.32.42/drivers/ata/sata_via.c linux-2.6.32.42/drivers/ata/sata_via.c
25483 --- linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25484 +++ linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25485 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25486 ATA_BMDMA_SHT(DRV_NAME),
25487 };
25488
25489 -static struct ata_port_operations svia_base_ops = {
25490 +static const struct ata_port_operations svia_base_ops = {
25491 .inherits = &ata_bmdma_port_ops,
25492 .sff_tf_load = svia_tf_load,
25493 };
25494
25495 -static struct ata_port_operations vt6420_sata_ops = {
25496 +static const struct ata_port_operations vt6420_sata_ops = {
25497 .inherits = &svia_base_ops,
25498 .freeze = svia_noop_freeze,
25499 .prereset = vt6420_prereset,
25500 .bmdma_start = vt6420_bmdma_start,
25501 };
25502
25503 -static struct ata_port_operations vt6421_pata_ops = {
25504 +static const struct ata_port_operations vt6421_pata_ops = {
25505 .inherits = &svia_base_ops,
25506 .cable_detect = vt6421_pata_cable_detect,
25507 .set_piomode = vt6421_set_pio_mode,
25508 .set_dmamode = vt6421_set_dma_mode,
25509 };
25510
25511 -static struct ata_port_operations vt6421_sata_ops = {
25512 +static const struct ata_port_operations vt6421_sata_ops = {
25513 .inherits = &svia_base_ops,
25514 .scr_read = svia_scr_read,
25515 .scr_write = svia_scr_write,
25516 };
25517
25518 -static struct ata_port_operations vt8251_ops = {
25519 +static const struct ata_port_operations vt8251_ops = {
25520 .inherits = &svia_base_ops,
25521 .hardreset = sata_std_hardreset,
25522 .scr_read = vt8251_scr_read,
25523 diff -urNp linux-2.6.32.42/drivers/ata/sata_vsc.c linux-2.6.32.42/drivers/ata/sata_vsc.c
25524 --- linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25525 +++ linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25526 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25527 };
25528
25529
25530 -static struct ata_port_operations vsc_sata_ops = {
25531 +static const struct ata_port_operations vsc_sata_ops = {
25532 .inherits = &ata_bmdma_port_ops,
25533 /* The IRQ handling is not quite standard SFF behaviour so we
25534 cannot use the default lost interrupt handler */
25535 diff -urNp linux-2.6.32.42/drivers/atm/adummy.c linux-2.6.32.42/drivers/atm/adummy.c
25536 --- linux-2.6.32.42/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25537 +++ linux-2.6.32.42/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25538 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25539 vcc->pop(vcc, skb);
25540 else
25541 dev_kfree_skb_any(skb);
25542 - atomic_inc(&vcc->stats->tx);
25543 + atomic_inc_unchecked(&vcc->stats->tx);
25544
25545 return 0;
25546 }
25547 diff -urNp linux-2.6.32.42/drivers/atm/ambassador.c linux-2.6.32.42/drivers/atm/ambassador.c
25548 --- linux-2.6.32.42/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25549 +++ linux-2.6.32.42/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25550 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25551 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25552
25553 // VC layer stats
25554 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25555 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25556
25557 // free the descriptor
25558 kfree (tx_descr);
25559 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25560 dump_skb ("<<<", vc, skb);
25561
25562 // VC layer stats
25563 - atomic_inc(&atm_vcc->stats->rx);
25564 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25565 __net_timestamp(skb);
25566 // end of our responsability
25567 atm_vcc->push (atm_vcc, skb);
25568 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25569 } else {
25570 PRINTK (KERN_INFO, "dropped over-size frame");
25571 // should we count this?
25572 - atomic_inc(&atm_vcc->stats->rx_drop);
25573 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25574 }
25575
25576 } else {
25577 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25578 }
25579
25580 if (check_area (skb->data, skb->len)) {
25581 - atomic_inc(&atm_vcc->stats->tx_err);
25582 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25583 return -ENOMEM; // ?
25584 }
25585
25586 diff -urNp linux-2.6.32.42/drivers/atm/atmtcp.c linux-2.6.32.42/drivers/atm/atmtcp.c
25587 --- linux-2.6.32.42/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25588 +++ linux-2.6.32.42/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25589 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25590 if (vcc->pop) vcc->pop(vcc,skb);
25591 else dev_kfree_skb(skb);
25592 if (dev_data) return 0;
25593 - atomic_inc(&vcc->stats->tx_err);
25594 + atomic_inc_unchecked(&vcc->stats->tx_err);
25595 return -ENOLINK;
25596 }
25597 size = skb->len+sizeof(struct atmtcp_hdr);
25598 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25599 if (!new_skb) {
25600 if (vcc->pop) vcc->pop(vcc,skb);
25601 else dev_kfree_skb(skb);
25602 - atomic_inc(&vcc->stats->tx_err);
25603 + atomic_inc_unchecked(&vcc->stats->tx_err);
25604 return -ENOBUFS;
25605 }
25606 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25607 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25608 if (vcc->pop) vcc->pop(vcc,skb);
25609 else dev_kfree_skb(skb);
25610 out_vcc->push(out_vcc,new_skb);
25611 - atomic_inc(&vcc->stats->tx);
25612 - atomic_inc(&out_vcc->stats->rx);
25613 + atomic_inc_unchecked(&vcc->stats->tx);
25614 + atomic_inc_unchecked(&out_vcc->stats->rx);
25615 return 0;
25616 }
25617
25618 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25619 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25620 read_unlock(&vcc_sklist_lock);
25621 if (!out_vcc) {
25622 - atomic_inc(&vcc->stats->tx_err);
25623 + atomic_inc_unchecked(&vcc->stats->tx_err);
25624 goto done;
25625 }
25626 skb_pull(skb,sizeof(struct atmtcp_hdr));
25627 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25628 __net_timestamp(new_skb);
25629 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25630 out_vcc->push(out_vcc,new_skb);
25631 - atomic_inc(&vcc->stats->tx);
25632 - atomic_inc(&out_vcc->stats->rx);
25633 + atomic_inc_unchecked(&vcc->stats->tx);
25634 + atomic_inc_unchecked(&out_vcc->stats->rx);
25635 done:
25636 if (vcc->pop) vcc->pop(vcc,skb);
25637 else dev_kfree_skb(skb);
25638 diff -urNp linux-2.6.32.42/drivers/atm/eni.c linux-2.6.32.42/drivers/atm/eni.c
25639 --- linux-2.6.32.42/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25640 +++ linux-2.6.32.42/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25641 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25642 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25643 vcc->dev->number);
25644 length = 0;
25645 - atomic_inc(&vcc->stats->rx_err);
25646 + atomic_inc_unchecked(&vcc->stats->rx_err);
25647 }
25648 else {
25649 length = ATM_CELL_SIZE-1; /* no HEC */
25650 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25651 size);
25652 }
25653 eff = length = 0;
25654 - atomic_inc(&vcc->stats->rx_err);
25655 + atomic_inc_unchecked(&vcc->stats->rx_err);
25656 }
25657 else {
25658 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25659 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25660 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25661 vcc->dev->number,vcc->vci,length,size << 2,descr);
25662 length = eff = 0;
25663 - atomic_inc(&vcc->stats->rx_err);
25664 + atomic_inc_unchecked(&vcc->stats->rx_err);
25665 }
25666 }
25667 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25668 @@ -770,7 +770,7 @@ rx_dequeued++;
25669 vcc->push(vcc,skb);
25670 pushed++;
25671 }
25672 - atomic_inc(&vcc->stats->rx);
25673 + atomic_inc_unchecked(&vcc->stats->rx);
25674 }
25675 wake_up(&eni_dev->rx_wait);
25676 }
25677 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25678 PCI_DMA_TODEVICE);
25679 if (vcc->pop) vcc->pop(vcc,skb);
25680 else dev_kfree_skb_irq(skb);
25681 - atomic_inc(&vcc->stats->tx);
25682 + atomic_inc_unchecked(&vcc->stats->tx);
25683 wake_up(&eni_dev->tx_wait);
25684 dma_complete++;
25685 }
25686 diff -urNp linux-2.6.32.42/drivers/atm/firestream.c linux-2.6.32.42/drivers/atm/firestream.c
25687 --- linux-2.6.32.42/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25688 +++ linux-2.6.32.42/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25689 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25690 }
25691 }
25692
25693 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25694 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25695
25696 fs_dprintk (FS_DEBUG_TXMEM, "i");
25697 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25698 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25699 #endif
25700 skb_put (skb, qe->p1 & 0xffff);
25701 ATM_SKB(skb)->vcc = atm_vcc;
25702 - atomic_inc(&atm_vcc->stats->rx);
25703 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25704 __net_timestamp(skb);
25705 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25706 atm_vcc->push (atm_vcc, skb);
25707 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25708 kfree (pe);
25709 }
25710 if (atm_vcc)
25711 - atomic_inc(&atm_vcc->stats->rx_drop);
25712 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25713 break;
25714 case 0x1f: /* Reassembly abort: no buffers. */
25715 /* Silently increment error counter. */
25716 if (atm_vcc)
25717 - atomic_inc(&atm_vcc->stats->rx_drop);
25718 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25719 break;
25720 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25721 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25722 diff -urNp linux-2.6.32.42/drivers/atm/fore200e.c linux-2.6.32.42/drivers/atm/fore200e.c
25723 --- linux-2.6.32.42/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25724 +++ linux-2.6.32.42/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25725 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25726 #endif
25727 /* check error condition */
25728 if (*entry->status & STATUS_ERROR)
25729 - atomic_inc(&vcc->stats->tx_err);
25730 + atomic_inc_unchecked(&vcc->stats->tx_err);
25731 else
25732 - atomic_inc(&vcc->stats->tx);
25733 + atomic_inc_unchecked(&vcc->stats->tx);
25734 }
25735 }
25736
25737 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25738 if (skb == NULL) {
25739 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25740
25741 - atomic_inc(&vcc->stats->rx_drop);
25742 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25743 return -ENOMEM;
25744 }
25745
25746 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25747
25748 dev_kfree_skb_any(skb);
25749
25750 - atomic_inc(&vcc->stats->rx_drop);
25751 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25752 return -ENOMEM;
25753 }
25754
25755 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25756
25757 vcc->push(vcc, skb);
25758 - atomic_inc(&vcc->stats->rx);
25759 + atomic_inc_unchecked(&vcc->stats->rx);
25760
25761 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25762
25763 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25764 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25765 fore200e->atm_dev->number,
25766 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25767 - atomic_inc(&vcc->stats->rx_err);
25768 + atomic_inc_unchecked(&vcc->stats->rx_err);
25769 }
25770 }
25771
25772 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25773 goto retry_here;
25774 }
25775
25776 - atomic_inc(&vcc->stats->tx_err);
25777 + atomic_inc_unchecked(&vcc->stats->tx_err);
25778
25779 fore200e->tx_sat++;
25780 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25781 diff -urNp linux-2.6.32.42/drivers/atm/he.c linux-2.6.32.42/drivers/atm/he.c
25782 --- linux-2.6.32.42/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25783 +++ linux-2.6.32.42/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25784 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25785
25786 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25787 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25788 - atomic_inc(&vcc->stats->rx_drop);
25789 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25790 goto return_host_buffers;
25791 }
25792
25793 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25794 RBRQ_LEN_ERR(he_dev->rbrq_head)
25795 ? "LEN_ERR" : "",
25796 vcc->vpi, vcc->vci);
25797 - atomic_inc(&vcc->stats->rx_err);
25798 + atomic_inc_unchecked(&vcc->stats->rx_err);
25799 goto return_host_buffers;
25800 }
25801
25802 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25803 vcc->push(vcc, skb);
25804 spin_lock(&he_dev->global_lock);
25805
25806 - atomic_inc(&vcc->stats->rx);
25807 + atomic_inc_unchecked(&vcc->stats->rx);
25808
25809 return_host_buffers:
25810 ++pdus_assembled;
25811 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25812 tpd->vcc->pop(tpd->vcc, tpd->skb);
25813 else
25814 dev_kfree_skb_any(tpd->skb);
25815 - atomic_inc(&tpd->vcc->stats->tx_err);
25816 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25817 }
25818 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25819 return;
25820 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25821 vcc->pop(vcc, skb);
25822 else
25823 dev_kfree_skb_any(skb);
25824 - atomic_inc(&vcc->stats->tx_err);
25825 + atomic_inc_unchecked(&vcc->stats->tx_err);
25826 return -EINVAL;
25827 }
25828
25829 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25830 vcc->pop(vcc, skb);
25831 else
25832 dev_kfree_skb_any(skb);
25833 - atomic_inc(&vcc->stats->tx_err);
25834 + atomic_inc_unchecked(&vcc->stats->tx_err);
25835 return -EINVAL;
25836 }
25837 #endif
25838 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25839 vcc->pop(vcc, skb);
25840 else
25841 dev_kfree_skb_any(skb);
25842 - atomic_inc(&vcc->stats->tx_err);
25843 + atomic_inc_unchecked(&vcc->stats->tx_err);
25844 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25845 return -ENOMEM;
25846 }
25847 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25848 vcc->pop(vcc, skb);
25849 else
25850 dev_kfree_skb_any(skb);
25851 - atomic_inc(&vcc->stats->tx_err);
25852 + atomic_inc_unchecked(&vcc->stats->tx_err);
25853 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25854 return -ENOMEM;
25855 }
25856 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25857 __enqueue_tpd(he_dev, tpd, cid);
25858 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25859
25860 - atomic_inc(&vcc->stats->tx);
25861 + atomic_inc_unchecked(&vcc->stats->tx);
25862
25863 return 0;
25864 }
25865 diff -urNp linux-2.6.32.42/drivers/atm/horizon.c linux-2.6.32.42/drivers/atm/horizon.c
25866 --- linux-2.6.32.42/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25867 +++ linux-2.6.32.42/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25868 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25869 {
25870 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25871 // VC layer stats
25872 - atomic_inc(&vcc->stats->rx);
25873 + atomic_inc_unchecked(&vcc->stats->rx);
25874 __net_timestamp(skb);
25875 // end of our responsability
25876 vcc->push (vcc, skb);
25877 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25878 dev->tx_iovec = NULL;
25879
25880 // VC layer stats
25881 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25882 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25883
25884 // free the skb
25885 hrz_kfree_skb (skb);
25886 diff -urNp linux-2.6.32.42/drivers/atm/idt77252.c linux-2.6.32.42/drivers/atm/idt77252.c
25887 --- linux-2.6.32.42/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25888 +++ linux-2.6.32.42/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25889 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25890 else
25891 dev_kfree_skb(skb);
25892
25893 - atomic_inc(&vcc->stats->tx);
25894 + atomic_inc_unchecked(&vcc->stats->tx);
25895 }
25896
25897 atomic_dec(&scq->used);
25898 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25899 if ((sb = dev_alloc_skb(64)) == NULL) {
25900 printk("%s: Can't allocate buffers for aal0.\n",
25901 card->name);
25902 - atomic_add(i, &vcc->stats->rx_drop);
25903 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25904 break;
25905 }
25906 if (!atm_charge(vcc, sb->truesize)) {
25907 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25908 card->name);
25909 - atomic_add(i - 1, &vcc->stats->rx_drop);
25910 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25911 dev_kfree_skb(sb);
25912 break;
25913 }
25914 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25915 ATM_SKB(sb)->vcc = vcc;
25916 __net_timestamp(sb);
25917 vcc->push(vcc, sb);
25918 - atomic_inc(&vcc->stats->rx);
25919 + atomic_inc_unchecked(&vcc->stats->rx);
25920
25921 cell += ATM_CELL_PAYLOAD;
25922 }
25923 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25924 "(CDC: %08x)\n",
25925 card->name, len, rpp->len, readl(SAR_REG_CDC));
25926 recycle_rx_pool_skb(card, rpp);
25927 - atomic_inc(&vcc->stats->rx_err);
25928 + atomic_inc_unchecked(&vcc->stats->rx_err);
25929 return;
25930 }
25931 if (stat & SAR_RSQE_CRC) {
25932 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25933 recycle_rx_pool_skb(card, rpp);
25934 - atomic_inc(&vcc->stats->rx_err);
25935 + atomic_inc_unchecked(&vcc->stats->rx_err);
25936 return;
25937 }
25938 if (skb_queue_len(&rpp->queue) > 1) {
25939 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25940 RXPRINTK("%s: Can't alloc RX skb.\n",
25941 card->name);
25942 recycle_rx_pool_skb(card, rpp);
25943 - atomic_inc(&vcc->stats->rx_err);
25944 + atomic_inc_unchecked(&vcc->stats->rx_err);
25945 return;
25946 }
25947 if (!atm_charge(vcc, skb->truesize)) {
25948 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25949 __net_timestamp(skb);
25950
25951 vcc->push(vcc, skb);
25952 - atomic_inc(&vcc->stats->rx);
25953 + atomic_inc_unchecked(&vcc->stats->rx);
25954
25955 return;
25956 }
25957 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25958 __net_timestamp(skb);
25959
25960 vcc->push(vcc, skb);
25961 - atomic_inc(&vcc->stats->rx);
25962 + atomic_inc_unchecked(&vcc->stats->rx);
25963
25964 if (skb->truesize > SAR_FB_SIZE_3)
25965 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25966 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25967 if (vcc->qos.aal != ATM_AAL0) {
25968 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25969 card->name, vpi, vci);
25970 - atomic_inc(&vcc->stats->rx_drop);
25971 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25972 goto drop;
25973 }
25974
25975 if ((sb = dev_alloc_skb(64)) == NULL) {
25976 printk("%s: Can't allocate buffers for AAL0.\n",
25977 card->name);
25978 - atomic_inc(&vcc->stats->rx_err);
25979 + atomic_inc_unchecked(&vcc->stats->rx_err);
25980 goto drop;
25981 }
25982
25983 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25984 ATM_SKB(sb)->vcc = vcc;
25985 __net_timestamp(sb);
25986 vcc->push(vcc, sb);
25987 - atomic_inc(&vcc->stats->rx);
25988 + atomic_inc_unchecked(&vcc->stats->rx);
25989
25990 drop:
25991 skb_pull(queue, 64);
25992 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25993
25994 if (vc == NULL) {
25995 printk("%s: NULL connection in send().\n", card->name);
25996 - atomic_inc(&vcc->stats->tx_err);
25997 + atomic_inc_unchecked(&vcc->stats->tx_err);
25998 dev_kfree_skb(skb);
25999 return -EINVAL;
26000 }
26001 if (!test_bit(VCF_TX, &vc->flags)) {
26002 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26003 - atomic_inc(&vcc->stats->tx_err);
26004 + atomic_inc_unchecked(&vcc->stats->tx_err);
26005 dev_kfree_skb(skb);
26006 return -EINVAL;
26007 }
26008 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26009 break;
26010 default:
26011 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26012 - atomic_inc(&vcc->stats->tx_err);
26013 + atomic_inc_unchecked(&vcc->stats->tx_err);
26014 dev_kfree_skb(skb);
26015 return -EINVAL;
26016 }
26017
26018 if (skb_shinfo(skb)->nr_frags != 0) {
26019 printk("%s: No scatter-gather yet.\n", card->name);
26020 - atomic_inc(&vcc->stats->tx_err);
26021 + atomic_inc_unchecked(&vcc->stats->tx_err);
26022 dev_kfree_skb(skb);
26023 return -EINVAL;
26024 }
26025 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26026
26027 err = queue_skb(card, vc, skb, oam);
26028 if (err) {
26029 - atomic_inc(&vcc->stats->tx_err);
26030 + atomic_inc_unchecked(&vcc->stats->tx_err);
26031 dev_kfree_skb(skb);
26032 return err;
26033 }
26034 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26035 skb = dev_alloc_skb(64);
26036 if (!skb) {
26037 printk("%s: Out of memory in send_oam().\n", card->name);
26038 - atomic_inc(&vcc->stats->tx_err);
26039 + atomic_inc_unchecked(&vcc->stats->tx_err);
26040 return -ENOMEM;
26041 }
26042 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26043 diff -urNp linux-2.6.32.42/drivers/atm/iphase.c linux-2.6.32.42/drivers/atm/iphase.c
26044 --- linux-2.6.32.42/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26045 +++ linux-2.6.32.42/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26046 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26047 status = (u_short) (buf_desc_ptr->desc_mode);
26048 if (status & (RX_CER | RX_PTE | RX_OFL))
26049 {
26050 - atomic_inc(&vcc->stats->rx_err);
26051 + atomic_inc_unchecked(&vcc->stats->rx_err);
26052 IF_ERR(printk("IA: bad packet, dropping it");)
26053 if (status & RX_CER) {
26054 IF_ERR(printk(" cause: packet CRC error\n");)
26055 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26056 len = dma_addr - buf_addr;
26057 if (len > iadev->rx_buf_sz) {
26058 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26059 - atomic_inc(&vcc->stats->rx_err);
26060 + atomic_inc_unchecked(&vcc->stats->rx_err);
26061 goto out_free_desc;
26062 }
26063
26064 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26065 ia_vcc = INPH_IA_VCC(vcc);
26066 if (ia_vcc == NULL)
26067 {
26068 - atomic_inc(&vcc->stats->rx_err);
26069 + atomic_inc_unchecked(&vcc->stats->rx_err);
26070 dev_kfree_skb_any(skb);
26071 atm_return(vcc, atm_guess_pdu2truesize(len));
26072 goto INCR_DLE;
26073 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26074 if ((length > iadev->rx_buf_sz) || (length >
26075 (skb->len - sizeof(struct cpcs_trailer))))
26076 {
26077 - atomic_inc(&vcc->stats->rx_err);
26078 + atomic_inc_unchecked(&vcc->stats->rx_err);
26079 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26080 length, skb->len);)
26081 dev_kfree_skb_any(skb);
26082 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26083
26084 IF_RX(printk("rx_dle_intr: skb push");)
26085 vcc->push(vcc,skb);
26086 - atomic_inc(&vcc->stats->rx);
26087 + atomic_inc_unchecked(&vcc->stats->rx);
26088 iadev->rx_pkt_cnt++;
26089 }
26090 INCR_DLE:
26091 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26092 {
26093 struct k_sonet_stats *stats;
26094 stats = &PRIV(_ia_dev[board])->sonet_stats;
26095 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26096 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26097 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26098 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26099 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26100 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26101 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26102 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26103 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26104 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26105 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26106 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26107 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26108 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26109 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26110 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26111 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26112 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26113 }
26114 ia_cmds.status = 0;
26115 break;
26116 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26117 if ((desc == 0) || (desc > iadev->num_tx_desc))
26118 {
26119 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26120 - atomic_inc(&vcc->stats->tx);
26121 + atomic_inc_unchecked(&vcc->stats->tx);
26122 if (vcc->pop)
26123 vcc->pop(vcc, skb);
26124 else
26125 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26126 ATM_DESC(skb) = vcc->vci;
26127 skb_queue_tail(&iadev->tx_dma_q, skb);
26128
26129 - atomic_inc(&vcc->stats->tx);
26130 + atomic_inc_unchecked(&vcc->stats->tx);
26131 iadev->tx_pkt_cnt++;
26132 /* Increment transaction counter */
26133 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26134
26135 #if 0
26136 /* add flow control logic */
26137 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26138 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26139 if (iavcc->vc_desc_cnt > 10) {
26140 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26141 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26142 diff -urNp linux-2.6.32.42/drivers/atm/lanai.c linux-2.6.32.42/drivers/atm/lanai.c
26143 --- linux-2.6.32.42/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26144 +++ linux-2.6.32.42/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26145 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26146 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26147 lanai_endtx(lanai, lvcc);
26148 lanai_free_skb(lvcc->tx.atmvcc, skb);
26149 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26150 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26151 }
26152
26153 /* Try to fill the buffer - don't call unless there is backlog */
26154 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26155 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26156 __net_timestamp(skb);
26157 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26158 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26159 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26160 out:
26161 lvcc->rx.buf.ptr = end;
26162 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26163 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26164 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26165 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26166 lanai->stats.service_rxnotaal5++;
26167 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26168 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26169 return 0;
26170 }
26171 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26172 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26173 int bytes;
26174 read_unlock(&vcc_sklist_lock);
26175 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26176 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26177 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26178 lvcc->stats.x.aal5.service_trash++;
26179 bytes = (SERVICE_GET_END(s) * 16) -
26180 (((unsigned long) lvcc->rx.buf.ptr) -
26181 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26182 }
26183 if (s & SERVICE_STREAM) {
26184 read_unlock(&vcc_sklist_lock);
26185 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26186 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26187 lvcc->stats.x.aal5.service_stream++;
26188 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26189 "PDU on VCI %d!\n", lanai->number, vci);
26190 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26191 return 0;
26192 }
26193 DPRINTK("got rx crc error on vci %d\n", vci);
26194 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26195 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26196 lvcc->stats.x.aal5.service_rxcrc++;
26197 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26198 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26199 diff -urNp linux-2.6.32.42/drivers/atm/nicstar.c linux-2.6.32.42/drivers/atm/nicstar.c
26200 --- linux-2.6.32.42/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26201 +++ linux-2.6.32.42/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26202 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26203 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26204 {
26205 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26206 - atomic_inc(&vcc->stats->tx_err);
26207 + atomic_inc_unchecked(&vcc->stats->tx_err);
26208 dev_kfree_skb_any(skb);
26209 return -EINVAL;
26210 }
26211 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26212 if (!vc->tx)
26213 {
26214 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26215 - atomic_inc(&vcc->stats->tx_err);
26216 + atomic_inc_unchecked(&vcc->stats->tx_err);
26217 dev_kfree_skb_any(skb);
26218 return -EINVAL;
26219 }
26220 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26221 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26222 {
26223 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26224 - atomic_inc(&vcc->stats->tx_err);
26225 + atomic_inc_unchecked(&vcc->stats->tx_err);
26226 dev_kfree_skb_any(skb);
26227 return -EINVAL;
26228 }
26229 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26230 if (skb_shinfo(skb)->nr_frags != 0)
26231 {
26232 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26233 - atomic_inc(&vcc->stats->tx_err);
26234 + atomic_inc_unchecked(&vcc->stats->tx_err);
26235 dev_kfree_skb_any(skb);
26236 return -EINVAL;
26237 }
26238 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26239
26240 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26241 {
26242 - atomic_inc(&vcc->stats->tx_err);
26243 + atomic_inc_unchecked(&vcc->stats->tx_err);
26244 dev_kfree_skb_any(skb);
26245 return -EIO;
26246 }
26247 - atomic_inc(&vcc->stats->tx);
26248 + atomic_inc_unchecked(&vcc->stats->tx);
26249
26250 return 0;
26251 }
26252 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26253 {
26254 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26255 card->index);
26256 - atomic_add(i,&vcc->stats->rx_drop);
26257 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26258 break;
26259 }
26260 if (!atm_charge(vcc, sb->truesize))
26261 {
26262 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26263 card->index);
26264 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26265 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26266 dev_kfree_skb_any(sb);
26267 break;
26268 }
26269 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26270 ATM_SKB(sb)->vcc = vcc;
26271 __net_timestamp(sb);
26272 vcc->push(vcc, sb);
26273 - atomic_inc(&vcc->stats->rx);
26274 + atomic_inc_unchecked(&vcc->stats->rx);
26275 cell += ATM_CELL_PAYLOAD;
26276 }
26277
26278 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26279 if (iovb == NULL)
26280 {
26281 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26282 - atomic_inc(&vcc->stats->rx_drop);
26283 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26284 recycle_rx_buf(card, skb);
26285 return;
26286 }
26287 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26288 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26289 {
26290 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26291 - atomic_inc(&vcc->stats->rx_err);
26292 + atomic_inc_unchecked(&vcc->stats->rx_err);
26293 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26294 NS_SKB(iovb)->iovcnt = 0;
26295 iovb->len = 0;
26296 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26297 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26298 card->index);
26299 which_list(card, skb);
26300 - atomic_inc(&vcc->stats->rx_err);
26301 + atomic_inc_unchecked(&vcc->stats->rx_err);
26302 recycle_rx_buf(card, skb);
26303 vc->rx_iov = NULL;
26304 recycle_iov_buf(card, iovb);
26305 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26306 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26307 card->index);
26308 which_list(card, skb);
26309 - atomic_inc(&vcc->stats->rx_err);
26310 + atomic_inc_unchecked(&vcc->stats->rx_err);
26311 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26312 NS_SKB(iovb)->iovcnt);
26313 vc->rx_iov = NULL;
26314 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26315 printk(" - PDU size mismatch.\n");
26316 else
26317 printk(".\n");
26318 - atomic_inc(&vcc->stats->rx_err);
26319 + atomic_inc_unchecked(&vcc->stats->rx_err);
26320 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26321 NS_SKB(iovb)->iovcnt);
26322 vc->rx_iov = NULL;
26323 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26324 if (!atm_charge(vcc, skb->truesize))
26325 {
26326 push_rxbufs(card, skb);
26327 - atomic_inc(&vcc->stats->rx_drop);
26328 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26329 }
26330 else
26331 {
26332 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26333 ATM_SKB(skb)->vcc = vcc;
26334 __net_timestamp(skb);
26335 vcc->push(vcc, skb);
26336 - atomic_inc(&vcc->stats->rx);
26337 + atomic_inc_unchecked(&vcc->stats->rx);
26338 }
26339 }
26340 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26341 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26342 if (!atm_charge(vcc, sb->truesize))
26343 {
26344 push_rxbufs(card, sb);
26345 - atomic_inc(&vcc->stats->rx_drop);
26346 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26347 }
26348 else
26349 {
26350 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26351 ATM_SKB(sb)->vcc = vcc;
26352 __net_timestamp(sb);
26353 vcc->push(vcc, sb);
26354 - atomic_inc(&vcc->stats->rx);
26355 + atomic_inc_unchecked(&vcc->stats->rx);
26356 }
26357
26358 push_rxbufs(card, skb);
26359 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26360 if (!atm_charge(vcc, skb->truesize))
26361 {
26362 push_rxbufs(card, skb);
26363 - atomic_inc(&vcc->stats->rx_drop);
26364 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26365 }
26366 else
26367 {
26368 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26369 ATM_SKB(skb)->vcc = vcc;
26370 __net_timestamp(skb);
26371 vcc->push(vcc, skb);
26372 - atomic_inc(&vcc->stats->rx);
26373 + atomic_inc_unchecked(&vcc->stats->rx);
26374 }
26375
26376 push_rxbufs(card, sb);
26377 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26378 if (hb == NULL)
26379 {
26380 printk("nicstar%d: Out of huge buffers.\n", card->index);
26381 - atomic_inc(&vcc->stats->rx_drop);
26382 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26383 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26384 NS_SKB(iovb)->iovcnt);
26385 vc->rx_iov = NULL;
26386 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26387 }
26388 else
26389 dev_kfree_skb_any(hb);
26390 - atomic_inc(&vcc->stats->rx_drop);
26391 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26392 }
26393 else
26394 {
26395 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26396 #endif /* NS_USE_DESTRUCTORS */
26397 __net_timestamp(hb);
26398 vcc->push(vcc, hb);
26399 - atomic_inc(&vcc->stats->rx);
26400 + atomic_inc_unchecked(&vcc->stats->rx);
26401 }
26402 }
26403
26404 diff -urNp linux-2.6.32.42/drivers/atm/solos-pci.c linux-2.6.32.42/drivers/atm/solos-pci.c
26405 --- linux-2.6.32.42/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26406 +++ linux-2.6.32.42/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26407 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26408 }
26409 atm_charge(vcc, skb->truesize);
26410 vcc->push(vcc, skb);
26411 - atomic_inc(&vcc->stats->rx);
26412 + atomic_inc_unchecked(&vcc->stats->rx);
26413 break;
26414
26415 case PKT_STATUS:
26416 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26417 char msg[500];
26418 char item[10];
26419
26420 + pax_track_stack();
26421 +
26422 len = buf->len;
26423 for (i = 0; i < len; i++){
26424 if(i % 8 == 0)
26425 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26426 vcc = SKB_CB(oldskb)->vcc;
26427
26428 if (vcc) {
26429 - atomic_inc(&vcc->stats->tx);
26430 + atomic_inc_unchecked(&vcc->stats->tx);
26431 solos_pop(vcc, oldskb);
26432 } else
26433 dev_kfree_skb_irq(oldskb);
26434 diff -urNp linux-2.6.32.42/drivers/atm/suni.c linux-2.6.32.42/drivers/atm/suni.c
26435 --- linux-2.6.32.42/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26436 +++ linux-2.6.32.42/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26437 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26438
26439
26440 #define ADD_LIMITED(s,v) \
26441 - atomic_add((v),&stats->s); \
26442 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26443 + atomic_add_unchecked((v),&stats->s); \
26444 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26445
26446
26447 static void suni_hz(unsigned long from_timer)
26448 diff -urNp linux-2.6.32.42/drivers/atm/uPD98402.c linux-2.6.32.42/drivers/atm/uPD98402.c
26449 --- linux-2.6.32.42/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26450 +++ linux-2.6.32.42/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26451 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26452 struct sonet_stats tmp;
26453 int error = 0;
26454
26455 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26456 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26457 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26458 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26459 if (zero && !error) {
26460 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26461
26462
26463 #define ADD_LIMITED(s,v) \
26464 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26465 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26466 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26467 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26468 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26469 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26470
26471
26472 static void stat_event(struct atm_dev *dev)
26473 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26474 if (reason & uPD98402_INT_PFM) stat_event(dev);
26475 if (reason & uPD98402_INT_PCO) {
26476 (void) GET(PCOCR); /* clear interrupt cause */
26477 - atomic_add(GET(HECCT),
26478 + atomic_add_unchecked(GET(HECCT),
26479 &PRIV(dev)->sonet_stats.uncorr_hcs);
26480 }
26481 if ((reason & uPD98402_INT_RFO) &&
26482 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26483 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26484 uPD98402_INT_LOS),PIMR); /* enable them */
26485 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26486 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26487 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26488 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26489 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26490 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26491 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26492 return 0;
26493 }
26494
26495 diff -urNp linux-2.6.32.42/drivers/atm/zatm.c linux-2.6.32.42/drivers/atm/zatm.c
26496 --- linux-2.6.32.42/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26497 +++ linux-2.6.32.42/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26498 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26499 }
26500 if (!size) {
26501 dev_kfree_skb_irq(skb);
26502 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26503 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26504 continue;
26505 }
26506 if (!atm_charge(vcc,skb->truesize)) {
26507 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26508 skb->len = size;
26509 ATM_SKB(skb)->vcc = vcc;
26510 vcc->push(vcc,skb);
26511 - atomic_inc(&vcc->stats->rx);
26512 + atomic_inc_unchecked(&vcc->stats->rx);
26513 }
26514 zout(pos & 0xffff,MTA(mbx));
26515 #if 0 /* probably a stupid idea */
26516 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26517 skb_queue_head(&zatm_vcc->backlog,skb);
26518 break;
26519 }
26520 - atomic_inc(&vcc->stats->tx);
26521 + atomic_inc_unchecked(&vcc->stats->tx);
26522 wake_up(&zatm_vcc->tx_wait);
26523 }
26524
26525 diff -urNp linux-2.6.32.42/drivers/base/bus.c linux-2.6.32.42/drivers/base/bus.c
26526 --- linux-2.6.32.42/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26527 +++ linux-2.6.32.42/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26528 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26529 return ret;
26530 }
26531
26532 -static struct sysfs_ops driver_sysfs_ops = {
26533 +static const struct sysfs_ops driver_sysfs_ops = {
26534 .show = drv_attr_show,
26535 .store = drv_attr_store,
26536 };
26537 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26538 return ret;
26539 }
26540
26541 -static struct sysfs_ops bus_sysfs_ops = {
26542 +static const struct sysfs_ops bus_sysfs_ops = {
26543 .show = bus_attr_show,
26544 .store = bus_attr_store,
26545 };
26546 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26547 return 0;
26548 }
26549
26550 -static struct kset_uevent_ops bus_uevent_ops = {
26551 +static const struct kset_uevent_ops bus_uevent_ops = {
26552 .filter = bus_uevent_filter,
26553 };
26554
26555 diff -urNp linux-2.6.32.42/drivers/base/class.c linux-2.6.32.42/drivers/base/class.c
26556 --- linux-2.6.32.42/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26557 +++ linux-2.6.32.42/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26558 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26559 kfree(cp);
26560 }
26561
26562 -static struct sysfs_ops class_sysfs_ops = {
26563 +static const struct sysfs_ops class_sysfs_ops = {
26564 .show = class_attr_show,
26565 .store = class_attr_store,
26566 };
26567 diff -urNp linux-2.6.32.42/drivers/base/core.c linux-2.6.32.42/drivers/base/core.c
26568 --- linux-2.6.32.42/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26569 +++ linux-2.6.32.42/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26570 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26571 return ret;
26572 }
26573
26574 -static struct sysfs_ops dev_sysfs_ops = {
26575 +static const struct sysfs_ops dev_sysfs_ops = {
26576 .show = dev_attr_show,
26577 .store = dev_attr_store,
26578 };
26579 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26580 return retval;
26581 }
26582
26583 -static struct kset_uevent_ops device_uevent_ops = {
26584 +static const struct kset_uevent_ops device_uevent_ops = {
26585 .filter = dev_uevent_filter,
26586 .name = dev_uevent_name,
26587 .uevent = dev_uevent,
26588 diff -urNp linux-2.6.32.42/drivers/base/memory.c linux-2.6.32.42/drivers/base/memory.c
26589 --- linux-2.6.32.42/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26590 +++ linux-2.6.32.42/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26591 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26592 return retval;
26593 }
26594
26595 -static struct kset_uevent_ops memory_uevent_ops = {
26596 +static const struct kset_uevent_ops memory_uevent_ops = {
26597 .name = memory_uevent_name,
26598 .uevent = memory_uevent,
26599 };
26600 diff -urNp linux-2.6.32.42/drivers/base/sys.c linux-2.6.32.42/drivers/base/sys.c
26601 --- linux-2.6.32.42/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26602 +++ linux-2.6.32.42/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26603 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26604 return -EIO;
26605 }
26606
26607 -static struct sysfs_ops sysfs_ops = {
26608 +static const struct sysfs_ops sysfs_ops = {
26609 .show = sysdev_show,
26610 .store = sysdev_store,
26611 };
26612 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26613 return -EIO;
26614 }
26615
26616 -static struct sysfs_ops sysfs_class_ops = {
26617 +static const struct sysfs_ops sysfs_class_ops = {
26618 .show = sysdev_class_show,
26619 .store = sysdev_class_store,
26620 };
26621 diff -urNp linux-2.6.32.42/drivers/block/cciss.c linux-2.6.32.42/drivers/block/cciss.c
26622 --- linux-2.6.32.42/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26623 +++ linux-2.6.32.42/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26624 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26625 int err;
26626 u32 cp;
26627
26628 + memset(&arg64, 0, sizeof(arg64));
26629 +
26630 err = 0;
26631 err |=
26632 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26633 diff -urNp linux-2.6.32.42/drivers/block/cpqarray.c linux-2.6.32.42/drivers/block/cpqarray.c
26634 --- linux-2.6.32.42/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26635 +++ linux-2.6.32.42/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26636 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26637 struct scatterlist tmp_sg[SG_MAX];
26638 int i, dir, seg;
26639
26640 + pax_track_stack();
26641 +
26642 if (blk_queue_plugged(q))
26643 goto startio;
26644
26645 diff -urNp linux-2.6.32.42/drivers/block/DAC960.c linux-2.6.32.42/drivers/block/DAC960.c
26646 --- linux-2.6.32.42/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26647 +++ linux-2.6.32.42/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26648 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26649 unsigned long flags;
26650 int Channel, TargetID;
26651
26652 + pax_track_stack();
26653 +
26654 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26655 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26656 sizeof(DAC960_SCSI_Inquiry_T) +
26657 diff -urNp linux-2.6.32.42/drivers/block/nbd.c linux-2.6.32.42/drivers/block/nbd.c
26658 --- linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
26659 +++ linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
26660 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26661 struct kvec iov;
26662 sigset_t blocked, oldset;
26663
26664 + pax_track_stack();
26665 +
26666 if (unlikely(!sock)) {
26667 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26668 lo->disk->disk_name, (send ? "send" : "recv"));
26669 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26670 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26671 unsigned int cmd, unsigned long arg)
26672 {
26673 + pax_track_stack();
26674 +
26675 switch (cmd) {
26676 case NBD_DISCONNECT: {
26677 struct request sreq;
26678 diff -urNp linux-2.6.32.42/drivers/block/pktcdvd.c linux-2.6.32.42/drivers/block/pktcdvd.c
26679 --- linux-2.6.32.42/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26680 +++ linux-2.6.32.42/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26681 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26682 return len;
26683 }
26684
26685 -static struct sysfs_ops kobj_pkt_ops = {
26686 +static const struct sysfs_ops kobj_pkt_ops = {
26687 .show = kobj_pkt_show,
26688 .store = kobj_pkt_store
26689 };
26690 diff -urNp linux-2.6.32.42/drivers/char/agp/frontend.c linux-2.6.32.42/drivers/char/agp/frontend.c
26691 --- linux-2.6.32.42/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26692 +++ linux-2.6.32.42/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26693 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26694 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26695 return -EFAULT;
26696
26697 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26698 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26699 return -EFAULT;
26700
26701 client = agp_find_client_by_pid(reserve.pid);
26702 diff -urNp linux-2.6.32.42/drivers/char/briq_panel.c linux-2.6.32.42/drivers/char/briq_panel.c
26703 --- linux-2.6.32.42/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26704 +++ linux-2.6.32.42/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26705 @@ -10,6 +10,7 @@
26706 #include <linux/types.h>
26707 #include <linux/errno.h>
26708 #include <linux/tty.h>
26709 +#include <linux/mutex.h>
26710 #include <linux/timer.h>
26711 #include <linux/kernel.h>
26712 #include <linux/wait.h>
26713 @@ -36,6 +37,7 @@ static int vfd_is_open;
26714 static unsigned char vfd[40];
26715 static int vfd_cursor;
26716 static unsigned char ledpb, led;
26717 +static DEFINE_MUTEX(vfd_mutex);
26718
26719 static void update_vfd(void)
26720 {
26721 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26722 if (!vfd_is_open)
26723 return -EBUSY;
26724
26725 + mutex_lock(&vfd_mutex);
26726 for (;;) {
26727 char c;
26728 if (!indx)
26729 break;
26730 - if (get_user(c, buf))
26731 + if (get_user(c, buf)) {
26732 + mutex_unlock(&vfd_mutex);
26733 return -EFAULT;
26734 + }
26735 if (esc) {
26736 set_led(c);
26737 esc = 0;
26738 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26739 buf++;
26740 }
26741 update_vfd();
26742 + mutex_unlock(&vfd_mutex);
26743
26744 return len;
26745 }
26746 diff -urNp linux-2.6.32.42/drivers/char/genrtc.c linux-2.6.32.42/drivers/char/genrtc.c
26747 --- linux-2.6.32.42/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26748 +++ linux-2.6.32.42/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26749 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26750 switch (cmd) {
26751
26752 case RTC_PLL_GET:
26753 + memset(&pll, 0, sizeof(pll));
26754 if (get_rtc_pll(&pll))
26755 return -EINVAL;
26756 else
26757 diff -urNp linux-2.6.32.42/drivers/char/hpet.c linux-2.6.32.42/drivers/char/hpet.c
26758 --- linux-2.6.32.42/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26759 +++ linux-2.6.32.42/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26760 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26761 return 0;
26762 }
26763
26764 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26765 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26766
26767 static int
26768 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26769 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26770 }
26771
26772 static int
26773 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26774 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26775 {
26776 struct hpet_timer __iomem *timer;
26777 struct hpet __iomem *hpet;
26778 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26779 {
26780 struct hpet_info info;
26781
26782 + memset(&info, 0, sizeof(info));
26783 +
26784 if (devp->hd_ireqfreq)
26785 info.hi_ireqfreq =
26786 hpet_time_div(hpetp, devp->hd_ireqfreq);
26787 - else
26788 - info.hi_ireqfreq = 0;
26789 info.hi_flags =
26790 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26791 info.hi_hpet = hpetp->hp_which;
26792 diff -urNp linux-2.6.32.42/drivers/char/hvc_beat.c linux-2.6.32.42/drivers/char/hvc_beat.c
26793 --- linux-2.6.32.42/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26794 +++ linux-2.6.32.42/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26795 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26796 return cnt;
26797 }
26798
26799 -static struct hv_ops hvc_beat_get_put_ops = {
26800 +static const struct hv_ops hvc_beat_get_put_ops = {
26801 .get_chars = hvc_beat_get_chars,
26802 .put_chars = hvc_beat_put_chars,
26803 };
26804 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.c linux-2.6.32.42/drivers/char/hvc_console.c
26805 --- linux-2.6.32.42/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26806 +++ linux-2.6.32.42/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26807 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26808 * console interfaces but can still be used as a tty device. This has to be
26809 * static because kmalloc will not work during early console init.
26810 */
26811 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26812 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26813 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26814 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26815
26816 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26817 * vty adapters do NOT get an hvc_instantiate() callback since they
26818 * appear after early console init.
26819 */
26820 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26821 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26822 {
26823 struct hvc_struct *hp;
26824
26825 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26826 };
26827
26828 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26829 - struct hv_ops *ops, int outbuf_size)
26830 + const struct hv_ops *ops, int outbuf_size)
26831 {
26832 struct hvc_struct *hp;
26833 int i;
26834 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.h linux-2.6.32.42/drivers/char/hvc_console.h
26835 --- linux-2.6.32.42/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26836 +++ linux-2.6.32.42/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26837 @@ -55,7 +55,7 @@ struct hvc_struct {
26838 int outbuf_size;
26839 int n_outbuf;
26840 uint32_t vtermno;
26841 - struct hv_ops *ops;
26842 + const struct hv_ops *ops;
26843 int irq_requested;
26844 int data;
26845 struct winsize ws;
26846 @@ -76,11 +76,11 @@ struct hv_ops {
26847 };
26848
26849 /* Register a vterm and a slot index for use as a console (console_init) */
26850 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26851 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26852
26853 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26854 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26855 - struct hv_ops *ops, int outbuf_size);
26856 + const struct hv_ops *ops, int outbuf_size);
26857 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26858 extern int hvc_remove(struct hvc_struct *hp);
26859
26860 diff -urNp linux-2.6.32.42/drivers/char/hvc_iseries.c linux-2.6.32.42/drivers/char/hvc_iseries.c
26861 --- linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26862 +++ linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26863 @@ -197,7 +197,7 @@ done:
26864 return sent;
26865 }
26866
26867 -static struct hv_ops hvc_get_put_ops = {
26868 +static const struct hv_ops hvc_get_put_ops = {
26869 .get_chars = get_chars,
26870 .put_chars = put_chars,
26871 .notifier_add = notifier_add_irq,
26872 diff -urNp linux-2.6.32.42/drivers/char/hvc_iucv.c linux-2.6.32.42/drivers/char/hvc_iucv.c
26873 --- linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26874 +++ linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26875 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26876
26877
26878 /* HVC operations */
26879 -static struct hv_ops hvc_iucv_ops = {
26880 +static const struct hv_ops hvc_iucv_ops = {
26881 .get_chars = hvc_iucv_get_chars,
26882 .put_chars = hvc_iucv_put_chars,
26883 .notifier_add = hvc_iucv_notifier_add,
26884 diff -urNp linux-2.6.32.42/drivers/char/hvc_rtas.c linux-2.6.32.42/drivers/char/hvc_rtas.c
26885 --- linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26886 +++ linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26887 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26888 return i;
26889 }
26890
26891 -static struct hv_ops hvc_rtas_get_put_ops = {
26892 +static const struct hv_ops hvc_rtas_get_put_ops = {
26893 .get_chars = hvc_rtas_read_console,
26894 .put_chars = hvc_rtas_write_console,
26895 };
26896 diff -urNp linux-2.6.32.42/drivers/char/hvcs.c linux-2.6.32.42/drivers/char/hvcs.c
26897 --- linux-2.6.32.42/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26898 +++ linux-2.6.32.42/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26899 @@ -82,6 +82,7 @@
26900 #include <asm/hvcserver.h>
26901 #include <asm/uaccess.h>
26902 #include <asm/vio.h>
26903 +#include <asm/local.h>
26904
26905 /*
26906 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26907 @@ -269,7 +270,7 @@ struct hvcs_struct {
26908 unsigned int index;
26909
26910 struct tty_struct *tty;
26911 - int open_count;
26912 + local_t open_count;
26913
26914 /*
26915 * Used to tell the driver kernel_thread what operations need to take
26916 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26917
26918 spin_lock_irqsave(&hvcsd->lock, flags);
26919
26920 - if (hvcsd->open_count > 0) {
26921 + if (local_read(&hvcsd->open_count) > 0) {
26922 spin_unlock_irqrestore(&hvcsd->lock, flags);
26923 printk(KERN_INFO "HVCS: vterm state unchanged. "
26924 "The hvcs device node is still in use.\n");
26925 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26926 if ((retval = hvcs_partner_connect(hvcsd)))
26927 goto error_release;
26928
26929 - hvcsd->open_count = 1;
26930 + local_set(&hvcsd->open_count, 1);
26931 hvcsd->tty = tty;
26932 tty->driver_data = hvcsd;
26933
26934 @@ -1169,7 +1170,7 @@ fast_open:
26935
26936 spin_lock_irqsave(&hvcsd->lock, flags);
26937 kref_get(&hvcsd->kref);
26938 - hvcsd->open_count++;
26939 + local_inc(&hvcsd->open_count);
26940 hvcsd->todo_mask |= HVCS_SCHED_READ;
26941 spin_unlock_irqrestore(&hvcsd->lock, flags);
26942
26943 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26944 hvcsd = tty->driver_data;
26945
26946 spin_lock_irqsave(&hvcsd->lock, flags);
26947 - if (--hvcsd->open_count == 0) {
26948 + if (local_dec_and_test(&hvcsd->open_count)) {
26949
26950 vio_disable_interrupts(hvcsd->vdev);
26951
26952 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26953 free_irq(irq, hvcsd);
26954 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26955 return;
26956 - } else if (hvcsd->open_count < 0) {
26957 + } else if (local_read(&hvcsd->open_count) < 0) {
26958 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26959 " is missmanaged.\n",
26960 - hvcsd->vdev->unit_address, hvcsd->open_count);
26961 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26962 }
26963
26964 spin_unlock_irqrestore(&hvcsd->lock, flags);
26965 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26966
26967 spin_lock_irqsave(&hvcsd->lock, flags);
26968 /* Preserve this so that we know how many kref refs to put */
26969 - temp_open_count = hvcsd->open_count;
26970 + temp_open_count = local_read(&hvcsd->open_count);
26971
26972 /*
26973 * Don't kref put inside the spinlock because the destruction
26974 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26975 hvcsd->tty->driver_data = NULL;
26976 hvcsd->tty = NULL;
26977
26978 - hvcsd->open_count = 0;
26979 + local_set(&hvcsd->open_count, 0);
26980
26981 /* This will drop any buffered data on the floor which is OK in a hangup
26982 * scenario. */
26983 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26984 * the middle of a write operation? This is a crummy place to do this
26985 * but we want to keep it all in the spinlock.
26986 */
26987 - if (hvcsd->open_count <= 0) {
26988 + if (local_read(&hvcsd->open_count) <= 0) {
26989 spin_unlock_irqrestore(&hvcsd->lock, flags);
26990 return -ENODEV;
26991 }
26992 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26993 {
26994 struct hvcs_struct *hvcsd = tty->driver_data;
26995
26996 - if (!hvcsd || hvcsd->open_count <= 0)
26997 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26998 return 0;
26999
27000 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27001 diff -urNp linux-2.6.32.42/drivers/char/hvc_udbg.c linux-2.6.32.42/drivers/char/hvc_udbg.c
27002 --- linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27003 +++ linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27004 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27005 return i;
27006 }
27007
27008 -static struct hv_ops hvc_udbg_ops = {
27009 +static const struct hv_ops hvc_udbg_ops = {
27010 .get_chars = hvc_udbg_get,
27011 .put_chars = hvc_udbg_put,
27012 };
27013 diff -urNp linux-2.6.32.42/drivers/char/hvc_vio.c linux-2.6.32.42/drivers/char/hvc_vio.c
27014 --- linux-2.6.32.42/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27015 +++ linux-2.6.32.42/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27016 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27017 return got;
27018 }
27019
27020 -static struct hv_ops hvc_get_put_ops = {
27021 +static const struct hv_ops hvc_get_put_ops = {
27022 .get_chars = filtered_get_chars,
27023 .put_chars = hvc_put_chars,
27024 .notifier_add = notifier_add_irq,
27025 diff -urNp linux-2.6.32.42/drivers/char/hvc_xen.c linux-2.6.32.42/drivers/char/hvc_xen.c
27026 --- linux-2.6.32.42/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27027 +++ linux-2.6.32.42/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27028 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27029 return recv;
27030 }
27031
27032 -static struct hv_ops hvc_ops = {
27033 +static const struct hv_ops hvc_ops = {
27034 .get_chars = read_console,
27035 .put_chars = write_console,
27036 .notifier_add = notifier_add_irq,
27037 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c
27038 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27039 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27040 @@ -414,7 +414,7 @@ struct ipmi_smi {
27041 struct proc_dir_entry *proc_dir;
27042 char proc_dir_name[10];
27043
27044 - atomic_t stats[IPMI_NUM_STATS];
27045 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27046
27047 /*
27048 * run_to_completion duplicate of smb_info, smi_info
27049 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27050
27051
27052 #define ipmi_inc_stat(intf, stat) \
27053 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27054 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27055 #define ipmi_get_stat(intf, stat) \
27056 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27057 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27058
27059 static int is_lan_addr(struct ipmi_addr *addr)
27060 {
27061 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27062 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27063 init_waitqueue_head(&intf->waitq);
27064 for (i = 0; i < IPMI_NUM_STATS; i++)
27065 - atomic_set(&intf->stats[i], 0);
27066 + atomic_set_unchecked(&intf->stats[i], 0);
27067
27068 intf->proc_dir = NULL;
27069
27070 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27071 struct ipmi_smi_msg smi_msg;
27072 struct ipmi_recv_msg recv_msg;
27073
27074 + pax_track_stack();
27075 +
27076 si = (struct ipmi_system_interface_addr *) &addr;
27077 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27078 si->channel = IPMI_BMC_CHANNEL;
27079 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c
27080 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27081 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27082 @@ -277,7 +277,7 @@ struct smi_info {
27083 unsigned char slave_addr;
27084
27085 /* Counters and things for the proc filesystem. */
27086 - atomic_t stats[SI_NUM_STATS];
27087 + atomic_unchecked_t stats[SI_NUM_STATS];
27088
27089 struct task_struct *thread;
27090
27091 @@ -285,9 +285,9 @@ struct smi_info {
27092 };
27093
27094 #define smi_inc_stat(smi, stat) \
27095 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27096 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27097 #define smi_get_stat(smi, stat) \
27098 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27099 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27100
27101 #define SI_MAX_PARMS 4
27102
27103 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27104 atomic_set(&new_smi->req_events, 0);
27105 new_smi->run_to_completion = 0;
27106 for (i = 0; i < SI_NUM_STATS; i++)
27107 - atomic_set(&new_smi->stats[i], 0);
27108 + atomic_set_unchecked(&new_smi->stats[i], 0);
27109
27110 new_smi->interrupt_disabled = 0;
27111 atomic_set(&new_smi->stop_operation, 0);
27112 diff -urNp linux-2.6.32.42/drivers/char/istallion.c linux-2.6.32.42/drivers/char/istallion.c
27113 --- linux-2.6.32.42/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27114 +++ linux-2.6.32.42/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27115 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27116 * re-used for each stats call.
27117 */
27118 static comstats_t stli_comstats;
27119 -static combrd_t stli_brdstats;
27120 static struct asystats stli_cdkstats;
27121
27122 /*****************************************************************************/
27123 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27124 {
27125 struct stlibrd *brdp;
27126 unsigned int i;
27127 + combrd_t stli_brdstats;
27128
27129 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27130 return -EFAULT;
27131 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27132 struct stliport stli_dummyport;
27133 struct stliport *portp;
27134
27135 + pax_track_stack();
27136 +
27137 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27138 return -EFAULT;
27139 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27140 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27141 struct stlibrd stli_dummybrd;
27142 struct stlibrd *brdp;
27143
27144 + pax_track_stack();
27145 +
27146 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27147 return -EFAULT;
27148 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27149 diff -urNp linux-2.6.32.42/drivers/char/Kconfig linux-2.6.32.42/drivers/char/Kconfig
27150 --- linux-2.6.32.42/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27151 +++ linux-2.6.32.42/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27152 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27153
27154 config DEVKMEM
27155 bool "/dev/kmem virtual device support"
27156 - default y
27157 + default n
27158 + depends on !GRKERNSEC_KMEM
27159 help
27160 Say Y here if you want to support the /dev/kmem device. The
27161 /dev/kmem device is rarely used, but can be used for certain
27162 @@ -1114,6 +1115,7 @@ config DEVPORT
27163 bool
27164 depends on !M68K
27165 depends on ISA || PCI
27166 + depends on !GRKERNSEC_KMEM
27167 default y
27168
27169 source "drivers/s390/char/Kconfig"
27170 diff -urNp linux-2.6.32.42/drivers/char/keyboard.c linux-2.6.32.42/drivers/char/keyboard.c
27171 --- linux-2.6.32.42/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27172 +++ linux-2.6.32.42/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27173 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27174 kbd->kbdmode == VC_MEDIUMRAW) &&
27175 value != KVAL(K_SAK))
27176 return; /* SAK is allowed even in raw mode */
27177 +
27178 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27179 + {
27180 + void *func = fn_handler[value];
27181 + if (func == fn_show_state || func == fn_show_ptregs ||
27182 + func == fn_show_mem)
27183 + return;
27184 + }
27185 +#endif
27186 +
27187 fn_handler[value](vc);
27188 }
27189
27190 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27191 .evbit = { BIT_MASK(EV_SND) },
27192 },
27193
27194 - { }, /* Terminating entry */
27195 + { 0 }, /* Terminating entry */
27196 };
27197
27198 MODULE_DEVICE_TABLE(input, kbd_ids);
27199 diff -urNp linux-2.6.32.42/drivers/char/mem.c linux-2.6.32.42/drivers/char/mem.c
27200 --- linux-2.6.32.42/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27201 +++ linux-2.6.32.42/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27202 @@ -18,6 +18,7 @@
27203 #include <linux/raw.h>
27204 #include <linux/tty.h>
27205 #include <linux/capability.h>
27206 +#include <linux/security.h>
27207 #include <linux/ptrace.h>
27208 #include <linux/device.h>
27209 #include <linux/highmem.h>
27210 @@ -35,6 +36,10 @@
27211 # include <linux/efi.h>
27212 #endif
27213
27214 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27215 +extern struct file_operations grsec_fops;
27216 +#endif
27217 +
27218 static inline unsigned long size_inside_page(unsigned long start,
27219 unsigned long size)
27220 {
27221 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27222
27223 while (cursor < to) {
27224 if (!devmem_is_allowed(pfn)) {
27225 +#ifdef CONFIG_GRKERNSEC_KMEM
27226 + gr_handle_mem_readwrite(from, to);
27227 +#else
27228 printk(KERN_INFO
27229 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27230 current->comm, from, to);
27231 +#endif
27232 return 0;
27233 }
27234 cursor += PAGE_SIZE;
27235 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27236 }
27237 return 1;
27238 }
27239 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27240 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27241 +{
27242 + return 0;
27243 +}
27244 #else
27245 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27246 {
27247 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27248 #endif
27249
27250 while (count > 0) {
27251 + char *temp;
27252 +
27253 /*
27254 * Handle first page in case it's not aligned
27255 */
27256 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27257 if (!ptr)
27258 return -EFAULT;
27259
27260 - if (copy_to_user(buf, ptr, sz)) {
27261 +#ifdef CONFIG_PAX_USERCOPY
27262 + temp = kmalloc(sz, GFP_KERNEL);
27263 + if (!temp) {
27264 + unxlate_dev_mem_ptr(p, ptr);
27265 + return -ENOMEM;
27266 + }
27267 + memcpy(temp, ptr, sz);
27268 +#else
27269 + temp = ptr;
27270 +#endif
27271 +
27272 + if (copy_to_user(buf, temp, sz)) {
27273 +
27274 +#ifdef CONFIG_PAX_USERCOPY
27275 + kfree(temp);
27276 +#endif
27277 +
27278 unxlate_dev_mem_ptr(p, ptr);
27279 return -EFAULT;
27280 }
27281
27282 +#ifdef CONFIG_PAX_USERCOPY
27283 + kfree(temp);
27284 +#endif
27285 +
27286 unxlate_dev_mem_ptr(p, ptr);
27287
27288 buf += sz;
27289 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27290 size_t count, loff_t *ppos)
27291 {
27292 unsigned long p = *ppos;
27293 - ssize_t low_count, read, sz;
27294 + ssize_t low_count, read, sz, err = 0;
27295 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27296 - int err = 0;
27297
27298 read = 0;
27299 if (p < (unsigned long) high_memory) {
27300 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27301 }
27302 #endif
27303 while (low_count > 0) {
27304 + char *temp;
27305 +
27306 sz = size_inside_page(p, low_count);
27307
27308 /*
27309 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27310 */
27311 kbuf = xlate_dev_kmem_ptr((char *)p);
27312
27313 - if (copy_to_user(buf, kbuf, sz))
27314 +#ifdef CONFIG_PAX_USERCOPY
27315 + temp = kmalloc(sz, GFP_KERNEL);
27316 + if (!temp)
27317 + return -ENOMEM;
27318 + memcpy(temp, kbuf, sz);
27319 +#else
27320 + temp = kbuf;
27321 +#endif
27322 +
27323 + err = copy_to_user(buf, temp, sz);
27324 +
27325 +#ifdef CONFIG_PAX_USERCOPY
27326 + kfree(temp);
27327 +#endif
27328 +
27329 + if (err)
27330 return -EFAULT;
27331 buf += sz;
27332 p += sz;
27333 @@ -889,6 +941,9 @@ static const struct memdev {
27334 #ifdef CONFIG_CRASH_DUMP
27335 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27336 #endif
27337 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27338 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27339 +#endif
27340 };
27341
27342 static int memory_open(struct inode *inode, struct file *filp)
27343 diff -urNp linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c
27344 --- linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
27345 +++ linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
27346 @@ -29,6 +29,7 @@
27347 #include <linux/tty_driver.h>
27348 #include <linux/tty_flip.h>
27349 #include <linux/uaccess.h>
27350 +#include <asm/local.h>
27351
27352 #include "tty.h"
27353 #include "network.h"
27354 @@ -51,7 +52,7 @@ struct ipw_tty {
27355 int tty_type;
27356 struct ipw_network *network;
27357 struct tty_struct *linux_tty;
27358 - int open_count;
27359 + local_t open_count;
27360 unsigned int control_lines;
27361 struct mutex ipw_tty_mutex;
27362 int tx_bytes_queued;
27363 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
27364 mutex_unlock(&tty->ipw_tty_mutex);
27365 return -ENODEV;
27366 }
27367 - if (tty->open_count == 0)
27368 + if (local_read(&tty->open_count) == 0)
27369 tty->tx_bytes_queued = 0;
27370
27371 - tty->open_count++;
27372 + local_inc(&tty->open_count);
27373
27374 tty->linux_tty = linux_tty;
27375 linux_tty->driver_data = tty;
27376 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
27377
27378 static void do_ipw_close(struct ipw_tty *tty)
27379 {
27380 - tty->open_count--;
27381 -
27382 - if (tty->open_count == 0) {
27383 + if (local_dec_return(&tty->open_count) == 0) {
27384 struct tty_struct *linux_tty = tty->linux_tty;
27385
27386 if (linux_tty != NULL) {
27387 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
27388 return;
27389
27390 mutex_lock(&tty->ipw_tty_mutex);
27391 - if (tty->open_count == 0) {
27392 + if (local_read(&tty->open_count) == 0) {
27393 mutex_unlock(&tty->ipw_tty_mutex);
27394 return;
27395 }
27396 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
27397 return;
27398 }
27399
27400 - if (!tty->open_count) {
27401 + if (!local_read(&tty->open_count)) {
27402 mutex_unlock(&tty->ipw_tty_mutex);
27403 return;
27404 }
27405 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
27406 return -ENODEV;
27407
27408 mutex_lock(&tty->ipw_tty_mutex);
27409 - if (!tty->open_count) {
27410 + if (!local_read(&tty->open_count)) {
27411 mutex_unlock(&tty->ipw_tty_mutex);
27412 return -EINVAL;
27413 }
27414 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
27415 if (!tty)
27416 return -ENODEV;
27417
27418 - if (!tty->open_count)
27419 + if (!local_read(&tty->open_count))
27420 return -EINVAL;
27421
27422 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
27423 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
27424 if (!tty)
27425 return 0;
27426
27427 - if (!tty->open_count)
27428 + if (!local_read(&tty->open_count))
27429 return 0;
27430
27431 return tty->tx_bytes_queued;
27432 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
27433 if (!tty)
27434 return -ENODEV;
27435
27436 - if (!tty->open_count)
27437 + if (!local_read(&tty->open_count))
27438 return -EINVAL;
27439
27440 return get_control_lines(tty);
27441 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
27442 if (!tty)
27443 return -ENODEV;
27444
27445 - if (!tty->open_count)
27446 + if (!local_read(&tty->open_count))
27447 return -EINVAL;
27448
27449 return set_control_lines(tty, set, clear);
27450 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
27451 if (!tty)
27452 return -ENODEV;
27453
27454 - if (!tty->open_count)
27455 + if (!local_read(&tty->open_count))
27456 return -EINVAL;
27457
27458 /* FIXME: Exactly how is the tty object locked here .. */
27459 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
27460 against a parallel ioctl etc */
27461 mutex_lock(&ttyj->ipw_tty_mutex);
27462 }
27463 - while (ttyj->open_count)
27464 + while (local_read(&ttyj->open_count))
27465 do_ipw_close(ttyj);
27466 ipwireless_disassociate_network_ttys(network,
27467 ttyj->channel_idx);
27468 diff -urNp linux-2.6.32.42/drivers/char/pty.c linux-2.6.32.42/drivers/char/pty.c
27469 --- linux-2.6.32.42/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
27470 +++ linux-2.6.32.42/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
27471 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
27472 return ret;
27473 }
27474
27475 -static struct file_operations ptmx_fops;
27476 +static const struct file_operations ptmx_fops = {
27477 + .llseek = no_llseek,
27478 + .read = tty_read,
27479 + .write = tty_write,
27480 + .poll = tty_poll,
27481 + .unlocked_ioctl = tty_ioctl,
27482 + .compat_ioctl = tty_compat_ioctl,
27483 + .open = ptmx_open,
27484 + .release = tty_release,
27485 + .fasync = tty_fasync,
27486 +};
27487 +
27488
27489 static void __init unix98_pty_init(void)
27490 {
27491 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
27492 register_sysctl_table(pty_root_table);
27493
27494 /* Now create the /dev/ptmx special device */
27495 - tty_default_fops(&ptmx_fops);
27496 - ptmx_fops.open = ptmx_open;
27497 -
27498 cdev_init(&ptmx_cdev, &ptmx_fops);
27499 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
27500 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
27501 diff -urNp linux-2.6.32.42/drivers/char/random.c linux-2.6.32.42/drivers/char/random.c
27502 --- linux-2.6.32.42/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
27503 +++ linux-2.6.32.42/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
27504 @@ -254,8 +254,13 @@
27505 /*
27506 * Configuration information
27507 */
27508 +#ifdef CONFIG_GRKERNSEC_RANDNET
27509 +#define INPUT_POOL_WORDS 512
27510 +#define OUTPUT_POOL_WORDS 128
27511 +#else
27512 #define INPUT_POOL_WORDS 128
27513 #define OUTPUT_POOL_WORDS 32
27514 +#endif
27515 #define SEC_XFER_SIZE 512
27516
27517 /*
27518 @@ -292,10 +297,17 @@ static struct poolinfo {
27519 int poolwords;
27520 int tap1, tap2, tap3, tap4, tap5;
27521 } poolinfo_table[] = {
27522 +#ifdef CONFIG_GRKERNSEC_RANDNET
27523 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27524 + { 512, 411, 308, 208, 104, 1 },
27525 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27526 + { 128, 103, 76, 51, 25, 1 },
27527 +#else
27528 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27529 { 128, 103, 76, 51, 25, 1 },
27530 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27531 { 32, 26, 20, 14, 7, 1 },
27532 +#endif
27533 #if 0
27534 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27535 { 2048, 1638, 1231, 819, 411, 1 },
27536 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27537 #include <linux/sysctl.h>
27538
27539 static int min_read_thresh = 8, min_write_thresh;
27540 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27541 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27542 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27543 static char sysctl_bootid[16];
27544
27545 diff -urNp linux-2.6.32.42/drivers/char/rocket.c linux-2.6.32.42/drivers/char/rocket.c
27546 --- linux-2.6.32.42/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
27547 +++ linux-2.6.32.42/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
27548 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
27549 struct rocket_ports tmp;
27550 int board;
27551
27552 + pax_track_stack();
27553 +
27554 if (!retports)
27555 return -EFAULT;
27556 memset(&tmp, 0, sizeof (tmp));
27557 diff -urNp linux-2.6.32.42/drivers/char/sonypi.c linux-2.6.32.42/drivers/char/sonypi.c
27558 --- linux-2.6.32.42/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
27559 +++ linux-2.6.32.42/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
27560 @@ -55,6 +55,7 @@
27561 #include <asm/uaccess.h>
27562 #include <asm/io.h>
27563 #include <asm/system.h>
27564 +#include <asm/local.h>
27565
27566 #include <linux/sonypi.h>
27567
27568 @@ -491,7 +492,7 @@ static struct sonypi_device {
27569 spinlock_t fifo_lock;
27570 wait_queue_head_t fifo_proc_list;
27571 struct fasync_struct *fifo_async;
27572 - int open_count;
27573 + local_t open_count;
27574 int model;
27575 struct input_dev *input_jog_dev;
27576 struct input_dev *input_key_dev;
27577 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
27578 static int sonypi_misc_release(struct inode *inode, struct file *file)
27579 {
27580 mutex_lock(&sonypi_device.lock);
27581 - sonypi_device.open_count--;
27582 + local_dec(&sonypi_device.open_count);
27583 mutex_unlock(&sonypi_device.lock);
27584 return 0;
27585 }
27586 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
27587 lock_kernel();
27588 mutex_lock(&sonypi_device.lock);
27589 /* Flush input queue on first open */
27590 - if (!sonypi_device.open_count)
27591 + if (!local_read(&sonypi_device.open_count))
27592 kfifo_reset(sonypi_device.fifo);
27593 - sonypi_device.open_count++;
27594 + local_inc(&sonypi_device.open_count);
27595 mutex_unlock(&sonypi_device.lock);
27596 unlock_kernel();
27597 return 0;
27598 diff -urNp linux-2.6.32.42/drivers/char/stallion.c linux-2.6.32.42/drivers/char/stallion.c
27599 --- linux-2.6.32.42/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
27600 +++ linux-2.6.32.42/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
27601 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
27602 struct stlport stl_dummyport;
27603 struct stlport *portp;
27604
27605 + pax_track_stack();
27606 +
27607 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27608 return -EFAULT;
27609 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27610 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm_bios.c linux-2.6.32.42/drivers/char/tpm/tpm_bios.c
27611 --- linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27612 +++ linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27613 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27614 event = addr;
27615
27616 if ((event->event_type == 0 && event->event_size == 0) ||
27617 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27618 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27619 return NULL;
27620
27621 return addr;
27622 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27623 return NULL;
27624
27625 if ((event->event_type == 0 && event->event_size == 0) ||
27626 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27627 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27628 return NULL;
27629
27630 (*pos)++;
27631 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27632 int i;
27633
27634 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27635 - seq_putc(m, data[i]);
27636 + if (!seq_putc(m, data[i]))
27637 + return -EFAULT;
27638
27639 return 0;
27640 }
27641 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27642 log->bios_event_log_end = log->bios_event_log + len;
27643
27644 virt = acpi_os_map_memory(start, len);
27645 + if (!virt) {
27646 + kfree(log->bios_event_log);
27647 + log->bios_event_log = NULL;
27648 + return -EFAULT;
27649 + }
27650
27651 memcpy(log->bios_event_log, virt, len);
27652
27653 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm.c linux-2.6.32.42/drivers/char/tpm/tpm.c
27654 --- linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27655 +++ linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27656 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27657 chip->vendor.req_complete_val)
27658 goto out_recv;
27659
27660 - if ((status == chip->vendor.req_canceled)) {
27661 + if (status == chip->vendor.req_canceled) {
27662 dev_err(chip->dev, "Operation Canceled\n");
27663 rc = -ECANCELED;
27664 goto out;
27665 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27666
27667 struct tpm_chip *chip = dev_get_drvdata(dev);
27668
27669 + pax_track_stack();
27670 +
27671 tpm_cmd.header.in = tpm_readpubek_header;
27672 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27673 "attempting to read the PUBEK");
27674 diff -urNp linux-2.6.32.42/drivers/char/tty_io.c linux-2.6.32.42/drivers/char/tty_io.c
27675 --- linux-2.6.32.42/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27676 +++ linux-2.6.32.42/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27677 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27678 DEFINE_MUTEX(tty_mutex);
27679 EXPORT_SYMBOL(tty_mutex);
27680
27681 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27682 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27683 ssize_t redirected_tty_write(struct file *, const char __user *,
27684 size_t, loff_t *);
27685 -static unsigned int tty_poll(struct file *, poll_table *);
27686 static int tty_open(struct inode *, struct file *);
27687 -static int tty_release(struct inode *, struct file *);
27688 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27689 -#ifdef CONFIG_COMPAT
27690 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27691 - unsigned long arg);
27692 -#else
27693 -#define tty_compat_ioctl NULL
27694 -#endif
27695 -static int tty_fasync(int fd, struct file *filp, int on);
27696 static void release_tty(struct tty_struct *tty, int idx);
27697 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27698 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27699 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27700 * read calls may be outstanding in parallel.
27701 */
27702
27703 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27704 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27705 loff_t *ppos)
27706 {
27707 int i;
27708 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27709 return i;
27710 }
27711
27712 +EXPORT_SYMBOL(tty_read);
27713 +
27714 void tty_write_unlock(struct tty_struct *tty)
27715 {
27716 mutex_unlock(&tty->atomic_write_lock);
27717 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27718 * write method will not be invoked in parallel for each device.
27719 */
27720
27721 -static ssize_t tty_write(struct file *file, const char __user *buf,
27722 +ssize_t tty_write(struct file *file, const char __user *buf,
27723 size_t count, loff_t *ppos)
27724 {
27725 struct tty_struct *tty;
27726 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27727 return ret;
27728 }
27729
27730 +EXPORT_SYMBOL(tty_write);
27731 +
27732 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27733 size_t count, loff_t *ppos)
27734 {
27735 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27736 * Takes bkl. See tty_release_dev
27737 */
27738
27739 -static int tty_release(struct inode *inode, struct file *filp)
27740 +int tty_release(struct inode *inode, struct file *filp)
27741 {
27742 lock_kernel();
27743 tty_release_dev(filp);
27744 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27745 return 0;
27746 }
27747
27748 +EXPORT_SYMBOL(tty_release);
27749 +
27750 /**
27751 * tty_poll - check tty status
27752 * @filp: file being polled
27753 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27754 * may be re-entered freely by other callers.
27755 */
27756
27757 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27758 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27759 {
27760 struct tty_struct *tty;
27761 struct tty_ldisc *ld;
27762 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27763 return ret;
27764 }
27765
27766 -static int tty_fasync(int fd, struct file *filp, int on)
27767 +EXPORT_SYMBOL(tty_poll);
27768 +
27769 +int tty_fasync(int fd, struct file *filp, int on)
27770 {
27771 struct tty_struct *tty;
27772 unsigned long flags;
27773 @@ -1948,6 +1945,8 @@ out:
27774 return retval;
27775 }
27776
27777 +EXPORT_SYMBOL(tty_fasync);
27778 +
27779 /**
27780 * tiocsti - fake input character
27781 * @tty: tty to fake input into
27782 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27783 return retval;
27784 }
27785
27786 +EXPORT_SYMBOL(tty_ioctl);
27787 +
27788 #ifdef CONFIG_COMPAT
27789 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27790 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27791 unsigned long arg)
27792 {
27793 struct inode *inode = file->f_dentry->d_inode;
27794 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27795
27796 return retval;
27797 }
27798 +
27799 +EXPORT_SYMBOL(tty_compat_ioctl);
27800 #endif
27801
27802 /*
27803 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27804 }
27805 EXPORT_SYMBOL_GPL(get_current_tty);
27806
27807 -void tty_default_fops(struct file_operations *fops)
27808 -{
27809 - *fops = tty_fops;
27810 -}
27811 -
27812 /*
27813 * Initialize the console device. This is called *early*, so
27814 * we can't necessarily depend on lots of kernel help here.
27815 diff -urNp linux-2.6.32.42/drivers/char/tty_ldisc.c linux-2.6.32.42/drivers/char/tty_ldisc.c
27816 --- linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27817 +++ linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27818 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27819 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27820 struct tty_ldisc_ops *ldo = ld->ops;
27821
27822 - ldo->refcount--;
27823 + atomic_dec(&ldo->refcount);
27824 module_put(ldo->owner);
27825 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27826
27827 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27828 spin_lock_irqsave(&tty_ldisc_lock, flags);
27829 tty_ldiscs[disc] = new_ldisc;
27830 new_ldisc->num = disc;
27831 - new_ldisc->refcount = 0;
27832 + atomic_set(&new_ldisc->refcount, 0);
27833 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27834
27835 return ret;
27836 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27837 return -EINVAL;
27838
27839 spin_lock_irqsave(&tty_ldisc_lock, flags);
27840 - if (tty_ldiscs[disc]->refcount)
27841 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27842 ret = -EBUSY;
27843 else
27844 tty_ldiscs[disc] = NULL;
27845 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27846 if (ldops) {
27847 ret = ERR_PTR(-EAGAIN);
27848 if (try_module_get(ldops->owner)) {
27849 - ldops->refcount++;
27850 + atomic_inc(&ldops->refcount);
27851 ret = ldops;
27852 }
27853 }
27854 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27855 unsigned long flags;
27856
27857 spin_lock_irqsave(&tty_ldisc_lock, flags);
27858 - ldops->refcount--;
27859 + atomic_dec(&ldops->refcount);
27860 module_put(ldops->owner);
27861 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27862 }
27863 diff -urNp linux-2.6.32.42/drivers/char/virtio_console.c linux-2.6.32.42/drivers/char/virtio_console.c
27864 --- linux-2.6.32.42/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27865 +++ linux-2.6.32.42/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27866 @@ -44,6 +44,7 @@ static unsigned int in_len;
27867 static char *in, *inbuf;
27868
27869 /* The operations for our console. */
27870 +/* cannot be const */
27871 static struct hv_ops virtio_cons;
27872
27873 /* The hvc device */
27874 diff -urNp linux-2.6.32.42/drivers/char/vt.c linux-2.6.32.42/drivers/char/vt.c
27875 --- linux-2.6.32.42/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27876 +++ linux-2.6.32.42/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27877 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27878
27879 static void notify_write(struct vc_data *vc, unsigned int unicode)
27880 {
27881 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27882 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
27883 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27884 }
27885
27886 diff -urNp linux-2.6.32.42/drivers/char/vt_ioctl.c linux-2.6.32.42/drivers/char/vt_ioctl.c
27887 --- linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27888 +++ linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27889 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27890 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27891 return -EFAULT;
27892
27893 - if (!capable(CAP_SYS_TTY_CONFIG))
27894 - perm = 0;
27895 -
27896 switch (cmd) {
27897 case KDGKBENT:
27898 key_map = key_maps[s];
27899 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27900 val = (i ? K_HOLE : K_NOSUCHMAP);
27901 return put_user(val, &user_kbe->kb_value);
27902 case KDSKBENT:
27903 + if (!capable(CAP_SYS_TTY_CONFIG))
27904 + perm = 0;
27905 +
27906 if (!perm)
27907 return -EPERM;
27908 +
27909 if (!i && v == K_NOSUCHMAP) {
27910 /* deallocate map */
27911 key_map = key_maps[s];
27912 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27913 int i, j, k;
27914 int ret;
27915
27916 - if (!capable(CAP_SYS_TTY_CONFIG))
27917 - perm = 0;
27918 -
27919 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27920 if (!kbs) {
27921 ret = -ENOMEM;
27922 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27923 kfree(kbs);
27924 return ((p && *p) ? -EOVERFLOW : 0);
27925 case KDSKBSENT:
27926 + if (!capable(CAP_SYS_TTY_CONFIG))
27927 + perm = 0;
27928 +
27929 if (!perm) {
27930 ret = -EPERM;
27931 goto reterr;
27932 diff -urNp linux-2.6.32.42/drivers/cpufreq/cpufreq.c linux-2.6.32.42/drivers/cpufreq/cpufreq.c
27933 --- linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
27934 +++ linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
27935 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27936 complete(&policy->kobj_unregister);
27937 }
27938
27939 -static struct sysfs_ops sysfs_ops = {
27940 +static const struct sysfs_ops sysfs_ops = {
27941 .show = show,
27942 .store = store,
27943 };
27944 diff -urNp linux-2.6.32.42/drivers/cpuidle/sysfs.c linux-2.6.32.42/drivers/cpuidle/sysfs.c
27945 --- linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27946 +++ linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27947 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27948 return ret;
27949 }
27950
27951 -static struct sysfs_ops cpuidle_sysfs_ops = {
27952 +static const struct sysfs_ops cpuidle_sysfs_ops = {
27953 .show = cpuidle_show,
27954 .store = cpuidle_store,
27955 };
27956 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27957 return ret;
27958 }
27959
27960 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
27961 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27962 .show = cpuidle_state_show,
27963 };
27964
27965 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27966 .release = cpuidle_state_sysfs_release,
27967 };
27968
27969 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27970 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27971 {
27972 kobject_put(&device->kobjs[i]->kobj);
27973 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27974 diff -urNp linux-2.6.32.42/drivers/crypto/hifn_795x.c linux-2.6.32.42/drivers/crypto/hifn_795x.c
27975 --- linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27976 +++ linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27977 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27978 0xCA, 0x34, 0x2B, 0x2E};
27979 struct scatterlist sg;
27980
27981 + pax_track_stack();
27982 +
27983 memset(src, 0, sizeof(src));
27984 memset(ctx.key, 0, sizeof(ctx.key));
27985
27986 diff -urNp linux-2.6.32.42/drivers/crypto/padlock-aes.c linux-2.6.32.42/drivers/crypto/padlock-aes.c
27987 --- linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27988 +++ linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27989 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27990 struct crypto_aes_ctx gen_aes;
27991 int cpu;
27992
27993 + pax_track_stack();
27994 +
27995 if (key_len % 8) {
27996 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27997 return -EINVAL;
27998 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.c linux-2.6.32.42/drivers/dma/ioat/dma.c
27999 --- linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28000 +++ linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28001 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28002 return entry->show(&chan->common, page);
28003 }
28004
28005 -struct sysfs_ops ioat_sysfs_ops = {
28006 +const struct sysfs_ops ioat_sysfs_ops = {
28007 .show = ioat_attr_show,
28008 };
28009
28010 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.h linux-2.6.32.42/drivers/dma/ioat/dma.h
28011 --- linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28012 +++ linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28013 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28014 unsigned long *phys_complete);
28015 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28016 void ioat_kobject_del(struct ioatdma_device *device);
28017 -extern struct sysfs_ops ioat_sysfs_ops;
28018 +extern const struct sysfs_ops ioat_sysfs_ops;
28019 extern struct ioat_sysfs_entry ioat_version_attr;
28020 extern struct ioat_sysfs_entry ioat_cap_attr;
28021 #endif /* IOATDMA_H */
28022 diff -urNp linux-2.6.32.42/drivers/edac/edac_device_sysfs.c linux-2.6.32.42/drivers/edac/edac_device_sysfs.c
28023 --- linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28024 +++ linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28025 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28026 }
28027
28028 /* edac_dev file operations for an 'ctl_info' */
28029 -static struct sysfs_ops device_ctl_info_ops = {
28030 +static const struct sysfs_ops device_ctl_info_ops = {
28031 .show = edac_dev_ctl_info_show,
28032 .store = edac_dev_ctl_info_store
28033 };
28034 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28035 }
28036
28037 /* edac_dev file operations for an 'instance' */
28038 -static struct sysfs_ops device_instance_ops = {
28039 +static const struct sysfs_ops device_instance_ops = {
28040 .show = edac_dev_instance_show,
28041 .store = edac_dev_instance_store
28042 };
28043 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28044 }
28045
28046 /* edac_dev file operations for a 'block' */
28047 -static struct sysfs_ops device_block_ops = {
28048 +static const struct sysfs_ops device_block_ops = {
28049 .show = edac_dev_block_show,
28050 .store = edac_dev_block_store
28051 };
28052 diff -urNp linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c
28053 --- linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28054 +++ linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28055 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28056 return -EIO;
28057 }
28058
28059 -static struct sysfs_ops csrowfs_ops = {
28060 +static const struct sysfs_ops csrowfs_ops = {
28061 .show = csrowdev_show,
28062 .store = csrowdev_store
28063 };
28064 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28065 }
28066
28067 /* Intermediate show/store table */
28068 -static struct sysfs_ops mci_ops = {
28069 +static const struct sysfs_ops mci_ops = {
28070 .show = mcidev_show,
28071 .store = mcidev_store
28072 };
28073 diff -urNp linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c
28074 --- linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28075 +++ linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28076 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28077 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28078 static int edac_pci_poll_msec = 1000; /* one second workq period */
28079
28080 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28081 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28082 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28083 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28084
28085 static struct kobject *edac_pci_top_main_kobj;
28086 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28087 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28088 }
28089
28090 /* fs_ops table */
28091 -static struct sysfs_ops pci_instance_ops = {
28092 +static const struct sysfs_ops pci_instance_ops = {
28093 .show = edac_pci_instance_show,
28094 .store = edac_pci_instance_store
28095 };
28096 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28097 return -EIO;
28098 }
28099
28100 -static struct sysfs_ops edac_pci_sysfs_ops = {
28101 +static const struct sysfs_ops edac_pci_sysfs_ops = {
28102 .show = edac_pci_dev_show,
28103 .store = edac_pci_dev_store
28104 };
28105 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28106 edac_printk(KERN_CRIT, EDAC_PCI,
28107 "Signaled System Error on %s\n",
28108 pci_name(dev));
28109 - atomic_inc(&pci_nonparity_count);
28110 + atomic_inc_unchecked(&pci_nonparity_count);
28111 }
28112
28113 if (status & (PCI_STATUS_PARITY)) {
28114 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28115 "Master Data Parity Error on %s\n",
28116 pci_name(dev));
28117
28118 - atomic_inc(&pci_parity_count);
28119 + atomic_inc_unchecked(&pci_parity_count);
28120 }
28121
28122 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28123 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28124 "Detected Parity Error on %s\n",
28125 pci_name(dev));
28126
28127 - atomic_inc(&pci_parity_count);
28128 + atomic_inc_unchecked(&pci_parity_count);
28129 }
28130 }
28131
28132 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28133 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28134 "Signaled System Error on %s\n",
28135 pci_name(dev));
28136 - atomic_inc(&pci_nonparity_count);
28137 + atomic_inc_unchecked(&pci_nonparity_count);
28138 }
28139
28140 if (status & (PCI_STATUS_PARITY)) {
28141 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28142 "Master Data Parity Error on "
28143 "%s\n", pci_name(dev));
28144
28145 - atomic_inc(&pci_parity_count);
28146 + atomic_inc_unchecked(&pci_parity_count);
28147 }
28148
28149 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28150 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28151 "Detected Parity Error on %s\n",
28152 pci_name(dev));
28153
28154 - atomic_inc(&pci_parity_count);
28155 + atomic_inc_unchecked(&pci_parity_count);
28156 }
28157 }
28158 }
28159 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28160 if (!check_pci_errors)
28161 return;
28162
28163 - before_count = atomic_read(&pci_parity_count);
28164 + before_count = atomic_read_unchecked(&pci_parity_count);
28165
28166 /* scan all PCI devices looking for a Parity Error on devices and
28167 * bridges.
28168 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28169 /* Only if operator has selected panic on PCI Error */
28170 if (edac_pci_get_panic_on_pe()) {
28171 /* If the count is different 'after' from 'before' */
28172 - if (before_count != atomic_read(&pci_parity_count))
28173 + if (before_count != atomic_read_unchecked(&pci_parity_count))
28174 panic("EDAC: PCI Parity Error");
28175 }
28176 }
28177 diff -urNp linux-2.6.32.42/drivers/firewire/core-cdev.c linux-2.6.32.42/drivers/firewire/core-cdev.c
28178 --- linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28179 +++ linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28180 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28181 int ret;
28182
28183 if ((request->channels == 0 && request->bandwidth == 0) ||
28184 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28185 - request->bandwidth < 0)
28186 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28187 return -EINVAL;
28188
28189 r = kmalloc(sizeof(*r), GFP_KERNEL);
28190 diff -urNp linux-2.6.32.42/drivers/firewire/core-transaction.c linux-2.6.32.42/drivers/firewire/core-transaction.c
28191 --- linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28192 +++ linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28193 @@ -36,6 +36,7 @@
28194 #include <linux/string.h>
28195 #include <linux/timer.h>
28196 #include <linux/types.h>
28197 +#include <linux/sched.h>
28198
28199 #include <asm/byteorder.h>
28200
28201 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28202 struct transaction_callback_data d;
28203 struct fw_transaction t;
28204
28205 + pax_track_stack();
28206 +
28207 init_completion(&d.done);
28208 d.payload = payload;
28209 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28210 diff -urNp linux-2.6.32.42/drivers/firmware/dmi_scan.c linux-2.6.32.42/drivers/firmware/dmi_scan.c
28211 --- linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28212 +++ linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28213 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28214 }
28215 }
28216 else {
28217 - /*
28218 - * no iounmap() for that ioremap(); it would be a no-op, but
28219 - * it's so early in setup that sucker gets confused into doing
28220 - * what it shouldn't if we actually call it.
28221 - */
28222 p = dmi_ioremap(0xF0000, 0x10000);
28223 if (p == NULL)
28224 goto error;
28225 diff -urNp linux-2.6.32.42/drivers/firmware/edd.c linux-2.6.32.42/drivers/firmware/edd.c
28226 --- linux-2.6.32.42/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28227 +++ linux-2.6.32.42/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28228 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28229 return ret;
28230 }
28231
28232 -static struct sysfs_ops edd_attr_ops = {
28233 +static const struct sysfs_ops edd_attr_ops = {
28234 .show = edd_attr_show,
28235 };
28236
28237 diff -urNp linux-2.6.32.42/drivers/firmware/efivars.c linux-2.6.32.42/drivers/firmware/efivars.c
28238 --- linux-2.6.32.42/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28239 +++ linux-2.6.32.42/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28240 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28241 return ret;
28242 }
28243
28244 -static struct sysfs_ops efivar_attr_ops = {
28245 +static const struct sysfs_ops efivar_attr_ops = {
28246 .show = efivar_attr_show,
28247 .store = efivar_attr_store,
28248 };
28249 diff -urNp linux-2.6.32.42/drivers/firmware/iscsi_ibft.c linux-2.6.32.42/drivers/firmware/iscsi_ibft.c
28250 --- linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28251 +++ linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28252 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28253 return ret;
28254 }
28255
28256 -static struct sysfs_ops ibft_attr_ops = {
28257 +static const struct sysfs_ops ibft_attr_ops = {
28258 .show = ibft_show_attribute,
28259 };
28260
28261 diff -urNp linux-2.6.32.42/drivers/firmware/memmap.c linux-2.6.32.42/drivers/firmware/memmap.c
28262 --- linux-2.6.32.42/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28263 +++ linux-2.6.32.42/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28264 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28265 NULL
28266 };
28267
28268 -static struct sysfs_ops memmap_attr_ops = {
28269 +static const struct sysfs_ops memmap_attr_ops = {
28270 .show = memmap_attr_show,
28271 };
28272
28273 diff -urNp linux-2.6.32.42/drivers/gpio/vr41xx_giu.c linux-2.6.32.42/drivers/gpio/vr41xx_giu.c
28274 --- linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28275 +++ linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28276 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28277 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28278 maskl, pendl, maskh, pendh);
28279
28280 - atomic_inc(&irq_err_count);
28281 + atomic_inc_unchecked(&irq_err_count);
28282
28283 return -EINVAL;
28284 }
28285 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c
28286 --- linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28287 +++ linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28288 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28289 struct drm_crtc *tmp;
28290 int crtc_mask = 1;
28291
28292 - WARN(!crtc, "checking null crtc?");
28293 + BUG_ON(!crtc);
28294
28295 dev = crtc->dev;
28296
28297 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28298
28299 adjusted_mode = drm_mode_duplicate(dev, mode);
28300
28301 + pax_track_stack();
28302 +
28303 crtc->enabled = drm_helper_crtc_in_use(crtc);
28304
28305 if (!crtc->enabled)
28306 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_drv.c linux-2.6.32.42/drivers/gpu/drm/drm_drv.c
28307 --- linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28308 +++ linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28309 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28310 char *kdata = NULL;
28311
28312 atomic_inc(&dev->ioctl_count);
28313 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28314 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28315 ++file_priv->ioctl_count;
28316
28317 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28318 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_fops.c linux-2.6.32.42/drivers/gpu/drm/drm_fops.c
28319 --- linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28320 +++ linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28321 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28322 }
28323
28324 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28325 - atomic_set(&dev->counts[i], 0);
28326 + atomic_set_unchecked(&dev->counts[i], 0);
28327
28328 dev->sigdata.lock = NULL;
28329
28330 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28331
28332 retcode = drm_open_helper(inode, filp, dev);
28333 if (!retcode) {
28334 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28335 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28336 spin_lock(&dev->count_lock);
28337 - if (!dev->open_count++) {
28338 + if (local_inc_return(&dev->open_count) == 1) {
28339 spin_unlock(&dev->count_lock);
28340 retcode = drm_setup(dev);
28341 goto out;
28342 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28343
28344 lock_kernel();
28345
28346 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28347 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28348
28349 if (dev->driver->preclose)
28350 dev->driver->preclose(dev, file_priv);
28351 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28352 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28353 task_pid_nr(current),
28354 (long)old_encode_dev(file_priv->minor->device),
28355 - dev->open_count);
28356 + local_read(&dev->open_count));
28357
28358 /* if the master has gone away we can't do anything with the lock */
28359 if (file_priv->minor->master)
28360 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28361 * End inline drm_release
28362 */
28363
28364 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28365 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28366 spin_lock(&dev->count_lock);
28367 - if (!--dev->open_count) {
28368 + if (local_dec_and_test(&dev->open_count)) {
28369 if (atomic_read(&dev->ioctl_count)) {
28370 DRM_ERROR("Device busy: %d\n",
28371 atomic_read(&dev->ioctl_count));
28372 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_gem.c linux-2.6.32.42/drivers/gpu/drm/drm_gem.c
28373 --- linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28374 +++ linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28375 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28376 spin_lock_init(&dev->object_name_lock);
28377 idr_init(&dev->object_name_idr);
28378 atomic_set(&dev->object_count, 0);
28379 - atomic_set(&dev->object_memory, 0);
28380 + atomic_set_unchecked(&dev->object_memory, 0);
28381 atomic_set(&dev->pin_count, 0);
28382 - atomic_set(&dev->pin_memory, 0);
28383 + atomic_set_unchecked(&dev->pin_memory, 0);
28384 atomic_set(&dev->gtt_count, 0);
28385 - atomic_set(&dev->gtt_memory, 0);
28386 + atomic_set_unchecked(&dev->gtt_memory, 0);
28387
28388 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28389 if (!mm) {
28390 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28391 goto fput;
28392 }
28393 atomic_inc(&dev->object_count);
28394 - atomic_add(obj->size, &dev->object_memory);
28395 + atomic_add_unchecked(obj->size, &dev->object_memory);
28396 return obj;
28397 fput:
28398 fput(obj->filp);
28399 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28400
28401 fput(obj->filp);
28402 atomic_dec(&dev->object_count);
28403 - atomic_sub(obj->size, &dev->object_memory);
28404 + atomic_sub_unchecked(obj->size, &dev->object_memory);
28405 kfree(obj);
28406 }
28407 EXPORT_SYMBOL(drm_gem_object_free);
28408 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_info.c linux-2.6.32.42/drivers/gpu/drm/drm_info.c
28409 --- linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28410 +++ linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28411 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28412 struct drm_local_map *map;
28413 struct drm_map_list *r_list;
28414
28415 - /* Hardcoded from _DRM_FRAME_BUFFER,
28416 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28417 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28418 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28419 + static const char * const types[] = {
28420 + [_DRM_FRAME_BUFFER] = "FB",
28421 + [_DRM_REGISTERS] = "REG",
28422 + [_DRM_SHM] = "SHM",
28423 + [_DRM_AGP] = "AGP",
28424 + [_DRM_SCATTER_GATHER] = "SG",
28425 + [_DRM_CONSISTENT] = "PCI",
28426 + [_DRM_GEM] = "GEM" };
28427 const char *type;
28428 int i;
28429
28430 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
28431 map = r_list->map;
28432 if (!map)
28433 continue;
28434 - if (map->type < 0 || map->type > 5)
28435 + if (map->type >= ARRAY_SIZE(types))
28436 type = "??";
28437 else
28438 type = types[map->type];
28439 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
28440 struct drm_device *dev = node->minor->dev;
28441
28442 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
28443 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
28444 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
28445 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
28446 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
28447 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
28448 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
28449 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
28450 seq_printf(m, "%d gtt total\n", dev->gtt_total);
28451 return 0;
28452 }
28453 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
28454 mutex_lock(&dev->struct_mutex);
28455 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
28456 atomic_read(&dev->vma_count),
28457 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28458 + NULL, 0);
28459 +#else
28460 high_memory, (u64)virt_to_phys(high_memory));
28461 +#endif
28462
28463 list_for_each_entry(pt, &dev->vmalist, head) {
28464 vma = pt->vma;
28465 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
28466 continue;
28467 seq_printf(m,
28468 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
28469 - pt->pid, vma->vm_start, vma->vm_end,
28470 + pt->pid,
28471 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28472 + 0, 0,
28473 +#else
28474 + vma->vm_start, vma->vm_end,
28475 +#endif
28476 vma->vm_flags & VM_READ ? 'r' : '-',
28477 vma->vm_flags & VM_WRITE ? 'w' : '-',
28478 vma->vm_flags & VM_EXEC ? 'x' : '-',
28479 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28480 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28481 vma->vm_flags & VM_IO ? 'i' : '-',
28482 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28483 + 0);
28484 +#else
28485 vma->vm_pgoff);
28486 +#endif
28487
28488 #if defined(__i386__)
28489 pgprot = pgprot_val(vma->vm_page_prot);
28490 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c
28491 --- linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28492 +++ linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28493 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
28494 stats->data[i].value =
28495 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28496 else
28497 - stats->data[i].value = atomic_read(&dev->counts[i]);
28498 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28499 stats->data[i].type = dev->types[i];
28500 }
28501
28502 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_lock.c linux-2.6.32.42/drivers/gpu/drm/drm_lock.c
28503 --- linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
28504 +++ linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
28505 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
28506 if (drm_lock_take(&master->lock, lock->context)) {
28507 master->lock.file_priv = file_priv;
28508 master->lock.lock_time = jiffies;
28509 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28510 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28511 break; /* Got lock */
28512 }
28513
28514 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
28515 return -EINVAL;
28516 }
28517
28518 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28519 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28520
28521 /* kernel_context_switch isn't used by any of the x86 drm
28522 * modules but is required by the Sparc driver.
28523 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c
28524 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
28525 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
28526 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
28527 dma->buflist[vertex->idx],
28528 vertex->discard, vertex->used);
28529
28530 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28531 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28532 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28533 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28534 sarea_priv->last_enqueue = dev_priv->counter - 1;
28535 sarea_priv->last_dispatch = (int)hw_status[5];
28536
28537 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
28538 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28539 mc->last_render);
28540
28541 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28542 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28543 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28544 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28545 sarea_priv->last_enqueue = dev_priv->counter - 1;
28546 sarea_priv->last_dispatch = (int)hw_status[5];
28547
28548 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h
28549 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
28550 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
28551 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28552 int page_flipping;
28553
28554 wait_queue_head_t irq_queue;
28555 - atomic_t irq_received;
28556 - atomic_t irq_emitted;
28557 + atomic_unchecked_t irq_received;
28558 + atomic_unchecked_t irq_emitted;
28559
28560 int front_offset;
28561 } drm_i810_private_t;
28562 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h
28563 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
28564 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
28565 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
28566 int page_flipping;
28567
28568 wait_queue_head_t irq_queue;
28569 - atomic_t irq_received;
28570 - atomic_t irq_emitted;
28571 + atomic_unchecked_t irq_received;
28572 + atomic_unchecked_t irq_emitted;
28573
28574 int use_mi_batchbuffer_start;
28575
28576 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c
28577 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
28578 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
28579 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
28580
28581 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
28582
28583 - atomic_inc(&dev_priv->irq_received);
28584 + atomic_inc_unchecked(&dev_priv->irq_received);
28585 wake_up_interruptible(&dev_priv->irq_queue);
28586
28587 return IRQ_HANDLED;
28588 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
28589
28590 DRM_DEBUG("%s\n", __func__);
28591
28592 - atomic_inc(&dev_priv->irq_emitted);
28593 + atomic_inc_unchecked(&dev_priv->irq_emitted);
28594
28595 BEGIN_LP_RING(2);
28596 OUT_RING(0);
28597 OUT_RING(GFX_OP_USER_INTERRUPT);
28598 ADVANCE_LP_RING();
28599
28600 - return atomic_read(&dev_priv->irq_emitted);
28601 + return atomic_read_unchecked(&dev_priv->irq_emitted);
28602 }
28603
28604 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
28605 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28606
28607 DRM_DEBUG("%s\n", __func__);
28608
28609 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28610 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28611 return 0;
28612
28613 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28614 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28615
28616 for (;;) {
28617 __set_current_state(TASK_INTERRUPTIBLE);
28618 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28619 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28620 break;
28621 if ((signed)(end - jiffies) <= 0) {
28622 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28623 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28624 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28625 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28626 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28627 - atomic_set(&dev_priv->irq_received, 0);
28628 - atomic_set(&dev_priv->irq_emitted, 0);
28629 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28630 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28631 init_waitqueue_head(&dev_priv->irq_queue);
28632 }
28633
28634 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c
28635 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28636 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28637 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28638 }
28639 }
28640
28641 -struct intel_dvo_dev_ops ch7017_ops = {
28642 +const struct intel_dvo_dev_ops ch7017_ops = {
28643 .init = ch7017_init,
28644 .detect = ch7017_detect,
28645 .mode_valid = ch7017_mode_valid,
28646 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c
28647 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28648 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28649 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28650 }
28651 }
28652
28653 -struct intel_dvo_dev_ops ch7xxx_ops = {
28654 +const struct intel_dvo_dev_ops ch7xxx_ops = {
28655 .init = ch7xxx_init,
28656 .detect = ch7xxx_detect,
28657 .mode_valid = ch7xxx_mode_valid,
28658 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h
28659 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28660 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28661 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28662 *
28663 * \return singly-linked list of modes or NULL if no modes found.
28664 */
28665 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28666 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28667
28668 /**
28669 * Clean up driver-specific bits of the output
28670 */
28671 - void (*destroy) (struct intel_dvo_device *dvo);
28672 + void (* const destroy) (struct intel_dvo_device *dvo);
28673
28674 /**
28675 * Debugging hook to dump device registers to log file
28676 */
28677 - void (*dump_regs)(struct intel_dvo_device *dvo);
28678 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28679 };
28680
28681 -extern struct intel_dvo_dev_ops sil164_ops;
28682 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28683 -extern struct intel_dvo_dev_ops ivch_ops;
28684 -extern struct intel_dvo_dev_ops tfp410_ops;
28685 -extern struct intel_dvo_dev_ops ch7017_ops;
28686 +extern const struct intel_dvo_dev_ops sil164_ops;
28687 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28688 +extern const struct intel_dvo_dev_ops ivch_ops;
28689 +extern const struct intel_dvo_dev_ops tfp410_ops;
28690 +extern const struct intel_dvo_dev_ops ch7017_ops;
28691
28692 #endif /* _INTEL_DVO_H */
28693 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c
28694 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28695 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28696 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28697 }
28698 }
28699
28700 -struct intel_dvo_dev_ops ivch_ops= {
28701 +const struct intel_dvo_dev_ops ivch_ops= {
28702 .init = ivch_init,
28703 .dpms = ivch_dpms,
28704 .save = ivch_save,
28705 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c
28706 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28707 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28708 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28709 }
28710 }
28711
28712 -struct intel_dvo_dev_ops sil164_ops = {
28713 +const struct intel_dvo_dev_ops sil164_ops = {
28714 .init = sil164_init,
28715 .detect = sil164_detect,
28716 .mode_valid = sil164_mode_valid,
28717 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c
28718 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28719 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28720 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28721 }
28722 }
28723
28724 -struct intel_dvo_dev_ops tfp410_ops = {
28725 +const struct intel_dvo_dev_ops tfp410_ops = {
28726 .init = tfp410_init,
28727 .detect = tfp410_detect,
28728 .mode_valid = tfp410_mode_valid,
28729 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c
28730 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28731 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28732 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28733 I915_READ(GTIMR));
28734 }
28735 seq_printf(m, "Interrupts received: %d\n",
28736 - atomic_read(&dev_priv->irq_received));
28737 + atomic_read_unchecked(&dev_priv->irq_received));
28738 if (dev_priv->hw_status_page != NULL) {
28739 seq_printf(m, "Current sequence: %d\n",
28740 i915_get_gem_seqno(dev));
28741 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c
28742 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28743 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28744 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28745 return i915_resume(dev);
28746 }
28747
28748 -static struct vm_operations_struct i915_gem_vm_ops = {
28749 +static const struct vm_operations_struct i915_gem_vm_ops = {
28750 .fault = i915_gem_fault,
28751 .open = drm_gem_vm_open,
28752 .close = drm_gem_vm_close,
28753 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h
28754 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28755 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28756 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28757 int page_flipping;
28758
28759 wait_queue_head_t irq_queue;
28760 - atomic_t irq_received;
28761 + atomic_unchecked_t irq_received;
28762 /** Protects user_irq_refcount and irq_mask_reg */
28763 spinlock_t user_irq_lock;
28764 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28765 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c
28766 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28767 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28768 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28769
28770 args->aper_size = dev->gtt_total;
28771 args->aper_available_size = (args->aper_size -
28772 - atomic_read(&dev->pin_memory));
28773 + atomic_read_unchecked(&dev->pin_memory));
28774
28775 return 0;
28776 }
28777 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28778 return -EINVAL;
28779 }
28780
28781 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28782 + drm_gem_object_unreference(obj);
28783 + return -EFAULT;
28784 + }
28785 +
28786 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28787 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28788 } else {
28789 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28790 return -EINVAL;
28791 }
28792
28793 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28794 + drm_gem_object_unreference(obj);
28795 + return -EFAULT;
28796 + }
28797 +
28798 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28799 * it would end up going through the fenced access, and we'll get
28800 * different detiling behavior between reading and writing.
28801 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28802
28803 if (obj_priv->gtt_space) {
28804 atomic_dec(&dev->gtt_count);
28805 - atomic_sub(obj->size, &dev->gtt_memory);
28806 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28807
28808 drm_mm_put_block(obj_priv->gtt_space);
28809 obj_priv->gtt_space = NULL;
28810 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28811 goto search_free;
28812 }
28813 atomic_inc(&dev->gtt_count);
28814 - atomic_add(obj->size, &dev->gtt_memory);
28815 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28816
28817 /* Assert that the object is not currently in any GPU domain. As it
28818 * wasn't in the GTT, there shouldn't be any way it could have been in
28819 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28820 "%d/%d gtt bytes\n",
28821 atomic_read(&dev->object_count),
28822 atomic_read(&dev->pin_count),
28823 - atomic_read(&dev->object_memory),
28824 - atomic_read(&dev->pin_memory),
28825 - atomic_read(&dev->gtt_memory),
28826 + atomic_read_unchecked(&dev->object_memory),
28827 + atomic_read_unchecked(&dev->pin_memory),
28828 + atomic_read_unchecked(&dev->gtt_memory),
28829 dev->gtt_total);
28830 }
28831 goto err;
28832 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28833 */
28834 if (obj_priv->pin_count == 1) {
28835 atomic_inc(&dev->pin_count);
28836 - atomic_add(obj->size, &dev->pin_memory);
28837 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28838 if (!obj_priv->active &&
28839 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28840 !list_empty(&obj_priv->list))
28841 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28842 list_move_tail(&obj_priv->list,
28843 &dev_priv->mm.inactive_list);
28844 atomic_dec(&dev->pin_count);
28845 - atomic_sub(obj->size, &dev->pin_memory);
28846 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28847 }
28848 i915_verify_inactive(dev, __FILE__, __LINE__);
28849 }
28850 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c
28851 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28852 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28853 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28854 int irq_received;
28855 int ret = IRQ_NONE;
28856
28857 - atomic_inc(&dev_priv->irq_received);
28858 + atomic_inc_unchecked(&dev_priv->irq_received);
28859
28860 if (IS_IGDNG(dev))
28861 return igdng_irq_handler(dev);
28862 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28863 {
28864 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28865
28866 - atomic_set(&dev_priv->irq_received, 0);
28867 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28868
28869 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28870 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28871 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h
28872 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28873 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28874 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28875 u32 clear_cmd;
28876 u32 maccess;
28877
28878 - atomic_t vbl_received; /**< Number of vblanks received. */
28879 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28880 wait_queue_head_t fence_queue;
28881 - atomic_t last_fence_retired;
28882 + atomic_unchecked_t last_fence_retired;
28883 u32 next_fence_to_post;
28884
28885 unsigned int fb_cpp;
28886 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c
28887 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28888 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28889 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28890 if (crtc != 0)
28891 return 0;
28892
28893 - return atomic_read(&dev_priv->vbl_received);
28894 + return atomic_read_unchecked(&dev_priv->vbl_received);
28895 }
28896
28897
28898 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28899 /* VBLANK interrupt */
28900 if (status & MGA_VLINEPEN) {
28901 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28902 - atomic_inc(&dev_priv->vbl_received);
28903 + atomic_inc_unchecked(&dev_priv->vbl_received);
28904 drm_handle_vblank(dev, 0);
28905 handled = 1;
28906 }
28907 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28908 MGA_WRITE(MGA_PRIMEND, prim_end);
28909 }
28910
28911 - atomic_inc(&dev_priv->last_fence_retired);
28912 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28913 DRM_WAKEUP(&dev_priv->fence_queue);
28914 handled = 1;
28915 }
28916 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28917 * using fences.
28918 */
28919 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28920 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28921 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28922 - *sequence) <= (1 << 23)));
28923
28924 *sequence = cur_fence;
28925 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c
28926 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28927 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28928 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28929
28930 /* GH: Simple idle check.
28931 */
28932 - atomic_set(&dev_priv->idle_count, 0);
28933 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28934
28935 /* We don't support anything other than bus-mastering ring mode,
28936 * but the ring can be in either AGP or PCI space for the ring
28937 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h
28938 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28939 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28940 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28941 int is_pci;
28942 unsigned long cce_buffers_offset;
28943
28944 - atomic_t idle_count;
28945 + atomic_unchecked_t idle_count;
28946
28947 int page_flipping;
28948 int current_page;
28949 u32 crtc_offset;
28950 u32 crtc_offset_cntl;
28951
28952 - atomic_t vbl_received;
28953 + atomic_unchecked_t vbl_received;
28954
28955 u32 color_fmt;
28956 unsigned int front_offset;
28957 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c
28958 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28959 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28960 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28961 if (crtc != 0)
28962 return 0;
28963
28964 - return atomic_read(&dev_priv->vbl_received);
28965 + return atomic_read_unchecked(&dev_priv->vbl_received);
28966 }
28967
28968 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28969 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28970 /* VBLANK interrupt */
28971 if (status & R128_CRTC_VBLANK_INT) {
28972 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28973 - atomic_inc(&dev_priv->vbl_received);
28974 + atomic_inc_unchecked(&dev_priv->vbl_received);
28975 drm_handle_vblank(dev, 0);
28976 return IRQ_HANDLED;
28977 }
28978 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c
28979 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28980 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28981 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28982
28983 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28984 {
28985 - if (atomic_read(&dev_priv->idle_count) == 0) {
28986 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28987 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28988 } else {
28989 - atomic_set(&dev_priv->idle_count, 0);
28990 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28991 }
28992 }
28993
28994 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c
28995 --- linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28996 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28997 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28998 char name[512];
28999 int i;
29000
29001 + pax_track_stack();
29002 +
29003 ctx->card = card;
29004 ctx->bios = bios;
29005
29006 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c
29007 --- linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29008 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29009 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29010 regex_t mask_rex;
29011 regmatch_t match[4];
29012 char buf[1024];
29013 - size_t end;
29014 + long end;
29015 int len;
29016 int done = 0;
29017 int r;
29018 unsigned o;
29019 struct offset *offset;
29020 char last_reg_s[10];
29021 - int last_reg;
29022 + unsigned long last_reg;
29023
29024 if (regcomp
29025 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29026 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c
29027 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29028 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29029 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29030 bool linkb;
29031 struct radeon_i2c_bus_rec ddc_bus;
29032
29033 + pax_track_stack();
29034 +
29035 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29036
29037 if (data_offset == 0)
29038 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29039 }
29040 }
29041
29042 -struct bios_connector {
29043 +static struct bios_connector {
29044 bool valid;
29045 uint16_t line_mux;
29046 uint16_t devices;
29047 int connector_type;
29048 struct radeon_i2c_bus_rec ddc_bus;
29049 -};
29050 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29051
29052 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29053 drm_device
29054 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29055 uint8_t dac;
29056 union atom_supported_devices *supported_devices;
29057 int i, j;
29058 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29059
29060 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29061
29062 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c
29063 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29064 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29065 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29066
29067 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29068 error = freq - current_freq;
29069 - error = error < 0 ? 0xffffffff : error;
29070 + error = (int32_t)error < 0 ? 0xffffffff : error;
29071 } else
29072 error = abs(current_freq - freq);
29073 vco_diff = abs(vco - best_vco);
29074 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h
29075 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29076 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29077 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29078
29079 /* SW interrupt */
29080 wait_queue_head_t swi_queue;
29081 - atomic_t swi_emitted;
29082 + atomic_unchecked_t swi_emitted;
29083 int vblank_crtc;
29084 uint32_t irq_enable_reg;
29085 uint32_t r500_disp_irq_reg;
29086 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c
29087 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29088 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29089 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29090 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29091 return 0;
29092 }
29093 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29094 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29095 if (!rdev->cp.ready) {
29096 /* FIXME: cp is not running assume everythings is done right
29097 * away
29098 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29099 return r;
29100 }
29101 WREG32(rdev->fence_drv.scratch_reg, 0);
29102 - atomic_set(&rdev->fence_drv.seq, 0);
29103 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29104 INIT_LIST_HEAD(&rdev->fence_drv.created);
29105 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29106 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29107 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h
29108 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29109 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
29110 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29111 */
29112 struct radeon_fence_driver {
29113 uint32_t scratch_reg;
29114 - atomic_t seq;
29115 + atomic_unchecked_t seq;
29116 uint32_t last_seq;
29117 unsigned long count_timeout;
29118 wait_queue_head_t queue;
29119 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c
29120 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29121 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29122 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29123 request = compat_alloc_user_space(sizeof(*request));
29124 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29125 || __put_user(req32.param, &request->param)
29126 - || __put_user((void __user *)(unsigned long)req32.value,
29127 + || __put_user((unsigned long)req32.value,
29128 &request->value))
29129 return -EFAULT;
29130
29131 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c
29132 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29133 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29134 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29135 unsigned int ret;
29136 RING_LOCALS;
29137
29138 - atomic_inc(&dev_priv->swi_emitted);
29139 - ret = atomic_read(&dev_priv->swi_emitted);
29140 + atomic_inc_unchecked(&dev_priv->swi_emitted);
29141 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29142
29143 BEGIN_RING(4);
29144 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29145 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29146 drm_radeon_private_t *dev_priv =
29147 (drm_radeon_private_t *) dev->dev_private;
29148
29149 - atomic_set(&dev_priv->swi_emitted, 0);
29150 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29151 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29152
29153 dev->max_vblank_count = 0x001fffff;
29154 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c
29155 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29156 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29157 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29158 {
29159 drm_radeon_private_t *dev_priv = dev->dev_private;
29160 drm_radeon_getparam_t *param = data;
29161 - int value;
29162 + int value = 0;
29163
29164 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29165
29166 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c
29167 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29168 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29169 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29170 DRM_INFO("radeon: ttm finalized\n");
29171 }
29172
29173 -static struct vm_operations_struct radeon_ttm_vm_ops;
29174 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
29175 -
29176 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29177 -{
29178 - struct ttm_buffer_object *bo;
29179 - int r;
29180 -
29181 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
29182 - if (bo == NULL) {
29183 - return VM_FAULT_NOPAGE;
29184 - }
29185 - r = ttm_vm_ops->fault(vma, vmf);
29186 - return r;
29187 -}
29188 -
29189 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29190 {
29191 struct drm_file *file_priv;
29192 struct radeon_device *rdev;
29193 - int r;
29194
29195 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29196 return drm_mmap(filp, vma);
29197 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29198
29199 file_priv = (struct drm_file *)filp->private_data;
29200 rdev = file_priv->minor->dev->dev_private;
29201 - if (rdev == NULL) {
29202 + if (!rdev)
29203 return -EINVAL;
29204 - }
29205 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29206 - if (unlikely(r != 0)) {
29207 - return r;
29208 - }
29209 - if (unlikely(ttm_vm_ops == NULL)) {
29210 - ttm_vm_ops = vma->vm_ops;
29211 - radeon_ttm_vm_ops = *ttm_vm_ops;
29212 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29213 - }
29214 - vma->vm_ops = &radeon_ttm_vm_ops;
29215 - return 0;
29216 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29217 }
29218
29219
29220 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c
29221 --- linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29222 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29223 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29224 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29225 rdev->pm.sideport_bandwidth.full)
29226 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29227 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29228 + read_delay_latency.full = rfixed_const(800 * 1000);
29229 read_delay_latency.full = rfixed_div(read_delay_latency,
29230 rdev->pm.igp_sideport_mclk);
29231 + a.full = rfixed_const(370);
29232 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29233 } else {
29234 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29235 rdev->pm.k8_bandwidth.full)
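The rs690 hunk above reorders a fixed-point computation so the intermediate constant no longer overflows: 370 * 800 * 1000 = 296,000,000 does not survive conversion into the driver's fixed-point representation, whereas 800 * 1000 does, so the division by the sideport clock is done first and the factor of 370 is multiplied in afterwards. A rough user-space sketch, assuming a 20.12-style format like the radeon rfixed helpers (the kernel's actual types are not reproduced here):

#include <stdint.h>
#include <stdio.h>

static uint32_t fx_const(uint32_t a) { return a << 12; }
static uint32_t fx_div(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a << 12) / b); }
static uint32_t fx_mul(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a * b) >> 12); }

int main(void)
{
    uint32_t mclk = fx_const(400);        /* hypothetical sideport memory clock */

    /* Old order: the constant wraps as soon as it is shifted into fixed point. */
    printf("old constant (wrapped): %u\n", fx_const(370u * 800u * 1000u));

    /* New order: convert the smaller constant, divide, then scale by 370. */
    uint32_t lat = fx_div(fx_const(800u * 1000u), mclk);
    lat = fx_mul(lat, fx_const(370));
    printf("read delay latency (fixed point): %u\n", lat);
    return 0;
}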
29236 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c
29237 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29238 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29239 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29240 NULL
29241 };
29242
29243 -static struct sysfs_ops ttm_bo_global_ops = {
29244 +static const struct sysfs_ops ttm_bo_global_ops = {
29245 .show = &ttm_bo_global_show
29246 };
29247
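The ttm_bo.c hunk above (and the matching ones for ttm_memory.c and the InfiniBand sysfs code later in this patch) only adds const to a sysfs_ops table, but that is enough to let the structure be placed in read-only data, so the function pointers it holds cannot be overwritten at runtime. A small stand-alone sketch of the idea, with illustrative names:

#include <stdio.h>

struct ops_sketch {
    void (*show)(void);
};

static void show_impl(void) { puts("show called"); }

static const struct ops_sketch ro_ops = {   /* const => may be placed in .rodata */
    .show = show_impl,
};

int main(void)
{
    ro_ops.show();
    return 0;
}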
29248 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c
29249 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29250 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29251 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29252 {
29253 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29254 vma->vm_private_data;
29255 - struct ttm_bo_device *bdev = bo->bdev;
29256 + struct ttm_bo_device *bdev;
29257 unsigned long bus_base;
29258 unsigned long bus_offset;
29259 unsigned long bus_size;
29260 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29261 unsigned long address = (unsigned long)vmf->virtual_address;
29262 int retval = VM_FAULT_NOPAGE;
29263
29264 + if (!bo)
29265 + return VM_FAULT_NOPAGE;
29266 + bdev = bo->bdev;
29267 +
29268 /*
29269 * Work around locking order reversal in fault / nopfn
29270 * between mmap_sem and bo_reserve: Perform a trylock operation
29271 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c
29272 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29273 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29274 @@ -36,7 +36,7 @@
29275 struct ttm_global_item {
29276 struct mutex mutex;
29277 void *object;
29278 - int refcount;
29279 + atomic_t refcount;
29280 };
29281
29282 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29283 @@ -49,7 +49,7 @@ void ttm_global_init(void)
29284 struct ttm_global_item *item = &glob[i];
29285 mutex_init(&item->mutex);
29286 item->object = NULL;
29287 - item->refcount = 0;
29288 + atomic_set(&item->refcount, 0);
29289 }
29290 }
29291
29292 @@ -59,7 +59,7 @@ void ttm_global_release(void)
29293 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29294 struct ttm_global_item *item = &glob[i];
29295 BUG_ON(item->object != NULL);
29296 - BUG_ON(item->refcount != 0);
29297 + BUG_ON(atomic_read(&item->refcount) != 0);
29298 }
29299 }
29300
29301 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29302 void *object;
29303
29304 mutex_lock(&item->mutex);
29305 - if (item->refcount == 0) {
29306 + if (atomic_read(&item->refcount) == 0) {
29307 item->object = kzalloc(ref->size, GFP_KERNEL);
29308 if (unlikely(item->object == NULL)) {
29309 ret = -ENOMEM;
29310 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29311 goto out_err;
29312
29313 }
29314 - ++item->refcount;
29315 + atomic_inc(&item->refcount);
29316 ref->object = item->object;
29317 object = item->object;
29318 mutex_unlock(&item->mutex);
29319 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29320 struct ttm_global_item *item = &glob[ref->global_type];
29321
29322 mutex_lock(&item->mutex);
29323 - BUG_ON(item->refcount == 0);
29324 + BUG_ON(atomic_read(&item->refcount) == 0);
29325 BUG_ON(ref->object != item->object);
29326 - if (--item->refcount == 0) {
29327 + if (atomic_dec_and_test(&item->refcount)) {
29328 ref->release(ref);
29329 item->object = NULL;
29330 }
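In ttm_global.c the reference count that used to be a plain int under item->mutex becomes an atomic counter, and the drop path turns into a dec-and-test. A user-space sketch of the same pattern with C11 atomics standing in for the kernel's atomic_t helpers (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;

static void item_ref(void)
{
    if (atomic_fetch_add(&refcount, 1) == 0)          /* first reference */
        puts("allocate the shared object");
}

static void item_unref(void)
{
    if (atomic_fetch_sub(&refcount, 1) == 1)          /* mirrors atomic_dec_and_test() */
        puts("last reference gone: release the shared object");
}

int main(void)
{
    item_ref();
    item_ref();
    item_unref();
    item_unref();
    return 0;
}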
29331 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c
29332 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29333 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29334 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29335 NULL
29336 };
29337
29338 -static struct sysfs_ops ttm_mem_zone_ops = {
29339 +static const struct sysfs_ops ttm_mem_zone_ops = {
29340 .show = &ttm_mem_zone_show,
29341 .store = &ttm_mem_zone_store
29342 };
29343 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h
29344 --- linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29345 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29346 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29347 typedef uint32_t maskarray_t[5];
29348
29349 typedef struct drm_via_irq {
29350 - atomic_t irq_received;
29351 + atomic_unchecked_t irq_received;
29352 uint32_t pending_mask;
29353 uint32_t enable_mask;
29354 wait_queue_head_t irq_queue;
29355 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29356 struct timeval last_vblank;
29357 int last_vblank_valid;
29358 unsigned usec_per_vblank;
29359 - atomic_t vbl_received;
29360 + atomic_unchecked_t vbl_received;
29361 drm_via_state_t hc_state;
29362 char pci_buf[VIA_PCI_BUF_SIZE];
29363 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29364 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c
29365 --- linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29366 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29367 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29368 if (crtc != 0)
29369 return 0;
29370
29371 - return atomic_read(&dev_priv->vbl_received);
29372 + return atomic_read_unchecked(&dev_priv->vbl_received);
29373 }
29374
29375 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29376 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29377
29378 status = VIA_READ(VIA_REG_INTERRUPT);
29379 if (status & VIA_IRQ_VBLANK_PENDING) {
29380 - atomic_inc(&dev_priv->vbl_received);
29381 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29382 + atomic_inc_unchecked(&dev_priv->vbl_received);
29383 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29384 do_gettimeofday(&cur_vblank);
29385 if (dev_priv->last_vblank_valid) {
29386 dev_priv->usec_per_vblank =
29387 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29388 dev_priv->last_vblank = cur_vblank;
29389 dev_priv->last_vblank_valid = 1;
29390 }
29391 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29392 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29393 DRM_DEBUG("US per vblank is: %u\n",
29394 dev_priv->usec_per_vblank);
29395 }
29396 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29397
29398 for (i = 0; i < dev_priv->num_irqs; ++i) {
29399 if (status & cur_irq->pending_mask) {
29400 - atomic_inc(&cur_irq->irq_received);
29401 + atomic_inc_unchecked(&cur_irq->irq_received);
29402 DRM_WAKEUP(&cur_irq->irq_queue);
29403 handled = 1;
29404 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
29405 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
29406 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29407 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29408 masks[irq][4]));
29409 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29410 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29411 } else {
29412 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29413 (((cur_irq_sequence =
29414 - atomic_read(&cur_irq->irq_received)) -
29415 + atomic_read_unchecked(&cur_irq->irq_received)) -
29416 *sequence) <= (1 << 23)));
29417 }
29418 *sequence = cur_irq_sequence;
29419 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
29420 }
29421
29422 for (i = 0; i < dev_priv->num_irqs; ++i) {
29423 - atomic_set(&cur_irq->irq_received, 0);
29424 + atomic_set_unchecked(&cur_irq->irq_received, 0);
29425 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29426 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29427 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29428 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
29429 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29430 case VIA_IRQ_RELATIVE:
29431 irqwait->request.sequence +=
29432 - atomic_read(&cur_irq->irq_received);
29433 + atomic_read_unchecked(&cur_irq->irq_received);
29434 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29435 case VIA_IRQ_ABSOLUTE:
29436 break;
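The via_drv.h/via_irq.c changes above follow a pattern that recurs throughout this patch: counters that are pure statistics (vblank ticks, per-IRQ tallies, device numbering) are moved from atomic_t to atomic_unchecked_t so that PaX's reference-counter overflow protection does not trigger on values that are expected to wrap, while genuine reference counts stay on the checked type. The snippet below is only a guess at the shape of such a type and its helpers, not the patch's actual definitions:

#include <stdio.h>

/* Illustrative stand-in: same layout as a checked atomic, but its helpers do
 * plain arithmetic with no trap on wrap-around. */
typedef struct { volatile int counter; } atomic_unchecked_sketch_t;

static void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
    __sync_fetch_and_add(&v->counter, 1);
}

static int atomic_read_unchecked_sketch(const atomic_unchecked_sketch_t *v)
{
    return v->counter;
}

int main(void)
{
    atomic_unchecked_sketch_t vbl_received = { 0 };

    atomic_inc_unchecked_sketch(&vbl_received);
    printf("vblanks seen: %d\n", atomic_read_unchecked_sketch(&vbl_received));
    return 0;
}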
29437 diff -urNp linux-2.6.32.42/drivers/hid/hid-core.c linux-2.6.32.42/drivers/hid/hid-core.c
29438 --- linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
29439 +++ linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
29440 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
29441
29442 int hid_add_device(struct hid_device *hdev)
29443 {
29444 - static atomic_t id = ATOMIC_INIT(0);
29445 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29446 int ret;
29447
29448 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29449 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
29450 /* XXX hack, any other cleaner solution after the driver core
29451 * is converted to allow more than 20 bytes as the device name? */
29452 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29453 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29454 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29455
29456 ret = device_add(&hdev->dev);
29457 if (!ret)
29458 diff -urNp linux-2.6.32.42/drivers/hid/usbhid/hiddev.c linux-2.6.32.42/drivers/hid/usbhid/hiddev.c
29459 --- linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
29460 +++ linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
29461 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
29462 return put_user(HID_VERSION, (int __user *)arg);
29463
29464 case HIDIOCAPPLICATION:
29465 - if (arg < 0 || arg >= hid->maxapplication)
29466 + if (arg >= hid->maxapplication)
29467 return -EINVAL;
29468
29469 for (i = 0; i < hid->maxcollection; i++)
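The hiddev hunk above removes a dead test: the ioctl arg is an unsigned long, so "arg < 0" can never be true and only obscures the bound that actually protects the loop below it. A short user-space illustration:

#include <stdio.h>

int main(void)
{
    unsigned long arg = (unsigned long)-1;   /* what a "negative" argument becomes */
    unsigned int maxapplication = 4;

    if (arg < 0)                             /* always false for an unsigned type */
        puts("never reached");
    if (arg >= maxapplication)               /* the check that actually matters */
        puts("rejected: out of range");
    return 0;
}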
29470 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.c linux-2.6.32.42/drivers/hwmon/lis3lv02d.c
29471 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
29472 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
29473 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
29474 * the lid is closed. This leads to interrupts as soon as a little move
29475 * is done.
29476 */
29477 - atomic_inc(&lis3_dev.count);
29478 + atomic_inc_unchecked(&lis3_dev.count);
29479
29480 wake_up_interruptible(&lis3_dev.misc_wait);
29481 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29482 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
29483 if (test_and_set_bit(0, &lis3_dev.misc_opened))
29484 return -EBUSY; /* already open */
29485
29486 - atomic_set(&lis3_dev.count, 0);
29487 + atomic_set_unchecked(&lis3_dev.count, 0);
29488
29489 /*
29490 * The sensor can generate interrupts for free-fall and direction
29491 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
29492 add_wait_queue(&lis3_dev.misc_wait, &wait);
29493 while (true) {
29494 set_current_state(TASK_INTERRUPTIBLE);
29495 - data = atomic_xchg(&lis3_dev.count, 0);
29496 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29497 if (data)
29498 break;
29499
29500 @@ -244,7 +244,7 @@ out:
29501 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29502 {
29503 poll_wait(file, &lis3_dev.misc_wait, wait);
29504 - if (atomic_read(&lis3_dev.count))
29505 + if (atomic_read_unchecked(&lis3_dev.count))
29506 return POLLIN | POLLRDNORM;
29507 return 0;
29508 }
29509 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.h linux-2.6.32.42/drivers/hwmon/lis3lv02d.h
29510 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
29511 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
29512 @@ -201,7 +201,7 @@ struct lis3lv02d {
29513
29514 struct input_polled_dev *idev; /* input device */
29515 struct platform_device *pdev; /* platform device */
29516 - atomic_t count; /* interrupt count after last read */
29517 + atomic_unchecked_t count; /* interrupt count after last read */
29518 int xcalib; /* calibrated null value for x */
29519 int ycalib; /* calibrated null value for y */
29520 int zcalib; /* calibrated null value for z */
29521 diff -urNp linux-2.6.32.42/drivers/hwmon/sht15.c linux-2.6.32.42/drivers/hwmon/sht15.c
29522 --- linux-2.6.32.42/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
29523 +++ linux-2.6.32.42/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
29524 @@ -112,7 +112,7 @@ struct sht15_data {
29525 int supply_uV;
29526 int supply_uV_valid;
29527 struct work_struct update_supply_work;
29528 - atomic_t interrupt_handled;
29529 + atomic_unchecked_t interrupt_handled;
29530 };
29531
29532 /**
29533 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
29534 return ret;
29535
29536 gpio_direction_input(data->pdata->gpio_data);
29537 - atomic_set(&data->interrupt_handled, 0);
29538 + atomic_set_unchecked(&data->interrupt_handled, 0);
29539
29540 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29541 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29542 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29543 /* Only relevant if the interrupt hasn't occured. */
29544 - if (!atomic_read(&data->interrupt_handled))
29545 + if (!atomic_read_unchecked(&data->interrupt_handled))
29546 schedule_work(&data->read_work);
29547 }
29548 ret = wait_event_timeout(data->wait_queue,
29549 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
29550 struct sht15_data *data = d;
29551 /* First disable the interrupt */
29552 disable_irq_nosync(irq);
29553 - atomic_inc(&data->interrupt_handled);
29554 + atomic_inc_unchecked(&data->interrupt_handled);
29555 /* Then schedule a reading work struct */
29556 if (data->flag != SHT15_READING_NOTHING)
29557 schedule_work(&data->read_work);
29558 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
29559 here as could have gone low in meantime so verify
29560 it hasn't!
29561 */
29562 - atomic_set(&data->interrupt_handled, 0);
29563 + atomic_set_unchecked(&data->interrupt_handled, 0);
29564 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29565 /* If still not occured or another handler has been scheduled */
29566 if (gpio_get_value(data->pdata->gpio_data)
29567 - || atomic_read(&data->interrupt_handled))
29568 + || atomic_read_unchecked(&data->interrupt_handled))
29569 return;
29570 }
29571 /* Read the data back from the device */
29572 diff -urNp linux-2.6.32.42/drivers/hwmon/w83791d.c linux-2.6.32.42/drivers/hwmon/w83791d.c
29573 --- linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
29574 +++ linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
29575 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
29576 struct i2c_board_info *info);
29577 static int w83791d_remove(struct i2c_client *client);
29578
29579 -static int w83791d_read(struct i2c_client *client, u8 register);
29580 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
29581 +static int w83791d_read(struct i2c_client *client, u8 reg);
29582 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
29583 static struct w83791d_data *w83791d_update_device(struct device *dev);
29584
29585 #ifdef DEBUG
29586 diff -urNp linux-2.6.32.42/drivers/ide/ide-cd.c linux-2.6.32.42/drivers/ide/ide-cd.c
29587 --- linux-2.6.32.42/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
29588 +++ linux-2.6.32.42/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
29589 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
29590 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29591 if ((unsigned long)buf & alignment
29592 || blk_rq_bytes(rq) & q->dma_pad_mask
29593 - || object_is_on_stack(buf))
29594 + || object_starts_on_stack(buf))
29595 drive->dma = 0;
29596 }
29597 }
29598 diff -urNp linux-2.6.32.42/drivers/ide/ide-floppy.c linux-2.6.32.42/drivers/ide/ide-floppy.c
29599 --- linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
29600 +++ linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
29601 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
29602 u8 pc_buf[256], header_len, desc_cnt;
29603 int i, rc = 1, blocks, length;
29604
29605 + pax_track_stack();
29606 +
29607 ide_debug_log(IDE_DBG_FUNC, "enter");
29608
29609 drive->bios_cyl = 0;
29610 diff -urNp linux-2.6.32.42/drivers/ide/setup-pci.c linux-2.6.32.42/drivers/ide/setup-pci.c
29611 --- linux-2.6.32.42/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29612 +++ linux-2.6.32.42/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29613 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29614 int ret, i, n_ports = dev2 ? 4 : 2;
29615 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29616
29617 + pax_track_stack();
29618 +
29619 for (i = 0; i < n_ports / 2; i++) {
29620 ret = ide_setup_pci_controller(pdev[i], d, !i);
29621 if (ret < 0)
29622 diff -urNp linux-2.6.32.42/drivers/ieee1394/dv1394.c linux-2.6.32.42/drivers/ieee1394/dv1394.c
29623 --- linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29624 +++ linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29625 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29626 based upon DIF section and sequence
29627 */
29628
29629 -static void inline
29630 +static inline void
29631 frame_put_packet (struct frame *f, struct packet *p)
29632 {
29633 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29634 diff -urNp linux-2.6.32.42/drivers/ieee1394/hosts.c linux-2.6.32.42/drivers/ieee1394/hosts.c
29635 --- linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29636 +++ linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29637 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29638 }
29639
29640 static struct hpsb_host_driver dummy_driver = {
29641 + .name = "dummy",
29642 .transmit_packet = dummy_transmit_packet,
29643 .devctl = dummy_devctl,
29644 .isoctl = dummy_isoctl
29645 diff -urNp linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c
29646 --- linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29647 +++ linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29648 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29649 for (func = 0; func < 8; func++) {
29650 u32 class = read_pci_config(num,slot,func,
29651 PCI_CLASS_REVISION);
29652 - if ((class == 0xffffffff))
29653 + if (class == 0xffffffff)
29654 continue; /* No device at this func */
29655
29656 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29657 diff -urNp linux-2.6.32.42/drivers/ieee1394/ohci1394.c linux-2.6.32.42/drivers/ieee1394/ohci1394.c
29658 --- linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29659 +++ linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29660 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29661 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29662
29663 /* Module Parameters */
29664 -static int phys_dma = 1;
29665 +static int phys_dma;
29666 module_param(phys_dma, int, 0444);
29667 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29668 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29669
29670 static void dma_trm_tasklet(unsigned long data);
29671 static void dma_trm_reset(struct dma_trm_ctx *d);
29672 diff -urNp linux-2.6.32.42/drivers/ieee1394/sbp2.c linux-2.6.32.42/drivers/ieee1394/sbp2.c
29673 --- linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29674 +++ linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29675 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29676 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29677 MODULE_LICENSE("GPL");
29678
29679 -static int sbp2_module_init(void)
29680 +static int __init sbp2_module_init(void)
29681 {
29682 int ret;
29683
29684 diff -urNp linux-2.6.32.42/drivers/infiniband/core/cm.c linux-2.6.32.42/drivers/infiniband/core/cm.c
29685 --- linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29686 +++ linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29687 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29688
29689 struct cm_counter_group {
29690 struct kobject obj;
29691 - atomic_long_t counter[CM_ATTR_COUNT];
29692 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29693 };
29694
29695 struct cm_counter_attribute {
29696 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29697 struct ib_mad_send_buf *msg = NULL;
29698 int ret;
29699
29700 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29701 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29702 counter[CM_REQ_COUNTER]);
29703
29704 /* Quick state check to discard duplicate REQs. */
29705 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29706 if (!cm_id_priv)
29707 return;
29708
29709 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29710 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29711 counter[CM_REP_COUNTER]);
29712 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29713 if (ret)
29714 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29715 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29716 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29717 spin_unlock_irq(&cm_id_priv->lock);
29718 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29719 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29720 counter[CM_RTU_COUNTER]);
29721 goto out;
29722 }
29723 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29724 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29725 dreq_msg->local_comm_id);
29726 if (!cm_id_priv) {
29727 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29728 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29729 counter[CM_DREQ_COUNTER]);
29730 cm_issue_drep(work->port, work->mad_recv_wc);
29731 return -EINVAL;
29732 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29733 case IB_CM_MRA_REP_RCVD:
29734 break;
29735 case IB_CM_TIMEWAIT:
29736 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29737 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29738 counter[CM_DREQ_COUNTER]);
29739 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29740 goto unlock;
29741 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29742 cm_free_msg(msg);
29743 goto deref;
29744 case IB_CM_DREQ_RCVD:
29745 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29746 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29747 counter[CM_DREQ_COUNTER]);
29748 goto unlock;
29749 default:
29750 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29751 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29752 cm_id_priv->msg, timeout)) {
29753 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29754 - atomic_long_inc(&work->port->
29755 + atomic_long_inc_unchecked(&work->port->
29756 counter_group[CM_RECV_DUPLICATES].
29757 counter[CM_MRA_COUNTER]);
29758 goto out;
29759 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29760 break;
29761 case IB_CM_MRA_REQ_RCVD:
29762 case IB_CM_MRA_REP_RCVD:
29763 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29764 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29765 counter[CM_MRA_COUNTER]);
29766 /* fall through */
29767 default:
29768 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29769 case IB_CM_LAP_IDLE:
29770 break;
29771 case IB_CM_MRA_LAP_SENT:
29772 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29773 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29774 counter[CM_LAP_COUNTER]);
29775 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29776 goto unlock;
29777 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29778 cm_free_msg(msg);
29779 goto deref;
29780 case IB_CM_LAP_RCVD:
29781 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29782 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29783 counter[CM_LAP_COUNTER]);
29784 goto unlock;
29785 default:
29786 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29787 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29788 if (cur_cm_id_priv) {
29789 spin_unlock_irq(&cm.lock);
29790 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29791 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29792 counter[CM_SIDR_REQ_COUNTER]);
29793 goto out; /* Duplicate message. */
29794 }
29795 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29796 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29797 msg->retries = 1;
29798
29799 - atomic_long_add(1 + msg->retries,
29800 + atomic_long_add_unchecked(1 + msg->retries,
29801 &port->counter_group[CM_XMIT].counter[attr_index]);
29802 if (msg->retries)
29803 - atomic_long_add(msg->retries,
29804 + atomic_long_add_unchecked(msg->retries,
29805 &port->counter_group[CM_XMIT_RETRIES].
29806 counter[attr_index]);
29807
29808 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29809 }
29810
29811 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29812 - atomic_long_inc(&port->counter_group[CM_RECV].
29813 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29814 counter[attr_id - CM_ATTR_ID_OFFSET]);
29815
29816 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29817 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29818 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29819
29820 return sprintf(buf, "%ld\n",
29821 - atomic_long_read(&group->counter[cm_attr->index]));
29822 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29823 }
29824
29825 -static struct sysfs_ops cm_counter_ops = {
29826 +static const struct sysfs_ops cm_counter_ops = {
29827 .show = cm_show_counter
29828 };
29829
29830 diff -urNp linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c
29831 --- linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29832 +++ linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29833 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29834
29835 struct task_struct *thread;
29836
29837 - atomic_t req_ser;
29838 - atomic_t flush_ser;
29839 + atomic_unchecked_t req_ser;
29840 + atomic_unchecked_t flush_ser;
29841
29842 wait_queue_head_t force_wait;
29843 };
29844 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29845 struct ib_fmr_pool *pool = pool_ptr;
29846
29847 do {
29848 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29849 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29850 ib_fmr_batch_release(pool);
29851
29852 - atomic_inc(&pool->flush_ser);
29853 + atomic_inc_unchecked(&pool->flush_ser);
29854 wake_up_interruptible(&pool->force_wait);
29855
29856 if (pool->flush_function)
29857 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29858 }
29859
29860 set_current_state(TASK_INTERRUPTIBLE);
29861 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29862 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29863 !kthread_should_stop())
29864 schedule();
29865 __set_current_state(TASK_RUNNING);
29866 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29867 pool->dirty_watermark = params->dirty_watermark;
29868 pool->dirty_len = 0;
29869 spin_lock_init(&pool->pool_lock);
29870 - atomic_set(&pool->req_ser, 0);
29871 - atomic_set(&pool->flush_ser, 0);
29872 + atomic_set_unchecked(&pool->req_ser, 0);
29873 + atomic_set_unchecked(&pool->flush_ser, 0);
29874 init_waitqueue_head(&pool->force_wait);
29875
29876 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29877 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29878 }
29879 spin_unlock_irq(&pool->pool_lock);
29880
29881 - serial = atomic_inc_return(&pool->req_ser);
29882 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29883 wake_up_process(pool->thread);
29884
29885 if (wait_event_interruptible(pool->force_wait,
29886 - atomic_read(&pool->flush_ser) - serial >= 0))
29887 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29888 return -EINTR;
29889
29890 return 0;
29891 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29892 } else {
29893 list_add_tail(&fmr->list, &pool->dirty_list);
29894 if (++pool->dirty_len >= pool->dirty_watermark) {
29895 - atomic_inc(&pool->req_ser);
29896 + atomic_inc_unchecked(&pool->req_ser);
29897 wake_up_process(pool->thread);
29898 }
29899 }
29900 diff -urNp linux-2.6.32.42/drivers/infiniband/core/sysfs.c linux-2.6.32.42/drivers/infiniband/core/sysfs.c
29901 --- linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29902 +++ linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29903 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29904 return port_attr->show(p, port_attr, buf);
29905 }
29906
29907 -static struct sysfs_ops port_sysfs_ops = {
29908 +static const struct sysfs_ops port_sysfs_ops = {
29909 .show = port_attr_show
29910 };
29911
29912 diff -urNp linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c
29913 --- linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29914 +++ linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29915 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29916 dst->grh.sgid_index = src->grh.sgid_index;
29917 dst->grh.hop_limit = src->grh.hop_limit;
29918 dst->grh.traffic_class = src->grh.traffic_class;
29919 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29920 dst->dlid = src->dlid;
29921 dst->sl = src->sl;
29922 dst->src_path_bits = src->src_path_bits;
29923 dst->static_rate = src->static_rate;
29924 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29925 dst->port_num = src->port_num;
29926 + dst->reserved = 0;
29927 }
29928 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29929
29930 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29931 struct ib_qp_attr *src)
29932 {
29933 + dst->qp_state = src->qp_state;
29934 dst->cur_qp_state = src->cur_qp_state;
29935 dst->path_mtu = src->path_mtu;
29936 dst->path_mig_state = src->path_mig_state;
29937 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29938 dst->rnr_retry = src->rnr_retry;
29939 dst->alt_port_num = src->alt_port_num;
29940 dst->alt_timeout = src->alt_timeout;
29941 + memset(dst->reserved, 0, sizeof(dst->reserved));
29942 }
29943 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29944
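The uverbs_marshall.c hunk fills in members the original copy helpers never wrote — grh.reserved, the trailing reserved field, and qp_state — before the structures are handed to user space, closing an information leak through uninitialized fields and padding. A user-space sketch of the idea, with made-up structure and field names:

#include <stdio.h>
#include <string.h>

struct user_ah_attr_sketch {
    unsigned char dlid;
    unsigned char reserved[3];               /* would otherwise carry stack garbage */
};

static void copy_ah_attr_sketch(struct user_ah_attr_sketch *dst, unsigned char dlid)
{
    dst->dlid = dlid;
    memset(dst->reserved, 0, sizeof(dst->reserved));   /* the added hardening */
}

int main(void)
{
    struct user_ah_attr_sketch out;
    copy_ah_attr_sketch(&out, 7);
    printf("dlid=%u reserved=%u %u %u\n", out.dlid,
           out.reserved[0], out.reserved[1], out.reserved[2]);
    return 0;
}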
29945 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c
29946 --- linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29947 +++ linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29948 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29949 struct infinipath_counters counters;
29950 struct ipath_devdata *dd;
29951
29952 + pax_track_stack();
29953 +
29954 dd = file->f_path.dentry->d_inode->i_private;
29955 dd->ipath_f_read_counters(dd, &counters);
29956
29957 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c
29958 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29959 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29960 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29961 LIST_HEAD(nes_adapter_list);
29962 static LIST_HEAD(nes_dev_list);
29963
29964 -atomic_t qps_destroyed;
29965 +atomic_unchecked_t qps_destroyed;
29966
29967 static unsigned int ee_flsh_adapter;
29968 static unsigned int sysfs_nonidx_addr;
29969 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29970 struct nes_adapter *nesadapter = nesdev->nesadapter;
29971 u32 qp_id;
29972
29973 - atomic_inc(&qps_destroyed);
29974 + atomic_inc_unchecked(&qps_destroyed);
29975
29976 /* Free the control structures */
29977
29978 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c
29979 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29980 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29981 @@ -69,11 +69,11 @@ u32 cm_packets_received;
29982 u32 cm_listens_created;
29983 u32 cm_listens_destroyed;
29984 u32 cm_backlog_drops;
29985 -atomic_t cm_loopbacks;
29986 -atomic_t cm_nodes_created;
29987 -atomic_t cm_nodes_destroyed;
29988 -atomic_t cm_accel_dropped_pkts;
29989 -atomic_t cm_resets_recvd;
29990 +atomic_unchecked_t cm_loopbacks;
29991 +atomic_unchecked_t cm_nodes_created;
29992 +atomic_unchecked_t cm_nodes_destroyed;
29993 +atomic_unchecked_t cm_accel_dropped_pkts;
29994 +atomic_unchecked_t cm_resets_recvd;
29995
29996 static inline int mini_cm_accelerated(struct nes_cm_core *,
29997 struct nes_cm_node *);
29998 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29999
30000 static struct nes_cm_core *g_cm_core;
30001
30002 -atomic_t cm_connects;
30003 -atomic_t cm_accepts;
30004 -atomic_t cm_disconnects;
30005 -atomic_t cm_closes;
30006 -atomic_t cm_connecteds;
30007 -atomic_t cm_connect_reqs;
30008 -atomic_t cm_rejects;
30009 +atomic_unchecked_t cm_connects;
30010 +atomic_unchecked_t cm_accepts;
30011 +atomic_unchecked_t cm_disconnects;
30012 +atomic_unchecked_t cm_closes;
30013 +atomic_unchecked_t cm_connecteds;
30014 +atomic_unchecked_t cm_connect_reqs;
30015 +atomic_unchecked_t cm_rejects;
30016
30017
30018 /**
30019 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30020 cm_node->rem_mac);
30021
30022 add_hte_node(cm_core, cm_node);
30023 - atomic_inc(&cm_nodes_created);
30024 + atomic_inc_unchecked(&cm_nodes_created);
30025
30026 return cm_node;
30027 }
30028 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30029 }
30030
30031 atomic_dec(&cm_core->node_cnt);
30032 - atomic_inc(&cm_nodes_destroyed);
30033 + atomic_inc_unchecked(&cm_nodes_destroyed);
30034 nesqp = cm_node->nesqp;
30035 if (nesqp) {
30036 nesqp->cm_node = NULL;
30037 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30038
30039 static void drop_packet(struct sk_buff *skb)
30040 {
30041 - atomic_inc(&cm_accel_dropped_pkts);
30042 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30043 dev_kfree_skb_any(skb);
30044 }
30045
30046 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30047
30048 int reset = 0; /* whether to send reset in case of err.. */
30049 int passive_state;
30050 - atomic_inc(&cm_resets_recvd);
30051 + atomic_inc_unchecked(&cm_resets_recvd);
30052 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30053 " refcnt=%d\n", cm_node, cm_node->state,
30054 atomic_read(&cm_node->ref_count));
30055 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30056 rem_ref_cm_node(cm_node->cm_core, cm_node);
30057 return NULL;
30058 }
30059 - atomic_inc(&cm_loopbacks);
30060 + atomic_inc_unchecked(&cm_loopbacks);
30061 loopbackremotenode->loopbackpartner = cm_node;
30062 loopbackremotenode->tcp_cntxt.rcv_wscale =
30063 NES_CM_DEFAULT_RCV_WND_SCALE;
30064 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30065 add_ref_cm_node(cm_node);
30066 } else if (cm_node->state == NES_CM_STATE_TSA) {
30067 rem_ref_cm_node(cm_core, cm_node);
30068 - atomic_inc(&cm_accel_dropped_pkts);
30069 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30070 dev_kfree_skb_any(skb);
30071 break;
30072 }
30073 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30074
30075 if ((cm_id) && (cm_id->event_handler)) {
30076 if (issue_disconn) {
30077 - atomic_inc(&cm_disconnects);
30078 + atomic_inc_unchecked(&cm_disconnects);
30079 cm_event.event = IW_CM_EVENT_DISCONNECT;
30080 cm_event.status = disconn_status;
30081 cm_event.local_addr = cm_id->local_addr;
30082 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30083 }
30084
30085 if (issue_close) {
30086 - atomic_inc(&cm_closes);
30087 + atomic_inc_unchecked(&cm_closes);
30088 nes_disconnect(nesqp, 1);
30089
30090 cm_id->provider_data = nesqp;
30091 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30092
30093 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30094 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30095 - atomic_inc(&cm_accepts);
30096 + atomic_inc_unchecked(&cm_accepts);
30097
30098 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30099 atomic_read(&nesvnic->netdev->refcnt));
30100 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30101
30102 struct nes_cm_core *cm_core;
30103
30104 - atomic_inc(&cm_rejects);
30105 + atomic_inc_unchecked(&cm_rejects);
30106 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30107 loopback = cm_node->loopbackpartner;
30108 cm_core = cm_node->cm_core;
30109 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30110 ntohl(cm_id->local_addr.sin_addr.s_addr),
30111 ntohs(cm_id->local_addr.sin_port));
30112
30113 - atomic_inc(&cm_connects);
30114 + atomic_inc_unchecked(&cm_connects);
30115 nesqp->active_conn = 1;
30116
30117 /* cache the cm_id in the qp */
30118 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30119 if (nesqp->destroyed) {
30120 return;
30121 }
30122 - atomic_inc(&cm_connecteds);
30123 + atomic_inc_unchecked(&cm_connecteds);
30124 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30125 " local port 0x%04X. jiffies = %lu.\n",
30126 nesqp->hwqp.qp_id,
30127 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30128
30129 ret = cm_id->event_handler(cm_id, &cm_event);
30130 cm_id->add_ref(cm_id);
30131 - atomic_inc(&cm_closes);
30132 + atomic_inc_unchecked(&cm_closes);
30133 cm_event.event = IW_CM_EVENT_CLOSE;
30134 cm_event.status = IW_CM_EVENT_STATUS_OK;
30135 cm_event.provider_data = cm_id->provider_data;
30136 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30137 return;
30138 cm_id = cm_node->cm_id;
30139
30140 - atomic_inc(&cm_connect_reqs);
30141 + atomic_inc_unchecked(&cm_connect_reqs);
30142 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30143 cm_node, cm_id, jiffies);
30144
30145 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30146 return;
30147 cm_id = cm_node->cm_id;
30148
30149 - atomic_inc(&cm_connect_reqs);
30150 + atomic_inc_unchecked(&cm_connect_reqs);
30151 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30152 cm_node, cm_id, jiffies);
30153
30154 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h
30155 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30156 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30157 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30158 extern unsigned int wqm_quanta;
30159 extern struct list_head nes_adapter_list;
30160
30161 -extern atomic_t cm_connects;
30162 -extern atomic_t cm_accepts;
30163 -extern atomic_t cm_disconnects;
30164 -extern atomic_t cm_closes;
30165 -extern atomic_t cm_connecteds;
30166 -extern atomic_t cm_connect_reqs;
30167 -extern atomic_t cm_rejects;
30168 -extern atomic_t mod_qp_timouts;
30169 -extern atomic_t qps_created;
30170 -extern atomic_t qps_destroyed;
30171 -extern atomic_t sw_qps_destroyed;
30172 +extern atomic_unchecked_t cm_connects;
30173 +extern atomic_unchecked_t cm_accepts;
30174 +extern atomic_unchecked_t cm_disconnects;
30175 +extern atomic_unchecked_t cm_closes;
30176 +extern atomic_unchecked_t cm_connecteds;
30177 +extern atomic_unchecked_t cm_connect_reqs;
30178 +extern atomic_unchecked_t cm_rejects;
30179 +extern atomic_unchecked_t mod_qp_timouts;
30180 +extern atomic_unchecked_t qps_created;
30181 +extern atomic_unchecked_t qps_destroyed;
30182 +extern atomic_unchecked_t sw_qps_destroyed;
30183 extern u32 mh_detected;
30184 extern u32 mh_pauses_sent;
30185 extern u32 cm_packets_sent;
30186 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30187 extern u32 cm_listens_created;
30188 extern u32 cm_listens_destroyed;
30189 extern u32 cm_backlog_drops;
30190 -extern atomic_t cm_loopbacks;
30191 -extern atomic_t cm_nodes_created;
30192 -extern atomic_t cm_nodes_destroyed;
30193 -extern atomic_t cm_accel_dropped_pkts;
30194 -extern atomic_t cm_resets_recvd;
30195 +extern atomic_unchecked_t cm_loopbacks;
30196 +extern atomic_unchecked_t cm_nodes_created;
30197 +extern atomic_unchecked_t cm_nodes_destroyed;
30198 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30199 +extern atomic_unchecked_t cm_resets_recvd;
30200
30201 extern u32 int_mod_timer_init;
30202 extern u32 int_mod_cq_depth_256;
30203 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c
30204 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30205 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30206 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30207 target_stat_values[++index] = mh_detected;
30208 target_stat_values[++index] = mh_pauses_sent;
30209 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30210 - target_stat_values[++index] = atomic_read(&cm_connects);
30211 - target_stat_values[++index] = atomic_read(&cm_accepts);
30212 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30213 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30214 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30215 - target_stat_values[++index] = atomic_read(&cm_rejects);
30216 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30217 - target_stat_values[++index] = atomic_read(&qps_created);
30218 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30219 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30220 - target_stat_values[++index] = atomic_read(&cm_closes);
30221 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30222 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30223 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30224 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30225 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30226 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30227 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30228 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30229 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30230 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30231 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30232 target_stat_values[++index] = cm_packets_sent;
30233 target_stat_values[++index] = cm_packets_bounced;
30234 target_stat_values[++index] = cm_packets_created;
30235 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30236 target_stat_values[++index] = cm_listens_created;
30237 target_stat_values[++index] = cm_listens_destroyed;
30238 target_stat_values[++index] = cm_backlog_drops;
30239 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30240 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30241 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30242 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30243 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30244 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30245 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30246 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30247 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30248 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30249 target_stat_values[++index] = int_mod_timer_init;
30250 target_stat_values[++index] = int_mod_cq_depth_1;
30251 target_stat_values[++index] = int_mod_cq_depth_4;
30252 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c
30253 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30254 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30255 @@ -45,9 +45,9 @@
30256
30257 #include <rdma/ib_umem.h>
30258
30259 -atomic_t mod_qp_timouts;
30260 -atomic_t qps_created;
30261 -atomic_t sw_qps_destroyed;
30262 +atomic_unchecked_t mod_qp_timouts;
30263 +atomic_unchecked_t qps_created;
30264 +atomic_unchecked_t sw_qps_destroyed;
30265
30266 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30267
30268 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30269 if (init_attr->create_flags)
30270 return ERR_PTR(-EINVAL);
30271
30272 - atomic_inc(&qps_created);
30273 + atomic_inc_unchecked(&qps_created);
30274 switch (init_attr->qp_type) {
30275 case IB_QPT_RC:
30276 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30277 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30278 struct iw_cm_event cm_event;
30279 int ret;
30280
30281 - atomic_inc(&sw_qps_destroyed);
30282 + atomic_inc_unchecked(&sw_qps_destroyed);
30283 nesqp->destroyed = 1;
30284
30285 /* Blow away the connection if it exists. */
30286 diff -urNp linux-2.6.32.42/drivers/input/gameport/gameport.c linux-2.6.32.42/drivers/input/gameport/gameport.c
30287 --- linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30288 +++ linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30289 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30290 */
30291 static void gameport_init_port(struct gameport *gameport)
30292 {
30293 - static atomic_t gameport_no = ATOMIC_INIT(0);
30294 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30295
30296 __module_get(THIS_MODULE);
30297
30298 mutex_init(&gameport->drv_mutex);
30299 device_initialize(&gameport->dev);
30300 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30301 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30302 gameport->dev.bus = &gameport_bus;
30303 gameport->dev.release = gameport_release_port;
30304 if (gameport->parent)
30305 diff -urNp linux-2.6.32.42/drivers/input/input.c linux-2.6.32.42/drivers/input/input.c
30306 --- linux-2.6.32.42/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30307 +++ linux-2.6.32.42/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30308 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30309 */
30310 int input_register_device(struct input_dev *dev)
30311 {
30312 - static atomic_t input_no = ATOMIC_INIT(0);
30313 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30314 struct input_handler *handler;
30315 const char *path;
30316 int error;
30317 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
30318 dev->setkeycode = input_default_setkeycode;
30319
30320 dev_set_name(&dev->dev, "input%ld",
30321 - (unsigned long) atomic_inc_return(&input_no) - 1);
30322 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30323
30324 error = device_add(&dev->dev);
30325 if (error)
30326 diff -urNp linux-2.6.32.42/drivers/input/joystick/sidewinder.c linux-2.6.32.42/drivers/input/joystick/sidewinder.c
30327 --- linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
30328 +++ linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
30329 @@ -30,6 +30,7 @@
30330 #include <linux/kernel.h>
30331 #include <linux/module.h>
30332 #include <linux/slab.h>
30333 +#include <linux/sched.h>
30334 #include <linux/init.h>
30335 #include <linux/input.h>
30336 #include <linux/gameport.h>
30337 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30338 unsigned char buf[SW_LENGTH];
30339 int i;
30340
30341 + pax_track_stack();
30342 +
30343 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30344
30345 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30346 diff -urNp linux-2.6.32.42/drivers/input/joystick/xpad.c linux-2.6.32.42/drivers/input/joystick/xpad.c
30347 --- linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
30348 +++ linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
30349 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
30350
30351 static int xpad_led_probe(struct usb_xpad *xpad)
30352 {
30353 - static atomic_t led_seq = ATOMIC_INIT(0);
30354 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30355 long led_no;
30356 struct xpad_led *led;
30357 struct led_classdev *led_cdev;
30358 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
30359 if (!led)
30360 return -ENOMEM;
30361
30362 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30363 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30364
30365 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30366 led->xpad = xpad;
30367 diff -urNp linux-2.6.32.42/drivers/input/serio/serio.c linux-2.6.32.42/drivers/input/serio/serio.c
30368 --- linux-2.6.32.42/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
30369 +++ linux-2.6.32.42/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
30370 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
30371 */
30372 static void serio_init_port(struct serio *serio)
30373 {
30374 - static atomic_t serio_no = ATOMIC_INIT(0);
30375 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30376
30377 __module_get(THIS_MODULE);
30378
30379 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
30380 mutex_init(&serio->drv_mutex);
30381 device_initialize(&serio->dev);
30382 dev_set_name(&serio->dev, "serio%ld",
30383 - (long)atomic_inc_return(&serio_no) - 1);
30384 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30385 serio->dev.bus = &serio_bus;
30386 serio->dev.release = serio_release_port;
30387 if (serio->parent) {
30388 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/common.c linux-2.6.32.42/drivers/isdn/gigaset/common.c
30389 --- linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
30390 +++ linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
30391 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
30392 cs->commands_pending = 0;
30393 cs->cur_at_seq = 0;
30394 cs->gotfwver = -1;
30395 - cs->open_count = 0;
30396 + local_set(&cs->open_count, 0);
30397 cs->dev = NULL;
30398 cs->tty = NULL;
30399 cs->tty_dev = NULL;
30400 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h
30401 --- linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
30402 +++ linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
30403 @@ -34,6 +34,7 @@
30404 #include <linux/tty_driver.h>
30405 #include <linux/list.h>
30406 #include <asm/atomic.h>
30407 +#include <asm/local.h>
30408
30409 #define GIG_VERSION {0,5,0,0}
30410 #define GIG_COMPAT {0,4,0,0}
30411 @@ -446,7 +447,7 @@ struct cardstate {
30412 spinlock_t cmdlock;
30413 unsigned curlen, cmdbytes;
30414
30415 - unsigned open_count;
30416 + local_t open_count;
30417 struct tty_struct *tty;
30418 struct tasklet_struct if_wake_tasklet;
30419 unsigned control_state;
30420 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/interface.c linux-2.6.32.42/drivers/isdn/gigaset/interface.c
30421 --- linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
30422 +++ linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
30423 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
30424 return -ERESTARTSYS; // FIXME -EINTR?
30425 tty->driver_data = cs;
30426
30427 - ++cs->open_count;
30428 -
30429 - if (cs->open_count == 1) {
30430 + if (local_inc_return(&cs->open_count) == 1) {
30431 spin_lock_irqsave(&cs->lock, flags);
30432 cs->tty = tty;
30433 spin_unlock_irqrestore(&cs->lock, flags);
30434 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
30435
30436 if (!cs->connected)
30437 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30438 - else if (!cs->open_count)
30439 + else if (!local_read(&cs->open_count))
30440 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30441 else {
30442 - if (!--cs->open_count) {
30443 + if (!local_dec_return(&cs->open_count)) {
30444 spin_lock_irqsave(&cs->lock, flags);
30445 cs->tty = NULL;
30446 spin_unlock_irqrestore(&cs->lock, flags);
30447 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
30448 if (!cs->connected) {
30449 gig_dbg(DEBUG_IF, "not connected");
30450 retval = -ENODEV;
30451 - } else if (!cs->open_count)
30452 + } else if (!local_read(&cs->open_count))
30453 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30454 else {
30455 retval = 0;
30456 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
30457 if (!cs->connected) {
30458 gig_dbg(DEBUG_IF, "not connected");
30459 retval = -ENODEV;
30460 - } else if (!cs->open_count)
30461 + } else if (!local_read(&cs->open_count))
30462 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30463 else if (cs->mstate != MS_LOCKED) {
30464 dev_warn(cs->dev, "can't write to unlocked device\n");
30465 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
30466 if (!cs->connected) {
30467 gig_dbg(DEBUG_IF, "not connected");
30468 retval = -ENODEV;
30469 - } else if (!cs->open_count)
30470 + } else if (!local_read(&cs->open_count))
30471 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30472 else if (cs->mstate != MS_LOCKED) {
30473 dev_warn(cs->dev, "can't write to unlocked device\n");
30474 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
30475
30476 if (!cs->connected)
30477 gig_dbg(DEBUG_IF, "not connected");
30478 - else if (!cs->open_count)
30479 + else if (!local_read(&cs->open_count))
30480 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30481 else if (cs->mstate != MS_LOCKED)
30482 dev_warn(cs->dev, "can't write to unlocked device\n");
30483 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
30484
30485 if (!cs->connected)
30486 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30487 - else if (!cs->open_count)
30488 + else if (!local_read(&cs->open_count))
30489 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30490 else {
30491 //FIXME
30492 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
30493
30494 if (!cs->connected)
30495 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30496 - else if (!cs->open_count)
30497 + else if (!local_read(&cs->open_count))
30498 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30499 else {
30500 //FIXME
30501 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
30502 goto out;
30503 }
30504
30505 - if (!cs->open_count) {
30506 + if (!local_read(&cs->open_count)) {
30507 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30508 goto out;
30509 }
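The gigaset changes above turn the driver's open_count into a local_t updated with local_inc_return()/local_dec_return(), so the first-open and last-close decisions are made on an atomically updated value instead of a bare integer incremented in place. A rough user-space equivalent using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;

static void tty_open_sketch(void)
{
    if (atomic_fetch_add(&open_count, 1) + 1 == 1)    /* mirrors local_inc_return() == 1 */
        puts("first open: bind the tty");
}

static void tty_close_sketch(void)
{
    if (atomic_fetch_sub(&open_count, 1) - 1 == 0)    /* mirrors !local_dec_return() */
        puts("last close: unbind the tty");
}

int main(void)
{
    tty_open_sketch();
    tty_open_sketch();
    tty_close_sketch();
    tty_close_sketch();
    return 0;
}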
30510 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c
30511 --- linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
30512 +++ linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
30513 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
30514 }
30515 if (left) {
30516 if (t4file->user) {
30517 - if (copy_from_user(buf, dp, left))
30518 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30519 return -EFAULT;
30520 } else {
30521 memcpy(buf, dp, left);
30522 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
30523 }
30524 if (left) {
30525 if (config->user) {
30526 - if (copy_from_user(buf, dp, left))
30527 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30528 return -EFAULT;
30529 } else {
30530 memcpy(buf, dp, left);
30531 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c
30532 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
30533 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
30534 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
30535 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30536 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30537
30538 + pax_track_stack();
30539
30540 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30541 {
30542 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c
30543 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
30544 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
30545 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30546 IDI_SYNC_REQ req;
30547 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30548
30549 + pax_track_stack();
30550 +
30551 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30552
30553 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30554 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c
30555 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
30556 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
30557 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
30558 IDI_SYNC_REQ req;
30559 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30560
30561 + pax_track_stack();
30562 +
30563 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30564
30565 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30566 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c
30567 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
30568 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
30569 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
30570 IDI_SYNC_REQ req;
30571 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30572
30573 + pax_track_stack();
30574 +
30575 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30576
30577 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30578 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c
30579 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
30580 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
30581 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
30582 IDI_SYNC_REQ req;
30583 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30584
30585 + pax_track_stack();
30586 +
30587 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30588
30589 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30590 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c
30591 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
30592 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
30593 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
30594 dword d;
30595 word w;
30596
30597 + pax_track_stack();
30598 +
30599 a = plci->adapter;
30600 Id = ((word)plci->Id<<8)|a->Id;
30601 PUT_WORD(&SS_Ind[4],0x0000);
30602 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
30603 word j, n, w;
30604 dword d;
30605
30606 + pax_track_stack();
30607 +
30608
30609 for(i=0;i<8;i++) bp_parms[i].length = 0;
30610 for(i=0;i<2;i++) global_config[i].length = 0;
30611 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30612 const byte llc3[] = {4,3,2,2,6,6,0};
30613 const byte header[] = {0,2,3,3,0,0,0};
30614
30615 + pax_track_stack();
30616 +
30617 for(i=0;i<8;i++) bp_parms[i].length = 0;
30618 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30619 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30620 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30621 word appl_number_group_type[MAX_APPL];
30622 PLCI *auxplci;
30623
30624 + pax_track_stack();
30625 +
30626 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30627
30628 if(!a->group_optimization_enabled)
30629 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c
30630 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30631 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30632 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30633 IDI_SYNC_REQ req;
30634 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30635
30636 + pax_track_stack();
30637 +
30638 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30639
30640 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30641 diff -urNp linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c
30642 --- linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30643 +++ linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30644 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30645 } iocpar;
30646 void __user *argp = (void __user *)arg;
30647
30648 + pax_track_stack();
30649 +
30650 #define name iocpar.name
30651 #define bname iocpar.bname
30652 #define iocts iocpar.iocts
30653 diff -urNp linux-2.6.32.42/drivers/isdn/icn/icn.c linux-2.6.32.42/drivers/isdn/icn/icn.c
30654 --- linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30655 +++ linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30656 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30657 if (count > len)
30658 count = len;
30659 if (user) {
30660 - if (copy_from_user(msg, buf, count))
30661 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30662 return -EFAULT;
30663 } else
30664 memcpy(msg, buf, count);
30665 diff -urNp linux-2.6.32.42/drivers/isdn/mISDN/socket.c linux-2.6.32.42/drivers/isdn/mISDN/socket.c
30666 --- linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30667 +++ linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30668 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30669 if (dev) {
30670 struct mISDN_devinfo di;
30671
30672 + memset(&di, 0, sizeof(di));
30673 di.id = dev->id;
30674 di.Dprotocols = dev->Dprotocols;
30675 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30676 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30677 if (dev) {
30678 struct mISDN_devinfo di;
30679
30680 + memset(&di, 0, sizeof(di));
30681 di.id = dev->id;
30682 di.Dprotocols = dev->Dprotocols;
30683 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30684 diff -urNp linux-2.6.32.42/drivers/isdn/sc/interrupt.c linux-2.6.32.42/drivers/isdn/sc/interrupt.c
30685 --- linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30686 +++ linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30687 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30688 }
30689 else if(callid>=0x0000 && callid<=0x7FFF)
30690 {
30691 + int len;
30692 +
30693 pr_debug("%s: Got Incoming Call\n",
30694 sc_adapter[card]->devicename);
30695 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30696 - strcpy(setup.eazmsn,
30697 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30698 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30699 + sizeof(setup.phone));
30700 + if (len >= sizeof(setup.phone))
30701 + continue;
30702 + len = strlcpy(setup.eazmsn,
30703 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30704 + sizeof(setup.eazmsn));
30705 + if (len >= sizeof(setup.eazmsn))
30706 + continue;
30707 setup.si1 = 7;
30708 setup.si2 = 0;
30709 setup.plan = 0;
30710 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30711 * Handle a GetMyNumber Rsp
30712 */
30713 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30714 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30715 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30716 + rcvmsg.msg_data.byte_array,
30717 + sizeof(rcvmsg.msg_data.byte_array));
30718 continue;
30719 }
30720
30721 diff -urNp linux-2.6.32.42/drivers/lguest/core.c linux-2.6.32.42/drivers/lguest/core.c
30722 --- linux-2.6.32.42/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30723 +++ linux-2.6.32.42/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30724 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30725 * it's worked so far. The end address needs +1 because __get_vm_area
30726 * allocates an extra guard page, so we need space for that.
30727 */
30728 +
30729 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30730 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30731 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30732 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30733 +#else
30734 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30735 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30736 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30737 +#endif
30738 +
30739 if (!switcher_vma) {
30740 err = -ENOMEM;
30741 printk("lguest: could not map switcher pages high\n");
30742 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30743 * Now the Switcher is mapped at the right address, we can't fail!
30744 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30745 */
30746 - memcpy(switcher_vma->addr, start_switcher_text,
30747 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30748 end_switcher_text - start_switcher_text);
30749
30750 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30751 diff -urNp linux-2.6.32.42/drivers/lguest/x86/core.c linux-2.6.32.42/drivers/lguest/x86/core.c
30752 --- linux-2.6.32.42/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30753 +++ linux-2.6.32.42/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30754 @@ -59,7 +59,7 @@ static struct {
30755 /* Offset from where switcher.S was compiled to where we've copied it */
30756 static unsigned long switcher_offset(void)
30757 {
30758 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30759 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30760 }
30761
30762 /* This cpu's struct lguest_pages. */
30763 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30764 * These copies are pretty cheap, so we do them unconditionally: */
30765 /* Save the current Host top-level page directory.
30766 */
30767 +
30768 +#ifdef CONFIG_PAX_PER_CPU_PGD
30769 + pages->state.host_cr3 = read_cr3();
30770 +#else
30771 pages->state.host_cr3 = __pa(current->mm->pgd);
30772 +#endif
30773 +
30774 /*
30775 * Set up the Guest's page tables to see this CPU's pages (and no
30776 * other CPU's pages).
30777 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30778 * compiled-in switcher code and the high-mapped copy we just made.
30779 */
30780 for (i = 0; i < IDT_ENTRIES; i++)
30781 - default_idt_entries[i] += switcher_offset();
30782 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30783
30784 /*
30785 * Set up the Switcher's per-cpu areas.
30786 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30787 * it will be undisturbed when we switch. To change %cs and jump we
30788 * need this structure to feed to Intel's "lcall" instruction.
30789 */
30790 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30791 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30792 lguest_entry.segment = LGUEST_CS;
30793
30794 /*
30795 diff -urNp linux-2.6.32.42/drivers/lguest/x86/switcher_32.S linux-2.6.32.42/drivers/lguest/x86/switcher_32.S
30796 --- linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30797 +++ linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30798 @@ -87,6 +87,7 @@
30799 #include <asm/page.h>
30800 #include <asm/segment.h>
30801 #include <asm/lguest.h>
30802 +#include <asm/processor-flags.h>
30803
30804 // We mark the start of the code to copy
30805 // It's placed in .text tho it's never run here
30806 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30807 // Changes type when we load it: damn Intel!
30808 // For after we switch over our page tables
30809 // That entry will be read-only: we'd crash.
30810 +
30811 +#ifdef CONFIG_PAX_KERNEXEC
30812 + mov %cr0, %edx
30813 + xor $X86_CR0_WP, %edx
30814 + mov %edx, %cr0
30815 +#endif
30816 +
30817 movl $(GDT_ENTRY_TSS*8), %edx
30818 ltr %dx
30819
30820 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30821 // Let's clear it again for our return.
30822 // The GDT descriptor of the Host
30823 // Points to the table after two "size" bytes
30824 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30825 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30826 // Clear "used" from type field (byte 5, bit 2)
30827 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30828 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30829 +
30830 +#ifdef CONFIG_PAX_KERNEXEC
30831 + mov %cr0, %eax
30832 + xor $X86_CR0_WP, %eax
30833 + mov %eax, %cr0
30834 +#endif
30835
30836 // Once our page table's switched, the Guest is live!
30837 // The Host fades as we run this final step.
30838 @@ -295,13 +309,12 @@ deliver_to_host:
30839 // I consulted gcc, and it gave
30840 // These instructions, which I gladly credit:
30841 leal (%edx,%ebx,8), %eax
30842 - movzwl (%eax),%edx
30843 - movl 4(%eax), %eax
30844 - xorw %ax, %ax
30845 - orl %eax, %edx
30846 + movl 4(%eax), %edx
30847 + movw (%eax), %dx
30848 // Now the address of the handler's in %edx
30849 // We call it now: its "iret" drops us home.
30850 - jmp *%edx
30851 + ljmp $__KERNEL_CS, $1f
30852 +1: jmp *%edx
30853
30854 // Every interrupt can come to us here
30855 // But we must truly tell each apart.
30856 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c
30857 --- linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30858 +++ linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30859 @@ -15,7 +15,7 @@
30860
30861 #define MAX_PMU_LEVEL 0xFF
30862
30863 -static struct backlight_ops pmu_backlight_data;
30864 +static const struct backlight_ops pmu_backlight_data;
30865 static DEFINE_SPINLOCK(pmu_backlight_lock);
30866 static int sleeping, uses_pmu_bl;
30867 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30868 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30869 return bd->props.brightness;
30870 }
30871
30872 -static struct backlight_ops pmu_backlight_data = {
30873 +static const struct backlight_ops pmu_backlight_data = {
30874 .get_brightness = pmu_backlight_get_brightness,
30875 .update_status = pmu_backlight_update_status,
30876
30877 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu.c linux-2.6.32.42/drivers/macintosh/via-pmu.c
30878 --- linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30879 +++ linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30880 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30881 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30882 }
30883
30884 -static struct platform_suspend_ops pmu_pm_ops = {
30885 +static const struct platform_suspend_ops pmu_pm_ops = {
30886 .enter = powerbook_sleep,
30887 .valid = pmu_sleep_valid,
30888 };
30889 diff -urNp linux-2.6.32.42/drivers/md/dm.c linux-2.6.32.42/drivers/md/dm.c
30890 --- linux-2.6.32.42/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30891 +++ linux-2.6.32.42/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30892 @@ -163,9 +163,9 @@ struct mapped_device {
30893 /*
30894 * Event handling.
30895 */
30896 - atomic_t event_nr;
30897 + atomic_unchecked_t event_nr;
30898 wait_queue_head_t eventq;
30899 - atomic_t uevent_seq;
30900 + atomic_unchecked_t uevent_seq;
30901 struct list_head uevent_list;
30902 spinlock_t uevent_lock; /* Protect access to uevent_list */
30903
30904 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30905 rwlock_init(&md->map_lock);
30906 atomic_set(&md->holders, 1);
30907 atomic_set(&md->open_count, 0);
30908 - atomic_set(&md->event_nr, 0);
30909 - atomic_set(&md->uevent_seq, 0);
30910 + atomic_set_unchecked(&md->event_nr, 0);
30911 + atomic_set_unchecked(&md->uevent_seq, 0);
30912 INIT_LIST_HEAD(&md->uevent_list);
30913 spin_lock_init(&md->uevent_lock);
30914
30915 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
30916
30917 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30918
30919 - atomic_inc(&md->event_nr);
30920 + atomic_inc_unchecked(&md->event_nr);
30921 wake_up(&md->eventq);
30922 }
30923
30924 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30925
30926 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30927 {
30928 - return atomic_add_return(1, &md->uevent_seq);
30929 + return atomic_add_return_unchecked(1, &md->uevent_seq);
30930 }
30931
30932 uint32_t dm_get_event_nr(struct mapped_device *md)
30933 {
30934 - return atomic_read(&md->event_nr);
30935 + return atomic_read_unchecked(&md->event_nr);
30936 }
30937
30938 int dm_wait_event(struct mapped_device *md, int event_nr)
30939 {
30940 return wait_event_interruptible(md->eventq,
30941 - (event_nr != atomic_read(&md->event_nr)));
30942 + (event_nr != atomic_read_unchecked(&md->event_nr)));
30943 }
30944
30945 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30946 diff -urNp linux-2.6.32.42/drivers/md/dm-ioctl.c linux-2.6.32.42/drivers/md/dm-ioctl.c
30947 --- linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30948 +++ linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30949 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30950 cmd == DM_LIST_VERSIONS_CMD)
30951 return 0;
30952
30953 - if ((cmd == DM_DEV_CREATE_CMD)) {
30954 + if (cmd == DM_DEV_CREATE_CMD) {
30955 if (!*param->name) {
30956 DMWARN("name not supplied when creating device");
30957 return -EINVAL;
30958 diff -urNp linux-2.6.32.42/drivers/md/dm-raid1.c linux-2.6.32.42/drivers/md/dm-raid1.c
30959 --- linux-2.6.32.42/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30960 +++ linux-2.6.32.42/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30961 @@ -41,7 +41,7 @@ enum dm_raid1_error {
30962
30963 struct mirror {
30964 struct mirror_set *ms;
30965 - atomic_t error_count;
30966 + atomic_unchecked_t error_count;
30967 unsigned long error_type;
30968 struct dm_dev *dev;
30969 sector_t offset;
30970 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30971 * simple way to tell if a device has encountered
30972 * errors.
30973 */
30974 - atomic_inc(&m->error_count);
30975 + atomic_inc_unchecked(&m->error_count);
30976
30977 if (test_and_set_bit(error_type, &m->error_type))
30978 return;
30979 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30980 }
30981
30982 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30983 - if (!atomic_read(&new->error_count)) {
30984 + if (!atomic_read_unchecked(&new->error_count)) {
30985 set_default_mirror(new);
30986 break;
30987 }
30988 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30989 struct mirror *m = get_default_mirror(ms);
30990
30991 do {
30992 - if (likely(!atomic_read(&m->error_count)))
30993 + if (likely(!atomic_read_unchecked(&m->error_count)))
30994 return m;
30995
30996 if (m-- == ms->mirror)
30997 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30998 {
30999 struct mirror *default_mirror = get_default_mirror(m->ms);
31000
31001 - return !atomic_read(&default_mirror->error_count);
31002 + return !atomic_read_unchecked(&default_mirror->error_count);
31003 }
31004
31005 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31006 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31007 */
31008 if (likely(region_in_sync(ms, region, 1)))
31009 m = choose_mirror(ms, bio->bi_sector);
31010 - else if (m && atomic_read(&m->error_count))
31011 + else if (m && atomic_read_unchecked(&m->error_count))
31012 m = NULL;
31013
31014 if (likely(m))
31015 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31016 }
31017
31018 ms->mirror[mirror].ms = ms;
31019 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31020 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31021 ms->mirror[mirror].error_type = 0;
31022 ms->mirror[mirror].offset = offset;
31023
31024 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31025 */
31026 static char device_status_char(struct mirror *m)
31027 {
31028 - if (!atomic_read(&(m->error_count)))
31029 + if (!atomic_read_unchecked(&(m->error_count)))
31030 return 'A';
31031
31032 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31033 diff -urNp linux-2.6.32.42/drivers/md/dm-stripe.c linux-2.6.32.42/drivers/md/dm-stripe.c
31034 --- linux-2.6.32.42/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31035 +++ linux-2.6.32.42/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31036 @@ -20,7 +20,7 @@ struct stripe {
31037 struct dm_dev *dev;
31038 sector_t physical_start;
31039
31040 - atomic_t error_count;
31041 + atomic_unchecked_t error_count;
31042 };
31043
31044 struct stripe_c {
31045 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31046 kfree(sc);
31047 return r;
31048 }
31049 - atomic_set(&(sc->stripe[i].error_count), 0);
31050 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31051 }
31052
31053 ti->private = sc;
31054 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31055 DMEMIT("%d ", sc->stripes);
31056 for (i = 0; i < sc->stripes; i++) {
31057 DMEMIT("%s ", sc->stripe[i].dev->name);
31058 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31059 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31060 'D' : 'A';
31061 }
31062 buffer[i] = '\0';
31063 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31064 */
31065 for (i = 0; i < sc->stripes; i++)
31066 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31067 - atomic_inc(&(sc->stripe[i].error_count));
31068 - if (atomic_read(&(sc->stripe[i].error_count)) <
31069 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31070 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31071 DM_IO_ERROR_THRESHOLD)
31072 queue_work(kstriped, &sc->kstriped_ws);
31073 }
31074 diff -urNp linux-2.6.32.42/drivers/md/dm-sysfs.c linux-2.6.32.42/drivers/md/dm-sysfs.c
31075 --- linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31076 +++ linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31077 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31078 NULL,
31079 };
31080
31081 -static struct sysfs_ops dm_sysfs_ops = {
31082 +static const struct sysfs_ops dm_sysfs_ops = {
31083 .show = dm_attr_show,
31084 };
31085
31086 diff -urNp linux-2.6.32.42/drivers/md/dm-table.c linux-2.6.32.42/drivers/md/dm-table.c
31087 --- linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31088 +++ linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31089 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31090 if (!dev_size)
31091 return 0;
31092
31093 - if ((start >= dev_size) || (start + len > dev_size)) {
31094 + if ((start >= dev_size) || (len > dev_size - start)) {
31095 DMWARN("%s: %s too small for target: "
31096 "start=%llu, len=%llu, dev_size=%llu",
31097 dm_device_name(ti->table->md), bdevname(bdev, b),
31098 diff -urNp linux-2.6.32.42/drivers/md/md.c linux-2.6.32.42/drivers/md/md.c
31099 --- linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:55:34.000000000 -0400
31100 +++ linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:56:37.000000000 -0400
31101 @@ -153,10 +153,10 @@ static int start_readonly;
31102 * start build, activate spare
31103 */
31104 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31105 -static atomic_t md_event_count;
31106 +static atomic_unchecked_t md_event_count;
31107 void md_new_event(mddev_t *mddev)
31108 {
31109 - atomic_inc(&md_event_count);
31110 + atomic_inc_unchecked(&md_event_count);
31111 wake_up(&md_event_waiters);
31112 }
31113 EXPORT_SYMBOL_GPL(md_new_event);
31114 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31115 */
31116 static void md_new_event_inintr(mddev_t *mddev)
31117 {
31118 - atomic_inc(&md_event_count);
31119 + atomic_inc_unchecked(&md_event_count);
31120 wake_up(&md_event_waiters);
31121 }
31122
31123 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31124
31125 rdev->preferred_minor = 0xffff;
31126 rdev->data_offset = le64_to_cpu(sb->data_offset);
31127 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31128 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31129
31130 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31131 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31132 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31133 else
31134 sb->resync_offset = cpu_to_le64(0);
31135
31136 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31137 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31138
31139 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31140 sb->size = cpu_to_le64(mddev->dev_sectors);
31141 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31142 static ssize_t
31143 errors_show(mdk_rdev_t *rdev, char *page)
31144 {
31145 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31146 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31147 }
31148
31149 static ssize_t
31150 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31151 char *e;
31152 unsigned long n = simple_strtoul(buf, &e, 10);
31153 if (*buf && (*e == 0 || *e == '\n')) {
31154 - atomic_set(&rdev->corrected_errors, n);
31155 + atomic_set_unchecked(&rdev->corrected_errors, n);
31156 return len;
31157 }
31158 return -EINVAL;
31159 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31160 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31161 kfree(rdev);
31162 }
31163 -static struct sysfs_ops rdev_sysfs_ops = {
31164 +static const struct sysfs_ops rdev_sysfs_ops = {
31165 .show = rdev_attr_show,
31166 .store = rdev_attr_store,
31167 };
31168 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31169 rdev->data_offset = 0;
31170 rdev->sb_events = 0;
31171 atomic_set(&rdev->nr_pending, 0);
31172 - atomic_set(&rdev->read_errors, 0);
31173 - atomic_set(&rdev->corrected_errors, 0);
31174 + atomic_set_unchecked(&rdev->read_errors, 0);
31175 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31176
31177 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31178 if (!size) {
31179 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31180 kfree(mddev);
31181 }
31182
31183 -static struct sysfs_ops md_sysfs_ops = {
31184 +static const struct sysfs_ops md_sysfs_ops = {
31185 .show = md_attr_show,
31186 .store = md_attr_store,
31187 };
31188 @@ -4474,7 +4474,8 @@ out:
31189 err = 0;
31190 blk_integrity_unregister(disk);
31191 md_new_event(mddev);
31192 - sysfs_notify_dirent(mddev->sysfs_state);
31193 + if (mddev->sysfs_state)
31194 + sysfs_notify_dirent(mddev->sysfs_state);
31195 return err;
31196 }
31197
31198 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31199
31200 spin_unlock(&pers_lock);
31201 seq_printf(seq, "\n");
31202 - mi->event = atomic_read(&md_event_count);
31203 + mi->event = atomic_read_unchecked(&md_event_count);
31204 return 0;
31205 }
31206 if (v == (void*)2) {
31207 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31208 chunk_kb ? "KB" : "B");
31209 if (bitmap->file) {
31210 seq_printf(seq, ", file: ");
31211 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31212 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31213 }
31214
31215 seq_printf(seq, "\n");
31216 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31217 else {
31218 struct seq_file *p = file->private_data;
31219 p->private = mi;
31220 - mi->event = atomic_read(&md_event_count);
31221 + mi->event = atomic_read_unchecked(&md_event_count);
31222 }
31223 return error;
31224 }
31225 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31226 /* always allow read */
31227 mask = POLLIN | POLLRDNORM;
31228
31229 - if (mi->event != atomic_read(&md_event_count))
31230 + if (mi->event != atomic_read_unchecked(&md_event_count))
31231 mask |= POLLERR | POLLPRI;
31232 return mask;
31233 }
31234 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31235 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31236 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31237 (int)part_stat_read(&disk->part0, sectors[1]) -
31238 - atomic_read(&disk->sync_io);
31239 + atomic_read_unchecked(&disk->sync_io);
31240 /* sync IO will cause sync_io to increase before the disk_stats
31241 * as sync_io is counted when a request starts, and
31242 * disk_stats is counted when it completes.
31243 diff -urNp linux-2.6.32.42/drivers/md/md.h linux-2.6.32.42/drivers/md/md.h
31244 --- linux-2.6.32.42/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31245 +++ linux-2.6.32.42/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31246 @@ -94,10 +94,10 @@ struct mdk_rdev_s
31247 * only maintained for arrays that
31248 * support hot removal
31249 */
31250 - atomic_t read_errors; /* number of consecutive read errors that
31251 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31252 * we have tried to ignore.
31253 */
31254 - atomic_t corrected_errors; /* number of corrected read errors,
31255 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31256 * for reporting to userspace and storing
31257 * in superblock.
31258 */
31259 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31260
31261 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31262 {
31263 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31264 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31265 }
31266
31267 struct mdk_personality
31268 diff -urNp linux-2.6.32.42/drivers/md/raid10.c linux-2.6.32.42/drivers/md/raid10.c
31269 --- linux-2.6.32.42/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31270 +++ linux-2.6.32.42/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31271 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31272 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31273 set_bit(R10BIO_Uptodate, &r10_bio->state);
31274 else {
31275 - atomic_add(r10_bio->sectors,
31276 + atomic_add_unchecked(r10_bio->sectors,
31277 &conf->mirrors[d].rdev->corrected_errors);
31278 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31279 md_error(r10_bio->mddev,
31280 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31281 test_bit(In_sync, &rdev->flags)) {
31282 atomic_inc(&rdev->nr_pending);
31283 rcu_read_unlock();
31284 - atomic_add(s, &rdev->corrected_errors);
31285 + atomic_add_unchecked(s, &rdev->corrected_errors);
31286 if (sync_page_io(rdev->bdev,
31287 r10_bio->devs[sl].addr +
31288 sect + rdev->data_offset,
31289 diff -urNp linux-2.6.32.42/drivers/md/raid1.c linux-2.6.32.42/drivers/md/raid1.c
31290 --- linux-2.6.32.42/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31291 +++ linux-2.6.32.42/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
31292 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
31293 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
31294 continue;
31295 rdev = conf->mirrors[d].rdev;
31296 - atomic_add(s, &rdev->corrected_errors);
31297 + atomic_add_unchecked(s, &rdev->corrected_errors);
31298 if (sync_page_io(rdev->bdev,
31299 sect + rdev->data_offset,
31300 s<<9,
31301 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
31302 /* Well, this device is dead */
31303 md_error(mddev, rdev);
31304 else {
31305 - atomic_add(s, &rdev->corrected_errors);
31306 + atomic_add_unchecked(s, &rdev->corrected_errors);
31307 printk(KERN_INFO
31308 "raid1:%s: read error corrected "
31309 "(%d sectors at %llu on %s)\n",
31310 diff -urNp linux-2.6.32.42/drivers/md/raid5.c linux-2.6.32.42/drivers/md/raid5.c
31311 --- linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
31312 +++ linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
31313 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
31314 bi->bi_next = NULL;
31315 if ((rw & WRITE) &&
31316 test_bit(R5_ReWrite, &sh->dev[i].flags))
31317 - atomic_add(STRIPE_SECTORS,
31318 + atomic_add_unchecked(STRIPE_SECTORS,
31319 &rdev->corrected_errors);
31320 generic_make_request(bi);
31321 } else {
31322 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
31323 clear_bit(R5_ReadError, &sh->dev[i].flags);
31324 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31325 }
31326 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31327 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31328 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31329 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31330 } else {
31331 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31332 int retry = 0;
31333 rdev = conf->disks[i].rdev;
31334
31335 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31336 - atomic_inc(&rdev->read_errors);
31337 + atomic_inc_unchecked(&rdev->read_errors);
31338 if (conf->mddev->degraded >= conf->max_degraded)
31339 printk_rl(KERN_WARNING
31340 "raid5:%s: read error not correctable "
31341 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
31342 (unsigned long long)(sh->sector
31343 + rdev->data_offset),
31344 bdn);
31345 - else if (atomic_read(&rdev->read_errors)
31346 + else if (atomic_read_unchecked(&rdev->read_errors)
31347 > conf->max_nr_stripes)
31348 printk(KERN_WARNING
31349 "raid5:%s: Too many read errors, failing device %s.\n",
31350 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
31351 sector_t r_sector;
31352 struct stripe_head sh2;
31353
31354 + pax_track_stack();
31355
31356 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31357 stripe = new_sector;
31358 diff -urNp linux-2.6.32.42/drivers/media/common/saa7146_hlp.c linux-2.6.32.42/drivers/media/common/saa7146_hlp.c
31359 --- linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
31360 +++ linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
31361 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
31362
31363 int x[32], y[32], w[32], h[32];
31364
31365 + pax_track_stack();
31366 +
31367 /* clear out memory */
31368 memset(&line_list[0], 0x00, sizeof(u32)*32);
31369 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31370 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31371 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
31372 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
31373 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
31374 u8 buf[HOST_LINK_BUF_SIZE];
31375 int i;
31376
31377 + pax_track_stack();
31378 +
31379 dprintk("%s\n", __func__);
31380
31381 /* check if we have space for a link buf in the rx_buffer */
31382 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
31383 unsigned long timeout;
31384 int written;
31385
31386 + pax_track_stack();
31387 +
31388 dprintk("%s\n", __func__);
31389
31390 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31391 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c
31392 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
31393 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
31394 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
31395 const struct dvb_device *template, void *priv, int type)
31396 {
31397 struct dvb_device *dvbdev;
31398 + /* cannot be const */
31399 struct file_operations *dvbdevfops;
31400 struct device *clsdev;
31401 int minor;
31402 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c
31403 --- linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
31404 +++ linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
31405 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
31406
31407 u8 buf[260];
31408
31409 + pax_track_stack();
31410 +
31411 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31412 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
31413
31414 diff -urNp linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c
31415 --- linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
31416 +++ linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
31417 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
31418 u8 tudata[585];
31419 int i;
31420
31421 + pax_track_stack();
31422 +
31423 dprintk("Firmware is %zd bytes\n",fw->size);
31424
31425 /* Get eprom data */
31426 diff -urNp linux-2.6.32.42/drivers/media/radio/radio-cadet.c linux-2.6.32.42/drivers/media/radio/radio-cadet.c
31427 --- linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
31428 +++ linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
31429 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
31430 while (i < count && dev->rdsin != dev->rdsout)
31431 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
31432
31433 - if (copy_to_user(data, readbuf, i))
31434 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
31435 return -EFAULT;
31436 return i;
31437 }
31438 diff -urNp linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c
31439 --- linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
31440 +++ linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
31441 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
31442
31443 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
31444
31445 -static atomic_t cx18_instance = ATOMIC_INIT(0);
31446 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
31447
31448 /* Parameter declarations */
31449 static int cardtype[CX18_MAX_CARDS];
31450 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
31451 struct i2c_client c;
31452 u8 eedata[256];
31453
31454 + pax_track_stack();
31455 +
31456 memset(&c, 0, sizeof(c));
31457 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31458 c.adapter = &cx->i2c_adap[0];
31459 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
31460 struct cx18 *cx;
31461
31462 /* FIXME - module parameter arrays constrain max instances */
31463 - i = atomic_inc_return(&cx18_instance) - 1;
31464 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
31465 if (i >= CX18_MAX_CARDS) {
31466 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
31467 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
31468 diff -urNp linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c
31469 --- linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
31470 +++ linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
31471 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
31472 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
31473
31474 /* ivtv instance counter */
31475 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
31476 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
31477
31478 /* Parameter declarations */
31479 static int cardtype[IVTV_MAX_CARDS];
31480 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.c linux-2.6.32.42/drivers/media/video/omap24xxcam.c
31481 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
31482 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
31483 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
31484 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
31485
31486 do_gettimeofday(&vb->ts);
31487 - vb->field_count = atomic_add_return(2, &fh->field_count);
31488 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
31489 if (csr & csr_error) {
31490 vb->state = VIDEOBUF_ERROR;
31491 if (!atomic_read(&fh->cam->in_reset)) {
31492 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.h linux-2.6.32.42/drivers/media/video/omap24xxcam.h
31493 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
31494 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
31495 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
31496 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
31497 struct videobuf_queue vbq;
31498 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
31499 - atomic_t field_count; /* field counter for videobuf_buffer */
31500 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
31501 /* accessing cam here doesn't need serialisation: it's constant */
31502 struct omap24xxcam_device *cam;
31503 };
31504 diff -urNp linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31505 --- linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
31506 +++ linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
31507 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
31508 u8 *eeprom;
31509 struct tveeprom tvdata;
31510
31511 + pax_track_stack();
31512 +
31513 memset(&tvdata,0,sizeof(tvdata));
31514
31515 eeprom = pvr2_eeprom_fetch(hdw);
31516 diff -urNp linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c
31517 --- linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
31518 +++ linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
31519 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
31520 unsigned char localPAT[256];
31521 unsigned char localPMT[256];
31522
31523 + pax_track_stack();
31524 +
31525 /* Set video format - must be done first as it resets other settings */
31526 set_reg8(client, 0x41, h->video_format);
31527
31528 diff -urNp linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c
31529 --- linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
31530 +++ linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
31531 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
31532 wait_queue_head_t *q = 0;
31533 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31534
31535 + pax_track_stack();
31536 +
31537 /* While any outstand message on the bus exists... */
31538 do {
31539
31540 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
31541 u8 tmp[512];
31542 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31543
31544 + pax_track_stack();
31545 +
31546 while (loop) {
31547
31548 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
31549 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c
31550 --- linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
31551 +++ linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
31552 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
31553 int error;
31554
31555 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31556 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31557 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31558
31559 cam->input = input_dev = input_allocate_device();
31560 if (!input_dev) {
31561 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c
31562 --- linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
31563 +++ linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
31564 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
31565 int error;
31566
31567 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31568 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31569 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31570
31571 cam->input = input_dev = input_allocate_device();
31572 if (!input_dev) {
31573 diff -urNp linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c
31574 --- linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
31575 +++ linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
31576 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
31577 unsigned char rv, gv, bv;
31578 static unsigned char *Y, *U, *V;
31579
31580 + pax_track_stack();
31581 +
31582 frame = usbvision->curFrame;
31583 imageSize = frame->frmwidth * frame->frmheight;
31584 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31585 diff -urNp linux-2.6.32.42/drivers/media/video/v4l2-device.c linux-2.6.32.42/drivers/media/video/v4l2-device.c
31586 --- linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
31587 +++ linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
31588 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
31589 EXPORT_SYMBOL_GPL(v4l2_device_register);
31590
31591 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
31592 - atomic_t *instance)
31593 + atomic_unchecked_t *instance)
31594 {
31595 - int num = atomic_inc_return(instance) - 1;
31596 + int num = atomic_inc_return_unchecked(instance) - 1;
31597 int len = strlen(basename);
31598
31599 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
31600 diff -urNp linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c
31601 --- linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
31602 +++ linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
31603 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
31604 {
31605 struct videobuf_queue q;
31606
31607 + pax_track_stack();
31608 +
31609 /* Required to make generic handler to call __videobuf_alloc */
31610 q.int_ops = &sg_ops;
31611
31612 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptbase.c linux-2.6.32.42/drivers/message/fusion/mptbase.c
31613 --- linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31614 +++ linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31615 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31616 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31617 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31618
31619 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31620 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31621 + NULL, NULL);
31622 +#else
31623 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31624 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31625 +#endif
31626 +
31627 /*
31628 * Rounding UP to nearest 4-kB boundary here...
31629 */
31630 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptsas.c linux-2.6.32.42/drivers/message/fusion/mptsas.c
31631 --- linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31632 +++ linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31633 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31634 return 0;
31635 }
31636
31637 +static inline void
31638 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31639 +{
31640 + if (phy_info->port_details) {
31641 + phy_info->port_details->rphy = rphy;
31642 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31643 + ioc->name, rphy));
31644 + }
31645 +
31646 + if (rphy) {
31647 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31648 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31649 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31650 + ioc->name, rphy, rphy->dev.release));
31651 + }
31652 +}
31653 +
31654 /* no mutex */
31655 static void
31656 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31657 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31658 return NULL;
31659 }
31660
31661 -static inline void
31662 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31663 -{
31664 - if (phy_info->port_details) {
31665 - phy_info->port_details->rphy = rphy;
31666 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31667 - ioc->name, rphy));
31668 - }
31669 -
31670 - if (rphy) {
31671 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31672 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31673 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31674 - ioc->name, rphy, rphy->dev.release));
31675 - }
31676 -}
31677 -
31678 static inline struct sas_port *
31679 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31680 {
31681 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptscsih.c linux-2.6.32.42/drivers/message/fusion/mptscsih.c
31682 --- linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31683 +++ linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31684 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31685
31686 h = shost_priv(SChost);
31687
31688 - if (h) {
31689 - if (h->info_kbuf == NULL)
31690 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31691 - return h->info_kbuf;
31692 - h->info_kbuf[0] = '\0';
31693 + if (!h)
31694 + return NULL;
31695
31696 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31697 - h->info_kbuf[size-1] = '\0';
31698 - }
31699 + if (h->info_kbuf == NULL)
31700 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31701 + return h->info_kbuf;
31702 + h->info_kbuf[0] = '\0';
31703 +
31704 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31705 + h->info_kbuf[size-1] = '\0';
31706
31707 return h->info_kbuf;
31708 }
31709 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_config.c linux-2.6.32.42/drivers/message/i2o/i2o_config.c
31710 --- linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31711 +++ linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31712 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31713 struct i2o_message *msg;
31714 unsigned int iop;
31715
31716 + pax_track_stack();
31717 +
31718 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31719 return -EFAULT;
31720
31721 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_proc.c linux-2.6.32.42/drivers/message/i2o/i2o_proc.c
31722 --- linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31723 +++ linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31724 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31725 "Array Controller Device"
31726 };
31727
31728 -static char *chtostr(u8 * chars, int n)
31729 -{
31730 - char tmp[256];
31731 - tmp[0] = 0;
31732 - return strncat(tmp, (char *)chars, n);
31733 -}
31734 -
31735 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31736 char *group)
31737 {
31738 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31739
31740 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31741 seq_printf(seq, "%-#8x", ddm_table.module_id);
31742 - seq_printf(seq, "%-29s",
31743 - chtostr(ddm_table.module_name_version, 28));
31744 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31745 seq_printf(seq, "%9d ", ddm_table.data_size);
31746 seq_printf(seq, "%8d", ddm_table.code_size);
31747
31748 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31749
31750 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31751 seq_printf(seq, "%-#8x", dst->module_id);
31752 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31753 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31754 + seq_printf(seq, "%-.28s", dst->module_name_version);
31755 + seq_printf(seq, "%-.8s", dst->date);
31756 seq_printf(seq, "%8d ", dst->module_size);
31757 seq_printf(seq, "%8d ", dst->mpb_size);
31758 seq_printf(seq, "0x%04x", dst->module_flags);
31759 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31760 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31761 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31762 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31763 - seq_printf(seq, "Vendor info : %s\n",
31764 - chtostr((u8 *) (work32 + 2), 16));
31765 - seq_printf(seq, "Product info : %s\n",
31766 - chtostr((u8 *) (work32 + 6), 16));
31767 - seq_printf(seq, "Description : %s\n",
31768 - chtostr((u8 *) (work32 + 10), 16));
31769 - seq_printf(seq, "Product rev. : %s\n",
31770 - chtostr((u8 *) (work32 + 14), 8));
31771 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31772 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31773 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31774 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31775
31776 seq_printf(seq, "Serial number : ");
31777 print_serial_number(seq, (u8 *) (work32 + 16),
31778 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31779 }
31780
31781 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31782 - seq_printf(seq, "Module name : %s\n",
31783 - chtostr(result.module_name, 24));
31784 - seq_printf(seq, "Module revision : %s\n",
31785 - chtostr(result.module_rev, 8));
31786 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31787 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31788
31789 seq_printf(seq, "Serial number : ");
31790 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31791 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31792 return 0;
31793 }
31794
31795 - seq_printf(seq, "Device name : %s\n",
31796 - chtostr(result.device_name, 64));
31797 - seq_printf(seq, "Service name : %s\n",
31798 - chtostr(result.service_name, 64));
31799 - seq_printf(seq, "Physical name : %s\n",
31800 - chtostr(result.physical_location, 64));
31801 - seq_printf(seq, "Instance number : %s\n",
31802 - chtostr(result.instance_number, 4));
31803 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31804 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31805 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31806 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31807
31808 return 0;
31809 }
31810 diff -urNp linux-2.6.32.42/drivers/message/i2o/iop.c linux-2.6.32.42/drivers/message/i2o/iop.c
31811 --- linux-2.6.32.42/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31812 +++ linux-2.6.32.42/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31813 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31814
31815 spin_lock_irqsave(&c->context_list_lock, flags);
31816
31817 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31818 - atomic_inc(&c->context_list_counter);
31819 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31820 + atomic_inc_unchecked(&c->context_list_counter);
31821
31822 - entry->context = atomic_read(&c->context_list_counter);
31823 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31824
31825 list_add(&entry->list, &c->context_list);
31826
31827 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31828
31829 #if BITS_PER_LONG == 64
31830 spin_lock_init(&c->context_list_lock);
31831 - atomic_set(&c->context_list_counter, 0);
31832 + atomic_set_unchecked(&c->context_list_counter, 0);
31833 INIT_LIST_HEAD(&c->context_list);
31834 #endif
31835
31836 diff -urNp linux-2.6.32.42/drivers/mfd/wm8350-i2c.c linux-2.6.32.42/drivers/mfd/wm8350-i2c.c
31837 --- linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31838 +++ linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31839 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31840 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31841 int ret;
31842
31843 + pax_track_stack();
31844 +
31845 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31846 return -EINVAL;
31847
31848 diff -urNp linux-2.6.32.42/drivers/misc/kgdbts.c linux-2.6.32.42/drivers/misc/kgdbts.c
31849 --- linux-2.6.32.42/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31850 +++ linux-2.6.32.42/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31851 @@ -118,7 +118,7 @@
31852 } while (0)
31853 #define MAX_CONFIG_LEN 40
31854
31855 -static struct kgdb_io kgdbts_io_ops;
31856 +static const struct kgdb_io kgdbts_io_ops;
31857 static char get_buf[BUFMAX];
31858 static int get_buf_cnt;
31859 static char put_buf[BUFMAX];
31860 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31861 module_put(THIS_MODULE);
31862 }
31863
31864 -static struct kgdb_io kgdbts_io_ops = {
31865 +static const struct kgdb_io kgdbts_io_ops = {
31866 .name = "kgdbts",
31867 .read_char = kgdbts_get_char,
31868 .write_char = kgdbts_put_char,
31869 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c
31870 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31871 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31872 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31873
31874 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31875 {
31876 - atomic_long_inc(&mcs_op_statistics[op].count);
31877 - atomic_long_add(clks, &mcs_op_statistics[op].total);
31878 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31879 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31880 if (mcs_op_statistics[op].max < clks)
31881 mcs_op_statistics[op].max = clks;
31882 }
31883 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c
31884 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31885 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31886 @@ -32,9 +32,9 @@
31887
31888 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31889
31890 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31891 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31892 {
31893 - unsigned long val = atomic_long_read(v);
31894 + unsigned long val = atomic_long_read_unchecked(v);
31895
31896 if (val)
31897 seq_printf(s, "%16lu %s\n", val, id);
31898 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31899 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31900
31901 for (op = 0; op < mcsop_last; op++) {
31902 - count = atomic_long_read(&mcs_op_statistics[op].count);
31903 - total = atomic_long_read(&mcs_op_statistics[op].total);
31904 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31905 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31906 max = mcs_op_statistics[op].max;
31907 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31908 count ? total / count : 0, max);
31909 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h
31910 --- linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31911 +++ linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31912 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31913 * GRU statistics.
31914 */
31915 struct gru_stats_s {
31916 - atomic_long_t vdata_alloc;
31917 - atomic_long_t vdata_free;
31918 - atomic_long_t gts_alloc;
31919 - atomic_long_t gts_free;
31920 - atomic_long_t vdata_double_alloc;
31921 - atomic_long_t gts_double_allocate;
31922 - atomic_long_t assign_context;
31923 - atomic_long_t assign_context_failed;
31924 - atomic_long_t free_context;
31925 - atomic_long_t load_user_context;
31926 - atomic_long_t load_kernel_context;
31927 - atomic_long_t lock_kernel_context;
31928 - atomic_long_t unlock_kernel_context;
31929 - atomic_long_t steal_user_context;
31930 - atomic_long_t steal_kernel_context;
31931 - atomic_long_t steal_context_failed;
31932 - atomic_long_t nopfn;
31933 - atomic_long_t break_cow;
31934 - atomic_long_t asid_new;
31935 - atomic_long_t asid_next;
31936 - atomic_long_t asid_wrap;
31937 - atomic_long_t asid_reuse;
31938 - atomic_long_t intr;
31939 - atomic_long_t intr_mm_lock_failed;
31940 - atomic_long_t call_os;
31941 - atomic_long_t call_os_offnode_reference;
31942 - atomic_long_t call_os_check_for_bug;
31943 - atomic_long_t call_os_wait_queue;
31944 - atomic_long_t user_flush_tlb;
31945 - atomic_long_t user_unload_context;
31946 - atomic_long_t user_exception;
31947 - atomic_long_t set_context_option;
31948 - atomic_long_t migrate_check;
31949 - atomic_long_t migrated_retarget;
31950 - atomic_long_t migrated_unload;
31951 - atomic_long_t migrated_unload_delay;
31952 - atomic_long_t migrated_nopfn_retarget;
31953 - atomic_long_t migrated_nopfn_unload;
31954 - atomic_long_t tlb_dropin;
31955 - atomic_long_t tlb_dropin_fail_no_asid;
31956 - atomic_long_t tlb_dropin_fail_upm;
31957 - atomic_long_t tlb_dropin_fail_invalid;
31958 - atomic_long_t tlb_dropin_fail_range_active;
31959 - atomic_long_t tlb_dropin_fail_idle;
31960 - atomic_long_t tlb_dropin_fail_fmm;
31961 - atomic_long_t tlb_dropin_fail_no_exception;
31962 - atomic_long_t tlb_dropin_fail_no_exception_war;
31963 - atomic_long_t tfh_stale_on_fault;
31964 - atomic_long_t mmu_invalidate_range;
31965 - atomic_long_t mmu_invalidate_page;
31966 - atomic_long_t mmu_clear_flush_young;
31967 - atomic_long_t flush_tlb;
31968 - atomic_long_t flush_tlb_gru;
31969 - atomic_long_t flush_tlb_gru_tgh;
31970 - atomic_long_t flush_tlb_gru_zero_asid;
31971 -
31972 - atomic_long_t copy_gpa;
31973 -
31974 - atomic_long_t mesq_receive;
31975 - atomic_long_t mesq_receive_none;
31976 - atomic_long_t mesq_send;
31977 - atomic_long_t mesq_send_failed;
31978 - atomic_long_t mesq_noop;
31979 - atomic_long_t mesq_send_unexpected_error;
31980 - atomic_long_t mesq_send_lb_overflow;
31981 - atomic_long_t mesq_send_qlimit_reached;
31982 - atomic_long_t mesq_send_amo_nacked;
31983 - atomic_long_t mesq_send_put_nacked;
31984 - atomic_long_t mesq_qf_not_full;
31985 - atomic_long_t mesq_qf_locked;
31986 - atomic_long_t mesq_qf_noop_not_full;
31987 - atomic_long_t mesq_qf_switch_head_failed;
31988 - atomic_long_t mesq_qf_unexpected_error;
31989 - atomic_long_t mesq_noop_unexpected_error;
31990 - atomic_long_t mesq_noop_lb_overflow;
31991 - atomic_long_t mesq_noop_qlimit_reached;
31992 - atomic_long_t mesq_noop_amo_nacked;
31993 - atomic_long_t mesq_noop_put_nacked;
31994 + atomic_long_unchecked_t vdata_alloc;
31995 + atomic_long_unchecked_t vdata_free;
31996 + atomic_long_unchecked_t gts_alloc;
31997 + atomic_long_unchecked_t gts_free;
31998 + atomic_long_unchecked_t vdata_double_alloc;
31999 + atomic_long_unchecked_t gts_double_allocate;
32000 + atomic_long_unchecked_t assign_context;
32001 + atomic_long_unchecked_t assign_context_failed;
32002 + atomic_long_unchecked_t free_context;
32003 + atomic_long_unchecked_t load_user_context;
32004 + atomic_long_unchecked_t load_kernel_context;
32005 + atomic_long_unchecked_t lock_kernel_context;
32006 + atomic_long_unchecked_t unlock_kernel_context;
32007 + atomic_long_unchecked_t steal_user_context;
32008 + atomic_long_unchecked_t steal_kernel_context;
32009 + atomic_long_unchecked_t steal_context_failed;
32010 + atomic_long_unchecked_t nopfn;
32011 + atomic_long_unchecked_t break_cow;
32012 + atomic_long_unchecked_t asid_new;
32013 + atomic_long_unchecked_t asid_next;
32014 + atomic_long_unchecked_t asid_wrap;
32015 + atomic_long_unchecked_t asid_reuse;
32016 + atomic_long_unchecked_t intr;
32017 + atomic_long_unchecked_t intr_mm_lock_failed;
32018 + atomic_long_unchecked_t call_os;
32019 + atomic_long_unchecked_t call_os_offnode_reference;
32020 + atomic_long_unchecked_t call_os_check_for_bug;
32021 + atomic_long_unchecked_t call_os_wait_queue;
32022 + atomic_long_unchecked_t user_flush_tlb;
32023 + atomic_long_unchecked_t user_unload_context;
32024 + atomic_long_unchecked_t user_exception;
32025 + atomic_long_unchecked_t set_context_option;
32026 + atomic_long_unchecked_t migrate_check;
32027 + atomic_long_unchecked_t migrated_retarget;
32028 + atomic_long_unchecked_t migrated_unload;
32029 + atomic_long_unchecked_t migrated_unload_delay;
32030 + atomic_long_unchecked_t migrated_nopfn_retarget;
32031 + atomic_long_unchecked_t migrated_nopfn_unload;
32032 + atomic_long_unchecked_t tlb_dropin;
32033 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32034 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32035 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32036 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32037 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32038 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32039 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32040 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
32041 + atomic_long_unchecked_t tfh_stale_on_fault;
32042 + atomic_long_unchecked_t mmu_invalidate_range;
32043 + atomic_long_unchecked_t mmu_invalidate_page;
32044 + atomic_long_unchecked_t mmu_clear_flush_young;
32045 + atomic_long_unchecked_t flush_tlb;
32046 + atomic_long_unchecked_t flush_tlb_gru;
32047 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32048 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32049 +
32050 + atomic_long_unchecked_t copy_gpa;
32051 +
32052 + atomic_long_unchecked_t mesq_receive;
32053 + atomic_long_unchecked_t mesq_receive_none;
32054 + atomic_long_unchecked_t mesq_send;
32055 + atomic_long_unchecked_t mesq_send_failed;
32056 + atomic_long_unchecked_t mesq_noop;
32057 + atomic_long_unchecked_t mesq_send_unexpected_error;
32058 + atomic_long_unchecked_t mesq_send_lb_overflow;
32059 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32060 + atomic_long_unchecked_t mesq_send_amo_nacked;
32061 + atomic_long_unchecked_t mesq_send_put_nacked;
32062 + atomic_long_unchecked_t mesq_qf_not_full;
32063 + atomic_long_unchecked_t mesq_qf_locked;
32064 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32065 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32066 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32067 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32068 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32069 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32070 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32071 + atomic_long_unchecked_t mesq_noop_put_nacked;
32072
32073 };
32074
32075 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
32076 cchop_deallocate, tghop_invalidate, mcsop_last};
32077
32078 struct mcs_op_statistic {
32079 - atomic_long_t count;
32080 - atomic_long_t total;
32081 + atomic_long_unchecked_t count;
32082 + atomic_long_unchecked_t total;
32083 unsigned long max;
32084 };
32085
32086 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
32087
32088 #define STAT(id) do { \
32089 if (gru_options & OPT_STATS) \
32090 - atomic_long_inc(&gru_stats.id); \
32091 + atomic_long_inc_unchecked(&gru_stats.id); \
32092 } while (0)
32093
32094 #ifdef CONFIG_SGI_GRU_DEBUG
32095 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c
32096 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
32097 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
32098 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
32099 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32100 unsigned long timeo = jiffies + HZ;
32101
32102 + pax_track_stack();
32103 +
32104 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32105 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32106 goto sleep;
32107 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
32108 unsigned long initial_adr;
32109 int initial_len = len;
32110
32111 + pax_track_stack();
32112 +
32113 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32114 adr += chip->start;
32115 initial_adr = adr;
32116 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
32117 int retries = 3;
32118 int ret;
32119
32120 + pax_track_stack();
32121 +
32122 adr += chip->start;
32123
32124 retry:
32125 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c
32126 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
32127 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
32128 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
32129 unsigned long cmd_addr;
32130 struct cfi_private *cfi = map->fldrv_priv;
32131
32132 + pax_track_stack();
32133 +
32134 adr += chip->start;
32135
32136 /* Ensure cmd read/writes are aligned. */
32137 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
32138 DECLARE_WAITQUEUE(wait, current);
32139 int wbufsize, z;
32140
32141 + pax_track_stack();
32142 +
32143 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32144 if (adr & (map_bankwidth(map)-1))
32145 return -EINVAL;
32146 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
32147 DECLARE_WAITQUEUE(wait, current);
32148 int ret = 0;
32149
32150 + pax_track_stack();
32151 +
32152 adr += chip->start;
32153
32154 /* Let's determine this according to the interleave only once */
32155 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
32156 unsigned long timeo = jiffies + HZ;
32157 DECLARE_WAITQUEUE(wait, current);
32158
32159 + pax_track_stack();
32160 +
32161 adr += chip->start;
32162
32163 /* Let's determine this according to the interleave only once */
32164 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
32165 unsigned long timeo = jiffies + HZ;
32166 DECLARE_WAITQUEUE(wait, current);
32167
32168 + pax_track_stack();
32169 +
32170 adr += chip->start;
32171
32172 /* Let's determine this according to the interleave only once */
32173 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2000.c linux-2.6.32.42/drivers/mtd/devices/doc2000.c
32174 --- linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
32175 +++ linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
32176 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
32177
32178 /* The ECC will not be calculated correctly if less than 512 is written */
32179 /* DBB-
32180 - if (len != 0x200 && eccbuf)
32181 + if (len != 0x200)
32182 printk(KERN_WARNING
32183 "ECC needs a full sector write (adr: %lx size %lx)\n",
32184 (long) to, (long) len);
32185 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2001.c linux-2.6.32.42/drivers/mtd/devices/doc2001.c
32186 --- linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
32187 +++ linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
32188 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
32189 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32190
32191 /* Don't allow read past end of device */
32192 - if (from >= this->totlen)
32193 + if (from >= this->totlen || !len)
32194 return -EINVAL;
32195
32196 /* Don't allow a single read to cross a 512-byte block boundary */
32197 diff -urNp linux-2.6.32.42/drivers/mtd/ftl.c linux-2.6.32.42/drivers/mtd/ftl.c
32198 --- linux-2.6.32.42/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
32199 +++ linux-2.6.32.42/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
32200 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
32201 loff_t offset;
32202 uint16_t srcunitswap = cpu_to_le16(srcunit);
32203
32204 + pax_track_stack();
32205 +
32206 eun = &part->EUNInfo[srcunit];
32207 xfer = &part->XferInfo[xferunit];
32208 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32209 diff -urNp linux-2.6.32.42/drivers/mtd/inftlcore.c linux-2.6.32.42/drivers/mtd/inftlcore.c
32210 --- linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
32211 +++ linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
32212 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
32213 struct inftl_oob oob;
32214 size_t retlen;
32215
32216 + pax_track_stack();
32217 +
32218 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32219 "pending=%d)\n", inftl, thisVUC, pendingblock);
32220
32221 diff -urNp linux-2.6.32.42/drivers/mtd/inftlmount.c linux-2.6.32.42/drivers/mtd/inftlmount.c
32222 --- linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
32223 +++ linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
32224 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
32225 struct INFTLPartition *ip;
32226 size_t retlen;
32227
32228 + pax_track_stack();
32229 +
32230 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32231
32232 /*
32233 diff -urNp linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c
32234 --- linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
32235 +++ linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
32236 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
32237 {
32238 map_word pfow_val[4];
32239
32240 + pax_track_stack();
32241 +
32242 /* Check identification string */
32243 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32244 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
32245 diff -urNp linux-2.6.32.42/drivers/mtd/mtdchar.c linux-2.6.32.42/drivers/mtd/mtdchar.c
32246 --- linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
32247 +++ linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
32248 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
32249 u_long size;
32250 struct mtd_info_user info;
32251
32252 + pax_track_stack();
32253 +
32254 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32255
32256 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
32257 diff -urNp linux-2.6.32.42/drivers/mtd/nftlcore.c linux-2.6.32.42/drivers/mtd/nftlcore.c
32258 --- linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
32259 +++ linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
32260 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
32261 int inplace = 1;
32262 size_t retlen;
32263
32264 + pax_track_stack();
32265 +
32266 memset(BlockMap, 0xff, sizeof(BlockMap));
32267 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32268
32269 diff -urNp linux-2.6.32.42/drivers/mtd/nftlmount.c linux-2.6.32.42/drivers/mtd/nftlmount.c
32270 --- linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
32271 +++ linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
32272 @@ -23,6 +23,7 @@
32273 #include <asm/errno.h>
32274 #include <linux/delay.h>
32275 #include <linux/slab.h>
32276 +#include <linux/sched.h>
32277 #include <linux/mtd/mtd.h>
32278 #include <linux/mtd/nand.h>
32279 #include <linux/mtd/nftl.h>
32280 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
32281 struct mtd_info *mtd = nftl->mbd.mtd;
32282 unsigned int i;
32283
32284 + pax_track_stack();
32285 +
32286 /* Assume logical EraseSize == physical erasesize for starting the scan.
32287 We'll sort it out later if we find a MediaHeader which says otherwise */
32288 /* Actually, we won't. The new DiskOnChip driver has already scanned
32289 diff -urNp linux-2.6.32.42/drivers/mtd/ubi/build.c linux-2.6.32.42/drivers/mtd/ubi/build.c
32290 --- linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
32291 +++ linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
32292 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
32293 static int __init bytes_str_to_int(const char *str)
32294 {
32295 char *endp;
32296 - unsigned long result;
32297 + unsigned long result, scale = 1;
32298
32299 result = simple_strtoul(str, &endp, 0);
32300 if (str == endp || result >= INT_MAX) {
32301 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
32302
32303 switch (*endp) {
32304 case 'G':
32305 - result *= 1024;
32306 + scale *= 1024;
32307 case 'M':
32308 - result *= 1024;
32309 + scale *= 1024;
32310 case 'K':
32311 - result *= 1024;
32312 + scale *= 1024;
32313 if (endp[1] == 'i' && endp[2] == 'B')
32314 endp += 2;
32315 case '\0':
32316 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
32317 return -EINVAL;
32318 }
32319
32320 - return result;
32321 + if ((intoverflow_t)result*scale >= INT_MAX) {
32322 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32323 + str);
32324 + return -EINVAL;
32325 + }
32326 +
32327 + return result*scale;
32328 }
32329
32330 /**
32331 diff -urNp linux-2.6.32.42/drivers/net/bnx2.c linux-2.6.32.42/drivers/net/bnx2.c
32332 --- linux-2.6.32.42/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
32333 +++ linux-2.6.32.42/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
32334 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
32335 int rc = 0;
32336 u32 magic, csum;
32337
32338 + pax_track_stack();
32339 +
32340 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
32341 goto test_nvram_done;
32342
32343 diff -urNp linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c
32344 --- linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
32345 +++ linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
32346 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
32347 int i, addr, ret;
32348 struct t3_vpd vpd;
32349
32350 + pax_track_stack();
32351 +
32352 /*
32353 * Card information is normally at VPD_BASE but some early cards had
32354 * it at 0.
32355 diff -urNp linux-2.6.32.42/drivers/net/e1000e/82571.c linux-2.6.32.42/drivers/net/e1000e/82571.c
32356 --- linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
32357 +++ linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
32358 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
32359 {
32360 struct e1000_hw *hw = &adapter->hw;
32361 struct e1000_mac_info *mac = &hw->mac;
32362 + /* cannot be const */
32363 struct e1000_mac_operations *func = &mac->ops;
32364 u32 swsm = 0;
32365 u32 swsm2 = 0;
32366 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
32367 temp = er32(ICRXDMTC);
32368 }
32369
32370 -static struct e1000_mac_operations e82571_mac_ops = {
32371 +static const struct e1000_mac_operations e82571_mac_ops = {
32372 /* .check_mng_mode: mac type dependent */
32373 /* .check_for_link: media type dependent */
32374 .id_led_init = e1000e_id_led_init,
32375 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
32376 .setup_led = e1000e_setup_led_generic,
32377 };
32378
32379 -static struct e1000_phy_operations e82_phy_ops_igp = {
32380 +static const struct e1000_phy_operations e82_phy_ops_igp = {
32381 .acquire_phy = e1000_get_hw_semaphore_82571,
32382 .check_reset_block = e1000e_check_reset_block_generic,
32383 .commit_phy = NULL,
32384 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
32385 .cfg_on_link_up = NULL,
32386 };
32387
32388 -static struct e1000_phy_operations e82_phy_ops_m88 = {
32389 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
32390 .acquire_phy = e1000_get_hw_semaphore_82571,
32391 .check_reset_block = e1000e_check_reset_block_generic,
32392 .commit_phy = e1000e_phy_sw_reset,
32393 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
32394 .cfg_on_link_up = NULL,
32395 };
32396
32397 -static struct e1000_phy_operations e82_phy_ops_bm = {
32398 +static const struct e1000_phy_operations e82_phy_ops_bm = {
32399 .acquire_phy = e1000_get_hw_semaphore_82571,
32400 .check_reset_block = e1000e_check_reset_block_generic,
32401 .commit_phy = e1000e_phy_sw_reset,
32402 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
32403 .cfg_on_link_up = NULL,
32404 };
32405
32406 -static struct e1000_nvm_operations e82571_nvm_ops = {
32407 +static const struct e1000_nvm_operations e82571_nvm_ops = {
32408 .acquire_nvm = e1000_acquire_nvm_82571,
32409 .read_nvm = e1000e_read_nvm_eerd,
32410 .release_nvm = e1000_release_nvm_82571,
32411 diff -urNp linux-2.6.32.42/drivers/net/e1000e/e1000.h linux-2.6.32.42/drivers/net/e1000e/e1000.h
32412 --- linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
32413 +++ linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
32414 @@ -375,9 +375,9 @@ struct e1000_info {
32415 u32 pba;
32416 u32 max_hw_frame_size;
32417 s32 (*get_variants)(struct e1000_adapter *);
32418 - struct e1000_mac_operations *mac_ops;
32419 - struct e1000_phy_operations *phy_ops;
32420 - struct e1000_nvm_operations *nvm_ops;
32421 + const struct e1000_mac_operations *mac_ops;
32422 + const struct e1000_phy_operations *phy_ops;
32423 + const struct e1000_nvm_operations *nvm_ops;
32424 };
32425
32426 /* hardware capability, feature, and workaround flags */
32427 diff -urNp linux-2.6.32.42/drivers/net/e1000e/es2lan.c linux-2.6.32.42/drivers/net/e1000e/es2lan.c
32428 --- linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
32429 +++ linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
32430 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
32431 {
32432 struct e1000_hw *hw = &adapter->hw;
32433 struct e1000_mac_info *mac = &hw->mac;
32434 + /* cannot be const */
32435 struct e1000_mac_operations *func = &mac->ops;
32436
32437 /* Set media type */
32438 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
32439 temp = er32(ICRXDMTC);
32440 }
32441
32442 -static struct e1000_mac_operations es2_mac_ops = {
32443 +static const struct e1000_mac_operations es2_mac_ops = {
32444 .id_led_init = e1000e_id_led_init,
32445 .check_mng_mode = e1000e_check_mng_mode_generic,
32446 /* check_for_link dependent on media type */
32447 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
32448 .setup_led = e1000e_setup_led_generic,
32449 };
32450
32451 -static struct e1000_phy_operations es2_phy_ops = {
32452 +static const struct e1000_phy_operations es2_phy_ops = {
32453 .acquire_phy = e1000_acquire_phy_80003es2lan,
32454 .check_reset_block = e1000e_check_reset_block_generic,
32455 .commit_phy = e1000e_phy_sw_reset,
32456 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
32457 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
32458 };
32459
32460 -static struct e1000_nvm_operations es2_nvm_ops = {
32461 +static const struct e1000_nvm_operations es2_nvm_ops = {
32462 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
32463 .read_nvm = e1000e_read_nvm_eerd,
32464 .release_nvm = e1000_release_nvm_80003es2lan,
32465 diff -urNp linux-2.6.32.42/drivers/net/e1000e/hw.h linux-2.6.32.42/drivers/net/e1000e/hw.h
32466 --- linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
32467 +++ linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
32468 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
32469
32470 /* Function pointers for the PHY. */
32471 struct e1000_phy_operations {
32472 - s32 (*acquire_phy)(struct e1000_hw *);
32473 - s32 (*check_polarity)(struct e1000_hw *);
32474 - s32 (*check_reset_block)(struct e1000_hw *);
32475 - s32 (*commit_phy)(struct e1000_hw *);
32476 - s32 (*force_speed_duplex)(struct e1000_hw *);
32477 - s32 (*get_cfg_done)(struct e1000_hw *hw);
32478 - s32 (*get_cable_length)(struct e1000_hw *);
32479 - s32 (*get_phy_info)(struct e1000_hw *);
32480 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
32481 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32482 - void (*release_phy)(struct e1000_hw *);
32483 - s32 (*reset_phy)(struct e1000_hw *);
32484 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
32485 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32486 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
32487 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32488 - s32 (*cfg_on_link_up)(struct e1000_hw *);
32489 + s32 (* acquire_phy)(struct e1000_hw *);
32490 + s32 (* check_polarity)(struct e1000_hw *);
32491 + s32 (* check_reset_block)(struct e1000_hw *);
32492 + s32 (* commit_phy)(struct e1000_hw *);
32493 + s32 (* force_speed_duplex)(struct e1000_hw *);
32494 + s32 (* get_cfg_done)(struct e1000_hw *hw);
32495 + s32 (* get_cable_length)(struct e1000_hw *);
32496 + s32 (* get_phy_info)(struct e1000_hw *);
32497 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
32498 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32499 + void (* release_phy)(struct e1000_hw *);
32500 + s32 (* reset_phy)(struct e1000_hw *);
32501 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
32502 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
32503 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
32504 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32505 + s32 (* cfg_on_link_up)(struct e1000_hw *);
32506 };
32507
32508 /* Function pointers for the NVM. */
32509 struct e1000_nvm_operations {
32510 - s32 (*acquire_nvm)(struct e1000_hw *);
32511 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32512 - void (*release_nvm)(struct e1000_hw *);
32513 - s32 (*update_nvm)(struct e1000_hw *);
32514 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
32515 - s32 (*validate_nvm)(struct e1000_hw *);
32516 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32517 + s32 (* const acquire_nvm)(struct e1000_hw *);
32518 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32519 + void (* const release_nvm)(struct e1000_hw *);
32520 + s32 (* const update_nvm)(struct e1000_hw *);
32521 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
32522 + s32 (* const validate_nvm)(struct e1000_hw *);
32523 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32524 };
32525
32526 struct e1000_mac_info {
32527 diff -urNp linux-2.6.32.42/drivers/net/e1000e/ich8lan.c linux-2.6.32.42/drivers/net/e1000e/ich8lan.c
32528 --- linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
32529 +++ linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
32530 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
32531 }
32532 }
32533
32534 -static struct e1000_mac_operations ich8_mac_ops = {
32535 +static const struct e1000_mac_operations ich8_mac_ops = {
32536 .id_led_init = e1000e_id_led_init,
32537 .check_mng_mode = e1000_check_mng_mode_ich8lan,
32538 .check_for_link = e1000_check_for_copper_link_ich8lan,
32539 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
32540 /* id_led_init dependent on mac type */
32541 };
32542
32543 -static struct e1000_phy_operations ich8_phy_ops = {
32544 +static const struct e1000_phy_operations ich8_phy_ops = {
32545 .acquire_phy = e1000_acquire_swflag_ich8lan,
32546 .check_reset_block = e1000_check_reset_block_ich8lan,
32547 .commit_phy = NULL,
32548 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
32549 .write_phy_reg = e1000e_write_phy_reg_igp,
32550 };
32551
32552 -static struct e1000_nvm_operations ich8_nvm_ops = {
32553 +static const struct e1000_nvm_operations ich8_nvm_ops = {
32554 .acquire_nvm = e1000_acquire_nvm_ich8lan,
32555 .read_nvm = e1000_read_nvm_ich8lan,
32556 .release_nvm = e1000_release_nvm_ich8lan,
32557 diff -urNp linux-2.6.32.42/drivers/net/hamradio/6pack.c linux-2.6.32.42/drivers/net/hamradio/6pack.c
32558 --- linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
32559 +++ linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
32560 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
32561 unsigned char buf[512];
32562 int count1;
32563
32564 + pax_track_stack();
32565 +
32566 if (!count)
32567 return;
32568
32569 diff -urNp linux-2.6.32.42/drivers/net/ibmveth.c linux-2.6.32.42/drivers/net/ibmveth.c
32570 --- linux-2.6.32.42/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
32571 +++ linux-2.6.32.42/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
32572 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
32573 NULL,
32574 };
32575
32576 -static struct sysfs_ops veth_pool_ops = {
32577 +static const struct sysfs_ops veth_pool_ops = {
32578 .show = veth_pool_show,
32579 .store = veth_pool_store,
32580 };
32581 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_82575.c linux-2.6.32.42/drivers/net/igb/e1000_82575.c
32582 --- linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
32583 +++ linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
32584 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
32585 wr32(E1000_VT_CTL, vt_ctl);
32586 }
32587
32588 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
32589 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
32590 .reset_hw = igb_reset_hw_82575,
32591 .init_hw = igb_init_hw_82575,
32592 .check_for_link = igb_check_for_link_82575,
32593 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
32594 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
32595 };
32596
32597 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
32598 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
32599 .acquire = igb_acquire_phy_82575,
32600 .get_cfg_done = igb_get_cfg_done_82575,
32601 .release = igb_release_phy_82575,
32602 };
32603
32604 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32605 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32606 .acquire = igb_acquire_nvm_82575,
32607 .read = igb_read_nvm_eerd,
32608 .release = igb_release_nvm_82575,
32609 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_hw.h linux-2.6.32.42/drivers/net/igb/e1000_hw.h
32610 --- linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32611 +++ linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32612 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
32613 };
32614
32615 struct e1000_nvm_operations {
32616 - s32 (*acquire)(struct e1000_hw *);
32617 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32618 - void (*release)(struct e1000_hw *);
32619 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32620 + s32 (* const acquire)(struct e1000_hw *);
32621 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32622 + void (* const release)(struct e1000_hw *);
32623 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32624 };
32625
32626 struct e1000_info {
32627 s32 (*get_invariants)(struct e1000_hw *);
32628 - struct e1000_mac_operations *mac_ops;
32629 - struct e1000_phy_operations *phy_ops;
32630 - struct e1000_nvm_operations *nvm_ops;
32631 + const struct e1000_mac_operations *mac_ops;
32632 + const struct e1000_phy_operations *phy_ops;
32633 + const struct e1000_nvm_operations *nvm_ops;
32634 };
32635
32636 extern const struct e1000_info e1000_82575_info;
32637 diff -urNp linux-2.6.32.42/drivers/net/iseries_veth.c linux-2.6.32.42/drivers/net/iseries_veth.c
32638 --- linux-2.6.32.42/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32639 +++ linux-2.6.32.42/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32640 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32641 NULL
32642 };
32643
32644 -static struct sysfs_ops veth_cnx_sysfs_ops = {
32645 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
32646 .show = veth_cnx_attribute_show
32647 };
32648
32649 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32650 NULL
32651 };
32652
32653 -static struct sysfs_ops veth_port_sysfs_ops = {
32654 +static const struct sysfs_ops veth_port_sysfs_ops = {
32655 .show = veth_port_attribute_show
32656 };
32657
32658 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c
32659 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32660 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32661 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32662 u32 rctl;
32663 int i;
32664
32665 + pax_track_stack();
32666 +
32667 /* Check for Promiscuous and All Multicast modes */
32668
32669 rctl = IXGB_READ_REG(hw, RCTL);
32670 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c
32671 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32672 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32673 @@ -260,6 +260,9 @@ void __devinit
32674 ixgb_check_options(struct ixgb_adapter *adapter)
32675 {
32676 int bd = adapter->bd_number;
32677 +
32678 + pax_track_stack();
32679 +
32680 if (bd >= IXGB_MAX_NIC) {
32681 printk(KERN_NOTICE
32682 "Warning: no configuration for board #%i\n", bd);
32683 diff -urNp linux-2.6.32.42/drivers/net/mlx4/main.c linux-2.6.32.42/drivers/net/mlx4/main.c
32684 --- linux-2.6.32.42/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32685 +++ linux-2.6.32.42/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32686 @@ -38,6 +38,7 @@
32687 #include <linux/errno.h>
32688 #include <linux/pci.h>
32689 #include <linux/dma-mapping.h>
32690 +#include <linux/sched.h>
32691
32692 #include <linux/mlx4/device.h>
32693 #include <linux/mlx4/doorbell.h>
32694 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32695 u64 icm_size;
32696 int err;
32697
32698 + pax_track_stack();
32699 +
32700 err = mlx4_QUERY_FW(dev);
32701 if (err) {
32702 if (err == -EACCES)
32703 diff -urNp linux-2.6.32.42/drivers/net/niu.c linux-2.6.32.42/drivers/net/niu.c
32704 --- linux-2.6.32.42/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32705 +++ linux-2.6.32.42/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32706 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32707 int i, num_irqs, err;
32708 u8 first_ldg;
32709
32710 + pax_track_stack();
32711 +
32712 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32713 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32714 ldg_num_map[i] = first_ldg + i;
32715 diff -urNp linux-2.6.32.42/drivers/net/pcnet32.c linux-2.6.32.42/drivers/net/pcnet32.c
32716 --- linux-2.6.32.42/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32717 +++ linux-2.6.32.42/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32718 @@ -79,7 +79,7 @@ static int cards_found;
32719 /*
32720 * VLB I/O addresses
32721 */
32722 -static unsigned int pcnet32_portlist[] __initdata =
32723 +static unsigned int pcnet32_portlist[] __devinitdata =
32724 { 0x300, 0x320, 0x340, 0x360, 0 };
32725
32726 static int pcnet32_debug = 0;
32727 diff -urNp linux-2.6.32.42/drivers/net/tg3.h linux-2.6.32.42/drivers/net/tg3.h
32728 --- linux-2.6.32.42/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32729 +++ linux-2.6.32.42/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32730 @@ -95,6 +95,7 @@
32731 #define CHIPREV_ID_5750_A0 0x4000
32732 #define CHIPREV_ID_5750_A1 0x4001
32733 #define CHIPREV_ID_5750_A3 0x4003
32734 +#define CHIPREV_ID_5750_C1 0x4201
32735 #define CHIPREV_ID_5750_C2 0x4202
32736 #define CHIPREV_ID_5752_A0_HW 0x5000
32737 #define CHIPREV_ID_5752_A0 0x6000
32738 diff -urNp linux-2.6.32.42/drivers/net/tulip/de2104x.c linux-2.6.32.42/drivers/net/tulip/de2104x.c
32739 --- linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32740 +++ linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32741 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32742 struct de_srom_info_leaf *il;
32743 void *bufp;
32744
32745 + pax_track_stack();
32746 +
32747 /* download entire eeprom */
32748 for (i = 0; i < DE_EEPROM_WORDS; i++)
32749 ((__le16 *)ee_data)[i] =
32750 diff -urNp linux-2.6.32.42/drivers/net/tulip/de4x5.c linux-2.6.32.42/drivers/net/tulip/de4x5.c
32751 --- linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32752 +++ linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32753 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32754 for (i=0; i<ETH_ALEN; i++) {
32755 tmp.addr[i] = dev->dev_addr[i];
32756 }
32757 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32758 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32759 break;
32760
32761 case DE4X5_SET_HWADDR: /* Set the hardware address */
32762 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32763 spin_lock_irqsave(&lp->lock, flags);
32764 memcpy(&statbuf, &lp->pktStats, ioc->len);
32765 spin_unlock_irqrestore(&lp->lock, flags);
32766 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32767 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32768 return -EFAULT;
32769 break;
32770 }
32771 diff -urNp linux-2.6.32.42/drivers/net/usb/hso.c linux-2.6.32.42/drivers/net/usb/hso.c
32772 --- linux-2.6.32.42/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32773 +++ linux-2.6.32.42/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32774 @@ -71,7 +71,7 @@
32775 #include <asm/byteorder.h>
32776 #include <linux/serial_core.h>
32777 #include <linux/serial.h>
32778 -
32779 +#include <asm/local.h>
32780
32781 #define DRIVER_VERSION "1.2"
32782 #define MOD_AUTHOR "Option Wireless"
32783 @@ -258,7 +258,7 @@ struct hso_serial {
32784
32785 /* from usb_serial_port */
32786 struct tty_struct *tty;
32787 - int open_count;
32788 + local_t open_count;
32789 spinlock_t serial_lock;
32790
32791 int (*write_data) (struct hso_serial *serial);
32792 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32793 struct urb *urb;
32794
32795 urb = serial->rx_urb[0];
32796 - if (serial->open_count > 0) {
32797 + if (local_read(&serial->open_count) > 0) {
32798 count = put_rxbuf_data(urb, serial);
32799 if (count == -1)
32800 return;
32801 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32802 DUMP1(urb->transfer_buffer, urb->actual_length);
32803
32804 /* Anyone listening? */
32805 - if (serial->open_count == 0)
32806 + if (local_read(&serial->open_count) == 0)
32807 return;
32808
32809 if (status == 0) {
32810 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32811 spin_unlock_irq(&serial->serial_lock);
32812
32813 /* check for port already opened, if not set the termios */
32814 - serial->open_count++;
32815 - if (serial->open_count == 1) {
32816 + if (local_inc_return(&serial->open_count) == 1) {
32817 tty->low_latency = 1;
32818 serial->rx_state = RX_IDLE;
32819 /* Force default termio settings */
32820 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32821 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32822 if (result) {
32823 hso_stop_serial_device(serial->parent);
32824 - serial->open_count--;
32825 + local_dec(&serial->open_count);
32826 kref_put(&serial->parent->ref, hso_serial_ref_free);
32827 }
32828 } else {
32829 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32830
32831 /* reset the rts and dtr */
32832 /* do the actual close */
32833 - serial->open_count--;
32834 + local_dec(&serial->open_count);
32835
32836 - if (serial->open_count <= 0) {
32837 - serial->open_count = 0;
32838 + if (local_read(&serial->open_count) <= 0) {
32839 + local_set(&serial->open_count, 0);
32840 spin_lock_irq(&serial->serial_lock);
32841 if (serial->tty == tty) {
32842 serial->tty->driver_data = NULL;
32843 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32844
32845 /* the actual setup */
32846 spin_lock_irqsave(&serial->serial_lock, flags);
32847 - if (serial->open_count)
32848 + if (local_read(&serial->open_count))
32849 _hso_serial_set_termios(tty, old);
32850 else
32851 tty->termios = old;
32852 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32853 /* Start all serial ports */
32854 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32855 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32856 - if (dev2ser(serial_table[i])->open_count) {
32857 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32858 result =
32859 hso_start_serial_device(serial_table[i], GFP_NOIO);
32860 hso_kick_transmit(dev2ser(serial_table[i]));
32861 diff -urNp linux-2.6.32.42/drivers/net/vxge/vxge-main.c linux-2.6.32.42/drivers/net/vxge/vxge-main.c
32862 --- linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32863 +++ linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32864 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32865 struct sk_buff *completed[NR_SKB_COMPLETED];
32866 int more;
32867
32868 + pax_track_stack();
32869 +
32870 do {
32871 more = 0;
32872 skb_ptr = completed;
32873 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32874 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32875 int index;
32876
32877 + pax_track_stack();
32878 +
32879 /*
32880 * Filling
32881 * - itable with bucket numbers
32882 diff -urNp linux-2.6.32.42/drivers/net/wan/cycx_x25.c linux-2.6.32.42/drivers/net/wan/cycx_x25.c
32883 --- linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32884 +++ linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32885 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32886 unsigned char hex[1024],
32887 * phex = hex;
32888
32889 + pax_track_stack();
32890 +
32891 if (len >= (sizeof(hex) / 2))
32892 len = (sizeof(hex) / 2) - 1;
32893
32894 diff -urNp linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c
32895 --- linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32896 +++ linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32897 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32898 int do_autopm = 1;
32899 DECLARE_COMPLETION_ONSTACK(notif_completion);
32900
32901 + pax_track_stack();
32902 +
32903 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32904 i2400m, ack, ack_size);
32905 BUG_ON(_ack == i2400m->bm_ack_buf);
32906 diff -urNp linux-2.6.32.42/drivers/net/wireless/airo.c linux-2.6.32.42/drivers/net/wireless/airo.c
32907 --- linux-2.6.32.42/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32908 +++ linux-2.6.32.42/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32909 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32910 BSSListElement * loop_net;
32911 BSSListElement * tmp_net;
32912
32913 + pax_track_stack();
32914 +
32915 /* Blow away current list of scan results */
32916 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32917 list_move_tail (&loop_net->list, &ai->network_free_list);
32918 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32919 WepKeyRid wkr;
32920 int rc;
32921
32922 + pax_track_stack();
32923 +
32924 memset( &mySsid, 0, sizeof( mySsid ) );
32925 kfree (ai->flash);
32926 ai->flash = NULL;
32927 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32928 __le32 *vals = stats.vals;
32929 int len;
32930
32931 + pax_track_stack();
32932 +
32933 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32934 return -ENOMEM;
32935 data = (struct proc_data *)file->private_data;
32936 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32937 /* If doLoseSync is not 1, we won't do a Lose Sync */
32938 int doLoseSync = -1;
32939
32940 + pax_track_stack();
32941 +
32942 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32943 return -ENOMEM;
32944 data = (struct proc_data *)file->private_data;
32945 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32946 int i;
32947 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32948
32949 + pax_track_stack();
32950 +
32951 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32952 if (!qual)
32953 return -ENOMEM;
32954 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32955 CapabilityRid cap_rid;
32956 __le32 *vals = stats_rid.vals;
32957
32958 + pax_track_stack();
32959 +
32960 /* Get stats out of the card */
32961 clear_bit(JOB_WSTATS, &local->jobs);
32962 if (local->power.event) {
32963 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c
32964 --- linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32965 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32966 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32967 unsigned int v;
32968 u64 tsf;
32969
32970 + pax_track_stack();
32971 +
32972 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32973 len += snprintf(buf+len, sizeof(buf)-len,
32974 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32975 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32976 unsigned int len = 0;
32977 unsigned int i;
32978
32979 + pax_track_stack();
32980 +
32981 len += snprintf(buf+len, sizeof(buf)-len,
32982 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32983
32984 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c
32985 --- linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32986 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32987 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32988 char buf[512];
32989 unsigned int len = 0;
32990
32991 + pax_track_stack();
32992 +
32993 len += snprintf(buf + len, sizeof(buf) - len,
32994 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32995 len += snprintf(buf + len, sizeof(buf) - len,
32996 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32997 int i;
32998 u8 addr[ETH_ALEN];
32999
33000 + pax_track_stack();
33001 +
33002 len += snprintf(buf + len, sizeof(buf) - len,
33003 "primary: %s (%s chan=%d ht=%d)\n",
33004 wiphy_name(sc->pri_wiphy->hw->wiphy),
33005 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c
33006 --- linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33007 +++ linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33008 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
33009 struct b43_debugfs_fops {
33010 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
33011 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
33012 - struct file_operations fops;
33013 + const struct file_operations fops;
33014 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
33015 size_t file_struct_offset;
33016 };
33017 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c
33018 --- linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33019 +++ linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33020 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
33021 struct b43legacy_debugfs_fops {
33022 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
33023 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
33024 - struct file_operations fops;
33025 + const struct file_operations fops;
33026 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
33027 size_t file_struct_offset;
33028 /* Take wl->irq_lock before calling read/write? */
33029 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c
33030 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
33031 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
33032 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
33033 int err;
33034 DECLARE_SSID_BUF(ssid);
33035
33036 + pax_track_stack();
33037 +
33038 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
33039
33040 if (ssid_len)
33041 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
33042 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
33043 int err;
33044
33045 + pax_track_stack();
33046 +
33047 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
33048 idx, keylen, len);
33049
33050 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c
33051 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
33052 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
33053 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
33054 unsigned long flags;
33055 DECLARE_SSID_BUF(ssid);
33056
33057 + pax_track_stack();
33058 +
33059 LIBIPW_DEBUG_SCAN("'%s' (%pM"
33060 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
33061 print_ssid(ssid, info_element->data, info_element->len),
33062 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c
33063 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
33064 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
33065 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
33066 },
33067 };
33068
33069 -static struct iwl_ops iwl1000_ops = {
33070 +static const struct iwl_ops iwl1000_ops = {
33071 .ucode = &iwl5000_ucode,
33072 .lib = &iwl1000_lib,
33073 .hcmd = &iwl5000_hcmd,
33074 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c
33075 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
33076 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
33077 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
33078 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
33079 };
33080
33081 -static struct iwl_ops iwl3945_ops = {
33082 +static const struct iwl_ops iwl3945_ops = {
33083 .ucode = &iwl3945_ucode,
33084 .lib = &iwl3945_lib,
33085 .hcmd = &iwl3945_hcmd,
33086 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c
33087 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
33088 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
33089 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
33090 },
33091 };
33092
33093 -static struct iwl_ops iwl4965_ops = {
33094 +static const struct iwl_ops iwl4965_ops = {
33095 .ucode = &iwl4965_ucode,
33096 .lib = &iwl4965_lib,
33097 .hcmd = &iwl4965_hcmd,
33098 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c
33099 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
33100 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
33101 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
33102 },
33103 };
33104
33105 -struct iwl_ops iwl5000_ops = {
33106 +const struct iwl_ops iwl5000_ops = {
33107 .ucode = &iwl5000_ucode,
33108 .lib = &iwl5000_lib,
33109 .hcmd = &iwl5000_hcmd,
33110 .utils = &iwl5000_hcmd_utils,
33111 };
33112
33113 -static struct iwl_ops iwl5150_ops = {
33114 +static const struct iwl_ops iwl5150_ops = {
33115 .ucode = &iwl5000_ucode,
33116 .lib = &iwl5150_lib,
33117 .hcmd = &iwl5000_hcmd,
33118 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c
33119 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
33120 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
33121 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
33122 .calc_rssi = iwl5000_calc_rssi,
33123 };
33124
33125 -static struct iwl_ops iwl6000_ops = {
33126 +static const struct iwl_ops iwl6000_ops = {
33127 .ucode = &iwl5000_ucode,
33128 .lib = &iwl6000_lib,
33129 .hcmd = &iwl5000_hcmd,
33130 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
33131 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
33132 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
33133 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
33134 u8 active_index = 0;
33135 s32 tpt = 0;
33136
33137 + pax_track_stack();
33138 +
33139 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
33140
33141 if (!ieee80211_is_data(hdr->frame_control) ||
33142 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
33143 u8 valid_tx_ant = 0;
33144 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
33145
33146 + pax_track_stack();
33147 +
33148 /* Override starting rate (index 0) if needed for debug purposes */
33149 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
33150
33151 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c
33152 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
33153 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
33154 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
33155 int pos = 0;
33156 const size_t bufsz = sizeof(buf);
33157
33158 + pax_track_stack();
33159 +
33160 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
33161 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
33162 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
33163 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
33164 const size_t bufsz = sizeof(buf);
33165 ssize_t ret;
33166
33167 + pax_track_stack();
33168 +
33169 for (i = 0; i < AC_NUM; i++) {
33170 pos += scnprintf(buf + pos, bufsz - pos,
33171 "\tcw_min\tcw_max\taifsn\ttxop\n");
33172 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h
33173 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
33174 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
33175 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
33176 #endif
33177
33178 #else
33179 -#define IWL_DEBUG(__priv, level, fmt, args...)
33180 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33181 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33182 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33183 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33184 void *p, u32 len)
33185 {}
33186 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h
33187 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
33188 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
33189 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
33190
33191 /* shared structures from iwl-5000.c */
33192 extern struct iwl_mod_params iwl50_mod_params;
33193 -extern struct iwl_ops iwl5000_ops;
33194 +extern const struct iwl_ops iwl5000_ops;
33195 extern struct iwl_ucode_ops iwl5000_ucode;
33196 extern struct iwl_lib_ops iwl5000_lib;
33197 extern struct iwl_hcmd_ops iwl5000_hcmd;
33198 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c
33199 --- linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33200 +++ linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
33201 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33202 int buf_len = 512;
33203 size_t len = 0;
33204
33205 + pax_track_stack();
33206 +
33207 if (*ppos != 0)
33208 return 0;
33209 if (count < sizeof(buf))
33210 diff -urNp linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c
33211 --- linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33212 +++ linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33213 @@ -708,7 +708,7 @@ out_unlock:
33214 struct lbs_debugfs_files {
33215 const char *name;
33216 int perm;
33217 - struct file_operations fops;
33218 + const struct file_operations fops;
33219 };
33220
33221 static const struct lbs_debugfs_files debugfs_files[] = {
33222 diff -urNp linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c
33223 --- linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
33224 +++ linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
33225 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
33226
33227 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
33228
33229 - if (rts_threshold < 0 || rts_threshold > 2347)
33230 + if (rts_threshold > 2347)
33231 rts_threshold = 2347;
33232
33233 tmp = cpu_to_le32(rts_threshold);
33234 diff -urNp linux-2.6.32.42/drivers/oprofile/buffer_sync.c linux-2.6.32.42/drivers/oprofile/buffer_sync.c
33235 --- linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
33236 +++ linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
33237 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
33238 if (cookie == NO_COOKIE)
33239 offset = pc;
33240 if (cookie == INVALID_COOKIE) {
33241 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33242 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33243 offset = pc;
33244 }
33245 if (cookie != last_cookie) {
33246 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
33247 /* add userspace sample */
33248
33249 if (!mm) {
33250 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33251 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33252 return 0;
33253 }
33254
33255 cookie = lookup_dcookie(mm, s->eip, &offset);
33256
33257 if (cookie == INVALID_COOKIE) {
33258 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33259 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33260 return 0;
33261 }
33262
33263 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
33264 /* ignore backtraces if failed to add a sample */
33265 if (state == sb_bt_start) {
33266 state = sb_bt_ignore;
33267 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33268 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33269 }
33270 }
33271 release_mm(mm);
33272 diff -urNp linux-2.6.32.42/drivers/oprofile/event_buffer.c linux-2.6.32.42/drivers/oprofile/event_buffer.c
33273 --- linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
33274 +++ linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
33275 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33276 }
33277
33278 if (buffer_pos == buffer_size) {
33279 - atomic_inc(&oprofile_stats.event_lost_overflow);
33280 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33281 return;
33282 }
33283
33284 diff -urNp linux-2.6.32.42/drivers/oprofile/oprof.c linux-2.6.32.42/drivers/oprofile/oprof.c
33285 --- linux-2.6.32.42/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
33286 +++ linux-2.6.32.42/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
33287 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33288 if (oprofile_ops.switch_events())
33289 return;
33290
33291 - atomic_inc(&oprofile_stats.multiplex_counter);
33292 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33293 start_switch_worker();
33294 }
33295
33296 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofilefs.c linux-2.6.32.42/drivers/oprofile/oprofilefs.c
33297 --- linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
33298 +++ linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
33299 @@ -187,7 +187,7 @@ static const struct file_operations atom
33300
33301
33302 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33303 - char const *name, atomic_t *val)
33304 + char const *name, atomic_unchecked_t *val)
33305 {
33306 struct dentry *d = __oprofilefs_create_file(sb, root, name,
33307 &atomic_ro_fops, 0444);
33308 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.c linux-2.6.32.42/drivers/oprofile/oprofile_stats.c
33309 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
33310 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
33311 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33312 cpu_buf->sample_invalid_eip = 0;
33313 }
33314
33315 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33316 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33317 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33318 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33319 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33320 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33321 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33322 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33323 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33324 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33325 }
33326
33327
33328 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.h linux-2.6.32.42/drivers/oprofile/oprofile_stats.h
33329 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
33330 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
33331 @@ -13,11 +13,11 @@
33332 #include <asm/atomic.h>
33333
33334 struct oprofile_stat_struct {
33335 - atomic_t sample_lost_no_mm;
33336 - atomic_t sample_lost_no_mapping;
33337 - atomic_t bt_lost_no_mapping;
33338 - atomic_t event_lost_overflow;
33339 - atomic_t multiplex_counter;
33340 + atomic_unchecked_t sample_lost_no_mm;
33341 + atomic_unchecked_t sample_lost_no_mapping;
33342 + atomic_unchecked_t bt_lost_no_mapping;
33343 + atomic_unchecked_t event_lost_overflow;
33344 + atomic_unchecked_t multiplex_counter;
33345 };
33346
33347 extern struct oprofile_stat_struct oprofile_stats;
33348 diff -urNp linux-2.6.32.42/drivers/parisc/pdc_stable.c linux-2.6.32.42/drivers/parisc/pdc_stable.c
33349 --- linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
33350 +++ linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
33351 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
33352 return ret;
33353 }
33354
33355 -static struct sysfs_ops pdcspath_attr_ops = {
33356 +static const struct sysfs_ops pdcspath_attr_ops = {
33357 .show = pdcspath_attr_show,
33358 .store = pdcspath_attr_store,
33359 };
33360 diff -urNp linux-2.6.32.42/drivers/parport/procfs.c linux-2.6.32.42/drivers/parport/procfs.c
33361 --- linux-2.6.32.42/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
33362 +++ linux-2.6.32.42/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
33363 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33364
33365 *ppos += len;
33366
33367 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33368 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33369 }
33370
33371 #ifdef CONFIG_PARPORT_1284
33372 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33373
33374 *ppos += len;
33375
33376 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33377 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33378 }
33379 #endif /* IEEE1284.3 support. */
33380
33381 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c
33382 --- linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
33383 +++ linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
33384 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
33385 }
33386
33387
33388 -static struct acpi_dock_ops acpiphp_dock_ops = {
33389 +static const struct acpi_dock_ops acpiphp_dock_ops = {
33390 .handler = handle_hotplug_event_func,
33391 };
33392
33393 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c
33394 --- linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
33395 +++ linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
33396 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33397
33398 void compaq_nvram_init (void __iomem *rom_start)
33399 {
33400 +
33401 +#ifndef CONFIG_PAX_KERNEXEC
33402 if (rom_start) {
33403 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33404 }
33405 +#endif
33406 +
33407 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33408
33409 /* initialize our int15 lock */
33410 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/fakephp.c linux-2.6.32.42/drivers/pci/hotplug/fakephp.c
33411 --- linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
33412 +++ linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
33413 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
33414 }
33415
33416 static struct kobj_type legacy_ktype = {
33417 - .sysfs_ops = &(struct sysfs_ops){
33418 + .sysfs_ops = &(const struct sysfs_ops){
33419 .store = legacy_store, .show = legacy_show
33420 },
33421 .release = &legacy_release,
33422 diff -urNp linux-2.6.32.42/drivers/pci/intel-iommu.c linux-2.6.32.42/drivers/pci/intel-iommu.c
33423 --- linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
33424 +++ linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
33425 @@ -2643,7 +2643,7 @@ error:
33426 return 0;
33427 }
33428
33429 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
33430 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
33431 unsigned long offset, size_t size,
33432 enum dma_data_direction dir,
33433 struct dma_attrs *attrs)
33434 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
33435 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
33436 }
33437
33438 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33439 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33440 size_t size, enum dma_data_direction dir,
33441 struct dma_attrs *attrs)
33442 {
33443 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
33444 }
33445 }
33446
33447 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
33448 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
33449 dma_addr_t *dma_handle, gfp_t flags)
33450 {
33451 void *vaddr;
33452 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
33453 return NULL;
33454 }
33455
33456 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33457 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33458 dma_addr_t dma_handle)
33459 {
33460 int order;
33461 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
33462 free_pages((unsigned long)vaddr, order);
33463 }
33464
33465 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33466 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33467 int nelems, enum dma_data_direction dir,
33468 struct dma_attrs *attrs)
33469 {
33470 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
33471 return nelems;
33472 }
33473
33474 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33475 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33476 enum dma_data_direction dir, struct dma_attrs *attrs)
33477 {
33478 int i;
33479 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
33480 return nelems;
33481 }
33482
33483 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33484 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33485 {
33486 return !dma_addr;
33487 }
33488
33489 -struct dma_map_ops intel_dma_ops = {
33490 +const struct dma_map_ops intel_dma_ops = {
33491 .alloc_coherent = intel_alloc_coherent,
33492 .free_coherent = intel_free_coherent,
33493 .map_sg = intel_map_sg,
33494 diff -urNp linux-2.6.32.42/drivers/pci/pcie/aspm.c linux-2.6.32.42/drivers/pci/pcie/aspm.c
33495 --- linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
33496 +++ linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
33497 @@ -27,9 +27,9 @@
33498 #define MODULE_PARAM_PREFIX "pcie_aspm."
33499
33500 /* Note: those are not register definitions */
33501 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33502 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33503 -#define ASPM_STATE_L1 (4) /* L1 state */
33504 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33505 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33506 +#define ASPM_STATE_L1 (4U) /* L1 state */
33507 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33508 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33509
33510 diff -urNp linux-2.6.32.42/drivers/pci/probe.c linux-2.6.32.42/drivers/pci/probe.c
33511 --- linux-2.6.32.42/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
33512 +++ linux-2.6.32.42/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
33513 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
33514 return ret;
33515 }
33516
33517 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
33518 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
33519 struct device_attribute *attr,
33520 char *buf)
33521 {
33522 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
33523 }
33524
33525 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
33526 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
33527 struct device_attribute *attr,
33528 char *buf)
33529 {
33530 diff -urNp linux-2.6.32.42/drivers/pci/proc.c linux-2.6.32.42/drivers/pci/proc.c
33531 --- linux-2.6.32.42/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
33532 +++ linux-2.6.32.42/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
33533 @@ -480,7 +480,16 @@ static const struct file_operations proc
33534 static int __init pci_proc_init(void)
33535 {
33536 struct pci_dev *dev = NULL;
33537 +
33538 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33539 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33540 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33541 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33542 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33543 +#endif
33544 +#else
33545 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33546 +#endif
33547 proc_create("devices", 0, proc_bus_pci_dir,
33548 &proc_bus_pci_dev_operations);
33549 proc_initialized = 1;
33550 diff -urNp linux-2.6.32.42/drivers/pci/slot.c linux-2.6.32.42/drivers/pci/slot.c
33551 --- linux-2.6.32.42/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
33552 +++ linux-2.6.32.42/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
33553 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
33554 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
33555 }
33556
33557 -static struct sysfs_ops pci_slot_sysfs_ops = {
33558 +static const struct sysfs_ops pci_slot_sysfs_ops = {
33559 .show = pci_slot_attr_show,
33560 .store = pci_slot_attr_store,
33561 };
33562 diff -urNp linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c
33563 --- linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
33564 +++ linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
33565 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
33566 return -EFAULT;
33567 }
33568 }
33569 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33570 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33571 if (!buf)
33572 return -ENOMEM;
33573
33574 diff -urNp linux-2.6.32.42/drivers/platform/x86/acer-wmi.c linux-2.6.32.42/drivers/platform/x86/acer-wmi.c
33575 --- linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
33576 +++ linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
33577 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
33578 return 0;
33579 }
33580
33581 -static struct backlight_ops acer_bl_ops = {
33582 +static const struct backlight_ops acer_bl_ops = {
33583 .get_brightness = read_brightness,
33584 .update_status = update_bl_status,
33585 };
33586 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus_acpi.c linux-2.6.32.42/drivers/platform/x86/asus_acpi.c
33587 --- linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
33588 +++ linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
33589 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
33590 return 0;
33591 }
33592
33593 -static struct backlight_ops asus_backlight_data = {
33594 +static const struct backlight_ops asus_backlight_data = {
33595 .get_brightness = read_brightness,
33596 .update_status = set_brightness_status,
33597 };
33598 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus-laptop.c linux-2.6.32.42/drivers/platform/x86/asus-laptop.c
33599 --- linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
33600 +++ linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
33601 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
33602 */
33603 static int read_brightness(struct backlight_device *bd);
33604 static int update_bl_status(struct backlight_device *bd);
33605 -static struct backlight_ops asusbl_ops = {
33606 +static const struct backlight_ops asusbl_ops = {
33607 .get_brightness = read_brightness,
33608 .update_status = update_bl_status,
33609 };
33610 diff -urNp linux-2.6.32.42/drivers/platform/x86/compal-laptop.c linux-2.6.32.42/drivers/platform/x86/compal-laptop.c
33611 --- linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33612 +++ linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33613 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33614 return set_lcd_level(b->props.brightness);
33615 }
33616
33617 -static struct backlight_ops compalbl_ops = {
33618 +static const struct backlight_ops compalbl_ops = {
33619 .get_brightness = bl_get_brightness,
33620 .update_status = bl_update_status,
33621 };
33622 diff -urNp linux-2.6.32.42/drivers/platform/x86/dell-laptop.c linux-2.6.32.42/drivers/platform/x86/dell-laptop.c
33623 --- linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33624 +++ linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33625 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33626 return buffer.output[1];
33627 }
33628
33629 -static struct backlight_ops dell_ops = {
33630 +static const struct backlight_ops dell_ops = {
33631 .get_brightness = dell_get_intensity,
33632 .update_status = dell_send_intensity,
33633 };
33634 diff -urNp linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c
33635 --- linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33636 +++ linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33637 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33638 */
33639 static int read_brightness(struct backlight_device *bd);
33640 static int update_bl_status(struct backlight_device *bd);
33641 -static struct backlight_ops eeepcbl_ops = {
33642 +static const struct backlight_ops eeepcbl_ops = {
33643 .get_brightness = read_brightness,
33644 .update_status = update_bl_status,
33645 };
33646 diff -urNp linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c
33647 --- linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33648 +++ linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33649 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33650 return ret;
33651 }
33652
33653 -static struct backlight_ops fujitsubl_ops = {
33654 +static const struct backlight_ops fujitsubl_ops = {
33655 .get_brightness = bl_get_brightness,
33656 .update_status = bl_update_status,
33657 };
33658 diff -urNp linux-2.6.32.42/drivers/platform/x86/msi-laptop.c linux-2.6.32.42/drivers/platform/x86/msi-laptop.c
33659 --- linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33660 +++ linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33661 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33662 return set_lcd_level(b->props.brightness);
33663 }
33664
33665 -static struct backlight_ops msibl_ops = {
33666 +static const struct backlight_ops msibl_ops = {
33667 .get_brightness = bl_get_brightness,
33668 .update_status = bl_update_status,
33669 };
33670 diff -urNp linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c
33671 --- linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33672 +++ linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33673 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33674 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33675 }
33676
33677 -static struct backlight_ops pcc_backlight_ops = {
33678 +static const struct backlight_ops pcc_backlight_ops = {
33679 .get_brightness = bl_get,
33680 .update_status = bl_set_status,
33681 };
33682 diff -urNp linux-2.6.32.42/drivers/platform/x86/sony-laptop.c linux-2.6.32.42/drivers/platform/x86/sony-laptop.c
33683 --- linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33684 +++ linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33685 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33686 }
33687
33688 static struct backlight_device *sony_backlight_device;
33689 -static struct backlight_ops sony_backlight_ops = {
33690 +static const struct backlight_ops sony_backlight_ops = {
33691 .update_status = sony_backlight_update_status,
33692 .get_brightness = sony_backlight_get_brightness,
33693 };
33694 diff -urNp linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c
33695 --- linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33696 +++ linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33697 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33698 BACKLIGHT_UPDATE_HOTKEY);
33699 }
33700
33701 -static struct backlight_ops ibm_backlight_data = {
33702 +static const struct backlight_ops ibm_backlight_data = {
33703 .get_brightness = brightness_get,
33704 .update_status = brightness_update_status,
33705 };
33706 diff -urNp linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c
33707 --- linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33708 +++ linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33709 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33710 return AE_OK;
33711 }
33712
33713 -static struct backlight_ops toshiba_backlight_data = {
33714 +static const struct backlight_ops toshiba_backlight_data = {
33715 .get_brightness = get_lcd,
33716 .update_status = set_lcd_status,
33717 };
33718 diff -urNp linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c
33719 --- linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33720 +++ linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33721 @@ -60,7 +60,7 @@ do { \
33722 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33723 } while(0)
33724
33725 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33726 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33727 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33728
33729 /*
33730 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33731
33732 cpu = get_cpu();
33733 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33734 +
33735 + pax_open_kernel();
33736 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33737 + pax_close_kernel();
33738
33739 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33740 spin_lock_irqsave(&pnp_bios_lock, flags);
33741 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33742 :"memory");
33743 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33744
33745 + pax_open_kernel();
33746 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33747 + pax_close_kernel();
33748 +
33749 put_cpu();
33750
33751 /* If we get here and this is set then the PnP BIOS faulted on us. */
33752 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33753 return status;
33754 }
33755
33756 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33757 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33758 {
33759 int i;
33760
33761 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33762 pnp_bios_callpoint.offset = header->fields.pm16offset;
33763 pnp_bios_callpoint.segment = PNP_CS16;
33764
33765 + pax_open_kernel();
33766 +
33767 for_each_possible_cpu(i) {
33768 struct desc_struct *gdt = get_cpu_gdt_table(i);
33769 if (!gdt)
33770 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33771 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33772 (unsigned long)__va(header->fields.pm16dseg));
33773 }
33774 +
33775 + pax_close_kernel();
33776 }
33777 diff -urNp linux-2.6.32.42/drivers/pnp/resource.c linux-2.6.32.42/drivers/pnp/resource.c
33778 --- linux-2.6.32.42/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33779 +++ linux-2.6.32.42/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33780 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33781 return 1;
33782
33783 /* check if the resource is valid */
33784 - if (*irq < 0 || *irq > 15)
33785 + if (*irq > 15)
33786 return 0;
33787
33788 /* check if the resource is reserved */
33789 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33790 return 1;
33791
33792 /* check if the resource is valid */
33793 - if (*dma < 0 || *dma == 4 || *dma > 7)
33794 + if (*dma == 4 || *dma > 7)
33795 return 0;
33796
33797 /* check if the resource is reserved */
33798 diff -urNp linux-2.6.32.42/drivers/rtc/rtc-dev.c linux-2.6.32.42/drivers/rtc/rtc-dev.c
33799 --- linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33800 +++ linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33801 @@ -14,6 +14,7 @@
33802 #include <linux/module.h>
33803 #include <linux/rtc.h>
33804 #include <linux/sched.h>
33805 +#include <linux/grsecurity.h>
33806 #include "rtc-core.h"
33807
33808 static dev_t rtc_devt;
33809 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33810 if (copy_from_user(&tm, uarg, sizeof(tm)))
33811 return -EFAULT;
33812
33813 + gr_log_timechange();
33814 +
33815 return rtc_set_time(rtc, &tm);
33816
33817 case RTC_PIE_ON:
33818 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.c linux-2.6.32.42/drivers/s390/cio/qdio_perf.c
33819 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33820 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33821 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33822 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33823 {
33824 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33825 - (long)atomic_long_read(&perf_stats.qdio_int));
33826 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33827 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33828 - (long)atomic_long_read(&perf_stats.pci_int));
33829 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33830 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33831 - (long)atomic_long_read(&perf_stats.thin_int));
33832 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33833 seq_printf(m, "\n");
33834 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33835 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33836 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33837 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33838 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33839 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33840 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33841 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33842 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33843 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33844 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33845 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33846 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33847 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33848 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33849 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33850 seq_printf(m, "\n");
33851 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33852 - (long)atomic_long_read(&perf_stats.siga_in));
33853 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33854 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33855 - (long)atomic_long_read(&perf_stats.siga_out));
33856 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33857 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33858 - (long)atomic_long_read(&perf_stats.siga_sync));
33859 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33860 seq_printf(m, "\n");
33861 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33862 - (long)atomic_long_read(&perf_stats.inbound_handler));
33863 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33864 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33865 - (long)atomic_long_read(&perf_stats.outbound_handler));
33866 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33867 seq_printf(m, "\n");
33868 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33869 - (long)atomic_long_read(&perf_stats.fast_requeue));
33870 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33871 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33872 - (long)atomic_long_read(&perf_stats.outbound_target_full));
33873 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33874 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33875 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33876 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33877 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33878 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
33879 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33880 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33881 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33882 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33883 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33884 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33885 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33886 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33887 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33888 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33889 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33890 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33891 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33892 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33893 seq_printf(m, "\n");
33894 return 0;
33895 }
33896 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.h linux-2.6.32.42/drivers/s390/cio/qdio_perf.h
33897 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33898 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33899 @@ -13,46 +13,46 @@
33900
33901 struct qdio_perf_stats {
33902 /* interrupt handler calls */
33903 - atomic_long_t qdio_int;
33904 - atomic_long_t pci_int;
33905 - atomic_long_t thin_int;
33906 + atomic_long_unchecked_t qdio_int;
33907 + atomic_long_unchecked_t pci_int;
33908 + atomic_long_unchecked_t thin_int;
33909
33910 /* tasklet runs */
33911 - atomic_long_t tasklet_inbound;
33912 - atomic_long_t tasklet_outbound;
33913 - atomic_long_t tasklet_thinint;
33914 - atomic_long_t tasklet_thinint_loop;
33915 - atomic_long_t thinint_inbound;
33916 - atomic_long_t thinint_inbound_loop;
33917 - atomic_long_t thinint_inbound_loop2;
33918 + atomic_long_unchecked_t tasklet_inbound;
33919 + atomic_long_unchecked_t tasklet_outbound;
33920 + atomic_long_unchecked_t tasklet_thinint;
33921 + atomic_long_unchecked_t tasklet_thinint_loop;
33922 + atomic_long_unchecked_t thinint_inbound;
33923 + atomic_long_unchecked_t thinint_inbound_loop;
33924 + atomic_long_unchecked_t thinint_inbound_loop2;
33925
33926 /* signal adapter calls */
33927 - atomic_long_t siga_out;
33928 - atomic_long_t siga_in;
33929 - atomic_long_t siga_sync;
33930 + atomic_long_unchecked_t siga_out;
33931 + atomic_long_unchecked_t siga_in;
33932 + atomic_long_unchecked_t siga_sync;
33933
33934 /* misc */
33935 - atomic_long_t inbound_handler;
33936 - atomic_long_t outbound_handler;
33937 - atomic_long_t fast_requeue;
33938 - atomic_long_t outbound_target_full;
33939 + atomic_long_unchecked_t inbound_handler;
33940 + atomic_long_unchecked_t outbound_handler;
33941 + atomic_long_unchecked_t fast_requeue;
33942 + atomic_long_unchecked_t outbound_target_full;
33943
33944 /* for debugging */
33945 - atomic_long_t debug_tl_out_timer;
33946 - atomic_long_t debug_stop_polling;
33947 - atomic_long_t debug_eqbs_all;
33948 - atomic_long_t debug_eqbs_incomplete;
33949 - atomic_long_t debug_sqbs_all;
33950 - atomic_long_t debug_sqbs_incomplete;
33951 + atomic_long_unchecked_t debug_tl_out_timer;
33952 + atomic_long_unchecked_t debug_stop_polling;
33953 + atomic_long_unchecked_t debug_eqbs_all;
33954 + atomic_long_unchecked_t debug_eqbs_incomplete;
33955 + atomic_long_unchecked_t debug_sqbs_all;
33956 + atomic_long_unchecked_t debug_sqbs_incomplete;
33957 };
33958
33959 extern struct qdio_perf_stats perf_stats;
33960 extern int qdio_performance_stats;
33961
33962 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
33963 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33964 {
33965 if (qdio_performance_stats)
33966 - atomic_long_inc(count);
33967 + atomic_long_inc_unchecked(count);
33968 }
33969
33970 int qdio_setup_perf_stats(void);
33971 diff -urNp linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c
33972 --- linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33973 +++ linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33974 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33975 u32 actual_fibsize64, actual_fibsize = 0;
33976 int i;
33977
33978 + pax_track_stack();
33979
33980 if (dev->in_reset) {
33981 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33982 diff -urNp linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c
33983 --- linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33984 +++ linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33985 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33986 flash_error_table[i].reason);
33987 }
33988
33989 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33990 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33991 asd_show_update_bios, asd_store_update_bios);
33992
33993 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
33994 diff -urNp linux-2.6.32.42/drivers/scsi/BusLogic.c linux-2.6.32.42/drivers/scsi/BusLogic.c
33995 --- linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33996 +++ linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33997 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33998 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33999 *PrototypeHostAdapter)
34000 {
34001 + pax_track_stack();
34002 +
34003 /*
34004 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
34005 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
34006 diff -urNp linux-2.6.32.42/drivers/scsi/dpt_i2o.c linux-2.6.32.42/drivers/scsi/dpt_i2o.c
34007 --- linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
34008 +++ linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
34009 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
34010 dma_addr_t addr;
34011 ulong flags = 0;
34012
34013 + pax_track_stack();
34014 +
34015 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
34016 // get user msg size in u32s
34017 if(get_user(size, &user_msg[0])){
34018 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
34019 s32 rcode;
34020 dma_addr_t addr;
34021
34022 + pax_track_stack();
34023 +
34024 memset(msg, 0 , sizeof(msg));
34025 len = scsi_bufflen(cmd);
34026 direction = 0x00000000;
34027 diff -urNp linux-2.6.32.42/drivers/scsi/eata.c linux-2.6.32.42/drivers/scsi/eata.c
34028 --- linux-2.6.32.42/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
34029 +++ linux-2.6.32.42/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
34030 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
34031 struct hostdata *ha;
34032 char name[16];
34033
34034 + pax_track_stack();
34035 +
34036 sprintf(name, "%s%d", driver_name, j);
34037
34038 if (!request_region(port_base, REGION_SIZE, driver_name)) {
34039 diff -urNp linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c
34040 --- linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
34041 +++ linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
34042 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
34043 size_t rlen;
34044 size_t dlen;
34045
34046 + pax_track_stack();
34047 +
34048 fiph = (struct fip_header *)skb->data;
34049 sub = fiph->fip_subcode;
34050 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
34051 diff -urNp linux-2.6.32.42/drivers/scsi/gdth.c linux-2.6.32.42/drivers/scsi/gdth.c
34052 --- linux-2.6.32.42/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
34053 +++ linux-2.6.32.42/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
34054 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
34055 ulong flags;
34056 gdth_ha_str *ha;
34057
34058 + pax_track_stack();
34059 +
34060 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
34061 return -EFAULT;
34062 ha = gdth_find_ha(ldrv.ionode);
34063 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
34064 gdth_ha_str *ha;
34065 int rval;
34066
34067 + pax_track_stack();
34068 +
34069 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
34070 res.number >= MAX_HDRIVES)
34071 return -EFAULT;
34072 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
34073 gdth_ha_str *ha;
34074 int rval;
34075
34076 + pax_track_stack();
34077 +
34078 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
34079 return -EFAULT;
34080 ha = gdth_find_ha(gen.ionode);
34081 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
34082 int i;
34083 gdth_cmd_str gdtcmd;
34084 char cmnd[MAX_COMMAND_SIZE];
34085 +
34086 + pax_track_stack();
34087 +
34088 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
34089
34090 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
34091 diff -urNp linux-2.6.32.42/drivers/scsi/gdth_proc.c linux-2.6.32.42/drivers/scsi/gdth_proc.c
34092 --- linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
34093 +++ linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
34094 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
34095 ulong64 paddr;
34096
34097 char cmnd[MAX_COMMAND_SIZE];
34098 +
34099 + pax_track_stack();
34100 +
34101 memset(cmnd, 0xff, 12);
34102 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
34103
34104 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
34105 gdth_hget_str *phg;
34106 char cmnd[MAX_COMMAND_SIZE];
34107
34108 + pax_track_stack();
34109 +
34110 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
34111 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
34112 if (!gdtcmd || !estr)
34113 diff -urNp linux-2.6.32.42/drivers/scsi/hosts.c linux-2.6.32.42/drivers/scsi/hosts.c
34114 --- linux-2.6.32.42/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
34115 +++ linux-2.6.32.42/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
34116 @@ -40,7 +40,7 @@
34117 #include "scsi_logging.h"
34118
34119
34120 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34121 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34122
34123
34124 static void scsi_host_cls_release(struct device *dev)
34125 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
34126 * subtract one because we increment first then return, but we need to
34127 * know what the next host number was before increment
34128 */
34129 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34130 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34131 shost->dma_channel = 0xff;
34132
34133 /* These three are default values which can be overridden */
34134 diff -urNp linux-2.6.32.42/drivers/scsi/ipr.c linux-2.6.32.42/drivers/scsi/ipr.c
34135 --- linux-2.6.32.42/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
34136 +++ linux-2.6.32.42/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
34137 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
34138 return true;
34139 }
34140
34141 -static struct ata_port_operations ipr_sata_ops = {
34142 +static const struct ata_port_operations ipr_sata_ops = {
34143 .phy_reset = ipr_ata_phy_reset,
34144 .hardreset = ipr_sata_reset,
34145 .post_internal_cmd = ipr_ata_post_internal,
34146 diff -urNp linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c
34147 --- linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
34148 +++ linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
34149 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
34150 * all together if not used XXX
34151 */
34152 struct {
34153 - atomic_t no_free_exch;
34154 - atomic_t no_free_exch_xid;
34155 - atomic_t xid_not_found;
34156 - atomic_t xid_busy;
34157 - atomic_t seq_not_found;
34158 - atomic_t non_bls_resp;
34159 + atomic_unchecked_t no_free_exch;
34160 + atomic_unchecked_t no_free_exch_xid;
34161 + atomic_unchecked_t xid_not_found;
34162 + atomic_unchecked_t xid_busy;
34163 + atomic_unchecked_t seq_not_found;
34164 + atomic_unchecked_t non_bls_resp;
34165 } stats;
34166 };
34167 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
34168 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
34169 /* allocate memory for exchange */
34170 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34171 if (!ep) {
34172 - atomic_inc(&mp->stats.no_free_exch);
34173 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34174 goto out;
34175 }
34176 memset(ep, 0, sizeof(*ep));
34177 @@ -557,7 +557,7 @@ out:
34178 return ep;
34179 err:
34180 spin_unlock_bh(&pool->lock);
34181 - atomic_inc(&mp->stats.no_free_exch_xid);
34182 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34183 mempool_free(ep, mp->ep_pool);
34184 return NULL;
34185 }
34186 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34187 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34188 ep = fc_exch_find(mp, xid);
34189 if (!ep) {
34190 - atomic_inc(&mp->stats.xid_not_found);
34191 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34192 reject = FC_RJT_OX_ID;
34193 goto out;
34194 }
34195 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34196 ep = fc_exch_find(mp, xid);
34197 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34198 if (ep) {
34199 - atomic_inc(&mp->stats.xid_busy);
34200 + atomic_inc_unchecked(&mp->stats.xid_busy);
34201 reject = FC_RJT_RX_ID;
34202 goto rel;
34203 }
34204 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34205 }
34206 xid = ep->xid; /* get our XID */
34207 } else if (!ep) {
34208 - atomic_inc(&mp->stats.xid_not_found);
34209 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34210 reject = FC_RJT_RX_ID; /* XID not found */
34211 goto out;
34212 }
34213 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34214 } else {
34215 sp = &ep->seq;
34216 if (sp->id != fh->fh_seq_id) {
34217 - atomic_inc(&mp->stats.seq_not_found);
34218 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34219 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
34220 goto rel;
34221 }
34222 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
34223
34224 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34225 if (!ep) {
34226 - atomic_inc(&mp->stats.xid_not_found);
34227 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34228 goto out;
34229 }
34230 if (ep->esb_stat & ESB_ST_COMPLETE) {
34231 - atomic_inc(&mp->stats.xid_not_found);
34232 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34233 goto out;
34234 }
34235 if (ep->rxid == FC_XID_UNKNOWN)
34236 ep->rxid = ntohs(fh->fh_rx_id);
34237 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34238 - atomic_inc(&mp->stats.xid_not_found);
34239 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34240 goto rel;
34241 }
34242 if (ep->did != ntoh24(fh->fh_s_id) &&
34243 ep->did != FC_FID_FLOGI) {
34244 - atomic_inc(&mp->stats.xid_not_found);
34245 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34246 goto rel;
34247 }
34248 sof = fr_sof(fp);
34249 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
34250 } else {
34251 sp = &ep->seq;
34252 if (sp->id != fh->fh_seq_id) {
34253 - atomic_inc(&mp->stats.seq_not_found);
34254 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34255 goto rel;
34256 }
34257 }
34258 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
34259 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34260
34261 if (!sp)
34262 - atomic_inc(&mp->stats.xid_not_found);
34263 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34264 else
34265 - atomic_inc(&mp->stats.non_bls_resp);
34266 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34267
34268 fc_frame_free(fp);
34269 }
34270 diff -urNp linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c
34271 --- linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
34272 +++ linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
34273 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
34274 }
34275 }
34276
34277 -static struct ata_port_operations sas_sata_ops = {
34278 +static const struct ata_port_operations sas_sata_ops = {
34279 .phy_reset = sas_ata_phy_reset,
34280 .post_internal_cmd = sas_ata_post_internal,
34281 .qc_defer = ata_std_qc_defer,
34282 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c
34283 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
34284 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
34285 @@ -124,7 +124,7 @@ struct lpfc_debug {
34286 int len;
34287 };
34288
34289 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34290 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34291 static unsigned long lpfc_debugfs_start_time = 0L;
34292
34293 /**
34294 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34295 lpfc_debugfs_enable = 0;
34296
34297 len = 0;
34298 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34299 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34300 (lpfc_debugfs_max_disc_trc - 1);
34301 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34302 dtp = vport->disc_trc + i;
34303 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34304 lpfc_debugfs_enable = 0;
34305
34306 len = 0;
34307 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34308 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34309 (lpfc_debugfs_max_slow_ring_trc - 1);
34310 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34311 dtp = phba->slow_ring_trc + i;
34312 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
34313 uint32_t *ptr;
34314 char buffer[1024];
34315
34316 + pax_track_stack();
34317 +
34318 off = 0;
34319 spin_lock_irq(&phba->hbalock);
34320
34321 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34322 !vport || !vport->disc_trc)
34323 return;
34324
34325 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34326 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34327 (lpfc_debugfs_max_disc_trc - 1);
34328 dtp = vport->disc_trc + index;
34329 dtp->fmt = fmt;
34330 dtp->data1 = data1;
34331 dtp->data2 = data2;
34332 dtp->data3 = data3;
34333 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34334 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34335 dtp->jif = jiffies;
34336 #endif
34337 return;
34338 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34339 !phba || !phba->slow_ring_trc)
34340 return;
34341
34342 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34343 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34344 (lpfc_debugfs_max_slow_ring_trc - 1);
34345 dtp = phba->slow_ring_trc + index;
34346 dtp->fmt = fmt;
34347 dtp->data1 = data1;
34348 dtp->data2 = data2;
34349 dtp->data3 = data3;
34350 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34351 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34352 dtp->jif = jiffies;
34353 #endif
34354 return;
34355 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34356 "slow_ring buffer\n");
34357 goto debug_failed;
34358 }
34359 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34360 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34361 memset(phba->slow_ring_trc, 0,
34362 (sizeof(struct lpfc_debugfs_trc) *
34363 lpfc_debugfs_max_slow_ring_trc));
34364 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34365 "buffer\n");
34366 goto debug_failed;
34367 }
34368 - atomic_set(&vport->disc_trc_cnt, 0);
34369 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34370
34371 snprintf(name, sizeof(name), "discovery_trace");
34372 vport->debug_disc_trc =
34373 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h
34374 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
34375 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
34376 @@ -400,7 +400,7 @@ struct lpfc_vport {
34377 struct dentry *debug_nodelist;
34378 struct dentry *vport_debugfs_root;
34379 struct lpfc_debugfs_trc *disc_trc;
34380 - atomic_t disc_trc_cnt;
34381 + atomic_unchecked_t disc_trc_cnt;
34382 #endif
34383 uint8_t stat_data_enabled;
34384 uint8_t stat_data_blocked;
34385 @@ -725,8 +725,8 @@ struct lpfc_hba {
34386 struct timer_list fabric_block_timer;
34387 unsigned long bit_flags;
34388 #define FABRIC_COMANDS_BLOCKED 0
34389 - atomic_t num_rsrc_err;
34390 - atomic_t num_cmd_success;
34391 + atomic_unchecked_t num_rsrc_err;
34392 + atomic_unchecked_t num_cmd_success;
34393 unsigned long last_rsrc_error_time;
34394 unsigned long last_ramp_down_time;
34395 unsigned long last_ramp_up_time;
34396 @@ -740,7 +740,7 @@ struct lpfc_hba {
34397 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34398 struct dentry *debug_slow_ring_trc;
34399 struct lpfc_debugfs_trc *slow_ring_trc;
34400 - atomic_t slow_ring_trc_cnt;
34401 + atomic_unchecked_t slow_ring_trc_cnt;
34402 #endif
34403
34404 /* Used for deferred freeing of ELS data buffers */
34405 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c
34406 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
34407 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
34408 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34409 uint32_t evt_posted;
34410
34411 spin_lock_irqsave(&phba->hbalock, flags);
34412 - atomic_inc(&phba->num_rsrc_err);
34413 + atomic_inc_unchecked(&phba->num_rsrc_err);
34414 phba->last_rsrc_error_time = jiffies;
34415
34416 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34417 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34418 unsigned long flags;
34419 struct lpfc_hba *phba = vport->phba;
34420 uint32_t evt_posted;
34421 - atomic_inc(&phba->num_cmd_success);
34422 + atomic_inc_unchecked(&phba->num_cmd_success);
34423
34424 if (vport->cfg_lun_queue_depth <= queue_depth)
34425 return;
34426 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34427 int i;
34428 struct lpfc_rport_data *rdata;
34429
34430 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34431 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34432 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34433 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34434
34435 vports = lpfc_create_vport_work_array(phba);
34436 if (vports != NULL)
34437 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34438 }
34439 }
34440 lpfc_destroy_vport_work_array(phba, vports);
34441 - atomic_set(&phba->num_rsrc_err, 0);
34442 - atomic_set(&phba->num_cmd_success, 0);
34443 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34444 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34445 }
34446
34447 /**
34448 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34449 }
34450 }
34451 lpfc_destroy_vport_work_array(phba, vports);
34452 - atomic_set(&phba->num_rsrc_err, 0);
34453 - atomic_set(&phba->num_cmd_success, 0);
34454 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34455 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34456 }
34457
34458 /**
34459 diff -urNp linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c
34460 --- linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
34461 +++ linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
34462 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34463 int rval;
34464 int i;
34465
34466 + pax_track_stack();
34467 +
34468 // Allocate memory for the base list of scb for management module.
34469 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34470
34471 diff -urNp linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c
34472 --- linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
34473 +++ linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
34474 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
34475 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34476 int ret;
34477
34478 + pax_track_stack();
34479 +
34480 or = osd_start_request(od, GFP_KERNEL);
34481 if (!or)
34482 return -ENOMEM;
34483 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.c linux-2.6.32.42/drivers/scsi/pmcraid.c
34484 --- linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
34485 +++ linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
34486 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
34487 res->scsi_dev = scsi_dev;
34488 scsi_dev->hostdata = res;
34489 res->change_detected = 0;
34490 - atomic_set(&res->read_failures, 0);
34491 - atomic_set(&res->write_failures, 0);
34492 + atomic_set_unchecked(&res->read_failures, 0);
34493 + atomic_set_unchecked(&res->write_failures, 0);
34494 rc = 0;
34495 }
34496 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34497 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
34498
34499 /* If this was a SCSI read/write command keep count of errors */
34500 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34501 - atomic_inc(&res->read_failures);
34502 + atomic_inc_unchecked(&res->read_failures);
34503 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34504 - atomic_inc(&res->write_failures);
34505 + atomic_inc_unchecked(&res->write_failures);
34506
34507 if (!RES_IS_GSCSI(res->cfg_entry) &&
34508 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34509 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
34510
34511 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34512 /* add resources only after host is added into system */
34513 - if (!atomic_read(&pinstance->expose_resources))
34514 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34515 return;
34516
34517 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
34518 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
34519 init_waitqueue_head(&pinstance->reset_wait_q);
34520
34521 atomic_set(&pinstance->outstanding_cmds, 0);
34522 - atomic_set(&pinstance->expose_resources, 0);
34523 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34524
34525 INIT_LIST_HEAD(&pinstance->free_res_q);
34526 INIT_LIST_HEAD(&pinstance->used_res_q);
34527 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
34528 /* Schedule worker thread to handle CCN and take care of adding and
34529 * removing devices to OS
34530 */
34531 - atomic_set(&pinstance->expose_resources, 1);
34532 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34533 schedule_work(&pinstance->worker_q);
34534 return rc;
34535
34536 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.h linux-2.6.32.42/drivers/scsi/pmcraid.h
34537 --- linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
34538 +++ linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
34539 @@ -690,7 +690,7 @@ struct pmcraid_instance {
34540 atomic_t outstanding_cmds;
34541
34542 /* should add/delete resources to mid-layer now ?*/
34543 - atomic_t expose_resources;
34544 + atomic_unchecked_t expose_resources;
34545
34546 /* Tasklet to handle deferred processing */
34547 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
34548 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
34549 struct list_head queue; /* link to "to be exposed" resources */
34550 struct pmcraid_config_table_entry cfg_entry;
34551 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34552 - atomic_t read_failures; /* count of failed READ commands */
34553 - atomic_t write_failures; /* count of failed WRITE commands */
34554 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34555 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34556
34557 /* To indicate add/delete/modify during CCN */
34558 u8 change_detected;
34559 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h
34560 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
34561 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
34562 @@ -240,7 +240,7 @@ struct ddb_entry {
34563 atomic_t retry_relogin_timer; /* Min Time between relogins
34564 * (4000 only) */
34565 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34566 - atomic_t relogin_retry_count; /* Num of times relogin has been
34567 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34568 * retried */
34569
34570 uint16_t port;
34571 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c
34572 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
34573 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
34574 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
34575 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
34576 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34577 atomic_set(&ddb_entry->relogin_timer, 0);
34578 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34579 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34580 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34581 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34582 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34583 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
34584 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34585 atomic_set(&ddb_entry->port_down_timer,
34586 ha->port_down_retry_count);
34587 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34588 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34589 atomic_set(&ddb_entry->relogin_timer, 0);
34590 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34591 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
34592 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c
34593 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
34594 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
34595 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
34596 ddb_entry->fw_ddb_device_state ==
34597 DDB_DS_SESSION_FAILED) {
34598 /* Reset retry relogin timer */
34599 - atomic_inc(&ddb_entry->relogin_retry_count);
34600 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34601 DEBUG2(printk("scsi%ld: index[%d] relogin"
34602 " timed out-retrying"
34603 " relogin (%d)\n",
34604 ha->host_no,
34605 ddb_entry->fw_ddb_index,
34606 - atomic_read(&ddb_entry->
34607 + atomic_read_unchecked(&ddb_entry->
34608 relogin_retry_count))
34609 );
34610 start_dpc++;
34611 diff -urNp linux-2.6.32.42/drivers/scsi/scsi.c linux-2.6.32.42/drivers/scsi/scsi.c
34612 --- linux-2.6.32.42/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34613 +++ linux-2.6.32.42/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34614 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34615 unsigned long timeout;
34616 int rtn = 0;
34617
34618 - atomic_inc(&cmd->device->iorequest_cnt);
34619 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34620
34621 /* check if the device is still usable */
34622 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34623 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_debug.c linux-2.6.32.42/drivers/scsi/scsi_debug.c
34624 --- linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34625 +++ linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34626 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34627 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34628 unsigned char *cmd = (unsigned char *)scp->cmnd;
34629
34630 + pax_track_stack();
34631 +
34632 if ((errsts = check_readiness(scp, 1, devip)))
34633 return errsts;
34634 memset(arr, 0, sizeof(arr));
34635 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34636 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34637 unsigned char *cmd = (unsigned char *)scp->cmnd;
34638
34639 + pax_track_stack();
34640 +
34641 if ((errsts = check_readiness(scp, 1, devip)))
34642 return errsts;
34643 memset(arr, 0, sizeof(arr));
34644 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_lib.c linux-2.6.32.42/drivers/scsi/scsi_lib.c
34645 --- linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34646 +++ linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34647 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34648
34649 scsi_init_cmd_errh(cmd);
34650 cmd->result = DID_NO_CONNECT << 16;
34651 - atomic_inc(&cmd->device->iorequest_cnt);
34652 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34653
34654 /*
34655 * SCSI request completion path will do scsi_device_unbusy(),
34656 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34657 */
34658 cmd->serial_number = 0;
34659
34660 - atomic_inc(&cmd->device->iodone_cnt);
34661 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34662 if (cmd->result)
34663 - atomic_inc(&cmd->device->ioerr_cnt);
34664 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34665
34666 disposition = scsi_decide_disposition(cmd);
34667 if (disposition != SUCCESS &&
34668 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_sysfs.c linux-2.6.32.42/drivers/scsi/scsi_sysfs.c
34669 --- linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
34670 +++ linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
34671 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
34672 char *buf) \
34673 { \
34674 struct scsi_device *sdev = to_scsi_device(dev); \
34675 - unsigned long long count = atomic_read(&sdev->field); \
34676 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34677 return snprintf(buf, 20, "0x%llx\n", count); \
34678 } \
34679 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34680 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c
34681 --- linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34682 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34683 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34684 * Netlink Infrastructure
34685 */
34686
34687 -static atomic_t fc_event_seq;
34688 +static atomic_unchecked_t fc_event_seq;
34689
34690 /**
34691 * fc_get_event_number - Obtain the next sequential FC event number
34692 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34693 u32
34694 fc_get_event_number(void)
34695 {
34696 - return atomic_add_return(1, &fc_event_seq);
34697 + return atomic_add_return_unchecked(1, &fc_event_seq);
34698 }
34699 EXPORT_SYMBOL(fc_get_event_number);
34700
34701 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34702 {
34703 int error;
34704
34705 - atomic_set(&fc_event_seq, 0);
34706 + atomic_set_unchecked(&fc_event_seq, 0);
34707
34708 error = transport_class_register(&fc_host_class);
34709 if (error)
34710 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c
34711 --- linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34712 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34713 @@ -81,7 +81,7 @@ struct iscsi_internal {
34714 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34715 };
34716
34717 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34718 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34719 static struct workqueue_struct *iscsi_eh_timer_workq;
34720
34721 /*
34722 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34723 int err;
34724
34725 ihost = shost->shost_data;
34726 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34727 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34728
34729 if (id == ISCSI_MAX_TARGET) {
34730 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34731 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34732 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34733 ISCSI_TRANSPORT_VERSION);
34734
34735 - atomic_set(&iscsi_session_nr, 0);
34736 + atomic_set_unchecked(&iscsi_session_nr, 0);
34737
34738 err = class_register(&iscsi_transport_class);
34739 if (err)
34740 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c
34741 --- linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34742 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34743 @@ -33,7 +33,7 @@
34744 #include "scsi_transport_srp_internal.h"
34745
34746 struct srp_host_attrs {
34747 - atomic_t next_port_id;
34748 + atomic_unchecked_t next_port_id;
34749 };
34750 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34751
34752 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34753 struct Scsi_Host *shost = dev_to_shost(dev);
34754 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34755
34756 - atomic_set(&srp_host->next_port_id, 0);
34757 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34758 return 0;
34759 }
34760
34761 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34762 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34763 rport->roles = ids->roles;
34764
34765 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34766 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34767 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34768
34769 transport_setup_device(&rport->dev);
34770 diff -urNp linux-2.6.32.42/drivers/scsi/sg.c linux-2.6.32.42/drivers/scsi/sg.c
34771 --- linux-2.6.32.42/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34772 +++ linux-2.6.32.42/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34773 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34774 const struct file_operations * fops;
34775 };
34776
34777 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34778 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34779 {"allow_dio", &adio_fops},
34780 {"debug", &debug_fops},
34781 {"def_reserved_size", &dressz_fops},
34782 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34783 {
34784 int k, mask;
34785 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34786 - struct sg_proc_leaf * leaf;
34787 + const struct sg_proc_leaf * leaf;
34788
34789 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34790 if (!sg_proc_sgp)
34791 diff -urNp linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c
34792 --- linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34793 +++ linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34794 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34795 int do_iounmap = 0;
34796 int do_disable_device = 1;
34797
34798 + pax_track_stack();
34799 +
34800 memset(&sym_dev, 0, sizeof(sym_dev));
34801 memset(&nvram, 0, sizeof(nvram));
34802 sym_dev.pdev = pdev;
34803 diff -urNp linux-2.6.32.42/drivers/serial/kgdboc.c linux-2.6.32.42/drivers/serial/kgdboc.c
34804 --- linux-2.6.32.42/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34805 +++ linux-2.6.32.42/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34806 @@ -18,7 +18,7 @@
34807
34808 #define MAX_CONFIG_LEN 40
34809
34810 -static struct kgdb_io kgdboc_io_ops;
34811 +static const struct kgdb_io kgdboc_io_ops;
34812
34813 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34814 static int configured = -1;
34815 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34816 module_put(THIS_MODULE);
34817 }
34818
34819 -static struct kgdb_io kgdboc_io_ops = {
34820 +static const struct kgdb_io kgdboc_io_ops = {
34821 .name = "kgdboc",
34822 .read_char = kgdboc_get_char,
34823 .write_char = kgdboc_put_char,
34824 diff -urNp linux-2.6.32.42/drivers/spi/spi.c linux-2.6.32.42/drivers/spi/spi.c
34825 --- linux-2.6.32.42/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34826 +++ linux-2.6.32.42/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34827 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34828 EXPORT_SYMBOL_GPL(spi_sync);
34829
34830 /* portable code must never pass more than 32 bytes */
34831 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34832 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34833
34834 static u8 *buf;
34835
34836 diff -urNp linux-2.6.32.42/drivers/staging/android/binder.c linux-2.6.32.42/drivers/staging/android/binder.c
34837 --- linux-2.6.32.42/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34838 +++ linux-2.6.32.42/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34839 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34840 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34841 }
34842
34843 -static struct vm_operations_struct binder_vm_ops = {
34844 +static const struct vm_operations_struct binder_vm_ops = {
34845 .open = binder_vma_open,
34846 .close = binder_vma_close,
34847 };
34848 diff -urNp linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c
34849 --- linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34850 +++ linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34851 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34852 return VM_FAULT_NOPAGE;
34853 }
34854
34855 -static struct vm_operations_struct b3dfg_vm_ops = {
34856 +static const struct vm_operations_struct b3dfg_vm_ops = {
34857 .fault = b3dfg_vma_fault,
34858 };
34859
34860 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34861 return r;
34862 }
34863
34864 -static struct file_operations b3dfg_fops = {
34865 +static const struct file_operations b3dfg_fops = {
34866 .owner = THIS_MODULE,
34867 .open = b3dfg_open,
34868 .release = b3dfg_release,
34869 diff -urNp linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c
34870 --- linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34871 +++ linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34872 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34873 mutex_unlock(&dev->mutex);
34874 }
34875
34876 -static struct vm_operations_struct comedi_vm_ops = {
34877 +static const struct vm_operations_struct comedi_vm_ops = {
34878 .close = comedi_unmap,
34879 };
34880
34881 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c
34882 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34883 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34884 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34885 static dev_t adsp_devno;
34886 static struct class *adsp_class;
34887
34888 -static struct file_operations adsp_fops = {
34889 +static const struct file_operations adsp_fops = {
34890 .owner = THIS_MODULE,
34891 .open = adsp_open,
34892 .unlocked_ioctl = adsp_ioctl,
34893 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c
34894 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34895 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34896 @@ -1022,7 +1022,7 @@ done:
34897 return rc;
34898 }
34899
34900 -static struct file_operations audio_aac_fops = {
34901 +static const struct file_operations audio_aac_fops = {
34902 .owner = THIS_MODULE,
34903 .open = audio_open,
34904 .release = audio_release,
34905 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c
34906 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34907 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34908 @@ -833,7 +833,7 @@ done:
34909 return rc;
34910 }
34911
34912 -static struct file_operations audio_amrnb_fops = {
34913 +static const struct file_operations audio_amrnb_fops = {
34914 .owner = THIS_MODULE,
34915 .open = audamrnb_open,
34916 .release = audamrnb_release,
34917 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c
34918 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34919 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34920 @@ -805,7 +805,7 @@ dma_fail:
34921 return rc;
34922 }
34923
34924 -static struct file_operations audio_evrc_fops = {
34925 +static const struct file_operations audio_evrc_fops = {
34926 .owner = THIS_MODULE,
34927 .open = audevrc_open,
34928 .release = audevrc_release,
34929 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c
34930 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34931 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34932 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34933 return 0;
34934 }
34935
34936 -static struct file_operations audio_fops = {
34937 +static const struct file_operations audio_fops = {
34938 .owner = THIS_MODULE,
34939 .open = audio_in_open,
34940 .release = audio_in_release,
34941 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
34942 .unlocked_ioctl = audio_in_ioctl,
34943 };
34944
34945 -static struct file_operations audpre_fops = {
34946 +static const struct file_operations audpre_fops = {
34947 .owner = THIS_MODULE,
34948 .open = audpre_open,
34949 .unlocked_ioctl = audpre_ioctl,
34950 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c
34951 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34952 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34953 @@ -941,7 +941,7 @@ done:
34954 return rc;
34955 }
34956
34957 -static struct file_operations audio_mp3_fops = {
34958 +static const struct file_operations audio_mp3_fops = {
34959 .owner = THIS_MODULE,
34960 .open = audio_open,
34961 .release = audio_release,
34962 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c
34963 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34964 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34965 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34966 return 0;
34967 }
34968
34969 -static struct file_operations audio_fops = {
34970 +static const struct file_operations audio_fops = {
34971 .owner = THIS_MODULE,
34972 .open = audio_open,
34973 .release = audio_release,
34974 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
34975 .unlocked_ioctl = audio_ioctl,
34976 };
34977
34978 -static struct file_operations audpp_fops = {
34979 +static const struct file_operations audpp_fops = {
34980 .owner = THIS_MODULE,
34981 .open = audpp_open,
34982 .unlocked_ioctl = audpp_ioctl,
34983 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c
34984 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34985 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34986 @@ -816,7 +816,7 @@ err:
34987 return rc;
34988 }
34989
34990 -static struct file_operations audio_qcelp_fops = {
34991 +static const struct file_operations audio_qcelp_fops = {
34992 .owner = THIS_MODULE,
34993 .open = audqcelp_open,
34994 .release = audqcelp_release,
34995 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c
34996 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34997 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34998 @@ -242,7 +242,7 @@ err:
34999 return rc;
35000 }
35001
35002 -static struct file_operations snd_fops = {
35003 +static const struct file_operations snd_fops = {
35004 .owner = THIS_MODULE,
35005 .open = snd_open,
35006 .release = snd_release,
35007 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c
35008 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
35009 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
35010 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
35011 return 0;
35012 }
35013
35014 -static struct file_operations qmi_fops = {
35015 +static const struct file_operations qmi_fops = {
35016 .owner = THIS_MODULE,
35017 .read = qmi_read,
35018 .write = qmi_write,
35019 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c
35020 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
35021 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
35022 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
35023 return rc;
35024 }
35025
35026 -static struct file_operations rpcrouter_server_fops = {
35027 +static const struct file_operations rpcrouter_server_fops = {
35028 .owner = THIS_MODULE,
35029 .open = rpcrouter_open,
35030 .release = rpcrouter_release,
35031 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
35032 .unlocked_ioctl = rpcrouter_ioctl,
35033 };
35034
35035 -static struct file_operations rpcrouter_router_fops = {
35036 +static const struct file_operations rpcrouter_router_fops = {
35037 .owner = THIS_MODULE,
35038 .open = rpcrouter_open,
35039 .release = rpcrouter_release,
35040 diff -urNp linux-2.6.32.42/drivers/staging/dst/dcore.c linux-2.6.32.42/drivers/staging/dst/dcore.c
35041 --- linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
35042 +++ linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
35043 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
35044 return 0;
35045 }
35046
35047 -static struct block_device_operations dst_blk_ops = {
35048 +static const struct block_device_operations dst_blk_ops = {
35049 .open = dst_bdev_open,
35050 .release = dst_bdev_release,
35051 .owner = THIS_MODULE,
35052 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
35053 n->size = ctl->size;
35054
35055 atomic_set(&n->refcnt, 1);
35056 - atomic_long_set(&n->gen, 0);
35057 + atomic_long_set_unchecked(&n->gen, 0);
35058 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
35059
35060 err = dst_node_sysfs_init(n);
35061 diff -urNp linux-2.6.32.42/drivers/staging/dst/trans.c linux-2.6.32.42/drivers/staging/dst/trans.c
35062 --- linux-2.6.32.42/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
35063 +++ linux-2.6.32.42/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
35064 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
35065 t->error = 0;
35066 t->retries = 0;
35067 atomic_set(&t->refcnt, 1);
35068 - t->gen = atomic_long_inc_return(&n->gen);
35069 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
35070
35071 t->enc = bio_data_dir(bio);
35072 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
35073 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c
35074 --- linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
35075 +++ linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
35076 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
35077 struct net_device_stats *stats = &etdev->net_stats;
35078
35079 if (pMpTcb->Flags & fMP_DEST_BROAD)
35080 - atomic_inc(&etdev->Stats.brdcstxmt);
35081 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
35082 else if (pMpTcb->Flags & fMP_DEST_MULTI)
35083 - atomic_inc(&etdev->Stats.multixmt);
35084 + atomic_inc_unchecked(&etdev->Stats.multixmt);
35085 else
35086 - atomic_inc(&etdev->Stats.unixmt);
35087 + atomic_inc_unchecked(&etdev->Stats.unixmt);
35088
35089 if (pMpTcb->Packet) {
35090 stats->tx_bytes += pMpTcb->Packet->len;
35091 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h
35092 --- linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
35093 +++ linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
35094 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
35095 * operations
35096 */
35097 u32 unircv; /* # multicast packets received */
35098 - atomic_t unixmt; /* # multicast packets for Tx */
35099 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
35100 u32 multircv; /* # multicast packets received */
35101 - atomic_t multixmt; /* # multicast packets for Tx */
35102 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
35103 u32 brdcstrcv; /* # broadcast packets received */
35104 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
35105 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
35106 u32 norcvbuf; /* # Rx packets discarded */
35107 u32 noxmtbuf; /* # Tx packets discarded */
35108
35109 diff -urNp linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c
35110 --- linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
35111 +++ linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
35112 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
35113 return 0;
35114 }
35115
35116 -static struct vm_operations_struct go7007_vm_ops = {
35117 +static const struct vm_operations_struct go7007_vm_ops = {
35118 .open = go7007_vm_open,
35119 .close = go7007_vm_close,
35120 .fault = go7007_vm_fault,
35121 diff -urNp linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c
35122 --- linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
35123 +++ linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
35124 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
35125 /* The one and only one */
35126 static struct blkvsc_driver_context g_blkvsc_drv;
35127
35128 -static struct block_device_operations block_ops = {
35129 +static const struct block_device_operations block_ops = {
35130 .owner = THIS_MODULE,
35131 .open = blkvsc_open,
35132 .release = blkvsc_release,
35133 diff -urNp linux-2.6.32.42/drivers/staging/hv/Channel.c linux-2.6.32.42/drivers/staging/hv/Channel.c
35134 --- linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
35135 +++ linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
35136 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
35137
35138 DPRINT_ENTER(VMBUS);
35139
35140 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
35141 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
35142 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
35143 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
35144
35145 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
35146 ASSERT(msgInfo != NULL);
35147 diff -urNp linux-2.6.32.42/drivers/staging/hv/Hv.c linux-2.6.32.42/drivers/staging/hv/Hv.c
35148 --- linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
35149 +++ linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
35150 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
35151 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
35152 u32 outputAddressHi = outputAddress >> 32;
35153 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
35154 - volatile void *hypercallPage = gHvContext.HypercallPage;
35155 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
35156
35157 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
35158 Control, Input, Output);
35159 diff -urNp linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c
35160 --- linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
35161 +++ linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
35162 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
35163 to_device_context(root_device_obj);
35164 struct device_context *child_device_ctx =
35165 to_device_context(child_device_obj);
35166 - static atomic_t device_num = ATOMIC_INIT(0);
35167 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
35168
35169 DPRINT_ENTER(VMBUS_DRV);
35170
35171 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
35172
35173 /* Set the device name. Otherwise, device_register() will fail. */
35174 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
35175 - atomic_inc_return(&device_num));
35176 + atomic_inc_return_unchecked(&device_num));
35177
35178 /* The new device belongs to this bus */
35179 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
35180 diff -urNp linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h
35181 --- linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
35182 +++ linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
35183 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
35184 struct VMBUS_CONNECTION {
35185 enum VMBUS_CONNECT_STATE ConnectState;
35186
35187 - atomic_t NextGpadlHandle;
35188 + atomic_unchecked_t NextGpadlHandle;
35189
35190 /*
35191 * Represents channel interrupts. Each bit position represents a
35192 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet.c linux-2.6.32.42/drivers/staging/octeon/ethernet.c
35193 --- linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
35194 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
35195 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
35196 * since the RX tasklet also increments it.
35197 */
35198 #ifdef CONFIG_64BIT
35199 - atomic64_add(rx_status.dropped_packets,
35200 - (atomic64_t *)&priv->stats.rx_dropped);
35201 + atomic64_add_unchecked(rx_status.dropped_packets,
35202 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35203 #else
35204 - atomic_add(rx_status.dropped_packets,
35205 - (atomic_t *)&priv->stats.rx_dropped);
35206 + atomic_add_unchecked(rx_status.dropped_packets,
35207 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35208 #endif
35209 }
35210
35211 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c
35212 --- linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
35213 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
35214 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
35215 /* Increment RX stats for virtual ports */
35216 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35217 #ifdef CONFIG_64BIT
35218 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35219 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35220 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35221 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35222 #else
35223 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35224 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35225 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35226 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35227 #endif
35228 }
35229 netif_receive_skb(skb);
35230 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
35231 dev->name);
35232 */
35233 #ifdef CONFIG_64BIT
35234 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35235 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
35236 #else
35237 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35238 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
35239 #endif
35240 dev_kfree_skb_irq(skb);
35241 }
35242 diff -urNp linux-2.6.32.42/drivers/staging/panel/panel.c linux-2.6.32.42/drivers/staging/panel/panel.c
35243 --- linux-2.6.32.42/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
35244 +++ linux-2.6.32.42/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
35245 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
35246 return 0;
35247 }
35248
35249 -static struct file_operations lcd_fops = {
35250 +static const struct file_operations lcd_fops = {
35251 .write = lcd_write,
35252 .open = lcd_open,
35253 .release = lcd_release,
35254 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
35255 return 0;
35256 }
35257
35258 -static struct file_operations keypad_fops = {
35259 +static const struct file_operations keypad_fops = {
35260 .read = keypad_read, /* read */
35261 .open = keypad_open, /* open */
35262 .release = keypad_release, /* close */
35263 diff -urNp linux-2.6.32.42/drivers/staging/phison/phison.c linux-2.6.32.42/drivers/staging/phison/phison.c
35264 --- linux-2.6.32.42/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
35265 +++ linux-2.6.32.42/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
35266 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
35267 ATA_BMDMA_SHT(DRV_NAME),
35268 };
35269
35270 -static struct ata_port_operations phison_ops = {
35271 +static const struct ata_port_operations phison_ops = {
35272 .inherits = &ata_bmdma_port_ops,
35273 .prereset = phison_pre_reset,
35274 };
35275 diff -urNp linux-2.6.32.42/drivers/staging/poch/poch.c linux-2.6.32.42/drivers/staging/poch/poch.c
35276 --- linux-2.6.32.42/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
35277 +++ linux-2.6.32.42/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
35278 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
35279 return 0;
35280 }
35281
35282 -static struct file_operations poch_fops = {
35283 +static const struct file_operations poch_fops = {
35284 .owner = THIS_MODULE,
35285 .open = poch_open,
35286 .release = poch_release,
35287 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/inode.c linux-2.6.32.42/drivers/staging/pohmelfs/inode.c
35288 --- linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
35289 +++ linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
35290 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
35291 mutex_init(&psb->mcache_lock);
35292 psb->mcache_root = RB_ROOT;
35293 psb->mcache_timeout = msecs_to_jiffies(5000);
35294 - atomic_long_set(&psb->mcache_gen, 0);
35295 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35296
35297 psb->trans_max_pages = 100;
35298
35299 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
35300 INIT_LIST_HEAD(&psb->crypto_ready_list);
35301 INIT_LIST_HEAD(&psb->crypto_active_list);
35302
35303 - atomic_set(&psb->trans_gen, 1);
35304 + atomic_set_unchecked(&psb->trans_gen, 1);
35305 atomic_long_set(&psb->total_inodes, 0);
35306
35307 mutex_init(&psb->state_lock);
35308 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c
35309 --- linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
35310 +++ linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
35311 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35312 m->data = data;
35313 m->start = start;
35314 m->size = size;
35315 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35316 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35317
35318 mutex_lock(&psb->mcache_lock);
35319 err = pohmelfs_mcache_insert(psb, m);
35320 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h
35321 --- linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
35322 +++ linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
35323 @@ -570,14 +570,14 @@ struct pohmelfs_config;
35324 struct pohmelfs_sb {
35325 struct rb_root mcache_root;
35326 struct mutex mcache_lock;
35327 - atomic_long_t mcache_gen;
35328 + atomic_long_unchecked_t mcache_gen;
35329 unsigned long mcache_timeout;
35330
35331 unsigned int idx;
35332
35333 unsigned int trans_retries;
35334
35335 - atomic_t trans_gen;
35336 + atomic_unchecked_t trans_gen;
35337
35338 unsigned int crypto_attached_size;
35339 unsigned int crypto_align_size;
35340 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/trans.c linux-2.6.32.42/drivers/staging/pohmelfs/trans.c
35341 --- linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
35342 +++ linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
35343 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35344 int err;
35345 struct netfs_cmd *cmd = t->iovec.iov_base;
35346
35347 - t->gen = atomic_inc_return(&psb->trans_gen);
35348 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35349
35350 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35351 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35352 diff -urNp linux-2.6.32.42/drivers/staging/sep/sep_driver.c linux-2.6.32.42/drivers/staging/sep/sep_driver.c
35353 --- linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
35354 +++ linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
35355 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
35356 static dev_t sep_devno;
35357
35358 /* the files operations structure of the driver */
35359 -static struct file_operations sep_file_operations = {
35360 +static const struct file_operations sep_file_operations = {
35361 .owner = THIS_MODULE,
35362 .ioctl = sep_ioctl,
35363 .poll = sep_poll,
35364 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci.h linux-2.6.32.42/drivers/staging/usbip/vhci.h
35365 --- linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
35366 +++ linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
35367 @@ -92,7 +92,7 @@ struct vhci_hcd {
35368 unsigned resuming:1;
35369 unsigned long re_timeout;
35370
35371 - atomic_t seqnum;
35372 + atomic_unchecked_t seqnum;
35373
35374 /*
35375 * NOTE:
35376 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c
35377 --- linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
35378 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
35379 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
35380 return;
35381 }
35382
35383 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35384 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35385 if (priv->seqnum == 0xffff)
35386 usbip_uinfo("seqnum max\n");
35387
35388 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
35389 return -ENOMEM;
35390 }
35391
35392 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35393 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35394 if (unlink->seqnum == 0xffff)
35395 usbip_uinfo("seqnum max\n");
35396
35397 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
35398 vdev->rhport = rhport;
35399 }
35400
35401 - atomic_set(&vhci->seqnum, 0);
35402 + atomic_set_unchecked(&vhci->seqnum, 0);
35403 spin_lock_init(&vhci->lock);
35404
35405
35406 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c
35407 --- linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
35408 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
35409 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
35410 usbip_uerr("cannot find a urb of seqnum %u\n",
35411 pdu->base.seqnum);
35412 usbip_uinfo("max seqnum %d\n",
35413 - atomic_read(&the_controller->seqnum));
35414 + atomic_read_unchecked(&the_controller->seqnum));
35415 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35416 return;
35417 }
35418 diff -urNp linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c
35419 --- linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
35420 +++ linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
35421 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
35422 static int __init vme_user_probe(struct device *, int, int);
35423 static int __exit vme_user_remove(struct device *, int, int);
35424
35425 -static struct file_operations vme_user_fops = {
35426 +static const struct file_operations vme_user_fops = {
35427 .open = vme_user_open,
35428 .release = vme_user_release,
35429 .read = vme_user_read,
35430 diff -urNp linux-2.6.32.42/drivers/telephony/ixj.c linux-2.6.32.42/drivers/telephony/ixj.c
35431 --- linux-2.6.32.42/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
35432 +++ linux-2.6.32.42/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
35433 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35434 bool mContinue;
35435 char *pIn, *pOut;
35436
35437 + pax_track_stack();
35438 +
35439 if (!SCI_Prepare(j))
35440 return 0;
35441
35442 diff -urNp linux-2.6.32.42/drivers/uio/uio.c linux-2.6.32.42/drivers/uio/uio.c
35443 --- linux-2.6.32.42/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
35444 +++ linux-2.6.32.42/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
35445 @@ -23,6 +23,7 @@
35446 #include <linux/string.h>
35447 #include <linux/kobject.h>
35448 #include <linux/uio_driver.h>
35449 +#include <asm/local.h>
35450
35451 #define UIO_MAX_DEVICES 255
35452
35453 @@ -30,10 +31,10 @@ struct uio_device {
35454 struct module *owner;
35455 struct device *dev;
35456 int minor;
35457 - atomic_t event;
35458 + atomic_unchecked_t event;
35459 struct fasync_struct *async_queue;
35460 wait_queue_head_t wait;
35461 - int vma_count;
35462 + local_t vma_count;
35463 struct uio_info *info;
35464 struct kobject *map_dir;
35465 struct kobject *portio_dir;
35466 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
35467 return entry->show(mem, buf);
35468 }
35469
35470 -static struct sysfs_ops map_sysfs_ops = {
35471 +static const struct sysfs_ops map_sysfs_ops = {
35472 .show = map_type_show,
35473 };
35474
35475 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
35476 return entry->show(port, buf);
35477 }
35478
35479 -static struct sysfs_ops portio_sysfs_ops = {
35480 +static const struct sysfs_ops portio_sysfs_ops = {
35481 .show = portio_type_show,
35482 };
35483
35484 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
35485 struct uio_device *idev = dev_get_drvdata(dev);
35486 if (idev)
35487 return sprintf(buf, "%u\n",
35488 - (unsigned int)atomic_read(&idev->event));
35489 + (unsigned int)atomic_read_unchecked(&idev->event));
35490 else
35491 return -ENODEV;
35492 }
35493 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
35494 {
35495 struct uio_device *idev = info->uio_dev;
35496
35497 - atomic_inc(&idev->event);
35498 + atomic_inc_unchecked(&idev->event);
35499 wake_up_interruptible(&idev->wait);
35500 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35501 }
35502 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
35503 }
35504
35505 listener->dev = idev;
35506 - listener->event_count = atomic_read(&idev->event);
35507 + listener->event_count = atomic_read_unchecked(&idev->event);
35508 filep->private_data = listener;
35509
35510 if (idev->info->open) {
35511 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
35512 return -EIO;
35513
35514 poll_wait(filep, &idev->wait, wait);
35515 - if (listener->event_count != atomic_read(&idev->event))
35516 + if (listener->event_count != atomic_read_unchecked(&idev->event))
35517 return POLLIN | POLLRDNORM;
35518 return 0;
35519 }
35520 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
35521 do {
35522 set_current_state(TASK_INTERRUPTIBLE);
35523
35524 - event_count = atomic_read(&idev->event);
35525 + event_count = atomic_read_unchecked(&idev->event);
35526 if (event_count != listener->event_count) {
35527 if (copy_to_user(buf, &event_count, count))
35528 retval = -EFAULT;
35529 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
35530 static void uio_vma_open(struct vm_area_struct *vma)
35531 {
35532 struct uio_device *idev = vma->vm_private_data;
35533 - idev->vma_count++;
35534 + local_inc(&idev->vma_count);
35535 }
35536
35537 static void uio_vma_close(struct vm_area_struct *vma)
35538 {
35539 struct uio_device *idev = vma->vm_private_data;
35540 - idev->vma_count--;
35541 + local_dec(&idev->vma_count);
35542 }
35543
35544 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35545 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
35546 idev->owner = owner;
35547 idev->info = info;
35548 init_waitqueue_head(&idev->wait);
35549 - atomic_set(&idev->event, 0);
35550 + atomic_set_unchecked(&idev->event, 0);
35551
35552 ret = uio_get_minor(idev);
35553 if (ret)
35554 diff -urNp linux-2.6.32.42/drivers/usb/atm/usbatm.c linux-2.6.32.42/drivers/usb/atm/usbatm.c
35555 --- linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
35556 +++ linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
35557 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
35558 if (printk_ratelimit())
35559 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35560 __func__, vpi, vci);
35561 - atomic_inc(&vcc->stats->rx_err);
35562 + atomic_inc_unchecked(&vcc->stats->rx_err);
35563 return;
35564 }
35565
35566 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
35567 if (length > ATM_MAX_AAL5_PDU) {
35568 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35569 __func__, length, vcc);
35570 - atomic_inc(&vcc->stats->rx_err);
35571 + atomic_inc_unchecked(&vcc->stats->rx_err);
35572 goto out;
35573 }
35574
35575 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
35576 if (sarb->len < pdu_length) {
35577 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35578 __func__, pdu_length, sarb->len, vcc);
35579 - atomic_inc(&vcc->stats->rx_err);
35580 + atomic_inc_unchecked(&vcc->stats->rx_err);
35581 goto out;
35582 }
35583
35584 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35585 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35586 __func__, vcc);
35587 - atomic_inc(&vcc->stats->rx_err);
35588 + atomic_inc_unchecked(&vcc->stats->rx_err);
35589 goto out;
35590 }
35591
35592 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
35593 if (printk_ratelimit())
35594 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35595 __func__, length);
35596 - atomic_inc(&vcc->stats->rx_drop);
35597 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35598 goto out;
35599 }
35600
35601 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
35602
35603 vcc->push(vcc, skb);
35604
35605 - atomic_inc(&vcc->stats->rx);
35606 + atomic_inc_unchecked(&vcc->stats->rx);
35607 out:
35608 skb_trim(sarb, 0);
35609 }
35610 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35611 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35612
35613 usbatm_pop(vcc, skb);
35614 - atomic_inc(&vcc->stats->tx);
35615 + atomic_inc_unchecked(&vcc->stats->tx);
35616
35617 skb = skb_dequeue(&instance->sndqueue);
35618 }
35619 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35620 if (!left--)
35621 return sprintf(page,
35622 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35623 - atomic_read(&atm_dev->stats.aal5.tx),
35624 - atomic_read(&atm_dev->stats.aal5.tx_err),
35625 - atomic_read(&atm_dev->stats.aal5.rx),
35626 - atomic_read(&atm_dev->stats.aal5.rx_err),
35627 - atomic_read(&atm_dev->stats.aal5.rx_drop));
35628 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35629 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35630 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35631 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35632 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35633
35634 if (!left--) {
35635 if (instance->disconnected)
35636 diff -urNp linux-2.6.32.42/drivers/usb/class/cdc-wdm.c linux-2.6.32.42/drivers/usb/class/cdc-wdm.c
35637 --- linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35638 +++ linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35639 @@ -314,7 +314,7 @@ static ssize_t wdm_write
35640 if (r < 0)
35641 goto outnp;
35642
35643 - if (!file->f_flags && O_NONBLOCK)
35644 + if (!(file->f_flags & O_NONBLOCK))
35645 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35646 &desc->flags));
35647 else
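
The cdc-wdm hunk is a plain logic fix rather than a hardening change: in the old test, !file->f_flags && O_NONBLOCK logically negates the whole flags word and then ANDs the result with the nonzero constant O_NONBLOCK, so the blocking wait_event_interruptible() path was taken only when no flags at all happened to be set. The replacement masks the flag before negating. A tiny self-contained illustration of the difference (hypothetical flag values, not kernel code):

	#include <stdio.h>

	#define O_NONBLOCK 04000

	int main(void)
	{
		unsigned int f_flags = 0100000;	/* some unrelated flag set, O_NONBLOCK clear */

		/* buggy form: !f_flags is already 0 as soon as any flag is set */
		printf("buggy : %d\n", !f_flags && O_NONBLOCK);

		/* fixed form: true exactly when O_NONBLOCK is not set */
		printf("fixed : %d\n", !(f_flags & O_NONBLOCK));
		return 0;
	}
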
35648 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.c linux-2.6.32.42/drivers/usb/core/hcd.c
35649 --- linux-2.6.32.42/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35650 +++ linux-2.6.32.42/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35651 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35652
35653 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35654
35655 -struct usb_mon_operations *mon_ops;
35656 +const struct usb_mon_operations *mon_ops;
35657
35658 /*
35659 * The registration is unlocked.
35660 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35661 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35662 */
35663
35664 -int usb_mon_register (struct usb_mon_operations *ops)
35665 +int usb_mon_register (const struct usb_mon_operations *ops)
35666 {
35667
35668 if (mon_ops)
35669 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.h linux-2.6.32.42/drivers/usb/core/hcd.h
35670 --- linux-2.6.32.42/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35671 +++ linux-2.6.32.42/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35672 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35673 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35674
35675 struct usb_mon_operations {
35676 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35677 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35678 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35679 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35680 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35681 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35682 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35683 };
35684
35685 -extern struct usb_mon_operations *mon_ops;
35686 +extern const struct usb_mon_operations *mon_ops;
35687
35688 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35689 {
35690 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35691 (*mon_ops->urb_complete)(bus, urb, status);
35692 }
35693
35694 -int usb_mon_register(struct usb_mon_operations *ops);
35695 +int usb_mon_register(const struct usb_mon_operations *ops);
35696 void usb_mon_deregister(void);
35697
35698 #else
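
Constifying mon_ops and the usb_mon_operations members is the same idea applied to the sysfs_ops and backlight_ops tables throughout the rest of this section: a table of function pointers filled in once at compile time is declared const so it lands in read-only data, which (together with the read-only enforcement elsewhere in this patch) keeps a kernel write primitive from redirecting the callbacks. A minimal sketch of the pattern with hypothetical names; the constified registration mirrors usb_mon_register above and backlight_device_register below:

	struct demo_ops {
		void (* const submit)(int id);	/* the pointer slot itself is immutable */
		void (* const complete)(int id);
	};

	static void demo_submit(int id)   { (void)id; }
	static void demo_complete(int id) { (void)id; }

	/* const instance => placed in .rodata, slots cannot be rewritten at run time */
	static const struct demo_ops demo_ops_0 = {
		.submit   = demo_submit,
		.complete = demo_complete,
	};

	/* consumers accept const pointers, matching the constified prototypes */
	static const struct demo_ops *registered_ops;

	static int demo_register(const struct demo_ops *ops)
	{
		if (registered_ops)
			return -1;
		registered_ops = ops;
		return 0;
	}
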
35699 diff -urNp linux-2.6.32.42/drivers/usb/core/message.c linux-2.6.32.42/drivers/usb/core/message.c
35700 --- linux-2.6.32.42/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35701 +++ linux-2.6.32.42/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35702 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35703 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35704 if (buf) {
35705 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35706 - if (len > 0) {
35707 - smallbuf = kmalloc(++len, GFP_NOIO);
35708 + if (len++ > 0) {
35709 + smallbuf = kmalloc(len, GFP_NOIO);
35710 if (!smallbuf)
35711 return buf;
35712 memcpy(smallbuf, buf, len);
35713 diff -urNp linux-2.6.32.42/drivers/usb/misc/appledisplay.c linux-2.6.32.42/drivers/usb/misc/appledisplay.c
35714 --- linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35715 +++ linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35716 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35717 return pdata->msgdata[1];
35718 }
35719
35720 -static struct backlight_ops appledisplay_bl_data = {
35721 +static const struct backlight_ops appledisplay_bl_data = {
35722 .get_brightness = appledisplay_bl_get_brightness,
35723 .update_status = appledisplay_bl_update_status,
35724 };
35725 diff -urNp linux-2.6.32.42/drivers/usb/mon/mon_main.c linux-2.6.32.42/drivers/usb/mon/mon_main.c
35726 --- linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35727 +++ linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35728 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35729 /*
35730 * Ops
35731 */
35732 -static struct usb_mon_operations mon_ops_0 = {
35733 +static const struct usb_mon_operations mon_ops_0 = {
35734 .urb_submit = mon_submit,
35735 .urb_submit_error = mon_submit_error,
35736 .urb_complete = mon_complete,
35737 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h
35738 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35739 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35740 @@ -192,7 +192,7 @@ struct wahc {
35741 struct list_head xfer_delayed_list;
35742 spinlock_t xfer_list_lock;
35743 struct work_struct xfer_work;
35744 - atomic_t xfer_id_count;
35745 + atomic_unchecked_t xfer_id_count;
35746 };
35747
35748
35749 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35750 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35751 spin_lock_init(&wa->xfer_list_lock);
35752 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35753 - atomic_set(&wa->xfer_id_count, 1);
35754 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35755 }
35756
35757 /**
35758 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c
35759 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35760 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35761 @@ -293,7 +293,7 @@ out:
35762 */
35763 static void wa_xfer_id_init(struct wa_xfer *xfer)
35764 {
35765 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35766 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35767 }
35768
35769 /*
35770 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/messages.c linux-2.6.32.42/drivers/uwb/wlp/messages.c
35771 --- linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35772 +++ linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35773 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35774 size_t len = skb->len;
35775 size_t used;
35776 ssize_t result;
35777 - struct wlp_nonce enonce, rnonce;
35778 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35779 enum wlp_assc_error assc_err;
35780 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35781 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
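
The {{0}} initializers for enonce and rnonce are an information-leak fix: both nonces live on the kernel stack, and the accompanying enonce_buf/rnonce_buf character buffers suggest they are formatted for logging, so an early parse failure would otherwise expose whatever stale data the stack happened to contain. Zero-initializing the aggregates (outer braces for the struct, inner braces for its array member) guarantees every byte is defined. A small self-contained illustration of the idiom, with a hypothetical nonce type:

	#include <stdio.h>

	struct nonce { unsigned char data[16]; };

	static void report(const struct nonce *n)
	{
		for (unsigned i = 0; i < sizeof(n->data); i++)
			printf("%02x", n->data[i]);
		printf("\n");
	}

	int main(void)
	{
		/* every byte defined even if the "parser" below never fills them in */
		struct nonce enonce = {{0}}, rnonce = {{0}};

		/* ... imagine frame parsing that can bail out early ... */

		report(&enonce);	/* prints zeros, never stale stack contents */
		report(&rnonce);
		return 0;
	}
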
35782 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/sysfs.c linux-2.6.32.42/drivers/uwb/wlp/sysfs.c
35783 --- linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35784 +++ linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35785 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35786 return ret;
35787 }
35788
35789 -static
35790 -struct sysfs_ops wss_sysfs_ops = {
35791 +static const struct sysfs_ops wss_sysfs_ops = {
35792 .show = wlp_wss_attr_show,
35793 .store = wlp_wss_attr_store,
35794 };
35795 diff -urNp linux-2.6.32.42/drivers/video/atmel_lcdfb.c linux-2.6.32.42/drivers/video/atmel_lcdfb.c
35796 --- linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35797 +++ linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35798 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35799 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35800 }
35801
35802 -static struct backlight_ops atmel_lcdc_bl_ops = {
35803 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35804 .update_status = atmel_bl_update_status,
35805 .get_brightness = atmel_bl_get_brightness,
35806 };
35807 diff -urNp linux-2.6.32.42/drivers/video/aty/aty128fb.c linux-2.6.32.42/drivers/video/aty/aty128fb.c
35808 --- linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35809 +++ linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35810 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35811 return bd->props.brightness;
35812 }
35813
35814 -static struct backlight_ops aty128_bl_data = {
35815 +static const struct backlight_ops aty128_bl_data = {
35816 .get_brightness = aty128_bl_get_brightness,
35817 .update_status = aty128_bl_update_status,
35818 };
35819 diff -urNp linux-2.6.32.42/drivers/video/aty/atyfb_base.c linux-2.6.32.42/drivers/video/aty/atyfb_base.c
35820 --- linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35821 +++ linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35822 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35823 return bd->props.brightness;
35824 }
35825
35826 -static struct backlight_ops aty_bl_data = {
35827 +static const struct backlight_ops aty_bl_data = {
35828 .get_brightness = aty_bl_get_brightness,
35829 .update_status = aty_bl_update_status,
35830 };
35831 diff -urNp linux-2.6.32.42/drivers/video/aty/radeon_backlight.c linux-2.6.32.42/drivers/video/aty/radeon_backlight.c
35832 --- linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35833 +++ linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35834 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35835 return bd->props.brightness;
35836 }
35837
35838 -static struct backlight_ops radeon_bl_data = {
35839 +static const struct backlight_ops radeon_bl_data = {
35840 .get_brightness = radeon_bl_get_brightness,
35841 .update_status = radeon_bl_update_status,
35842 };
35843 diff -urNp linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c
35844 --- linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35845 +++ linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35846 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35847 return error ? data->current_brightness : reg_val;
35848 }
35849
35850 -static struct backlight_ops adp5520_bl_ops = {
35851 +static const struct backlight_ops adp5520_bl_ops = {
35852 .update_status = adp5520_bl_update_status,
35853 .get_brightness = adp5520_bl_get_brightness,
35854 };
35855 diff -urNp linux-2.6.32.42/drivers/video/backlight/adx_bl.c linux-2.6.32.42/drivers/video/backlight/adx_bl.c
35856 --- linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35857 +++ linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35858 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35859 return 1;
35860 }
35861
35862 -static struct backlight_ops adx_backlight_ops = {
35863 +static const struct backlight_ops adx_backlight_ops = {
35864 .options = 0,
35865 .update_status = adx_backlight_update_status,
35866 .get_brightness = adx_backlight_get_brightness,
35867 diff -urNp linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c
35868 --- linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35869 +++ linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35870 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35871 return pwm_channel_enable(&pwmbl->pwmc);
35872 }
35873
35874 -static struct backlight_ops atmel_pwm_bl_ops = {
35875 +static const struct backlight_ops atmel_pwm_bl_ops = {
35876 .get_brightness = atmel_pwm_bl_get_intensity,
35877 .update_status = atmel_pwm_bl_set_intensity,
35878 };
35879 diff -urNp linux-2.6.32.42/drivers/video/backlight/backlight.c linux-2.6.32.42/drivers/video/backlight/backlight.c
35880 --- linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35881 +++ linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35882 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35883 * ERR_PTR() or a pointer to the newly allocated device.
35884 */
35885 struct backlight_device *backlight_device_register(const char *name,
35886 - struct device *parent, void *devdata, struct backlight_ops *ops)
35887 + struct device *parent, void *devdata, const struct backlight_ops *ops)
35888 {
35889 struct backlight_device *new_bd;
35890 int rc;
35891 diff -urNp linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c
35892 --- linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35893 +++ linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35894 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35895 }
35896 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35897
35898 -static struct backlight_ops corgi_bl_ops = {
35899 +static const struct backlight_ops corgi_bl_ops = {
35900 .get_brightness = corgi_bl_get_intensity,
35901 .update_status = corgi_bl_update_status,
35902 };
35903 diff -urNp linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c
35904 --- linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35905 +++ linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35906 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35907 return intensity;
35908 }
35909
35910 -static struct backlight_ops cr_backlight_ops = {
35911 +static const struct backlight_ops cr_backlight_ops = {
35912 .get_brightness = cr_backlight_get_intensity,
35913 .update_status = cr_backlight_set_intensity,
35914 };
35915 diff -urNp linux-2.6.32.42/drivers/video/backlight/da903x_bl.c linux-2.6.32.42/drivers/video/backlight/da903x_bl.c
35916 --- linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35917 +++ linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35918 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35919 return data->current_brightness;
35920 }
35921
35922 -static struct backlight_ops da903x_backlight_ops = {
35923 +static const struct backlight_ops da903x_backlight_ops = {
35924 .update_status = da903x_backlight_update_status,
35925 .get_brightness = da903x_backlight_get_brightness,
35926 };
35927 diff -urNp linux-2.6.32.42/drivers/video/backlight/generic_bl.c linux-2.6.32.42/drivers/video/backlight/generic_bl.c
35928 --- linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35929 +++ linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35930 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35931 }
35932 EXPORT_SYMBOL(corgibl_limit_intensity);
35933
35934 -static struct backlight_ops genericbl_ops = {
35935 +static const struct backlight_ops genericbl_ops = {
35936 .options = BL_CORE_SUSPENDRESUME,
35937 .get_brightness = genericbl_get_intensity,
35938 .update_status = genericbl_send_intensity,
35939 diff -urNp linux-2.6.32.42/drivers/video/backlight/hp680_bl.c linux-2.6.32.42/drivers/video/backlight/hp680_bl.c
35940 --- linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35941 +++ linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35942 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35943 return current_intensity;
35944 }
35945
35946 -static struct backlight_ops hp680bl_ops = {
35947 +static const struct backlight_ops hp680bl_ops = {
35948 .get_brightness = hp680bl_get_intensity,
35949 .update_status = hp680bl_set_intensity,
35950 };
35951 diff -urNp linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c
35952 --- linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35953 +++ linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35954 @@ -93,7 +93,7 @@ out:
35955 return ret;
35956 }
35957
35958 -static struct backlight_ops jornada_bl_ops = {
35959 +static const struct backlight_ops jornada_bl_ops = {
35960 .get_brightness = jornada_bl_get_brightness,
35961 .update_status = jornada_bl_update_status,
35962 .options = BL_CORE_SUSPENDRESUME,
35963 diff -urNp linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c
35964 --- linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35965 +++ linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35966 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35967 return kb3886bl_intensity;
35968 }
35969
35970 -static struct backlight_ops kb3886bl_ops = {
35971 +static const struct backlight_ops kb3886bl_ops = {
35972 .get_brightness = kb3886bl_get_intensity,
35973 .update_status = kb3886bl_send_intensity,
35974 };
35975 diff -urNp linux-2.6.32.42/drivers/video/backlight/locomolcd.c linux-2.6.32.42/drivers/video/backlight/locomolcd.c
35976 --- linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35977 +++ linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35978 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35979 return current_intensity;
35980 }
35981
35982 -static struct backlight_ops locomobl_data = {
35983 +static const struct backlight_ops locomobl_data = {
35984 .get_brightness = locomolcd_get_intensity,
35985 .update_status = locomolcd_set_intensity,
35986 };
35987 diff -urNp linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c
35988 --- linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35989 +++ linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35990 @@ -33,7 +33,7 @@ struct dmi_match_data {
35991 unsigned long iostart;
35992 unsigned long iolen;
35993 /* Backlight operations structure. */
35994 - struct backlight_ops backlight_ops;
35995 + const struct backlight_ops backlight_ops;
35996 };
35997
35998 /* Module parameters. */
35999 diff -urNp linux-2.6.32.42/drivers/video/backlight/omap1_bl.c linux-2.6.32.42/drivers/video/backlight/omap1_bl.c
36000 --- linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
36001 +++ linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
36002 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
36003 return bl->current_intensity;
36004 }
36005
36006 -static struct backlight_ops omapbl_ops = {
36007 +static const struct backlight_ops omapbl_ops = {
36008 .get_brightness = omapbl_get_intensity,
36009 .update_status = omapbl_update_status,
36010 };
36011 diff -urNp linux-2.6.32.42/drivers/video/backlight/progear_bl.c linux-2.6.32.42/drivers/video/backlight/progear_bl.c
36012 --- linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
36013 +++ linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
36014 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
36015 return intensity - HW_LEVEL_MIN;
36016 }
36017
36018 -static struct backlight_ops progearbl_ops = {
36019 +static const struct backlight_ops progearbl_ops = {
36020 .get_brightness = progearbl_get_intensity,
36021 .update_status = progearbl_set_intensity,
36022 };
36023 diff -urNp linux-2.6.32.42/drivers/video/backlight/pwm_bl.c linux-2.6.32.42/drivers/video/backlight/pwm_bl.c
36024 --- linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
36025 +++ linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
36026 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
36027 return bl->props.brightness;
36028 }
36029
36030 -static struct backlight_ops pwm_backlight_ops = {
36031 +static const struct backlight_ops pwm_backlight_ops = {
36032 .update_status = pwm_backlight_update_status,
36033 .get_brightness = pwm_backlight_get_brightness,
36034 };
36035 diff -urNp linux-2.6.32.42/drivers/video/backlight/tosa_bl.c linux-2.6.32.42/drivers/video/backlight/tosa_bl.c
36036 --- linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
36037 +++ linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
36038 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
36039 return props->brightness;
36040 }
36041
36042 -static struct backlight_ops bl_ops = {
36043 +static const struct backlight_ops bl_ops = {
36044 .get_brightness = tosa_bl_get_brightness,
36045 .update_status = tosa_bl_update_status,
36046 };
36047 diff -urNp linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c
36048 --- linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
36049 +++ linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
36050 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
36051 return data->current_brightness;
36052 }
36053
36054 -static struct backlight_ops wm831x_backlight_ops = {
36055 +static const struct backlight_ops wm831x_backlight_ops = {
36056 .options = BL_CORE_SUSPENDRESUME,
36057 .update_status = wm831x_backlight_update_status,
36058 .get_brightness = wm831x_backlight_get_brightness,
36059 diff -urNp linux-2.6.32.42/drivers/video/bf54x-lq043fb.c linux-2.6.32.42/drivers/video/bf54x-lq043fb.c
36060 --- linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
36061 +++ linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
36062 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
36063 return 0;
36064 }
36065
36066 -static struct backlight_ops bfin_lq043fb_bl_ops = {
36067 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
36068 .get_brightness = bl_get_brightness,
36069 };
36070
36071 diff -urNp linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c
36072 --- linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
36073 +++ linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
36074 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
36075 return 0;
36076 }
36077
36078 -static struct backlight_ops bfin_lq043fb_bl_ops = {
36079 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
36080 .get_brightness = bl_get_brightness,
36081 };
36082
36083 diff -urNp linux-2.6.32.42/drivers/video/fbcmap.c linux-2.6.32.42/drivers/video/fbcmap.c
36084 --- linux-2.6.32.42/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
36085 +++ linux-2.6.32.42/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
36086 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36087 rc = -ENODEV;
36088 goto out;
36089 }
36090 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36091 - !info->fbops->fb_setcmap)) {
36092 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36093 rc = -EINVAL;
36094 goto out1;
36095 }
36096 diff -urNp linux-2.6.32.42/drivers/video/fbmem.c linux-2.6.32.42/drivers/video/fbmem.c
36097 --- linux-2.6.32.42/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
36098 +++ linux-2.6.32.42/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
36099 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
36100 image->dx += image->width + 8;
36101 }
36102 } else if (rotate == FB_ROTATE_UD) {
36103 - for (x = 0; x < num && image->dx >= 0; x++) {
36104 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36105 info->fbops->fb_imageblit(info, image);
36106 image->dx -= image->width + 8;
36107 }
36108 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
36109 image->dy += image->height + 8;
36110 }
36111 } else if (rotate == FB_ROTATE_CCW) {
36112 - for (x = 0; x < num && image->dy >= 0; x++) {
36113 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36114 info->fbops->fb_imageblit(info, image);
36115 image->dy -= image->height + 8;
36116 }
36117 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
36118 int flags = info->flags;
36119 int ret = 0;
36120
36121 + pax_track_stack();
36122 +
36123 if (var->activate & FB_ACTIVATE_INV_MODE) {
36124 struct fb_videomode mode1, mode2;
36125
36126 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
36127 void __user *argp = (void __user *)arg;
36128 long ret = 0;
36129
36130 + pax_track_stack();
36131 +
36132 switch (cmd) {
36133 case FBIOGET_VSCREENINFO:
36134 if (!lock_fb_info(info))
36135 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
36136 return -EFAULT;
36137 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36138 return -EINVAL;
36139 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36140 + if (con2fb.framebuffer >= FB_MAX)
36141 return -EINVAL;
36142 if (!registered_fb[con2fb.framebuffer])
36143 request_module("fb%d", con2fb.framebuffer);
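
The fbmem and fbcmap hunks address the same C pitfall: image->dx, image->dy, cmap->start and con2fb.framebuffer are unsigned, so comparisons such as image->dx >= 0 or cmap->start < 0 are tautologies the compiler quietly discards, and the rotated-logo loop guard that was meant to stop once dx or dy went negative never fires. The patch either casts to a signed type before testing (the (__s32) casts) or drops the dead check outright. A short demonstration of why the cast matters:

	#include <stdio.h>

	int main(void)
	{
		unsigned int dx = 4;

		dx -= 8;	/* conceptually "-4", actually wraps to a huge value */

		/* tautology: an unsigned value is always >= 0, the guard never stops */
		printf("dx >= 0      : %d\n", dx >= 0);

		/* the patched form: reinterpret as signed before comparing */
		printf("(int)dx >= 0 : %d\n", (int)dx >= 0);
		return 0;
	}
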
36144 diff -urNp linux-2.6.32.42/drivers/video/i810/i810_accel.c linux-2.6.32.42/drivers/video/i810/i810_accel.c
36145 --- linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
36146 +++ linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
36147 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36148 }
36149 }
36150 printk("ringbuffer lockup!!!\n");
36151 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36152 i810_report_error(mmio);
36153 par->dev_flags |= LOCKUP;
36154 info->pixmap.scan_align = 1;
36155 diff -urNp linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c
36156 --- linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
36157 +++ linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
36158 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
36159 return bd->props.brightness;
36160 }
36161
36162 -static struct backlight_ops nvidia_bl_ops = {
36163 +static const struct backlight_ops nvidia_bl_ops = {
36164 .get_brightness = nvidia_bl_get_brightness,
36165 .update_status = nvidia_bl_update_status,
36166 };
36167 diff -urNp linux-2.6.32.42/drivers/video/riva/fbdev.c linux-2.6.32.42/drivers/video/riva/fbdev.c
36168 --- linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
36169 +++ linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
36170 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
36171 return bd->props.brightness;
36172 }
36173
36174 -static struct backlight_ops riva_bl_ops = {
36175 +static const struct backlight_ops riva_bl_ops = {
36176 .get_brightness = riva_bl_get_brightness,
36177 .update_status = riva_bl_update_status,
36178 };
36179 diff -urNp linux-2.6.32.42/drivers/video/uvesafb.c linux-2.6.32.42/drivers/video/uvesafb.c
36180 --- linux-2.6.32.42/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
36181 +++ linux-2.6.32.42/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
36182 @@ -18,6 +18,7 @@
36183 #include <linux/fb.h>
36184 #include <linux/io.h>
36185 #include <linux/mutex.h>
36186 +#include <linux/moduleloader.h>
36187 #include <video/edid.h>
36188 #include <video/uvesafb.h>
36189 #ifdef CONFIG_X86
36190 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
36191 NULL,
36192 };
36193
36194 - return call_usermodehelper(v86d_path, argv, envp, 1);
36195 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36196 }
36197
36198 /*
36199 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
36200 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36201 par->pmi_setpal = par->ypan = 0;
36202 } else {
36203 +
36204 +#ifdef CONFIG_PAX_KERNEXEC
36205 +#ifdef CONFIG_MODULES
36206 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36207 +#endif
36208 + if (!par->pmi_code) {
36209 + par->pmi_setpal = par->ypan = 0;
36210 + return 0;
36211 + }
36212 +#endif
36213 +
36214 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36215 + task->t.regs.edi);
36216 +
36217 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36218 + pax_open_kernel();
36219 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36220 + pax_close_kernel();
36221 +
36222 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36223 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36224 +#else
36225 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36226 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36227 +#endif
36228 +
36229 printk(KERN_INFO "uvesafb: protected mode interface info at "
36230 "%04x:%04x\n",
36231 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36232 @@ -1799,6 +1822,11 @@ out:
36233 if (par->vbe_modes)
36234 kfree(par->vbe_modes);
36235
36236 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36237 + if (par->pmi_code)
36238 + module_free_exec(NULL, par->pmi_code);
36239 +#endif
36240 +
36241 framebuffer_release(info);
36242 return err;
36243 }
36244 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
36245 kfree(par->vbe_state_orig);
36246 if (par->vbe_state_saved)
36247 kfree(par->vbe_state_saved);
36248 +
36249 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36250 + if (par->pmi_code)
36251 + module_free_exec(NULL, par->pmi_code);
36252 +#endif
36253 +
36254 }
36255
36256 framebuffer_release(info);
36257 diff -urNp linux-2.6.32.42/drivers/video/vesafb.c linux-2.6.32.42/drivers/video/vesafb.c
36258 --- linux-2.6.32.42/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
36259 +++ linux-2.6.32.42/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
36260 @@ -9,6 +9,7 @@
36261 */
36262
36263 #include <linux/module.h>
36264 +#include <linux/moduleloader.h>
36265 #include <linux/kernel.h>
36266 #include <linux/errno.h>
36267 #include <linux/string.h>
36268 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
36269 static int vram_total __initdata; /* Set total amount of memory */
36270 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
36271 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
36272 -static void (*pmi_start)(void) __read_mostly;
36273 -static void (*pmi_pal) (void) __read_mostly;
36274 +static void (*pmi_start)(void) __read_only;
36275 +static void (*pmi_pal) (void) __read_only;
36276 static int depth __read_mostly;
36277 static int vga_compat __read_mostly;
36278 /* --------------------------------------------------------------------- */
36279 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36280 unsigned int size_vmode;
36281 unsigned int size_remap;
36282 unsigned int size_total;
36283 + void *pmi_code = NULL;
36284
36285 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36286 return -ENODEV;
36287 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36288 size_remap = size_total;
36289 vesafb_fix.smem_len = size_remap;
36290
36291 -#ifndef __i386__
36292 - screen_info.vesapm_seg = 0;
36293 -#endif
36294 -
36295 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36296 printk(KERN_WARNING
36297 "vesafb: cannot reserve video memory at 0x%lx\n",
36298 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
36299 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36300 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36301
36302 +#ifdef __i386__
36303 +
36304 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36305 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
36306 + if (!pmi_code)
36307 +#elif !defined(CONFIG_PAX_KERNEXEC)
36308 + if (0)
36309 +#endif
36310 +
36311 +#endif
36312 + screen_info.vesapm_seg = 0;
36313 +
36314 if (screen_info.vesapm_seg) {
36315 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36316 - screen_info.vesapm_seg,screen_info.vesapm_off);
36317 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36318 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36319 }
36320
36321 if (screen_info.vesapm_seg < 0xc000)
36322 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
36323
36324 if (ypan || pmi_setpal) {
36325 unsigned short *pmi_base;
36326 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36327 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36328 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
36329 +
36330 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36331 +
36332 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36333 + pax_open_kernel();
36334 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36335 +#else
36336 + pmi_code = pmi_base;
36337 +#endif
36338 +
36339 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36340 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
36341 +
36342 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36343 + pmi_start = ktva_ktla(pmi_start);
36344 + pmi_pal = ktva_ktla(pmi_pal);
36345 + pax_close_kernel();
36346 +#endif
36347 +
36348 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36349 if (pmi_base[3]) {
36350 printk(KERN_INFO "vesafb: pmi: ports = ");
36351 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
36352 info->node, info->fix.id);
36353 return 0;
36354 err:
36355 +
36356 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36357 + module_free_exec(NULL, pmi_code);
36358 +#endif
36359 +
36360 if (info->screen_base)
36361 iounmap(info->screen_base);
36362 framebuffer_release(info);
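
uvesafb and vesafb get the same CONFIG_PAX_KERNEXEC treatment: with the kernel mapped write-xor-execute, the VESA protected-mode interface code can no longer be run in place out of plain writable memory, so the probe path allocates executable module space with module_alloc_exec(), copies the PMI blob into it inside a pax_open_kernel()/pax_close_kernel() window, and translates the entry points with ktva_ktla() (all helpers introduced elsewhere in this patch). The userspace analogy below is only a sketch of the same write-then-execute discipline using standard mmap/mprotect, not kernel code:

	#include <string.h>
	#include <sys/mman.h>

	/* Stage a code blob in writable memory, then lock it to read+execute
	 * before anything is allowed to jump into it. */
	void *install_blob(const void *blob, size_t size)
	{
		void *dst = mmap(NULL, size, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (dst == MAP_FAILED)
			return NULL;

		memcpy(dst, blob, size);			/* write while still writable */

		if (mprotect(dst, size, PROT_READ | PROT_EXEC)) {	/* then flip to RX */
			munmap(dst, size);
			return NULL;
		}
		return dst;
	}
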
36363 diff -urNp linux-2.6.32.42/drivers/xen/sys-hypervisor.c linux-2.6.32.42/drivers/xen/sys-hypervisor.c
36364 --- linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
36365 +++ linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
36366 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
36367 return 0;
36368 }
36369
36370 -static struct sysfs_ops hyp_sysfs_ops = {
36371 +static const struct sysfs_ops hyp_sysfs_ops = {
36372 .show = hyp_sysfs_show,
36373 .store = hyp_sysfs_store,
36374 };
36375 diff -urNp linux-2.6.32.42/fs/9p/vfs_inode.c linux-2.6.32.42/fs/9p/vfs_inode.c
36376 --- linux-2.6.32.42/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
36377 +++ linux-2.6.32.42/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
36378 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
36379 static void
36380 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36381 {
36382 - char *s = nd_get_link(nd);
36383 + const char *s = nd_get_link(nd);
36384
36385 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36386 IS_ERR(s) ? "<error>" : s);
36387 diff -urNp linux-2.6.32.42/fs/aio.c linux-2.6.32.42/fs/aio.c
36388 --- linux-2.6.32.42/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
36389 +++ linux-2.6.32.42/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
36390 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
36391 size += sizeof(struct io_event) * nr_events;
36392 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36393
36394 - if (nr_pages < 0)
36395 + if (nr_pages <= 0)
36396 return -EINVAL;
36397
36398 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36399 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
36400 struct aio_timeout to;
36401 int retry = 0;
36402
36403 + pax_track_stack();
36404 +
36405 /* needed to zero any padding within an entry (there shouldn't be
36406 * any, but C is fun!
36407 */
36408 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
36409 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
36410 {
36411 ssize_t ret;
36412 + struct iovec iovstack;
36413
36414 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
36415 kiocb->ki_nbytes, 1,
36416 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
36417 + &iovstack, &kiocb->ki_iovec);
36418 if (ret < 0)
36419 goto out;
36420
36421 + if (kiocb->ki_iovec == &iovstack) {
36422 + kiocb->ki_inline_vec = iovstack;
36423 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
36424 + }
36425 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36426 kiocb->ki_cur_seg = 0;
36427 /* ki_nbytes/left now reflect bytes instead of segs */
36428 diff -urNp linux-2.6.32.42/fs/attr.c linux-2.6.32.42/fs/attr.c
36429 --- linux-2.6.32.42/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
36430 +++ linux-2.6.32.42/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
36431 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
36432 unsigned long limit;
36433
36434 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
36435 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36436 if (limit != RLIM_INFINITY && offset > limit)
36437 goto out_sig;
36438 if (offset > inode->i_sb->s_maxbytes)
36439 diff -urNp linux-2.6.32.42/fs/autofs/root.c linux-2.6.32.42/fs/autofs/root.c
36440 --- linux-2.6.32.42/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
36441 +++ linux-2.6.32.42/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
36442 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
36443 set_bit(n,sbi->symlink_bitmap);
36444 sl = &sbi->symlink[n];
36445 sl->len = strlen(symname);
36446 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
36447 + slsize = sl->len+1;
36448 + sl->data = kmalloc(slsize, GFP_KERNEL);
36449 if (!sl->data) {
36450 clear_bit(n,sbi->symlink_bitmap);
36451 unlock_kernel();
36452 diff -urNp linux-2.6.32.42/fs/autofs4/symlink.c linux-2.6.32.42/fs/autofs4/symlink.c
36453 --- linux-2.6.32.42/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
36454 +++ linux-2.6.32.42/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
36455 @@ -15,7 +15,7 @@
36456 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
36457 {
36458 struct autofs_info *ino = autofs4_dentry_ino(dentry);
36459 - nd_set_link(nd, (char *)ino->u.symlink);
36460 + nd_set_link(nd, ino->u.symlink);
36461 return NULL;
36462 }
36463
36464 diff -urNp linux-2.6.32.42/fs/befs/linuxvfs.c linux-2.6.32.42/fs/befs/linuxvfs.c
36465 --- linux-2.6.32.42/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
36466 +++ linux-2.6.32.42/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
36467 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
36468 {
36469 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36470 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36471 - char *link = nd_get_link(nd);
36472 + const char *link = nd_get_link(nd);
36473 if (!IS_ERR(link))
36474 kfree(link);
36475 }
36476 diff -urNp linux-2.6.32.42/fs/binfmt_aout.c linux-2.6.32.42/fs/binfmt_aout.c
36477 --- linux-2.6.32.42/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
36478 +++ linux-2.6.32.42/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
36479 @@ -16,6 +16,7 @@
36480 #include <linux/string.h>
36481 #include <linux/fs.h>
36482 #include <linux/file.h>
36483 +#include <linux/security.h>
36484 #include <linux/stat.h>
36485 #include <linux/fcntl.h>
36486 #include <linux/ptrace.h>
36487 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
36488 #endif
36489 # define START_STACK(u) (u.start_stack)
36490
36491 + memset(&dump, 0, sizeof(dump));
36492 +
36493 fs = get_fs();
36494 set_fs(KERNEL_DS);
36495 has_dumped = 1;
36496 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
36497
36498 /* If the size of the dump file exceeds the rlimit, then see what would happen
36499 if we wrote the stack, but not the data area. */
36500 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36501 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
36502 dump.u_dsize = 0;
36503
36504 /* Make sure we have enough room to write the stack and data areas. */
36505 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36506 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
36507 dump.u_ssize = 0;
36508
36509 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
36510 dump_size = dump.u_ssize << PAGE_SHIFT;
36511 DUMP_WRITE(dump_start,dump_size);
36512 }
36513 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
36514 - set_fs(KERNEL_DS);
36515 - DUMP_WRITE(current,sizeof(*current));
36516 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
36517 end_coredump:
36518 set_fs(fs);
36519 return has_dumped;
36520 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
36521 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
36522 if (rlim >= RLIM_INFINITY)
36523 rlim = ~0;
36524 +
36525 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36526 if (ex.a_data + ex.a_bss > rlim)
36527 return -ENOMEM;
36528
36529 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
36530 install_exec_creds(bprm);
36531 current->flags &= ~PF_FORKNOEXEC;
36532
36533 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36534 + current->mm->pax_flags = 0UL;
36535 +#endif
36536 +
36537 +#ifdef CONFIG_PAX_PAGEEXEC
36538 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36539 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36540 +
36541 +#ifdef CONFIG_PAX_EMUTRAMP
36542 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36543 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36544 +#endif
36545 +
36546 +#ifdef CONFIG_PAX_MPROTECT
36547 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36548 + current->mm->pax_flags |= MF_PAX_MPROTECT;
36549 +#endif
36550 +
36551 + }
36552 +#endif
36553 +
36554 if (N_MAGIC(ex) == OMAGIC) {
36555 unsigned long text_addr, map_size;
36556 loff_t pos;
36557 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
36558
36559 down_write(&current->mm->mmap_sem);
36560 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36561 - PROT_READ | PROT_WRITE | PROT_EXEC,
36562 + PROT_READ | PROT_WRITE,
36563 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36564 fd_offset + ex.a_text);
36565 up_write(&current->mm->mmap_sem);
36566 diff -urNp linux-2.6.32.42/fs/binfmt_elf.c linux-2.6.32.42/fs/binfmt_elf.c
36567 --- linux-2.6.32.42/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
36568 +++ linux-2.6.32.42/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
36569 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
36570 #define elf_core_dump NULL
36571 #endif
36572
36573 +#ifdef CONFIG_PAX_MPROTECT
36574 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36575 +#endif
36576 +
36577 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36578 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36579 #else
36580 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
36581 .load_binary = load_elf_binary,
36582 .load_shlib = load_elf_library,
36583 .core_dump = elf_core_dump,
36584 +
36585 +#ifdef CONFIG_PAX_MPROTECT
36586 + .handle_mprotect= elf_handle_mprotect,
36587 +#endif
36588 +
36589 .min_coredump = ELF_EXEC_PAGESIZE,
36590 .hasvdso = 1
36591 };
36592 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36593
36594 static int set_brk(unsigned long start, unsigned long end)
36595 {
36596 + unsigned long e = end;
36597 +
36598 start = ELF_PAGEALIGN(start);
36599 end = ELF_PAGEALIGN(end);
36600 if (end > start) {
36601 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36602 if (BAD_ADDR(addr))
36603 return addr;
36604 }
36605 - current->mm->start_brk = current->mm->brk = end;
36606 + current->mm->start_brk = current->mm->brk = e;
36607 return 0;
36608 }
36609
36610 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36611 elf_addr_t __user *u_rand_bytes;
36612 const char *k_platform = ELF_PLATFORM;
36613 const char *k_base_platform = ELF_BASE_PLATFORM;
36614 - unsigned char k_rand_bytes[16];
36615 + u32 k_rand_bytes[4];
36616 int items;
36617 elf_addr_t *elf_info;
36618 int ei_index = 0;
36619 const struct cred *cred = current_cred();
36620 struct vm_area_struct *vma;
36621 + unsigned long saved_auxv[AT_VECTOR_SIZE];
36622 +
36623 + pax_track_stack();
36624
36625 /*
36626 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36627 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36628 * Generate 16 random bytes for userspace PRNG seeding.
36629 */
36630 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36631 - u_rand_bytes = (elf_addr_t __user *)
36632 - STACK_ALLOC(p, sizeof(k_rand_bytes));
36633 + srandom32(k_rand_bytes[0] ^ random32());
36634 + srandom32(k_rand_bytes[1] ^ random32());
36635 + srandom32(k_rand_bytes[2] ^ random32());
36636 + srandom32(k_rand_bytes[3] ^ random32());
36637 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
36638 + u_rand_bytes = (elf_addr_t __user *) p;
36639 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36640 return -EFAULT;
36641
36642 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36643 return -EFAULT;
36644 current->mm->env_end = p;
36645
36646 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36647 +
36648 /* Put the elf_info on the stack in the right place. */
36649 sp = (elf_addr_t __user *)envp + 1;
36650 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36651 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36652 return -EFAULT;
36653 return 0;
36654 }
36655 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36656 {
36657 struct elf_phdr *elf_phdata;
36658 struct elf_phdr *eppnt;
36659 - unsigned long load_addr = 0;
36660 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36661 int load_addr_set = 0;
36662 unsigned long last_bss = 0, elf_bss = 0;
36663 - unsigned long error = ~0UL;
36664 + unsigned long error = -EINVAL;
36665 unsigned long total_size;
36666 int retval, i, size;
36667
36668 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36669 goto out_close;
36670 }
36671
36672 +#ifdef CONFIG_PAX_SEGMEXEC
36673 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36674 + pax_task_size = SEGMEXEC_TASK_SIZE;
36675 +#endif
36676 +
36677 eppnt = elf_phdata;
36678 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36679 if (eppnt->p_type == PT_LOAD) {
36680 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36681 k = load_addr + eppnt->p_vaddr;
36682 if (BAD_ADDR(k) ||
36683 eppnt->p_filesz > eppnt->p_memsz ||
36684 - eppnt->p_memsz > TASK_SIZE ||
36685 - TASK_SIZE - eppnt->p_memsz < k) {
36686 + eppnt->p_memsz > pax_task_size ||
36687 + pax_task_size - eppnt->p_memsz < k) {
36688 error = -ENOMEM;
36689 goto out_close;
36690 }
36691 @@ -532,6 +557,194 @@ out:
36692 return error;
36693 }
36694
36695 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36696 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36697 +{
36698 + unsigned long pax_flags = 0UL;
36699 +
36700 +#ifdef CONFIG_PAX_PAGEEXEC
36701 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36702 + pax_flags |= MF_PAX_PAGEEXEC;
36703 +#endif
36704 +
36705 +#ifdef CONFIG_PAX_SEGMEXEC
36706 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36707 + pax_flags |= MF_PAX_SEGMEXEC;
36708 +#endif
36709 +
36710 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36711 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36712 + if (nx_enabled)
36713 + pax_flags &= ~MF_PAX_SEGMEXEC;
36714 + else
36715 + pax_flags &= ~MF_PAX_PAGEEXEC;
36716 + }
36717 +#endif
36718 +
36719 +#ifdef CONFIG_PAX_EMUTRAMP
36720 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36721 + pax_flags |= MF_PAX_EMUTRAMP;
36722 +#endif
36723 +
36724 +#ifdef CONFIG_PAX_MPROTECT
36725 + if (elf_phdata->p_flags & PF_MPROTECT)
36726 + pax_flags |= MF_PAX_MPROTECT;
36727 +#endif
36728 +
36729 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36730 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36731 + pax_flags |= MF_PAX_RANDMMAP;
36732 +#endif
36733 +
36734 + return pax_flags;
36735 +}
36736 +#endif
36737 +
36738 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36739 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36740 +{
36741 + unsigned long pax_flags = 0UL;
36742 +
36743 +#ifdef CONFIG_PAX_PAGEEXEC
36744 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36745 + pax_flags |= MF_PAX_PAGEEXEC;
36746 +#endif
36747 +
36748 +#ifdef CONFIG_PAX_SEGMEXEC
36749 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36750 + pax_flags |= MF_PAX_SEGMEXEC;
36751 +#endif
36752 +
36753 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36754 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36755 + if (nx_enabled)
36756 + pax_flags &= ~MF_PAX_SEGMEXEC;
36757 + else
36758 + pax_flags &= ~MF_PAX_PAGEEXEC;
36759 + }
36760 +#endif
36761 +
36762 +#ifdef CONFIG_PAX_EMUTRAMP
36763 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36764 + pax_flags |= MF_PAX_EMUTRAMP;
36765 +#endif
36766 +
36767 +#ifdef CONFIG_PAX_MPROTECT
36768 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36769 + pax_flags |= MF_PAX_MPROTECT;
36770 +#endif
36771 +
36772 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36773 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36774 + pax_flags |= MF_PAX_RANDMMAP;
36775 +#endif
36776 +
36777 + return pax_flags;
36778 +}
36779 +#endif
36780 +
36781 +#ifdef CONFIG_PAX_EI_PAX
36782 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36783 +{
36784 + unsigned long pax_flags = 0UL;
36785 +
36786 +#ifdef CONFIG_PAX_PAGEEXEC
36787 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36788 + pax_flags |= MF_PAX_PAGEEXEC;
36789 +#endif
36790 +
36791 +#ifdef CONFIG_PAX_SEGMEXEC
36792 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36793 + pax_flags |= MF_PAX_SEGMEXEC;
36794 +#endif
36795 +
36796 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36797 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36798 + if (nx_enabled)
36799 + pax_flags &= ~MF_PAX_SEGMEXEC;
36800 + else
36801 + pax_flags &= ~MF_PAX_PAGEEXEC;
36802 + }
36803 +#endif
36804 +
36805 +#ifdef CONFIG_PAX_EMUTRAMP
36806 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36807 + pax_flags |= MF_PAX_EMUTRAMP;
36808 +#endif
36809 +
36810 +#ifdef CONFIG_PAX_MPROTECT
36811 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36812 + pax_flags |= MF_PAX_MPROTECT;
36813 +#endif
36814 +
36815 +#ifdef CONFIG_PAX_ASLR
36816 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36817 + pax_flags |= MF_PAX_RANDMMAP;
36818 +#endif
36819 +
36820 + return pax_flags;
36821 +}
36822 +#endif
36823 +
36824 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36825 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36826 +{
36827 + unsigned long pax_flags = 0UL;
36828 +
36829 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36830 + unsigned long i;
36831 + int found_flags = 0;
36832 +#endif
36833 +
36834 +#ifdef CONFIG_PAX_EI_PAX
36835 + pax_flags = pax_parse_ei_pax(elf_ex);
36836 +#endif
36837 +
36838 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36839 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36840 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36841 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36842 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36843 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36844 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36845 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36846 + return -EINVAL;
36847 +
36848 +#ifdef CONFIG_PAX_SOFTMODE
36849 + if (pax_softmode)
36850 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36851 + else
36852 +#endif
36853 +
36854 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36855 + found_flags = 1;
36856 + break;
36857 + }
36858 +#endif
36859 +
36860 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36861 + if (found_flags == 0) {
36862 + struct elf_phdr phdr;
36863 + memset(&phdr, 0, sizeof(phdr));
36864 + phdr.p_flags = PF_NOEMUTRAMP;
36865 +#ifdef CONFIG_PAX_SOFTMODE
36866 + if (pax_softmode)
36867 + pax_flags = pax_parse_softmode(&phdr);
36868 + else
36869 +#endif
36870 + pax_flags = pax_parse_hardmode(&phdr);
36871 + }
36872 +#endif
36873 +
36874 +
36875 + if (0 > pax_check_flags(&pax_flags))
36876 + return -EINVAL;
36877 +
36878 + current->mm->pax_flags = pax_flags;
36879 + return 0;
36880 +}
36881 +#endif
36882 +
36883 /*
36884 * These are the functions used to load ELF style executables and shared
36885 * libraries. There is no binary dependent code anywhere else.
36886 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36887 {
36888 unsigned int random_variable = 0;
36889
36890 +#ifdef CONFIG_PAX_RANDUSTACK
36891 + if (randomize_va_space)
36892 + return stack_top - current->mm->delta_stack;
36893 +#endif
36894 +
36895 if ((current->flags & PF_RANDOMIZE) &&
36896 !(current->personality & ADDR_NO_RANDOMIZE)) {
36897 random_variable = get_random_int() & STACK_RND_MASK;
36898 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36899 unsigned long load_addr = 0, load_bias = 0;
36900 int load_addr_set = 0;
36901 char * elf_interpreter = NULL;
36902 - unsigned long error;
36903 + unsigned long error = 0;
36904 struct elf_phdr *elf_ppnt, *elf_phdata;
36905 unsigned long elf_bss, elf_brk;
36906 int retval, i;
36907 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36908 unsigned long start_code, end_code, start_data, end_data;
36909 unsigned long reloc_func_desc = 0;
36910 int executable_stack = EXSTACK_DEFAULT;
36911 - unsigned long def_flags = 0;
36912 struct {
36913 struct elfhdr elf_ex;
36914 struct elfhdr interp_elf_ex;
36915 } *loc;
36916 + unsigned long pax_task_size = TASK_SIZE;
36917
36918 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36919 if (!loc) {
36920 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36921
36922 /* OK, This is the point of no return */
36923 current->flags &= ~PF_FORKNOEXEC;
36924 - current->mm->def_flags = def_flags;
36925 +
36926 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36927 + current->mm->pax_flags = 0UL;
36928 +#endif
36929 +
36930 +#ifdef CONFIG_PAX_DLRESOLVE
36931 + current->mm->call_dl_resolve = 0UL;
36932 +#endif
36933 +
36934 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36935 + current->mm->call_syscall = 0UL;
36936 +#endif
36937 +
36938 +#ifdef CONFIG_PAX_ASLR
36939 + current->mm->delta_mmap = 0UL;
36940 + current->mm->delta_stack = 0UL;
36941 +#endif
36942 +
36943 + current->mm->def_flags = 0;
36944 +
36945 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36946 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36947 + send_sig(SIGKILL, current, 0);
36948 + goto out_free_dentry;
36949 + }
36950 +#endif
36951 +
36952 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36953 + pax_set_initial_flags(bprm);
36954 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36955 + if (pax_set_initial_flags_func)
36956 + (pax_set_initial_flags_func)(bprm);
36957 +#endif
36958 +
36959 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36960 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36961 + current->mm->context.user_cs_limit = PAGE_SIZE;
36962 + current->mm->def_flags |= VM_PAGEEXEC;
36963 + }
36964 +#endif
36965 +
36966 +#ifdef CONFIG_PAX_SEGMEXEC
36967 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36968 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36969 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36970 + pax_task_size = SEGMEXEC_TASK_SIZE;
36971 + }
36972 +#endif
36973 +
36974 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36975 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36976 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36977 + put_cpu();
36978 + }
36979 +#endif
36980
36981 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36982 may depend on the personality. */
36983 SET_PERSONALITY(loc->elf_ex);
36984 +
36985 +#ifdef CONFIG_PAX_ASLR
36986 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36987 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36988 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36989 + }
36990 +#endif
36991 +
36992 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36993 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36994 + executable_stack = EXSTACK_DISABLE_X;
36995 + current->personality &= ~READ_IMPLIES_EXEC;
36996 + } else
36997 +#endif
36998 +
36999 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37000 current->personality |= READ_IMPLIES_EXEC;
37001
37002 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
37003 #else
37004 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37005 #endif
37006 +
37007 +#ifdef CONFIG_PAX_RANDMMAP
37008 + /* PaX: randomize base address at the default exe base if requested */
37009 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37010 +#ifdef CONFIG_SPARC64
37011 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37012 +#else
37013 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37014 +#endif
37015 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37016 + elf_flags |= MAP_FIXED;
37017 + }
37018 +#endif
37019 +
37020 }
37021
37022 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37023 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
37024 * allowed task size. Note that p_filesz must always be
37025 * <= p_memsz so it is only necessary to check p_memsz.
37026 */
37027 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37028 - elf_ppnt->p_memsz > TASK_SIZE ||
37029 - TASK_SIZE - elf_ppnt->p_memsz < k) {
37030 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37031 + elf_ppnt->p_memsz > pax_task_size ||
37032 + pax_task_size - elf_ppnt->p_memsz < k) {
37033 /* set_brk can never work. Avoid overflows. */
37034 send_sig(SIGKILL, current, 0);
37035 retval = -EINVAL;
37036 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
37037 start_data += load_bias;
37038 end_data += load_bias;
37039
37040 +#ifdef CONFIG_PAX_RANDMMAP
37041 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37042 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37043 +#endif
37044 +
37045 /* Calling set_brk effectively mmaps the pages that we need
37046 * for the bss and break sections. We must do this before
37047 * mapping in the interpreter, to make sure it doesn't wind
37048 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
37049 goto out_free_dentry;
37050 }
37051 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37052 - send_sig(SIGSEGV, current, 0);
37053 - retval = -EFAULT; /* Nobody gets to see this, but.. */
37054 - goto out_free_dentry;
37055 + /*
37056 + * This bss-zeroing can fail if the ELF
37057 + * file specifies odd protections. So
37058 + * we don't check the return value
37059 + */
37060 }
37061
37062 if (elf_interpreter) {
37063 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
37064 unsigned long n = off;
37065 if (n > PAGE_SIZE)
37066 n = PAGE_SIZE;
37067 - if (!dump_write(file, buf, n))
37068 + if (!dump_write(file, buf, n)) {
37069 + free_page((unsigned long)buf);
37070 return 0;
37071 + }
37072 off -= n;
37073 }
37074 free_page((unsigned long)buf);
37075 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
37076 * Decide what to dump of a segment, part, all or none.
37077 */
37078 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37079 - unsigned long mm_flags)
37080 + unsigned long mm_flags, long signr)
37081 {
37082 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37083
37084 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
37085 if (vma->vm_file == NULL)
37086 return 0;
37087
37088 - if (FILTER(MAPPED_PRIVATE))
37089 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37090 goto whole;
37091
37092 /*
37093 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
37094 #undef DUMP_WRITE
37095
37096 #define DUMP_WRITE(addr, nr) \
37097 + do { \
37098 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
37099 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
37100 - goto end_coredump;
37101 + goto end_coredump; \
37102 + } while (0);
37103
37104 static void fill_elf_header(struct elfhdr *elf, int segs,
37105 u16 machine, u32 flags, u8 osabi)
37106 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
37107 {
37108 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37109 int i = 0;
37110 - do
37111 + do {
37112 i += 2;
37113 - while (auxv[i - 2] != AT_NULL);
37114 + } while (auxv[i - 2] != AT_NULL);
37115 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37116 }
37117
37118 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
37119 phdr.p_offset = offset;
37120 phdr.p_vaddr = vma->vm_start;
37121 phdr.p_paddr = 0;
37122 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
37123 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
37124 phdr.p_memsz = vma->vm_end - vma->vm_start;
37125 offset += phdr.p_filesz;
37126 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37127 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
37128 unsigned long addr;
37129 unsigned long end;
37130
37131 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
37132 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
37133
37134 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37135 struct page *page;
37136 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
37137 page = get_dump_page(addr);
37138 if (page) {
37139 void *kaddr = kmap(page);
37140 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37141 stop = ((size += PAGE_SIZE) > limit) ||
37142 !dump_write(file, kaddr, PAGE_SIZE);
37143 kunmap(page);
37144 @@ -2042,6 +2356,97 @@ out:
37145
37146 #endif /* USE_ELF_CORE_DUMP */
37147
37148 +#ifdef CONFIG_PAX_MPROTECT
37149 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
37150 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37151 + * we'll remove VM_MAYWRITE for good on RELRO segments.
37152 + *
37153 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37154 + * basis because we want to allow the common case and not the special ones.
37155 + */
37156 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37157 +{
37158 + struct elfhdr elf_h;
37159 + struct elf_phdr elf_p;
37160 + unsigned long i;
37161 + unsigned long oldflags;
37162 + bool is_textrel_rw, is_textrel_rx, is_relro;
37163 +
37164 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37165 + return;
37166 +
37167 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37168 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37169 +
37170 +#ifdef CONFIG_PAX_ELFRELOCS
37171 + /* possible TEXTREL */
37172 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37173 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37174 +#else
37175 + is_textrel_rw = false;
37176 + is_textrel_rx = false;
37177 +#endif
37178 +
37179 + /* possible RELRO */
37180 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37181 +
37182 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37183 + return;
37184 +
37185 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37186 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37187 +
37188 +#ifdef CONFIG_PAX_ETEXECRELOCS
37189 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37190 +#else
37191 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37192 +#endif
37193 +
37194 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37195 + !elf_check_arch(&elf_h) ||
37196 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37197 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37198 + return;
37199 +
37200 + for (i = 0UL; i < elf_h.e_phnum; i++) {
37201 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37202 + return;
37203 + switch (elf_p.p_type) {
37204 + case PT_DYNAMIC:
37205 + if (!is_textrel_rw && !is_textrel_rx)
37206 + continue;
37207 + i = 0UL;
37208 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37209 + elf_dyn dyn;
37210 +
37211 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37212 + return;
37213 + if (dyn.d_tag == DT_NULL)
37214 + return;
37215 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37216 + gr_log_textrel(vma);
37217 + if (is_textrel_rw)
37218 + vma->vm_flags |= VM_MAYWRITE;
37219 + else
37220 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37221 + vma->vm_flags &= ~VM_MAYWRITE;
37222 + return;
37223 + }
37224 + i++;
37225 + }
37226 + return;
37227 +
37228 + case PT_GNU_RELRO:
37229 + if (!is_relro)
37230 + continue;
37231 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37232 + vma->vm_flags &= ~VM_MAYWRITE;
37233 + return;
37234 + }
37235 + }
37236 +}
37237 +#endif
37238 +
37239 static int __init init_elf_binfmt(void)
37240 {
37241 return register_binfmt(&elf_format);
37242 diff -urNp linux-2.6.32.42/fs/binfmt_flat.c linux-2.6.32.42/fs/binfmt_flat.c
37243 --- linux-2.6.32.42/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
37244 +++ linux-2.6.32.42/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
37245 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
37246 realdatastart = (unsigned long) -ENOMEM;
37247 printk("Unable to allocate RAM for process data, errno %d\n",
37248 (int)-realdatastart);
37249 + down_write(&current->mm->mmap_sem);
37250 do_munmap(current->mm, textpos, text_len);
37251 + up_write(&current->mm->mmap_sem);
37252 ret = realdatastart;
37253 goto err;
37254 }
37255 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
37256 }
37257 if (IS_ERR_VALUE(result)) {
37258 printk("Unable to read data+bss, errno %d\n", (int)-result);
37259 + down_write(&current->mm->mmap_sem);
37260 do_munmap(current->mm, textpos, text_len);
37261 do_munmap(current->mm, realdatastart, data_len + extra);
37262 + up_write(&current->mm->mmap_sem);
37263 ret = result;
37264 goto err;
37265 }
37266 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
37267 }
37268 if (IS_ERR_VALUE(result)) {
37269 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37270 + down_write(&current->mm->mmap_sem);
37271 do_munmap(current->mm, textpos, text_len + data_len + extra +
37272 MAX_SHARED_LIBS * sizeof(unsigned long));
37273 + up_write(&current->mm->mmap_sem);
37274 ret = result;
37275 goto err;
37276 }
37277 diff -urNp linux-2.6.32.42/fs/bio.c linux-2.6.32.42/fs/bio.c
37278 --- linux-2.6.32.42/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
37279 +++ linux-2.6.32.42/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
37280 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
37281
37282 i = 0;
37283 while (i < bio_slab_nr) {
37284 - struct bio_slab *bslab = &bio_slabs[i];
37285 + bslab = &bio_slabs[i];
37286
37287 if (!bslab->slab && entry == -1)
37288 entry = i;
37289 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
37290 const int read = bio_data_dir(bio) == READ;
37291 struct bio_map_data *bmd = bio->bi_private;
37292 int i;
37293 - char *p = bmd->sgvecs[0].iov_base;
37294 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
37295
37296 __bio_for_each_segment(bvec, bio, i, 0) {
37297 char *addr = page_address(bvec->bv_page);
37298 diff -urNp linux-2.6.32.42/fs/block_dev.c linux-2.6.32.42/fs/block_dev.c
37299 --- linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
37300 +++ linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
37301 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
37302 else if (bdev->bd_contains == bdev)
37303 res = 0; /* is a whole device which isn't held */
37304
37305 - else if (bdev->bd_contains->bd_holder == bd_claim)
37306 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
37307 res = 0; /* is a partition of a device that is being partitioned */
37308 else if (bdev->bd_contains->bd_holder != NULL)
37309 res = -EBUSY; /* is a partition of a held device */
37310 diff -urNp linux-2.6.32.42/fs/btrfs/ctree.c linux-2.6.32.42/fs/btrfs/ctree.c
37311 --- linux-2.6.32.42/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
37312 +++ linux-2.6.32.42/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
37313 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
37314 free_extent_buffer(buf);
37315 add_root_to_dirty_list(root);
37316 } else {
37317 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37318 - parent_start = parent->start;
37319 - else
37320 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37321 + if (parent)
37322 + parent_start = parent->start;
37323 + else
37324 + parent_start = 0;
37325 + } else
37326 parent_start = 0;
37327
37328 WARN_ON(trans->transid != btrfs_header_generation(parent));
37329 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
37330
37331 ret = 0;
37332 if (slot == 0) {
37333 - struct btrfs_disk_key disk_key;
37334 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
37335 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
37336 }
37337 diff -urNp linux-2.6.32.42/fs/btrfs/disk-io.c linux-2.6.32.42/fs/btrfs/disk-io.c
37338 --- linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
37339 +++ linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
37340 @@ -39,7 +39,7 @@
37341 #include "tree-log.h"
37342 #include "free-space-cache.h"
37343
37344 -static struct extent_io_ops btree_extent_io_ops;
37345 +static const struct extent_io_ops btree_extent_io_ops;
37346 static void end_workqueue_fn(struct btrfs_work *work);
37347 static void free_fs_root(struct btrfs_root *root);
37348
37349 @@ -2607,7 +2607,7 @@ out:
37350 return 0;
37351 }
37352
37353 -static struct extent_io_ops btree_extent_io_ops = {
37354 +static const struct extent_io_ops btree_extent_io_ops = {
37355 .write_cache_pages_lock_hook = btree_lock_page_hook,
37356 .readpage_end_io_hook = btree_readpage_end_io_hook,
37357 .submit_bio_hook = btree_submit_bio_hook,
37358 diff -urNp linux-2.6.32.42/fs/btrfs/extent_io.h linux-2.6.32.42/fs/btrfs/extent_io.h
37359 --- linux-2.6.32.42/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
37360 +++ linux-2.6.32.42/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
37361 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
37362 struct bio *bio, int mirror_num,
37363 unsigned long bio_flags);
37364 struct extent_io_ops {
37365 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
37366 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
37367 u64 start, u64 end, int *page_started,
37368 unsigned long *nr_written);
37369 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
37370 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
37371 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
37372 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
37373 extent_submit_bio_hook_t *submit_bio_hook;
37374 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
37375 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
37376 size_t size, struct bio *bio,
37377 unsigned long bio_flags);
37378 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
37379 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
37380 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
37381 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
37382 u64 start, u64 end,
37383 struct extent_state *state);
37384 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
37385 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
37386 u64 start, u64 end,
37387 struct extent_state *state);
37388 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37389 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37390 struct extent_state *state);
37391 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37392 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37393 struct extent_state *state, int uptodate);
37394 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
37395 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
37396 unsigned long old, unsigned long bits);
37397 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
37398 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
37399 unsigned long bits);
37400 - int (*merge_extent_hook)(struct inode *inode,
37401 + int (* const merge_extent_hook)(struct inode *inode,
37402 struct extent_state *new,
37403 struct extent_state *other);
37404 - int (*split_extent_hook)(struct inode *inode,
37405 + int (* const split_extent_hook)(struct inode *inode,
37406 struct extent_state *orig, u64 split);
37407 - int (*write_cache_pages_lock_hook)(struct page *page);
37408 + int (* const write_cache_pages_lock_hook)(struct page *page);
37409 };
37410
37411 struct extent_io_tree {
37412 @@ -88,7 +88,7 @@ struct extent_io_tree {
37413 u64 dirty_bytes;
37414 spinlock_t lock;
37415 spinlock_t buffer_lock;
37416 - struct extent_io_ops *ops;
37417 + const struct extent_io_ops *ops;
37418 };
37419
37420 struct extent_state {
37421 diff -urNp linux-2.6.32.42/fs/btrfs/extent-tree.c linux-2.6.32.42/fs/btrfs/extent-tree.c
37422 --- linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
37423 +++ linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
37424 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
37425 u64 group_start = group->key.objectid;
37426 new_extents = kmalloc(sizeof(*new_extents),
37427 GFP_NOFS);
37428 + if (!new_extents) {
37429 + ret = -ENOMEM;
37430 + goto out;
37431 + }
37432 nr_extents = 1;
37433 ret = get_new_locations(reloc_inode,
37434 extent_key,
37435 diff -urNp linux-2.6.32.42/fs/btrfs/free-space-cache.c linux-2.6.32.42/fs/btrfs/free-space-cache.c
37436 --- linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
37437 +++ linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
37438 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
37439
37440 while(1) {
37441 if (entry->bytes < bytes || entry->offset < min_start) {
37442 - struct rb_node *node;
37443 -
37444 node = rb_next(&entry->offset_index);
37445 if (!node)
37446 break;
37447 @@ -1226,7 +1224,7 @@ again:
37448 */
37449 while (entry->bitmap || found_bitmap ||
37450 (!entry->bitmap && entry->bytes < min_bytes)) {
37451 - struct rb_node *node = rb_next(&entry->offset_index);
37452 + node = rb_next(&entry->offset_index);
37453
37454 if (entry->bitmap && entry->bytes > bytes + empty_size) {
37455 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
37456 diff -urNp linux-2.6.32.42/fs/btrfs/inode.c linux-2.6.32.42/fs/btrfs/inode.c
37457 --- linux-2.6.32.42/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37458 +++ linux-2.6.32.42/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
37459 @@ -63,7 +63,7 @@ static const struct inode_operations btr
37460 static const struct address_space_operations btrfs_aops;
37461 static const struct address_space_operations btrfs_symlink_aops;
37462 static const struct file_operations btrfs_dir_file_operations;
37463 -static struct extent_io_ops btrfs_extent_io_ops;
37464 +static const struct extent_io_ops btrfs_extent_io_ops;
37465
37466 static struct kmem_cache *btrfs_inode_cachep;
37467 struct kmem_cache *btrfs_trans_handle_cachep;
37468 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
37469 1, 0, NULL, GFP_NOFS);
37470 while (start < end) {
37471 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
37472 + BUG_ON(!async_cow);
37473 async_cow->inode = inode;
37474 async_cow->root = root;
37475 async_cow->locked_page = locked_page;
37476 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
37477 inline_size = btrfs_file_extent_inline_item_len(leaf,
37478 btrfs_item_nr(leaf, path->slots[0]));
37479 tmp = kmalloc(inline_size, GFP_NOFS);
37480 + if (!tmp)
37481 + return -ENOMEM;
37482 ptr = btrfs_file_extent_inline_start(item);
37483
37484 read_extent_buffer(leaf, tmp, ptr, inline_size);
37485 @@ -5410,7 +5413,7 @@ fail:
37486 return -ENOMEM;
37487 }
37488
37489 -static int btrfs_getattr(struct vfsmount *mnt,
37490 +int btrfs_getattr(struct vfsmount *mnt,
37491 struct dentry *dentry, struct kstat *stat)
37492 {
37493 struct inode *inode = dentry->d_inode;
37494 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
37495 return 0;
37496 }
37497
37498 +EXPORT_SYMBOL(btrfs_getattr);
37499 +
37500 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
37501 +{
37502 + return BTRFS_I(inode)->root->anon_super.s_dev;
37503 +}
37504 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37505 +
37506 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
37507 struct inode *new_dir, struct dentry *new_dentry)
37508 {
37509 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
37510 .fsync = btrfs_sync_file,
37511 };
37512
37513 -static struct extent_io_ops btrfs_extent_io_ops = {
37514 +static const struct extent_io_ops btrfs_extent_io_ops = {
37515 .fill_delalloc = run_delalloc_range,
37516 .submit_bio_hook = btrfs_submit_bio_hook,
37517 .merge_bio_hook = btrfs_merge_bio_hook,
37518 diff -urNp linux-2.6.32.42/fs/btrfs/relocation.c linux-2.6.32.42/fs/btrfs/relocation.c
37519 --- linux-2.6.32.42/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
37520 +++ linux-2.6.32.42/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
37521 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
37522 }
37523 spin_unlock(&rc->reloc_root_tree.lock);
37524
37525 - BUG_ON((struct btrfs_root *)node->data != root);
37526 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
37527
37528 if (!del) {
37529 spin_lock(&rc->reloc_root_tree.lock);
37530 diff -urNp linux-2.6.32.42/fs/btrfs/sysfs.c linux-2.6.32.42/fs/btrfs/sysfs.c
37531 --- linux-2.6.32.42/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37532 +++ linux-2.6.32.42/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37533 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
37534 complete(&root->kobj_unregister);
37535 }
37536
37537 -static struct sysfs_ops btrfs_super_attr_ops = {
37538 +static const struct sysfs_ops btrfs_super_attr_ops = {
37539 .show = btrfs_super_attr_show,
37540 .store = btrfs_super_attr_store,
37541 };
37542
37543 -static struct sysfs_ops btrfs_root_attr_ops = {
37544 +static const struct sysfs_ops btrfs_root_attr_ops = {
37545 .show = btrfs_root_attr_show,
37546 .store = btrfs_root_attr_store,
37547 };
37548 diff -urNp linux-2.6.32.42/fs/buffer.c linux-2.6.32.42/fs/buffer.c
37549 --- linux-2.6.32.42/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
37550 +++ linux-2.6.32.42/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
37551 @@ -25,6 +25,7 @@
37552 #include <linux/percpu.h>
37553 #include <linux/slab.h>
37554 #include <linux/capability.h>
37555 +#include <linux/security.h>
37556 #include <linux/blkdev.h>
37557 #include <linux/file.h>
37558 #include <linux/quotaops.h>
37559 diff -urNp linux-2.6.32.42/fs/cachefiles/bind.c linux-2.6.32.42/fs/cachefiles/bind.c
37560 --- linux-2.6.32.42/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
37561 +++ linux-2.6.32.42/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
37562 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37563 args);
37564
37565 /* start by checking things over */
37566 - ASSERT(cache->fstop_percent >= 0 &&
37567 - cache->fstop_percent < cache->fcull_percent &&
37568 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
37569 cache->fcull_percent < cache->frun_percent &&
37570 cache->frun_percent < 100);
37571
37572 - ASSERT(cache->bstop_percent >= 0 &&
37573 - cache->bstop_percent < cache->bcull_percent &&
37574 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
37575 cache->bcull_percent < cache->brun_percent &&
37576 cache->brun_percent < 100);
37577
37578 diff -urNp linux-2.6.32.42/fs/cachefiles/daemon.c linux-2.6.32.42/fs/cachefiles/daemon.c
37579 --- linux-2.6.32.42/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
37580 +++ linux-2.6.32.42/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
37581 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
37582 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37583 return -EIO;
37584
37585 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
37586 + if (datalen > PAGE_SIZE - 1)
37587 return -EOPNOTSUPP;
37588
37589 /* drag the command string into the kernel so we can parse it */
37590 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
37591 if (args[0] != '%' || args[1] != '\0')
37592 return -EINVAL;
37593
37594 - if (fstop < 0 || fstop >= cache->fcull_percent)
37595 + if (fstop >= cache->fcull_percent)
37596 return cachefiles_daemon_range_error(cache, args);
37597
37598 cache->fstop_percent = fstop;
37599 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
37600 if (args[0] != '%' || args[1] != '\0')
37601 return -EINVAL;
37602
37603 - if (bstop < 0 || bstop >= cache->bcull_percent)
37604 + if (bstop >= cache->bcull_percent)
37605 return cachefiles_daemon_range_error(cache, args);
37606
37607 cache->bstop_percent = bstop;
37608 diff -urNp linux-2.6.32.42/fs/cachefiles/internal.h linux-2.6.32.42/fs/cachefiles/internal.h
37609 --- linux-2.6.32.42/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37610 +++ linux-2.6.32.42/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37611 @@ -56,7 +56,7 @@ struct cachefiles_cache {
37612 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37613 struct rb_root active_nodes; /* active nodes (can't be culled) */
37614 rwlock_t active_lock; /* lock for active_nodes */
37615 - atomic_t gravecounter; /* graveyard uniquifier */
37616 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37617 unsigned frun_percent; /* when to stop culling (% files) */
37618 unsigned fcull_percent; /* when to start culling (% files) */
37619 unsigned fstop_percent; /* when to stop allocating (% files) */
37620 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37621 * proc.c
37622 */
37623 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37624 -extern atomic_t cachefiles_lookup_histogram[HZ];
37625 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37626 -extern atomic_t cachefiles_create_histogram[HZ];
37627 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37628 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37629 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37630
37631 extern int __init cachefiles_proc_init(void);
37632 extern void cachefiles_proc_cleanup(void);
37633 static inline
37634 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37635 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37636 {
37637 unsigned long jif = jiffies - start_jif;
37638 if (jif >= HZ)
37639 jif = HZ - 1;
37640 - atomic_inc(&histogram[jif]);
37641 + atomic_inc_unchecked(&histogram[jif]);
37642 }
37643
37644 #else
37645 diff -urNp linux-2.6.32.42/fs/cachefiles/namei.c linux-2.6.32.42/fs/cachefiles/namei.c
37646 --- linux-2.6.32.42/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37647 +++ linux-2.6.32.42/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37648 @@ -250,7 +250,7 @@ try_again:
37649 /* first step is to make up a grave dentry in the graveyard */
37650 sprintf(nbuffer, "%08x%08x",
37651 (uint32_t) get_seconds(),
37652 - (uint32_t) atomic_inc_return(&cache->gravecounter));
37653 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37654
37655 /* do the multiway lock magic */
37656 trap = lock_rename(cache->graveyard, dir);
37657 diff -urNp linux-2.6.32.42/fs/cachefiles/proc.c linux-2.6.32.42/fs/cachefiles/proc.c
37658 --- linux-2.6.32.42/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37659 +++ linux-2.6.32.42/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37660 @@ -14,9 +14,9 @@
37661 #include <linux/seq_file.h>
37662 #include "internal.h"
37663
37664 -atomic_t cachefiles_lookup_histogram[HZ];
37665 -atomic_t cachefiles_mkdir_histogram[HZ];
37666 -atomic_t cachefiles_create_histogram[HZ];
37667 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37668 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37669 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37670
37671 /*
37672 * display the latency histogram
37673 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37674 return 0;
37675 default:
37676 index = (unsigned long) v - 3;
37677 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37678 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37679 - z = atomic_read(&cachefiles_create_histogram[index]);
37680 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37681 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37682 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37683 if (x == 0 && y == 0 && z == 0)
37684 return 0;
37685
37686 diff -urNp linux-2.6.32.42/fs/cachefiles/rdwr.c linux-2.6.32.42/fs/cachefiles/rdwr.c
37687 --- linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37688 +++ linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37689 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37690 old_fs = get_fs();
37691 set_fs(KERNEL_DS);
37692 ret = file->f_op->write(
37693 - file, (const void __user *) data, len, &pos);
37694 + file, (__force const void __user *) data, len, &pos);
37695 set_fs(old_fs);
37696 kunmap(page);
37697 if (ret != len)
37698 diff -urNp linux-2.6.32.42/fs/cifs/cifs_debug.c linux-2.6.32.42/fs/cifs/cifs_debug.c
37699 --- linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37700 +++ linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37701 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37702 tcon = list_entry(tmp3,
37703 struct cifsTconInfo,
37704 tcon_list);
37705 - atomic_set(&tcon->num_smbs_sent, 0);
37706 - atomic_set(&tcon->num_writes, 0);
37707 - atomic_set(&tcon->num_reads, 0);
37708 - atomic_set(&tcon->num_oplock_brks, 0);
37709 - atomic_set(&tcon->num_opens, 0);
37710 - atomic_set(&tcon->num_posixopens, 0);
37711 - atomic_set(&tcon->num_posixmkdirs, 0);
37712 - atomic_set(&tcon->num_closes, 0);
37713 - atomic_set(&tcon->num_deletes, 0);
37714 - atomic_set(&tcon->num_mkdirs, 0);
37715 - atomic_set(&tcon->num_rmdirs, 0);
37716 - atomic_set(&tcon->num_renames, 0);
37717 - atomic_set(&tcon->num_t2renames, 0);
37718 - atomic_set(&tcon->num_ffirst, 0);
37719 - atomic_set(&tcon->num_fnext, 0);
37720 - atomic_set(&tcon->num_fclose, 0);
37721 - atomic_set(&tcon->num_hardlinks, 0);
37722 - atomic_set(&tcon->num_symlinks, 0);
37723 - atomic_set(&tcon->num_locks, 0);
37724 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37725 + atomic_set_unchecked(&tcon->num_writes, 0);
37726 + atomic_set_unchecked(&tcon->num_reads, 0);
37727 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37728 + atomic_set_unchecked(&tcon->num_opens, 0);
37729 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37730 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37731 + atomic_set_unchecked(&tcon->num_closes, 0);
37732 + atomic_set_unchecked(&tcon->num_deletes, 0);
37733 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37734 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37735 + atomic_set_unchecked(&tcon->num_renames, 0);
37736 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37737 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37738 + atomic_set_unchecked(&tcon->num_fnext, 0);
37739 + atomic_set_unchecked(&tcon->num_fclose, 0);
37740 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37741 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37742 + atomic_set_unchecked(&tcon->num_locks, 0);
37743 }
37744 }
37745 }
37746 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37747 if (tcon->need_reconnect)
37748 seq_puts(m, "\tDISCONNECTED ");
37749 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37750 - atomic_read(&tcon->num_smbs_sent),
37751 - atomic_read(&tcon->num_oplock_brks));
37752 + atomic_read_unchecked(&tcon->num_smbs_sent),
37753 + atomic_read_unchecked(&tcon->num_oplock_brks));
37754 seq_printf(m, "\nReads: %d Bytes: %lld",
37755 - atomic_read(&tcon->num_reads),
37756 + atomic_read_unchecked(&tcon->num_reads),
37757 (long long)(tcon->bytes_read));
37758 seq_printf(m, "\nWrites: %d Bytes: %lld",
37759 - atomic_read(&tcon->num_writes),
37760 + atomic_read_unchecked(&tcon->num_writes),
37761 (long long)(tcon->bytes_written));
37762 seq_printf(m, "\nFlushes: %d",
37763 - atomic_read(&tcon->num_flushes));
37764 + atomic_read_unchecked(&tcon->num_flushes));
37765 seq_printf(m, "\nLocks: %d HardLinks: %d "
37766 "Symlinks: %d",
37767 - atomic_read(&tcon->num_locks),
37768 - atomic_read(&tcon->num_hardlinks),
37769 - atomic_read(&tcon->num_symlinks));
37770 + atomic_read_unchecked(&tcon->num_locks),
37771 + atomic_read_unchecked(&tcon->num_hardlinks),
37772 + atomic_read_unchecked(&tcon->num_symlinks));
37773 seq_printf(m, "\nOpens: %d Closes: %d "
37774 "Deletes: %d",
37775 - atomic_read(&tcon->num_opens),
37776 - atomic_read(&tcon->num_closes),
37777 - atomic_read(&tcon->num_deletes));
37778 + atomic_read_unchecked(&tcon->num_opens),
37779 + atomic_read_unchecked(&tcon->num_closes),
37780 + atomic_read_unchecked(&tcon->num_deletes));
37781 seq_printf(m, "\nPosix Opens: %d "
37782 "Posix Mkdirs: %d",
37783 - atomic_read(&tcon->num_posixopens),
37784 - atomic_read(&tcon->num_posixmkdirs));
37785 + atomic_read_unchecked(&tcon->num_posixopens),
37786 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37787 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37788 - atomic_read(&tcon->num_mkdirs),
37789 - atomic_read(&tcon->num_rmdirs));
37790 + atomic_read_unchecked(&tcon->num_mkdirs),
37791 + atomic_read_unchecked(&tcon->num_rmdirs));
37792 seq_printf(m, "\nRenames: %d T2 Renames %d",
37793 - atomic_read(&tcon->num_renames),
37794 - atomic_read(&tcon->num_t2renames));
37795 + atomic_read_unchecked(&tcon->num_renames),
37796 + atomic_read_unchecked(&tcon->num_t2renames));
37797 seq_printf(m, "\nFindFirst: %d FNext %d "
37798 "FClose %d",
37799 - atomic_read(&tcon->num_ffirst),
37800 - atomic_read(&tcon->num_fnext),
37801 - atomic_read(&tcon->num_fclose));
37802 + atomic_read_unchecked(&tcon->num_ffirst),
37803 + atomic_read_unchecked(&tcon->num_fnext),
37804 + atomic_read_unchecked(&tcon->num_fclose));
37805 }
37806 }
37807 }
37808 diff -urNp linux-2.6.32.42/fs/cifs/cifsglob.h linux-2.6.32.42/fs/cifs/cifsglob.h
37809 --- linux-2.6.32.42/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37810 +++ linux-2.6.32.42/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37811 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37812 __u16 Flags; /* optional support bits */
37813 enum statusEnum tidStatus;
37814 #ifdef CONFIG_CIFS_STATS
37815 - atomic_t num_smbs_sent;
37816 - atomic_t num_writes;
37817 - atomic_t num_reads;
37818 - atomic_t num_flushes;
37819 - atomic_t num_oplock_brks;
37820 - atomic_t num_opens;
37821 - atomic_t num_closes;
37822 - atomic_t num_deletes;
37823 - atomic_t num_mkdirs;
37824 - atomic_t num_posixopens;
37825 - atomic_t num_posixmkdirs;
37826 - atomic_t num_rmdirs;
37827 - atomic_t num_renames;
37828 - atomic_t num_t2renames;
37829 - atomic_t num_ffirst;
37830 - atomic_t num_fnext;
37831 - atomic_t num_fclose;
37832 - atomic_t num_hardlinks;
37833 - atomic_t num_symlinks;
37834 - atomic_t num_locks;
37835 - atomic_t num_acl_get;
37836 - atomic_t num_acl_set;
37837 + atomic_unchecked_t num_smbs_sent;
37838 + atomic_unchecked_t num_writes;
37839 + atomic_unchecked_t num_reads;
37840 + atomic_unchecked_t num_flushes;
37841 + atomic_unchecked_t num_oplock_brks;
37842 + atomic_unchecked_t num_opens;
37843 + atomic_unchecked_t num_closes;
37844 + atomic_unchecked_t num_deletes;
37845 + atomic_unchecked_t num_mkdirs;
37846 + atomic_unchecked_t num_posixopens;
37847 + atomic_unchecked_t num_posixmkdirs;
37848 + atomic_unchecked_t num_rmdirs;
37849 + atomic_unchecked_t num_renames;
37850 + atomic_unchecked_t num_t2renames;
37851 + atomic_unchecked_t num_ffirst;
37852 + atomic_unchecked_t num_fnext;
37853 + atomic_unchecked_t num_fclose;
37854 + atomic_unchecked_t num_hardlinks;
37855 + atomic_unchecked_t num_symlinks;
37856 + atomic_unchecked_t num_locks;
37857 + atomic_unchecked_t num_acl_get;
37858 + atomic_unchecked_t num_acl_set;
37859 #ifdef CONFIG_CIFS_STATS2
37860 unsigned long long time_writes;
37861 unsigned long long time_reads;
37862 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37863 }
37864
37865 #ifdef CONFIG_CIFS_STATS
37866 -#define cifs_stats_inc atomic_inc
37867 +#define cifs_stats_inc atomic_inc_unchecked
37868
37869 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37870 unsigned int bytes)
37871 diff -urNp linux-2.6.32.42/fs/cifs/link.c linux-2.6.32.42/fs/cifs/link.c
37872 --- linux-2.6.32.42/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37873 +++ linux-2.6.32.42/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37874 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37875
37876 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37877 {
37878 - char *p = nd_get_link(nd);
37879 + const char *p = nd_get_link(nd);
37880 if (!IS_ERR(p))
37881 kfree(p);
37882 }
37883 diff -urNp linux-2.6.32.42/fs/coda/cache.c linux-2.6.32.42/fs/coda/cache.c
37884 --- linux-2.6.32.42/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37885 +++ linux-2.6.32.42/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37886 @@ -24,14 +24,14 @@
37887 #include <linux/coda_fs_i.h>
37888 #include <linux/coda_cache.h>
37889
37890 -static atomic_t permission_epoch = ATOMIC_INIT(0);
37891 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37892
37893 /* replace or extend an acl cache hit */
37894 void coda_cache_enter(struct inode *inode, int mask)
37895 {
37896 struct coda_inode_info *cii = ITOC(inode);
37897
37898 - cii->c_cached_epoch = atomic_read(&permission_epoch);
37899 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37900 if (cii->c_uid != current_fsuid()) {
37901 cii->c_uid = current_fsuid();
37902 cii->c_cached_perm = mask;
37903 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37904 void coda_cache_clear_inode(struct inode *inode)
37905 {
37906 struct coda_inode_info *cii = ITOC(inode);
37907 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37908 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37909 }
37910
37911 /* remove all acl caches */
37912 void coda_cache_clear_all(struct super_block *sb)
37913 {
37914 - atomic_inc(&permission_epoch);
37915 + atomic_inc_unchecked(&permission_epoch);
37916 }
37917
37918
37919 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37920
37921 hit = (mask & cii->c_cached_perm) == mask &&
37922 cii->c_uid == current_fsuid() &&
37923 - cii->c_cached_epoch == atomic_read(&permission_epoch);
37924 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37925
37926 return hit;
37927 }
37928 diff -urNp linux-2.6.32.42/fs/compat_binfmt_elf.c linux-2.6.32.42/fs/compat_binfmt_elf.c
37929 --- linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37930 +++ linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37931 @@ -29,10 +29,12 @@
37932 #undef elfhdr
37933 #undef elf_phdr
37934 #undef elf_note
37935 +#undef elf_dyn
37936 #undef elf_addr_t
37937 #define elfhdr elf32_hdr
37938 #define elf_phdr elf32_phdr
37939 #define elf_note elf32_note
37940 +#define elf_dyn Elf32_Dyn
37941 #define elf_addr_t Elf32_Addr
37942
37943 /*
37944 diff -urNp linux-2.6.32.42/fs/compat.c linux-2.6.32.42/fs/compat.c
37945 --- linux-2.6.32.42/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37946 +++ linux-2.6.32.42/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37947 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37948
37949 struct compat_readdir_callback {
37950 struct compat_old_linux_dirent __user *dirent;
37951 + struct file * file;
37952 int result;
37953 };
37954
37955 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37956 buf->result = -EOVERFLOW;
37957 return -EOVERFLOW;
37958 }
37959 +
37960 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37961 + return 0;
37962 +
37963 buf->result++;
37964 dirent = buf->dirent;
37965 if (!access_ok(VERIFY_WRITE, dirent,
37966 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37967
37968 buf.result = 0;
37969 buf.dirent = dirent;
37970 + buf.file = file;
37971
37972 error = vfs_readdir(file, compat_fillonedir, &buf);
37973 if (buf.result)
37974 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
37975 struct compat_getdents_callback {
37976 struct compat_linux_dirent __user *current_dir;
37977 struct compat_linux_dirent __user *previous;
37978 + struct file * file;
37979 int count;
37980 int error;
37981 };
37982 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37983 buf->error = -EOVERFLOW;
37984 return -EOVERFLOW;
37985 }
37986 +
37987 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37988 + return 0;
37989 +
37990 dirent = buf->previous;
37991 if (dirent) {
37992 if (__put_user(offset, &dirent->d_off))
37993 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37994 buf.previous = NULL;
37995 buf.count = count;
37996 buf.error = 0;
37997 + buf.file = file;
37998
37999 error = vfs_readdir(file, compat_filldir, &buf);
38000 if (error >= 0)
38001 @@ -987,6 +999,7 @@ out:
38002 struct compat_getdents_callback64 {
38003 struct linux_dirent64 __user *current_dir;
38004 struct linux_dirent64 __user *previous;
38005 + struct file * file;
38006 int count;
38007 int error;
38008 };
38009 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
38010 buf->error = -EINVAL; /* only used if we fail.. */
38011 if (reclen > buf->count)
38012 return -EINVAL;
38013 +
38014 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38015 + return 0;
38016 +
38017 dirent = buf->previous;
38018
38019 if (dirent) {
38020 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
38021 buf.previous = NULL;
38022 buf.count = count;
38023 buf.error = 0;
38024 + buf.file = file;
38025
38026 error = vfs_readdir(file, compat_filldir64, &buf);
38027 if (error >= 0)
38028 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
38029 * verify all the pointers
38030 */
38031 ret = -EINVAL;
38032 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
38033 + if (nr_segs > UIO_MAXIOV)
38034 goto out;
38035 if (!file->f_op)
38036 goto out;
38037 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
38038 compat_uptr_t __user *envp,
38039 struct pt_regs * regs)
38040 {
38041 +#ifdef CONFIG_GRKERNSEC
38042 + struct file *old_exec_file;
38043 + struct acl_subject_label *old_acl;
38044 + struct rlimit old_rlim[RLIM_NLIMITS];
38045 +#endif
38046 struct linux_binprm *bprm;
38047 struct file *file;
38048 struct files_struct *displaced;
38049 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
38050 bprm->filename = filename;
38051 bprm->interp = filename;
38052
38053 + if (gr_process_user_ban()) {
38054 + retval = -EPERM;
38055 + goto out_file;
38056 + }
38057 +
38058 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38059 + retval = -EAGAIN;
38060 + if (gr_handle_nproc())
38061 + goto out_file;
38062 + retval = -EACCES;
38063 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
38064 + goto out_file;
38065 +
38066 retval = bprm_mm_init(bprm);
38067 if (retval)
38068 goto out_file;
38069 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
38070 if (retval < 0)
38071 goto out;
38072
38073 + if (!gr_tpe_allow(file)) {
38074 + retval = -EACCES;
38075 + goto out;
38076 + }
38077 +
38078 + if (gr_check_crash_exec(file)) {
38079 + retval = -EACCES;
38080 + goto out;
38081 + }
38082 +
38083 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38084 +
38085 + gr_handle_exec_args_compat(bprm, argv);
38086 +
38087 +#ifdef CONFIG_GRKERNSEC
38088 + old_acl = current->acl;
38089 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38090 + old_exec_file = current->exec_file;
38091 + get_file(file);
38092 + current->exec_file = file;
38093 +#endif
38094 +
38095 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38096 + bprm->unsafe & LSM_UNSAFE_SHARE);
38097 + if (retval < 0)
38098 + goto out_fail;
38099 +
38100 retval = search_binary_handler(bprm, regs);
38101 if (retval < 0)
38102 - goto out;
38103 + goto out_fail;
38104 +#ifdef CONFIG_GRKERNSEC
38105 + if (old_exec_file)
38106 + fput(old_exec_file);
38107 +#endif
38108
38109 /* execve succeeded */
38110 current->fs->in_exec = 0;
38111 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
38112 put_files_struct(displaced);
38113 return retval;
38114
38115 +out_fail:
38116 +#ifdef CONFIG_GRKERNSEC
38117 + current->acl = old_acl;
38118 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38119 + fput(current->exec_file);
38120 + current->exec_file = old_exec_file;
38121 +#endif
38122 +
38123 out:
38124 if (bprm->mm) {
38125 acct_arg_size(bprm, 0);
38126 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
38127 struct fdtable *fdt;
38128 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38129
38130 + pax_track_stack();
38131 +
38132 if (n < 0)
38133 goto out_nofds;
38134
38135 diff -urNp linux-2.6.32.42/fs/compat_ioctl.c linux-2.6.32.42/fs/compat_ioctl.c
38136 --- linux-2.6.32.42/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
38137 +++ linux-2.6.32.42/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
38138 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
38139 up = (struct compat_video_spu_palette __user *) arg;
38140 err = get_user(palp, &up->palette);
38141 err |= get_user(length, &up->length);
38142 + if (err)
38143 + return -EFAULT;
38144
38145 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38146 err = put_user(compat_ptr(palp), &up_native->palette);
38147 diff -urNp linux-2.6.32.42/fs/configfs/dir.c linux-2.6.32.42/fs/configfs/dir.c
38148 --- linux-2.6.32.42/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
38149 +++ linux-2.6.32.42/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
38150 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
38151 }
38152 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38153 struct configfs_dirent *next;
38154 - const char * name;
38155 + const unsigned char * name;
38156 + char d_name[sizeof(next->s_dentry->d_iname)];
38157 int len;
38158
38159 next = list_entry(p, struct configfs_dirent,
38160 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
38161 continue;
38162
38163 name = configfs_get_name(next);
38164 - len = strlen(name);
38165 + if (next->s_dentry && name == next->s_dentry->d_iname) {
38166 + len = next->s_dentry->d_name.len;
38167 + memcpy(d_name, name, len);
38168 + name = d_name;
38169 + } else
38170 + len = strlen(name);
38171 if (next->s_dentry)
38172 ino = next->s_dentry->d_inode->i_ino;
38173 else
38174 diff -urNp linux-2.6.32.42/fs/dcache.c linux-2.6.32.42/fs/dcache.c
38175 --- linux-2.6.32.42/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
38176 +++ linux-2.6.32.42/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
38177 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
38178
38179 static struct kmem_cache *dentry_cache __read_mostly;
38180
38181 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
38182 -
38183 /*
38184 * This is the single most critical data structure when it comes
38185 * to the dcache: the hashtable for lookups. Somebody should try
38186 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
38187 mempages -= reserve;
38188
38189 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38190 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38191 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38192
38193 dcache_init();
38194 inode_init();
38195 diff -urNp linux-2.6.32.42/fs/dlm/lockspace.c linux-2.6.32.42/fs/dlm/lockspace.c
38196 --- linux-2.6.32.42/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
38197 +++ linux-2.6.32.42/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
38198 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
38199 kfree(ls);
38200 }
38201
38202 -static struct sysfs_ops dlm_attr_ops = {
38203 +static const struct sysfs_ops dlm_attr_ops = {
38204 .show = dlm_attr_show,
38205 .store = dlm_attr_store,
38206 };
38207 diff -urNp linux-2.6.32.42/fs/ecryptfs/inode.c linux-2.6.32.42/fs/ecryptfs/inode.c
38208 --- linux-2.6.32.42/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
38209 +++ linux-2.6.32.42/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
38210 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
38211 old_fs = get_fs();
38212 set_fs(get_ds());
38213 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38214 - (char __user *)lower_buf,
38215 + (__force char __user *)lower_buf,
38216 lower_bufsiz);
38217 set_fs(old_fs);
38218 if (rc < 0)
38219 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
38220 }
38221 old_fs = get_fs();
38222 set_fs(get_ds());
38223 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38224 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38225 set_fs(old_fs);
38226 if (rc < 0)
38227 goto out_free;
38228 diff -urNp linux-2.6.32.42/fs/exec.c linux-2.6.32.42/fs/exec.c
38229 --- linux-2.6.32.42/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
38230 +++ linux-2.6.32.42/fs/exec.c 2011-06-25 12:56:37.000000000 -0400
38231 @@ -56,12 +56,24 @@
38232 #include <linux/fsnotify.h>
38233 #include <linux/fs_struct.h>
38234 #include <linux/pipe_fs_i.h>
38235 +#include <linux/random.h>
38236 +#include <linux/seq_file.h>
38237 +
38238 +#ifdef CONFIG_PAX_REFCOUNT
38239 +#include <linux/kallsyms.h>
38240 +#include <linux/kdebug.h>
38241 +#endif
38242
38243 #include <asm/uaccess.h>
38244 #include <asm/mmu_context.h>
38245 #include <asm/tlb.h>
38246 #include "internal.h"
38247
38248 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38249 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38250 +EXPORT_SYMBOL(pax_set_initial_flags_func);
38251 +#endif
38252 +
38253 int core_uses_pid;
38254 char core_pattern[CORENAME_MAX_SIZE] = "core";
38255 unsigned int core_pipe_limit;
38256 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38257 goto out;
38258
38259 file = do_filp_open(AT_FDCWD, tmp,
38260 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38261 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38262 MAY_READ | MAY_EXEC | MAY_OPEN);
38263 putname(tmp);
38264 error = PTR_ERR(file);
38265 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
38266 int write)
38267 {
38268 struct page *page;
38269 - int ret;
38270
38271 -#ifdef CONFIG_STACK_GROWSUP
38272 - if (write) {
38273 - ret = expand_stack_downwards(bprm->vma, pos);
38274 - if (ret < 0)
38275 - return NULL;
38276 - }
38277 -#endif
38278 - ret = get_user_pages(current, bprm->mm, pos,
38279 - 1, write, 1, &page, NULL);
38280 - if (ret <= 0)
38281 + if (0 > expand_stack_downwards(bprm->vma, pos))
38282 + return NULL;
38283 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38284 return NULL;
38285
38286 if (write) {
38287 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
38288 vma->vm_end = STACK_TOP_MAX;
38289 vma->vm_start = vma->vm_end - PAGE_SIZE;
38290 vma->vm_flags = VM_STACK_FLAGS;
38291 +
38292 +#ifdef CONFIG_PAX_SEGMEXEC
38293 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38294 +#endif
38295 +
38296 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38297
38298 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
38299 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
38300 mm->stack_vm = mm->total_vm = 1;
38301 up_write(&mm->mmap_sem);
38302 bprm->p = vma->vm_end - sizeof(void *);
38303 +
38304 +#ifdef CONFIG_PAX_RANDUSTACK
38305 + if (randomize_va_space)
38306 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38307 +#endif
38308 +
38309 return 0;
38310 err:
38311 up_write(&mm->mmap_sem);
38312 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
38313 int r;
38314 mm_segment_t oldfs = get_fs();
38315 set_fs(KERNEL_DS);
38316 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
38317 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
38318 set_fs(oldfs);
38319 return r;
38320 }
38321 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
38322 unsigned long new_end = old_end - shift;
38323 struct mmu_gather *tlb;
38324
38325 - BUG_ON(new_start > new_end);
38326 + if (new_start >= new_end || new_start < mmap_min_addr)
38327 + return -ENOMEM;
38328
38329 /*
38330 * ensure there are no vmas between where we want to go
38331 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
38332 if (vma != find_vma(mm, new_start))
38333 return -EFAULT;
38334
38335 +#ifdef CONFIG_PAX_SEGMEXEC
38336 + BUG_ON(pax_find_mirror_vma(vma));
38337 +#endif
38338 +
38339 /*
38340 * cover the whole range: [new_start, old_end)
38341 */
38342 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
38343 stack_top = arch_align_stack(stack_top);
38344 stack_top = PAGE_ALIGN(stack_top);
38345
38346 - if (unlikely(stack_top < mmap_min_addr) ||
38347 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38348 - return -ENOMEM;
38349 -
38350 stack_shift = vma->vm_end - stack_top;
38351
38352 bprm->p -= stack_shift;
38353 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
38354 bprm->exec -= stack_shift;
38355
38356 down_write(&mm->mmap_sem);
38357 +
38358 + /* Move stack pages down in memory. */
38359 + if (stack_shift) {
38360 + ret = shift_arg_pages(vma, stack_shift);
38361 + if (ret)
38362 + goto out_unlock;
38363 + }
38364 +
38365 vm_flags = VM_STACK_FLAGS;
38366
38367 /*
38368 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
38369 vm_flags &= ~VM_EXEC;
38370 vm_flags |= mm->def_flags;
38371
38372 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38373 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38374 + vm_flags &= ~VM_EXEC;
38375 +
38376 +#ifdef CONFIG_PAX_MPROTECT
38377 + if (mm->pax_flags & MF_PAX_MPROTECT)
38378 + vm_flags &= ~VM_MAYEXEC;
38379 +#endif
38380 +
38381 + }
38382 +#endif
38383 +
38384 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
38385 vm_flags);
38386 if (ret)
38387 goto out_unlock;
38388 BUG_ON(prev != vma);
38389
38390 - /* Move stack pages down in memory. */
38391 - if (stack_shift) {
38392 - ret = shift_arg_pages(vma, stack_shift);
38393 - if (ret)
38394 - goto out_unlock;
38395 - }
38396 -
38397 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
38398 stack_size = vma->vm_end - vma->vm_start;
38399 /*
38400 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
38401 int err;
38402
38403 file = do_filp_open(AT_FDCWD, name,
38404 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38405 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38406 MAY_EXEC | MAY_OPEN);
38407 if (IS_ERR(file))
38408 goto out;
38409 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
38410 old_fs = get_fs();
38411 set_fs(get_ds());
38412 /* The cast to a user pointer is valid due to the set_fs() */
38413 - result = vfs_read(file, (void __user *)addr, count, &pos);
38414 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
38415 set_fs(old_fs);
38416 return result;
38417 }
38418 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
38419 }
38420 rcu_read_unlock();
38421
38422 - if (p->fs->users > n_fs) {
38423 + if (atomic_read(&p->fs->users) > n_fs) {
38424 bprm->unsafe |= LSM_UNSAFE_SHARE;
38425 } else {
38426 res = -EAGAIN;
38427 @@ -1347,6 +1376,11 @@ int do_execve(char * filename,
38428 char __user *__user *envp,
38429 struct pt_regs * regs)
38430 {
38431 +#ifdef CONFIG_GRKERNSEC
38432 + struct file *old_exec_file;
38433 + struct acl_subject_label *old_acl;
38434 + struct rlimit old_rlim[RLIM_NLIMITS];
38435 +#endif
38436 struct linux_binprm *bprm;
38437 struct file *file;
38438 struct files_struct *displaced;
38439 @@ -1383,6 +1417,23 @@ int do_execve(char * filename,
38440 bprm->filename = filename;
38441 bprm->interp = filename;
38442
38443 + if (gr_process_user_ban()) {
38444 + retval = -EPERM;
38445 + goto out_file;
38446 + }
38447 +
38448 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38449 +
38450 + if (gr_handle_nproc()) {
38451 + retval = -EAGAIN;
38452 + goto out_file;
38453 + }
38454 +
38455 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38456 + retval = -EACCES;
38457 + goto out_file;
38458 + }
38459 +
38460 retval = bprm_mm_init(bprm);
38461 if (retval)
38462 goto out_file;
38463 @@ -1412,10 +1463,41 @@ int do_execve(char * filename,
38464 if (retval < 0)
38465 goto out;
38466
38467 + if (!gr_tpe_allow(file)) {
38468 + retval = -EACCES;
38469 + goto out;
38470 + }
38471 +
38472 + if (gr_check_crash_exec(file)) {
38473 + retval = -EACCES;
38474 + goto out;
38475 + }
38476 +
38477 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38478 +
38479 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
38480 +
38481 +#ifdef CONFIG_GRKERNSEC
38482 + old_acl = current->acl;
38483 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38484 + old_exec_file = current->exec_file;
38485 + get_file(file);
38486 + current->exec_file = file;
38487 +#endif
38488 +
38489 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38490 + bprm->unsafe & LSM_UNSAFE_SHARE);
38491 + if (retval < 0)
38492 + goto out_fail;
38493 +
38494 current->flags &= ~PF_KTHREAD;
38495 retval = search_binary_handler(bprm,regs);
38496 if (retval < 0)
38497 - goto out;
38498 + goto out_fail;
38499 +#ifdef CONFIG_GRKERNSEC
38500 + if (old_exec_file)
38501 + fput(old_exec_file);
38502 +#endif
38503
38504 /* execve succeeded */
38505 current->fs->in_exec = 0;
38506 @@ -1426,6 +1508,14 @@ int do_execve(char * filename,
38507 put_files_struct(displaced);
38508 return retval;
38509
38510 +out_fail:
38511 +#ifdef CONFIG_GRKERNSEC
38512 + current->acl = old_acl;
38513 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38514 + fput(current->exec_file);
38515 + current->exec_file = old_exec_file;
38516 +#endif
38517 +
38518 out:
38519 if (bprm->mm) {
38520 acct_arg_size(bprm, 0);
38521 @@ -1591,6 +1681,220 @@ out:
38522 return ispipe;
38523 }
38524
38525 +int pax_check_flags(unsigned long *flags)
38526 +{
38527 + int retval = 0;
38528 +
38529 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38530 + if (*flags & MF_PAX_SEGMEXEC)
38531 + {
38532 + *flags &= ~MF_PAX_SEGMEXEC;
38533 + retval = -EINVAL;
38534 + }
38535 +#endif
38536 +
38537 + if ((*flags & MF_PAX_PAGEEXEC)
38538 +
38539 +#ifdef CONFIG_PAX_PAGEEXEC
38540 + && (*flags & MF_PAX_SEGMEXEC)
38541 +#endif
38542 +
38543 + )
38544 + {
38545 + *flags &= ~MF_PAX_PAGEEXEC;
38546 + retval = -EINVAL;
38547 + }
38548 +
38549 + if ((*flags & MF_PAX_MPROTECT)
38550 +
38551 +#ifdef CONFIG_PAX_MPROTECT
38552 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38553 +#endif
38554 +
38555 + )
38556 + {
38557 + *flags &= ~MF_PAX_MPROTECT;
38558 + retval = -EINVAL;
38559 + }
38560 +
38561 + if ((*flags & MF_PAX_EMUTRAMP)
38562 +
38563 +#ifdef CONFIG_PAX_EMUTRAMP
38564 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38565 +#endif
38566 +
38567 + )
38568 + {
38569 + *flags &= ~MF_PAX_EMUTRAMP;
38570 + retval = -EINVAL;
38571 + }
38572 +
38573 + return retval;
38574 +}
38575 +
38576 +EXPORT_SYMBOL(pax_check_flags);
38577 +
38578 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38579 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38580 +{
38581 + struct task_struct *tsk = current;
38582 + struct mm_struct *mm = current->mm;
38583 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38584 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38585 + char *path_exec = NULL;
38586 + char *path_fault = NULL;
38587 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
38588 +
38589 + if (buffer_exec && buffer_fault) {
38590 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38591 +
38592 + down_read(&mm->mmap_sem);
38593 + vma = mm->mmap;
38594 + while (vma && (!vma_exec || !vma_fault)) {
38595 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38596 + vma_exec = vma;
38597 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38598 + vma_fault = vma;
38599 + vma = vma->vm_next;
38600 + }
38601 + if (vma_exec) {
38602 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38603 + if (IS_ERR(path_exec))
38604 + path_exec = "<path too long>";
38605 + else {
38606 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38607 + if (path_exec) {
38608 + *path_exec = 0;
38609 + path_exec = buffer_exec;
38610 + } else
38611 + path_exec = "<path too long>";
38612 + }
38613 + }
38614 + if (vma_fault) {
38615 + start = vma_fault->vm_start;
38616 + end = vma_fault->vm_end;
38617 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38618 + if (vma_fault->vm_file) {
38619 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38620 + if (IS_ERR(path_fault))
38621 + path_fault = "<path too long>";
38622 + else {
38623 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38624 + if (path_fault) {
38625 + *path_fault = 0;
38626 + path_fault = buffer_fault;
38627 + } else
38628 + path_fault = "<path too long>";
38629 + }
38630 + } else
38631 + path_fault = "<anonymous mapping>";
38632 + }
38633 + up_read(&mm->mmap_sem);
38634 + }
38635 + if (tsk->signal->curr_ip)
38636 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38637 + else
38638 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38639 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38640 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38641 + task_uid(tsk), task_euid(tsk), pc, sp);
38642 + free_page((unsigned long)buffer_exec);
38643 + free_page((unsigned long)buffer_fault);
38644 + pax_report_insns(pc, sp);
38645 + do_coredump(SIGKILL, SIGKILL, regs);
38646 +}
38647 +#endif
38648 +
38649 +#ifdef CONFIG_PAX_REFCOUNT
38650 +void pax_report_refcount_overflow(struct pt_regs *regs)
38651 +{
38652 + if (current->signal->curr_ip)
38653 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38654 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38655 + else
38656 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38657 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38658 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
38659 + show_regs(regs);
38660 + force_sig_specific(SIGKILL, current);
38661 +}
38662 +#endif
38663 +
38664 +#ifdef CONFIG_PAX_USERCOPY
38665 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38666 +int object_is_on_stack(const void *obj, unsigned long len)
38667 +{
38668 + const void * const stack = task_stack_page(current);
38669 + const void * const stackend = stack + THREAD_SIZE;
38670 +
38671 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38672 + const void *frame = NULL;
38673 + const void *oldframe;
38674 +#endif
38675 +
38676 + if (obj + len < obj)
38677 + return -1;
38678 +
38679 + if (obj + len <= stack || stackend <= obj)
38680 + return 0;
38681 +
38682 + if (obj < stack || stackend < obj + len)
38683 + return -1;
38684 +
38685 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38686 + oldframe = __builtin_frame_address(1);
38687 + if (oldframe)
38688 + frame = __builtin_frame_address(2);
38689 + /*
38690 + low ----------------------------------------------> high
38691 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38692 + ^----------------^
38693 + allow copies only within here
38694 + */
38695 + while (stack <= frame && frame < stackend) {
38696 + /* if obj + len extends past the last frame, this
38697 + check won't pass and the next frame will be 0,
38698 + causing us to bail out and correctly report
38699 + the copy as invalid
38700 + */
38701 + if (obj + len <= frame)
38702 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38703 + oldframe = frame;
38704 + frame = *(const void * const *)frame;
38705 + }
38706 + return -1;
38707 +#else
38708 + return 1;
38709 +#endif
38710 +}
38711 +
38712 +
38713 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38714 +{
38715 + if (current->signal->curr_ip)
38716 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38717 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38718 + else
38719 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38720 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38721 +
38722 + dump_stack();
38723 + gr_handle_kernel_exploit();
38724 + do_group_exit(SIGKILL);
38725 +}
38726 +#endif
38727 +
38728 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38729 +void pax_track_stack(void)
38730 +{
38731 + unsigned long sp = (unsigned long)&sp;
38732 + if (sp < current_thread_info()->lowest_stack &&
38733 + sp > (unsigned long)task_stack_page(current))
38734 + current_thread_info()->lowest_stack = sp;
38735 +}
38736 +EXPORT_SYMBOL(pax_track_stack);
38737 +#endif
38738 +
38739 static int zap_process(struct task_struct *start)
38740 {
38741 struct task_struct *t;
38742 @@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
38743 pipe = file->f_path.dentry->d_inode->i_pipe;
38744
38745 pipe_lock(pipe);
38746 - pipe->readers++;
38747 - pipe->writers--;
38748 + atomic_inc(&pipe->readers);
38749 + atomic_dec(&pipe->writers);
38750
38751 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38752 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38753 wake_up_interruptible_sync(&pipe->wait);
38754 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38755 pipe_wait(pipe);
38756 }
38757
38758 - pipe->readers--;
38759 - pipe->writers++;
38760 + atomic_dec(&pipe->readers);
38761 + atomic_inc(&pipe->writers);
38762 pipe_unlock(pipe);
38763
38764 }
38765 @@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
38766 char **helper_argv = NULL;
38767 int helper_argc = 0;
38768 int dump_count = 0;
38769 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38770 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38771
38772 audit_core_dumps(signr);
38773
38774 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38775 + gr_handle_brute_attach(current, mm->flags);
38776 +
38777 binfmt = mm->binfmt;
38778 if (!binfmt || !binfmt->core_dump)
38779 goto fail;
38780 @@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
38781 */
38782 clear_thread_flag(TIF_SIGPENDING);
38783
38784 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38785 +
38786 /*
38787 * lock_kernel() because format_corename() is controlled by sysctl, which
38788 * uses lock_kernel()
38789 @@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
38790 goto fail_unlock;
38791 }
38792
38793 - dump_count = atomic_inc_return(&core_dump_count);
38794 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38795 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38796 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38797 task_tgid_vnr(current), current->comm);
38798 @@ -1972,7 +2281,7 @@ close_fail:
38799 filp_close(file, NULL);
38800 fail_dropcount:
38801 if (dump_count)
38802 - atomic_dec(&core_dump_count);
38803 + atomic_dec_unchecked(&core_dump_count);
38804 fail_unlock:
38805 if (helper_argv)
38806 argv_free(helper_argv);
38807 diff -urNp linux-2.6.32.42/fs/ext2/balloc.c linux-2.6.32.42/fs/ext2/balloc.c
38808 --- linux-2.6.32.42/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38809 +++ linux-2.6.32.42/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38810 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38811
38812 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38813 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38814 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38815 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38816 sbi->s_resuid != current_fsuid() &&
38817 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38818 return 0;
38819 diff -urNp linux-2.6.32.42/fs/ext3/balloc.c linux-2.6.32.42/fs/ext3/balloc.c
38820 --- linux-2.6.32.42/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38821 +++ linux-2.6.32.42/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38822 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38823
38824 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38825 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38826 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38827 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38828 sbi->s_resuid != current_fsuid() &&
38829 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38830 return 0;
38831 diff -urNp linux-2.6.32.42/fs/ext4/balloc.c linux-2.6.32.42/fs/ext4/balloc.c
38832 --- linux-2.6.32.42/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38833 +++ linux-2.6.32.42/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38834 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38835 /* Hm, nope. Are (enough) root reserved blocks available? */
38836 if (sbi->s_resuid == current_fsuid() ||
38837 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38838 - capable(CAP_SYS_RESOURCE)) {
38839 + capable_nolog(CAP_SYS_RESOURCE)) {
38840 if (free_blocks >= (nblocks + dirty_blocks))
38841 return 1;
38842 }
38843 diff -urNp linux-2.6.32.42/fs/ext4/ext4.h linux-2.6.32.42/fs/ext4/ext4.h
38844 --- linux-2.6.32.42/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38845 +++ linux-2.6.32.42/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38846 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38847
38848 /* stats for buddy allocator */
38849 spinlock_t s_mb_pa_lock;
38850 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38851 - atomic_t s_bal_success; /* we found long enough chunks */
38852 - atomic_t s_bal_allocated; /* in blocks */
38853 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38854 - atomic_t s_bal_goals; /* goal hits */
38855 - atomic_t s_bal_breaks; /* too long searches */
38856 - atomic_t s_bal_2orders; /* 2^order hits */
38857 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38858 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38859 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38860 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38861 + atomic_unchecked_t s_bal_goals; /* goal hits */
38862 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38863 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38864 spinlock_t s_bal_lock;
38865 unsigned long s_mb_buddies_generated;
38866 unsigned long long s_mb_generation_time;
38867 - atomic_t s_mb_lost_chunks;
38868 - atomic_t s_mb_preallocated;
38869 - atomic_t s_mb_discarded;
38870 + atomic_unchecked_t s_mb_lost_chunks;
38871 + atomic_unchecked_t s_mb_preallocated;
38872 + atomic_unchecked_t s_mb_discarded;
38873 atomic_t s_lock_busy;
38874
38875 /* locality groups */
38876 diff -urNp linux-2.6.32.42/fs/ext4/mballoc.c linux-2.6.32.42/fs/ext4/mballoc.c
38877 --- linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
38878 +++ linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
38879 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
38880 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38881
38882 if (EXT4_SB(sb)->s_mb_stats)
38883 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38884 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38885
38886 break;
38887 }
38888 @@ -2131,7 +2131,7 @@ repeat:
38889 ac->ac_status = AC_STATUS_CONTINUE;
38890 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38891 cr = 3;
38892 - atomic_inc(&sbi->s_mb_lost_chunks);
38893 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38894 goto repeat;
38895 }
38896 }
38897 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
38898 ext4_grpblk_t counters[16];
38899 } sg;
38900
38901 + pax_track_stack();
38902 +
38903 group--;
38904 if (group == 0)
38905 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38906 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
38907 if (sbi->s_mb_stats) {
38908 printk(KERN_INFO
38909 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38910 - atomic_read(&sbi->s_bal_allocated),
38911 - atomic_read(&sbi->s_bal_reqs),
38912 - atomic_read(&sbi->s_bal_success));
38913 + atomic_read_unchecked(&sbi->s_bal_allocated),
38914 + atomic_read_unchecked(&sbi->s_bal_reqs),
38915 + atomic_read_unchecked(&sbi->s_bal_success));
38916 printk(KERN_INFO
38917 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38918 "%u 2^N hits, %u breaks, %u lost\n",
38919 - atomic_read(&sbi->s_bal_ex_scanned),
38920 - atomic_read(&sbi->s_bal_goals),
38921 - atomic_read(&sbi->s_bal_2orders),
38922 - atomic_read(&sbi->s_bal_breaks),
38923 - atomic_read(&sbi->s_mb_lost_chunks));
38924 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38925 + atomic_read_unchecked(&sbi->s_bal_goals),
38926 + atomic_read_unchecked(&sbi->s_bal_2orders),
38927 + atomic_read_unchecked(&sbi->s_bal_breaks),
38928 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38929 printk(KERN_INFO
38930 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38931 sbi->s_mb_buddies_generated++,
38932 sbi->s_mb_generation_time);
38933 printk(KERN_INFO
38934 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38935 - atomic_read(&sbi->s_mb_preallocated),
38936 - atomic_read(&sbi->s_mb_discarded));
38937 + atomic_read_unchecked(&sbi->s_mb_preallocated),
38938 + atomic_read_unchecked(&sbi->s_mb_discarded));
38939 }
38940
38941 free_percpu(sbi->s_locality_groups);
38942 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
38943 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38944
38945 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38946 - atomic_inc(&sbi->s_bal_reqs);
38947 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38948 + atomic_inc_unchecked(&sbi->s_bal_reqs);
38949 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38950 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38951 - atomic_inc(&sbi->s_bal_success);
38952 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38953 + atomic_inc_unchecked(&sbi->s_bal_success);
38954 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38955 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38956 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38957 - atomic_inc(&sbi->s_bal_goals);
38958 + atomic_inc_unchecked(&sbi->s_bal_goals);
38959 if (ac->ac_found > sbi->s_mb_max_to_scan)
38960 - atomic_inc(&sbi->s_bal_breaks);
38961 + atomic_inc_unchecked(&sbi->s_bal_breaks);
38962 }
38963
38964 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38965 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38966 trace_ext4_mb_new_inode_pa(ac, pa);
38967
38968 ext4_mb_use_inode_pa(ac, pa);
38969 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38970 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38971
38972 ei = EXT4_I(ac->ac_inode);
38973 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38974 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38975 trace_ext4_mb_new_group_pa(ac, pa);
38976
38977 ext4_mb_use_group_pa(ac, pa);
38978 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38979 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38980
38981 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38982 lg = ac->ac_lg;
38983 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38984 * from the bitmap and continue.
38985 */
38986 }
38987 - atomic_add(free, &sbi->s_mb_discarded);
38988 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
38989
38990 return err;
38991 }
38992 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38993 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38994 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38995 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38996 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38997 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38998
38999 if (ac) {
39000 ac->ac_sb = sb;
39001 diff -urNp linux-2.6.32.42/fs/ext4/super.c linux-2.6.32.42/fs/ext4/super.c
39002 --- linux-2.6.32.42/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
39003 +++ linux-2.6.32.42/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
39004 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
39005 }
39006
39007
39008 -static struct sysfs_ops ext4_attr_ops = {
39009 +static const struct sysfs_ops ext4_attr_ops = {
39010 .show = ext4_attr_show,
39011 .store = ext4_attr_store,
39012 };
39013 diff -urNp linux-2.6.32.42/fs/fcntl.c linux-2.6.32.42/fs/fcntl.c
39014 --- linux-2.6.32.42/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
39015 +++ linux-2.6.32.42/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
39016 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
39017 if (err)
39018 return err;
39019
39020 + if (gr_handle_chroot_fowner(pid, type))
39021 + return -ENOENT;
39022 + if (gr_check_protected_task_fowner(pid, type))
39023 + return -EACCES;
39024 +
39025 f_modown(filp, pid, type, force);
39026 return 0;
39027 }
39028 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
39029 switch (cmd) {
39030 case F_DUPFD:
39031 case F_DUPFD_CLOEXEC:
39032 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39033 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39034 break;
39035 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39036 diff -urNp linux-2.6.32.42/fs/fifo.c linux-2.6.32.42/fs/fifo.c
39037 --- linux-2.6.32.42/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
39038 +++ linux-2.6.32.42/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
39039 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
39040 */
39041 filp->f_op = &read_pipefifo_fops;
39042 pipe->r_counter++;
39043 - if (pipe->readers++ == 0)
39044 + if (atomic_inc_return(&pipe->readers) == 1)
39045 wake_up_partner(inode);
39046
39047 - if (!pipe->writers) {
39048 + if (!atomic_read(&pipe->writers)) {
39049 if ((filp->f_flags & O_NONBLOCK)) {
39050 /* suppress POLLHUP until we have
39051 * seen a writer */
39052 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
39053 * errno=ENXIO when there is no process reading the FIFO.
39054 */
39055 ret = -ENXIO;
39056 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39057 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39058 goto err;
39059
39060 filp->f_op = &write_pipefifo_fops;
39061 pipe->w_counter++;
39062 - if (!pipe->writers++)
39063 + if (atomic_inc_return(&pipe->writers) == 1)
39064 wake_up_partner(inode);
39065
39066 - if (!pipe->readers) {
39067 + if (!atomic_read(&pipe->readers)) {
39068 wait_for_partner(inode, &pipe->r_counter);
39069 if (signal_pending(current))
39070 goto err_wr;
39071 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
39072 */
39073 filp->f_op = &rdwr_pipefifo_fops;
39074
39075 - pipe->readers++;
39076 - pipe->writers++;
39077 + atomic_inc(&pipe->readers);
39078 + atomic_inc(&pipe->writers);
39079 pipe->r_counter++;
39080 pipe->w_counter++;
39081 - if (pipe->readers == 1 || pipe->writers == 1)
39082 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39083 wake_up_partner(inode);
39084 break;
39085
39086 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
39087 return 0;
39088
39089 err_rd:
39090 - if (!--pipe->readers)
39091 + if (atomic_dec_and_test(&pipe->readers))
39092 wake_up_interruptible(&pipe->wait);
39093 ret = -ERESTARTSYS;
39094 goto err;
39095
39096 err_wr:
39097 - if (!--pipe->writers)
39098 + if (atomic_dec_and_test(&pipe->writers))
39099 wake_up_interruptible(&pipe->wait);
39100 ret = -ERESTARTSYS;
39101 goto err;
39102
39103 err:
39104 - if (!pipe->readers && !pipe->writers)
39105 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39106 free_pipe_info(inode);
39107
39108 err_nocleanup:
39109 diff -urNp linux-2.6.32.42/fs/file.c linux-2.6.32.42/fs/file.c
39110 --- linux-2.6.32.42/fs/file.c 2011-03-27 14:31:47.000000000 -0400
39111 +++ linux-2.6.32.42/fs/file.c 2011-04-17 15:56:46.000000000 -0400
39112 @@ -14,6 +14,7 @@
39113 #include <linux/slab.h>
39114 #include <linux/vmalloc.h>
39115 #include <linux/file.h>
39116 +#include <linux/security.h>
39117 #include <linux/fdtable.h>
39118 #include <linux/bitops.h>
39119 #include <linux/interrupt.h>
39120 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
39121 * N.B. For clone tasks sharing a files structure, this test
39122 * will limit the total number of files that can be opened.
39123 */
39124 +
39125 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39126 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39127 return -EMFILE;
39128
39129 diff -urNp linux-2.6.32.42/fs/filesystems.c linux-2.6.32.42/fs/filesystems.c
39130 --- linux-2.6.32.42/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
39131 +++ linux-2.6.32.42/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
39132 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
39133 int len = dot ? dot - name : strlen(name);
39134
39135 fs = __get_fs_type(name, len);
39136 +
39137 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
39138 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39139 +#else
39140 if (!fs && (request_module("%.*s", len, name) == 0))
39141 +#endif
39142 fs = __get_fs_type(name, len);
39143
39144 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39145 diff -urNp linux-2.6.32.42/fs/fscache/cookie.c linux-2.6.32.42/fs/fscache/cookie.c
39146 --- linux-2.6.32.42/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
39147 +++ linux-2.6.32.42/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
39148 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39149 parent ? (char *) parent->def->name : "<no-parent>",
39150 def->name, netfs_data);
39151
39152 - fscache_stat(&fscache_n_acquires);
39153 + fscache_stat_unchecked(&fscache_n_acquires);
39154
39155 /* if there's no parent cookie, then we don't create one here either */
39156 if (!parent) {
39157 - fscache_stat(&fscache_n_acquires_null);
39158 + fscache_stat_unchecked(&fscache_n_acquires_null);
39159 _leave(" [no parent]");
39160 return NULL;
39161 }
39162 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39163 /* allocate and initialise a cookie */
39164 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39165 if (!cookie) {
39166 - fscache_stat(&fscache_n_acquires_oom);
39167 + fscache_stat_unchecked(&fscache_n_acquires_oom);
39168 _leave(" [ENOMEM]");
39169 return NULL;
39170 }
39171 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39172
39173 switch (cookie->def->type) {
39174 case FSCACHE_COOKIE_TYPE_INDEX:
39175 - fscache_stat(&fscache_n_cookie_index);
39176 + fscache_stat_unchecked(&fscache_n_cookie_index);
39177 break;
39178 case FSCACHE_COOKIE_TYPE_DATAFILE:
39179 - fscache_stat(&fscache_n_cookie_data);
39180 + fscache_stat_unchecked(&fscache_n_cookie_data);
39181 break;
39182 default:
39183 - fscache_stat(&fscache_n_cookie_special);
39184 + fscache_stat_unchecked(&fscache_n_cookie_special);
39185 break;
39186 }
39187
39188 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39189 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39190 atomic_dec(&parent->n_children);
39191 __fscache_cookie_put(cookie);
39192 - fscache_stat(&fscache_n_acquires_nobufs);
39193 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39194 _leave(" = NULL");
39195 return NULL;
39196 }
39197 }
39198
39199 - fscache_stat(&fscache_n_acquires_ok);
39200 + fscache_stat_unchecked(&fscache_n_acquires_ok);
39201 _leave(" = %p", cookie);
39202 return cookie;
39203 }
39204 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39205 cache = fscache_select_cache_for_object(cookie->parent);
39206 if (!cache) {
39207 up_read(&fscache_addremove_sem);
39208 - fscache_stat(&fscache_n_acquires_no_cache);
39209 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39210 _leave(" = -ENOMEDIUM [no cache]");
39211 return -ENOMEDIUM;
39212 }
39213 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39214 object = cache->ops->alloc_object(cache, cookie);
39215 fscache_stat_d(&fscache_n_cop_alloc_object);
39216 if (IS_ERR(object)) {
39217 - fscache_stat(&fscache_n_object_no_alloc);
39218 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
39219 ret = PTR_ERR(object);
39220 goto error;
39221 }
39222
39223 - fscache_stat(&fscache_n_object_alloc);
39224 + fscache_stat_unchecked(&fscache_n_object_alloc);
39225
39226 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39227
39228 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39229 struct fscache_object *object;
39230 struct hlist_node *_p;
39231
39232 - fscache_stat(&fscache_n_updates);
39233 + fscache_stat_unchecked(&fscache_n_updates);
39234
39235 if (!cookie) {
39236 - fscache_stat(&fscache_n_updates_null);
39237 + fscache_stat_unchecked(&fscache_n_updates_null);
39238 _leave(" [no cookie]");
39239 return;
39240 }
39241 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39242 struct fscache_object *object;
39243 unsigned long event;
39244
39245 - fscache_stat(&fscache_n_relinquishes);
39246 + fscache_stat_unchecked(&fscache_n_relinquishes);
39247 if (retire)
39248 - fscache_stat(&fscache_n_relinquishes_retire);
39249 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39250
39251 if (!cookie) {
39252 - fscache_stat(&fscache_n_relinquishes_null);
39253 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
39254 _leave(" [no cookie]");
39255 return;
39256 }
39257 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39258
39259 /* wait for the cookie to finish being instantiated (or to fail) */
39260 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39261 - fscache_stat(&fscache_n_relinquishes_waitcrt);
39262 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39263 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39264 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39265 }
39266 diff -urNp linux-2.6.32.42/fs/fscache/internal.h linux-2.6.32.42/fs/fscache/internal.h
39267 --- linux-2.6.32.42/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
39268 +++ linux-2.6.32.42/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
39269 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
39270 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39271 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39272
39273 -extern atomic_t fscache_n_op_pend;
39274 -extern atomic_t fscache_n_op_run;
39275 -extern atomic_t fscache_n_op_enqueue;
39276 -extern atomic_t fscache_n_op_deferred_release;
39277 -extern atomic_t fscache_n_op_release;
39278 -extern atomic_t fscache_n_op_gc;
39279 -extern atomic_t fscache_n_op_cancelled;
39280 -extern atomic_t fscache_n_op_rejected;
39281 -
39282 -extern atomic_t fscache_n_attr_changed;
39283 -extern atomic_t fscache_n_attr_changed_ok;
39284 -extern atomic_t fscache_n_attr_changed_nobufs;
39285 -extern atomic_t fscache_n_attr_changed_nomem;
39286 -extern atomic_t fscache_n_attr_changed_calls;
39287 -
39288 -extern atomic_t fscache_n_allocs;
39289 -extern atomic_t fscache_n_allocs_ok;
39290 -extern atomic_t fscache_n_allocs_wait;
39291 -extern atomic_t fscache_n_allocs_nobufs;
39292 -extern atomic_t fscache_n_allocs_intr;
39293 -extern atomic_t fscache_n_allocs_object_dead;
39294 -extern atomic_t fscache_n_alloc_ops;
39295 -extern atomic_t fscache_n_alloc_op_waits;
39296 -
39297 -extern atomic_t fscache_n_retrievals;
39298 -extern atomic_t fscache_n_retrievals_ok;
39299 -extern atomic_t fscache_n_retrievals_wait;
39300 -extern atomic_t fscache_n_retrievals_nodata;
39301 -extern atomic_t fscache_n_retrievals_nobufs;
39302 -extern atomic_t fscache_n_retrievals_intr;
39303 -extern atomic_t fscache_n_retrievals_nomem;
39304 -extern atomic_t fscache_n_retrievals_object_dead;
39305 -extern atomic_t fscache_n_retrieval_ops;
39306 -extern atomic_t fscache_n_retrieval_op_waits;
39307 -
39308 -extern atomic_t fscache_n_stores;
39309 -extern atomic_t fscache_n_stores_ok;
39310 -extern atomic_t fscache_n_stores_again;
39311 -extern atomic_t fscache_n_stores_nobufs;
39312 -extern atomic_t fscache_n_stores_oom;
39313 -extern atomic_t fscache_n_store_ops;
39314 -extern atomic_t fscache_n_store_calls;
39315 -extern atomic_t fscache_n_store_pages;
39316 -extern atomic_t fscache_n_store_radix_deletes;
39317 -extern atomic_t fscache_n_store_pages_over_limit;
39318 -
39319 -extern atomic_t fscache_n_store_vmscan_not_storing;
39320 -extern atomic_t fscache_n_store_vmscan_gone;
39321 -extern atomic_t fscache_n_store_vmscan_busy;
39322 -extern atomic_t fscache_n_store_vmscan_cancelled;
39323 -
39324 -extern atomic_t fscache_n_marks;
39325 -extern atomic_t fscache_n_uncaches;
39326 -
39327 -extern atomic_t fscache_n_acquires;
39328 -extern atomic_t fscache_n_acquires_null;
39329 -extern atomic_t fscache_n_acquires_no_cache;
39330 -extern atomic_t fscache_n_acquires_ok;
39331 -extern atomic_t fscache_n_acquires_nobufs;
39332 -extern atomic_t fscache_n_acquires_oom;
39333 -
39334 -extern atomic_t fscache_n_updates;
39335 -extern atomic_t fscache_n_updates_null;
39336 -extern atomic_t fscache_n_updates_run;
39337 -
39338 -extern atomic_t fscache_n_relinquishes;
39339 -extern atomic_t fscache_n_relinquishes_null;
39340 -extern atomic_t fscache_n_relinquishes_waitcrt;
39341 -extern atomic_t fscache_n_relinquishes_retire;
39342 -
39343 -extern atomic_t fscache_n_cookie_index;
39344 -extern atomic_t fscache_n_cookie_data;
39345 -extern atomic_t fscache_n_cookie_special;
39346 -
39347 -extern atomic_t fscache_n_object_alloc;
39348 -extern atomic_t fscache_n_object_no_alloc;
39349 -extern atomic_t fscache_n_object_lookups;
39350 -extern atomic_t fscache_n_object_lookups_negative;
39351 -extern atomic_t fscache_n_object_lookups_positive;
39352 -extern atomic_t fscache_n_object_lookups_timed_out;
39353 -extern atomic_t fscache_n_object_created;
39354 -extern atomic_t fscache_n_object_avail;
39355 -extern atomic_t fscache_n_object_dead;
39356 -
39357 -extern atomic_t fscache_n_checkaux_none;
39358 -extern atomic_t fscache_n_checkaux_okay;
39359 -extern atomic_t fscache_n_checkaux_update;
39360 -extern atomic_t fscache_n_checkaux_obsolete;
39361 +extern atomic_unchecked_t fscache_n_op_pend;
39362 +extern atomic_unchecked_t fscache_n_op_run;
39363 +extern atomic_unchecked_t fscache_n_op_enqueue;
39364 +extern atomic_unchecked_t fscache_n_op_deferred_release;
39365 +extern atomic_unchecked_t fscache_n_op_release;
39366 +extern atomic_unchecked_t fscache_n_op_gc;
39367 +extern atomic_unchecked_t fscache_n_op_cancelled;
39368 +extern atomic_unchecked_t fscache_n_op_rejected;
39369 +
39370 +extern atomic_unchecked_t fscache_n_attr_changed;
39371 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
39372 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39373 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39374 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
39375 +
39376 +extern atomic_unchecked_t fscache_n_allocs;
39377 +extern atomic_unchecked_t fscache_n_allocs_ok;
39378 +extern atomic_unchecked_t fscache_n_allocs_wait;
39379 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
39380 +extern atomic_unchecked_t fscache_n_allocs_intr;
39381 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
39382 +extern atomic_unchecked_t fscache_n_alloc_ops;
39383 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
39384 +
39385 +extern atomic_unchecked_t fscache_n_retrievals;
39386 +extern atomic_unchecked_t fscache_n_retrievals_ok;
39387 +extern atomic_unchecked_t fscache_n_retrievals_wait;
39388 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
39389 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39390 +extern atomic_unchecked_t fscache_n_retrievals_intr;
39391 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
39392 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39393 +extern atomic_unchecked_t fscache_n_retrieval_ops;
39394 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39395 +
39396 +extern atomic_unchecked_t fscache_n_stores;
39397 +extern atomic_unchecked_t fscache_n_stores_ok;
39398 +extern atomic_unchecked_t fscache_n_stores_again;
39399 +extern atomic_unchecked_t fscache_n_stores_nobufs;
39400 +extern atomic_unchecked_t fscache_n_stores_oom;
39401 +extern atomic_unchecked_t fscache_n_store_ops;
39402 +extern atomic_unchecked_t fscache_n_store_calls;
39403 +extern atomic_unchecked_t fscache_n_store_pages;
39404 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
39405 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39406 +
39407 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39408 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39409 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39410 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39411 +
39412 +extern atomic_unchecked_t fscache_n_marks;
39413 +extern atomic_unchecked_t fscache_n_uncaches;
39414 +
39415 +extern atomic_unchecked_t fscache_n_acquires;
39416 +extern atomic_unchecked_t fscache_n_acquires_null;
39417 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
39418 +extern atomic_unchecked_t fscache_n_acquires_ok;
39419 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
39420 +extern atomic_unchecked_t fscache_n_acquires_oom;
39421 +
39422 +extern atomic_unchecked_t fscache_n_updates;
39423 +extern atomic_unchecked_t fscache_n_updates_null;
39424 +extern atomic_unchecked_t fscache_n_updates_run;
39425 +
39426 +extern atomic_unchecked_t fscache_n_relinquishes;
39427 +extern atomic_unchecked_t fscache_n_relinquishes_null;
39428 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39429 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
39430 +
39431 +extern atomic_unchecked_t fscache_n_cookie_index;
39432 +extern atomic_unchecked_t fscache_n_cookie_data;
39433 +extern atomic_unchecked_t fscache_n_cookie_special;
39434 +
39435 +extern atomic_unchecked_t fscache_n_object_alloc;
39436 +extern atomic_unchecked_t fscache_n_object_no_alloc;
39437 +extern atomic_unchecked_t fscache_n_object_lookups;
39438 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
39439 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
39440 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39441 +extern atomic_unchecked_t fscache_n_object_created;
39442 +extern atomic_unchecked_t fscache_n_object_avail;
39443 +extern atomic_unchecked_t fscache_n_object_dead;
39444 +
39445 +extern atomic_unchecked_t fscache_n_checkaux_none;
39446 +extern atomic_unchecked_t fscache_n_checkaux_okay;
39447 +extern atomic_unchecked_t fscache_n_checkaux_update;
39448 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39449
39450 extern atomic_t fscache_n_cop_alloc_object;
39451 extern atomic_t fscache_n_cop_lookup_object;
39452 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
39453 atomic_inc(stat);
39454 }
39455
39456 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39457 +{
39458 + atomic_inc_unchecked(stat);
39459 +}
39460 +
39461 static inline void fscache_stat_d(atomic_t *stat)
39462 {
39463 atomic_dec(stat);
39464 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
39465
39466 #define __fscache_stat(stat) (NULL)
39467 #define fscache_stat(stat) do {} while (0)
39468 +#define fscache_stat_unchecked(stat) do {} while (0)
39469 #define fscache_stat_d(stat) do {} while (0)
39470 #endif
39471
39472 diff -urNp linux-2.6.32.42/fs/fscache/object.c linux-2.6.32.42/fs/fscache/object.c
39473 --- linux-2.6.32.42/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
39474 +++ linux-2.6.32.42/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
39475 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
39476 /* update the object metadata on disk */
39477 case FSCACHE_OBJECT_UPDATING:
39478 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39479 - fscache_stat(&fscache_n_updates_run);
39480 + fscache_stat_unchecked(&fscache_n_updates_run);
39481 fscache_stat(&fscache_n_cop_update_object);
39482 object->cache->ops->update_object(object);
39483 fscache_stat_d(&fscache_n_cop_update_object);
39484 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
39485 spin_lock(&object->lock);
39486 object->state = FSCACHE_OBJECT_DEAD;
39487 spin_unlock(&object->lock);
39488 - fscache_stat(&fscache_n_object_dead);
39489 + fscache_stat_unchecked(&fscache_n_object_dead);
39490 goto terminal_transit;
39491
39492 /* handle the parent cache of this object being withdrawn from
39493 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
39494 spin_lock(&object->lock);
39495 object->state = FSCACHE_OBJECT_DEAD;
39496 spin_unlock(&object->lock);
39497 - fscache_stat(&fscache_n_object_dead);
39498 + fscache_stat_unchecked(&fscache_n_object_dead);
39499 goto terminal_transit;
39500
39501 /* complain about the object being woken up once it is
39502 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
39503 parent->cookie->def->name, cookie->def->name,
39504 object->cache->tag->name);
39505
39506 - fscache_stat(&fscache_n_object_lookups);
39507 + fscache_stat_unchecked(&fscache_n_object_lookups);
39508 fscache_stat(&fscache_n_cop_lookup_object);
39509 ret = object->cache->ops->lookup_object(object);
39510 fscache_stat_d(&fscache_n_cop_lookup_object);
39511 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
39512 if (ret == -ETIMEDOUT) {
39513 /* probably stuck behind another object, so move this one to
39514 * the back of the queue */
39515 - fscache_stat(&fscache_n_object_lookups_timed_out);
39516 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39517 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39518 }
39519
39520 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
39521
39522 spin_lock(&object->lock);
39523 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39524 - fscache_stat(&fscache_n_object_lookups_negative);
39525 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39526
39527 /* transit here to allow write requests to begin stacking up
39528 * and read requests to begin returning ENODATA */
39529 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
39530 * result, in which case there may be data available */
39531 spin_lock(&object->lock);
39532 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39533 - fscache_stat(&fscache_n_object_lookups_positive);
39534 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39535
39536 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39537
39538 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
39539 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39540 } else {
39541 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39542 - fscache_stat(&fscache_n_object_created);
39543 + fscache_stat_unchecked(&fscache_n_object_created);
39544
39545 object->state = FSCACHE_OBJECT_AVAILABLE;
39546 spin_unlock(&object->lock);
39547 @@ -633,7 +633,7 @@ static void fscache_object_available(str
39548 fscache_enqueue_dependents(object);
39549
39550 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39551 - fscache_stat(&fscache_n_object_avail);
39552 + fscache_stat_unchecked(&fscache_n_object_avail);
39553
39554 _leave("");
39555 }
39556 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39557 enum fscache_checkaux result;
39558
39559 if (!object->cookie->def->check_aux) {
39560 - fscache_stat(&fscache_n_checkaux_none);
39561 + fscache_stat_unchecked(&fscache_n_checkaux_none);
39562 return FSCACHE_CHECKAUX_OKAY;
39563 }
39564
39565 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39566 switch (result) {
39567 /* entry okay as is */
39568 case FSCACHE_CHECKAUX_OKAY:
39569 - fscache_stat(&fscache_n_checkaux_okay);
39570 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
39571 break;
39572
39573 /* entry requires update */
39574 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39575 - fscache_stat(&fscache_n_checkaux_update);
39576 + fscache_stat_unchecked(&fscache_n_checkaux_update);
39577 break;
39578
39579 /* entry requires deletion */
39580 case FSCACHE_CHECKAUX_OBSOLETE:
39581 - fscache_stat(&fscache_n_checkaux_obsolete);
39582 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39583 break;
39584
39585 default:
39586 diff -urNp linux-2.6.32.42/fs/fscache/operation.c linux-2.6.32.42/fs/fscache/operation.c
39587 --- linux-2.6.32.42/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
39588 +++ linux-2.6.32.42/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
39589 @@ -16,7 +16,7 @@
39590 #include <linux/seq_file.h>
39591 #include "internal.h"
39592
39593 -atomic_t fscache_op_debug_id;
39594 +atomic_unchecked_t fscache_op_debug_id;
39595 EXPORT_SYMBOL(fscache_op_debug_id);
39596
39597 /**
39598 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
39599 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39600 ASSERTCMP(atomic_read(&op->usage), >, 0);
39601
39602 - fscache_stat(&fscache_n_op_enqueue);
39603 + fscache_stat_unchecked(&fscache_n_op_enqueue);
39604 switch (op->flags & FSCACHE_OP_TYPE) {
39605 case FSCACHE_OP_FAST:
39606 _debug("queue fast");
39607 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39608 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39609 if (op->processor)
39610 fscache_enqueue_operation(op);
39611 - fscache_stat(&fscache_n_op_run);
39612 + fscache_stat_unchecked(&fscache_n_op_run);
39613 }
39614
39615 /*
39616 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39617 if (object->n_ops > 0) {
39618 atomic_inc(&op->usage);
39619 list_add_tail(&op->pend_link, &object->pending_ops);
39620 - fscache_stat(&fscache_n_op_pend);
39621 + fscache_stat_unchecked(&fscache_n_op_pend);
39622 } else if (!list_empty(&object->pending_ops)) {
39623 atomic_inc(&op->usage);
39624 list_add_tail(&op->pend_link, &object->pending_ops);
39625 - fscache_stat(&fscache_n_op_pend);
39626 + fscache_stat_unchecked(&fscache_n_op_pend);
39627 fscache_start_operations(object);
39628 } else {
39629 ASSERTCMP(object->n_in_progress, ==, 0);
39630 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39631 object->n_exclusive++; /* reads and writes must wait */
39632 atomic_inc(&op->usage);
39633 list_add_tail(&op->pend_link, &object->pending_ops);
39634 - fscache_stat(&fscache_n_op_pend);
39635 + fscache_stat_unchecked(&fscache_n_op_pend);
39636 ret = 0;
39637 } else {
39638 /* not allowed to submit ops in any other state */
39639 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39640 if (object->n_exclusive > 0) {
39641 atomic_inc(&op->usage);
39642 list_add_tail(&op->pend_link, &object->pending_ops);
39643 - fscache_stat(&fscache_n_op_pend);
39644 + fscache_stat_unchecked(&fscache_n_op_pend);
39645 } else if (!list_empty(&object->pending_ops)) {
39646 atomic_inc(&op->usage);
39647 list_add_tail(&op->pend_link, &object->pending_ops);
39648 - fscache_stat(&fscache_n_op_pend);
39649 + fscache_stat_unchecked(&fscache_n_op_pend);
39650 fscache_start_operations(object);
39651 } else {
39652 ASSERTCMP(object->n_exclusive, ==, 0);
39653 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39654 object->n_ops++;
39655 atomic_inc(&op->usage);
39656 list_add_tail(&op->pend_link, &object->pending_ops);
39657 - fscache_stat(&fscache_n_op_pend);
39658 + fscache_stat_unchecked(&fscache_n_op_pend);
39659 ret = 0;
39660 } else if (object->state == FSCACHE_OBJECT_DYING ||
39661 object->state == FSCACHE_OBJECT_LC_DYING ||
39662 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39663 - fscache_stat(&fscache_n_op_rejected);
39664 + fscache_stat_unchecked(&fscache_n_op_rejected);
39665 ret = -ENOBUFS;
39666 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39667 fscache_report_unexpected_submission(object, op, ostate);
39668 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39669
39670 ret = -EBUSY;
39671 if (!list_empty(&op->pend_link)) {
39672 - fscache_stat(&fscache_n_op_cancelled);
39673 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39674 list_del_init(&op->pend_link);
39675 object->n_ops--;
39676 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39677 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39678 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39679 BUG();
39680
39681 - fscache_stat(&fscache_n_op_release);
39682 + fscache_stat_unchecked(&fscache_n_op_release);
39683
39684 if (op->release) {
39685 op->release(op);
39686 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39687 * lock, and defer it otherwise */
39688 if (!spin_trylock(&object->lock)) {
39689 _debug("defer put");
39690 - fscache_stat(&fscache_n_op_deferred_release);
39691 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39692
39693 cache = object->cache;
39694 spin_lock(&cache->op_gc_list_lock);
39695 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39696
39697 _debug("GC DEFERRED REL OBJ%x OP%x",
39698 object->debug_id, op->debug_id);
39699 - fscache_stat(&fscache_n_op_gc);
39700 + fscache_stat_unchecked(&fscache_n_op_gc);
39701
39702 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39703
39704 diff -urNp linux-2.6.32.42/fs/fscache/page.c linux-2.6.32.42/fs/fscache/page.c
39705 --- linux-2.6.32.42/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39706 +++ linux-2.6.32.42/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39707 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39708 val = radix_tree_lookup(&cookie->stores, page->index);
39709 if (!val) {
39710 rcu_read_unlock();
39711 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39712 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39713 __fscache_uncache_page(cookie, page);
39714 return true;
39715 }
39716 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39717 spin_unlock(&cookie->stores_lock);
39718
39719 if (xpage) {
39720 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39721 - fscache_stat(&fscache_n_store_radix_deletes);
39722 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39723 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39724 ASSERTCMP(xpage, ==, page);
39725 } else {
39726 - fscache_stat(&fscache_n_store_vmscan_gone);
39727 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39728 }
39729
39730 wake_up_bit(&cookie->flags, 0);
39731 @@ -106,7 +106,7 @@ page_busy:
39732 /* we might want to wait here, but that could deadlock the allocator as
39733 * the slow-work threads writing to the cache may all end up sleeping
39734 * on memory allocation */
39735 - fscache_stat(&fscache_n_store_vmscan_busy);
39736 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39737 return false;
39738 }
39739 EXPORT_SYMBOL(__fscache_maybe_release_page);
39740 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39741 FSCACHE_COOKIE_STORING_TAG);
39742 if (!radix_tree_tag_get(&cookie->stores, page->index,
39743 FSCACHE_COOKIE_PENDING_TAG)) {
39744 - fscache_stat(&fscache_n_store_radix_deletes);
39745 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39746 xpage = radix_tree_delete(&cookie->stores, page->index);
39747 }
39748 spin_unlock(&cookie->stores_lock);
39749 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39750
39751 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39752
39753 - fscache_stat(&fscache_n_attr_changed_calls);
39754 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39755
39756 if (fscache_object_is_active(object)) {
39757 fscache_set_op_state(op, "CallFS");
39758 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39759
39760 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39761
39762 - fscache_stat(&fscache_n_attr_changed);
39763 + fscache_stat_unchecked(&fscache_n_attr_changed);
39764
39765 op = kzalloc(sizeof(*op), GFP_KERNEL);
39766 if (!op) {
39767 - fscache_stat(&fscache_n_attr_changed_nomem);
39768 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39769 _leave(" = -ENOMEM");
39770 return -ENOMEM;
39771 }
39772 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39773 if (fscache_submit_exclusive_op(object, op) < 0)
39774 goto nobufs;
39775 spin_unlock(&cookie->lock);
39776 - fscache_stat(&fscache_n_attr_changed_ok);
39777 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39778 fscache_put_operation(op);
39779 _leave(" = 0");
39780 return 0;
39781 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39782 nobufs:
39783 spin_unlock(&cookie->lock);
39784 kfree(op);
39785 - fscache_stat(&fscache_n_attr_changed_nobufs);
39786 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39787 _leave(" = %d", -ENOBUFS);
39788 return -ENOBUFS;
39789 }
39790 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39791 /* allocate a retrieval operation and attempt to submit it */
39792 op = kzalloc(sizeof(*op), GFP_NOIO);
39793 if (!op) {
39794 - fscache_stat(&fscache_n_retrievals_nomem);
39795 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39796 return NULL;
39797 }
39798
39799 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39800 return 0;
39801 }
39802
39803 - fscache_stat(&fscache_n_retrievals_wait);
39804 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39805
39806 jif = jiffies;
39807 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39808 fscache_wait_bit_interruptible,
39809 TASK_INTERRUPTIBLE) != 0) {
39810 - fscache_stat(&fscache_n_retrievals_intr);
39811 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39812 _leave(" = -ERESTARTSYS");
39813 return -ERESTARTSYS;
39814 }
39815 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39816 */
39817 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39818 struct fscache_retrieval *op,
39819 - atomic_t *stat_op_waits,
39820 - atomic_t *stat_object_dead)
39821 + atomic_unchecked_t *stat_op_waits,
39822 + atomic_unchecked_t *stat_object_dead)
39823 {
39824 int ret;
39825
39826 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39827 goto check_if_dead;
39828
39829 _debug(">>> WT");
39830 - fscache_stat(stat_op_waits);
39831 + fscache_stat_unchecked(stat_op_waits);
39832 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39833 fscache_wait_bit_interruptible,
39834 TASK_INTERRUPTIBLE) < 0) {
39835 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39836
39837 check_if_dead:
39838 if (unlikely(fscache_object_is_dead(object))) {
39839 - fscache_stat(stat_object_dead);
39840 + fscache_stat_unchecked(stat_object_dead);
39841 return -ENOBUFS;
39842 }
39843 return 0;
39844 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39845
39846 _enter("%p,%p,,,", cookie, page);
39847
39848 - fscache_stat(&fscache_n_retrievals);
39849 + fscache_stat_unchecked(&fscache_n_retrievals);
39850
39851 if (hlist_empty(&cookie->backing_objects))
39852 goto nobufs;
39853 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39854 goto nobufs_unlock;
39855 spin_unlock(&cookie->lock);
39856
39857 - fscache_stat(&fscache_n_retrieval_ops);
39858 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39859
39860 /* pin the netfs read context in case we need to do the actual netfs
39861 * read because we've encountered a cache read failure */
39862 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39863
39864 error:
39865 if (ret == -ENOMEM)
39866 - fscache_stat(&fscache_n_retrievals_nomem);
39867 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39868 else if (ret == -ERESTARTSYS)
39869 - fscache_stat(&fscache_n_retrievals_intr);
39870 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39871 else if (ret == -ENODATA)
39872 - fscache_stat(&fscache_n_retrievals_nodata);
39873 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39874 else if (ret < 0)
39875 - fscache_stat(&fscache_n_retrievals_nobufs);
39876 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39877 else
39878 - fscache_stat(&fscache_n_retrievals_ok);
39879 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39880
39881 fscache_put_retrieval(op);
39882 _leave(" = %d", ret);
39883 @@ -453,7 +453,7 @@ nobufs_unlock:
39884 spin_unlock(&cookie->lock);
39885 kfree(op);
39886 nobufs:
39887 - fscache_stat(&fscache_n_retrievals_nobufs);
39888 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39889 _leave(" = -ENOBUFS");
39890 return -ENOBUFS;
39891 }
39892 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39893
39894 _enter("%p,,%d,,,", cookie, *nr_pages);
39895
39896 - fscache_stat(&fscache_n_retrievals);
39897 + fscache_stat_unchecked(&fscache_n_retrievals);
39898
39899 if (hlist_empty(&cookie->backing_objects))
39900 goto nobufs;
39901 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39902 goto nobufs_unlock;
39903 spin_unlock(&cookie->lock);
39904
39905 - fscache_stat(&fscache_n_retrieval_ops);
39906 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39907
39908 /* pin the netfs read context in case we need to do the actual netfs
39909 * read because we've encountered a cache read failure */
39910 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39911
39912 error:
39913 if (ret == -ENOMEM)
39914 - fscache_stat(&fscache_n_retrievals_nomem);
39915 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39916 else if (ret == -ERESTARTSYS)
39917 - fscache_stat(&fscache_n_retrievals_intr);
39918 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39919 else if (ret == -ENODATA)
39920 - fscache_stat(&fscache_n_retrievals_nodata);
39921 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39922 else if (ret < 0)
39923 - fscache_stat(&fscache_n_retrievals_nobufs);
39924 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39925 else
39926 - fscache_stat(&fscache_n_retrievals_ok);
39927 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39928
39929 fscache_put_retrieval(op);
39930 _leave(" = %d", ret);
39931 @@ -570,7 +570,7 @@ nobufs_unlock:
39932 spin_unlock(&cookie->lock);
39933 kfree(op);
39934 nobufs:
39935 - fscache_stat(&fscache_n_retrievals_nobufs);
39936 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39937 _leave(" = -ENOBUFS");
39938 return -ENOBUFS;
39939 }
39940 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39941
39942 _enter("%p,%p,,,", cookie, page);
39943
39944 - fscache_stat(&fscache_n_allocs);
39945 + fscache_stat_unchecked(&fscache_n_allocs);
39946
39947 if (hlist_empty(&cookie->backing_objects))
39948 goto nobufs;
39949 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39950 goto nobufs_unlock;
39951 spin_unlock(&cookie->lock);
39952
39953 - fscache_stat(&fscache_n_alloc_ops);
39954 + fscache_stat_unchecked(&fscache_n_alloc_ops);
39955
39956 ret = fscache_wait_for_retrieval_activation(
39957 object, op,
39958 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39959
39960 error:
39961 if (ret == -ERESTARTSYS)
39962 - fscache_stat(&fscache_n_allocs_intr);
39963 + fscache_stat_unchecked(&fscache_n_allocs_intr);
39964 else if (ret < 0)
39965 - fscache_stat(&fscache_n_allocs_nobufs);
39966 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39967 else
39968 - fscache_stat(&fscache_n_allocs_ok);
39969 + fscache_stat_unchecked(&fscache_n_allocs_ok);
39970
39971 fscache_put_retrieval(op);
39972 _leave(" = %d", ret);
39973 @@ -651,7 +651,7 @@ nobufs_unlock:
39974 spin_unlock(&cookie->lock);
39975 kfree(op);
39976 nobufs:
39977 - fscache_stat(&fscache_n_allocs_nobufs);
39978 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39979 _leave(" = -ENOBUFS");
39980 return -ENOBUFS;
39981 }
39982 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39983
39984 spin_lock(&cookie->stores_lock);
39985
39986 - fscache_stat(&fscache_n_store_calls);
39987 + fscache_stat_unchecked(&fscache_n_store_calls);
39988
39989 /* find a page to store */
39990 page = NULL;
39991 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39992 page = results[0];
39993 _debug("gang %d [%lx]", n, page->index);
39994 if (page->index > op->store_limit) {
39995 - fscache_stat(&fscache_n_store_pages_over_limit);
39996 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39997 goto superseded;
39998 }
39999
40000 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
40001
40002 if (page) {
40003 fscache_set_op_state(&op->op, "Store");
40004 - fscache_stat(&fscache_n_store_pages);
40005 + fscache_stat_unchecked(&fscache_n_store_pages);
40006 fscache_stat(&fscache_n_cop_write_page);
40007 ret = object->cache->ops->write_page(op, page);
40008 fscache_stat_d(&fscache_n_cop_write_page);
40009 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
40010 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40011 ASSERT(PageFsCache(page));
40012
40013 - fscache_stat(&fscache_n_stores);
40014 + fscache_stat_unchecked(&fscache_n_stores);
40015
40016 op = kzalloc(sizeof(*op), GFP_NOIO);
40017 if (!op)
40018 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
40019 spin_unlock(&cookie->stores_lock);
40020 spin_unlock(&object->lock);
40021
40022 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40023 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40024 op->store_limit = object->store_limit;
40025
40026 if (fscache_submit_op(object, &op->op) < 0)
40027 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
40028
40029 spin_unlock(&cookie->lock);
40030 radix_tree_preload_end();
40031 - fscache_stat(&fscache_n_store_ops);
40032 - fscache_stat(&fscache_n_stores_ok);
40033 + fscache_stat_unchecked(&fscache_n_store_ops);
40034 + fscache_stat_unchecked(&fscache_n_stores_ok);
40035
40036 /* the slow work queue now carries its own ref on the object */
40037 fscache_put_operation(&op->op);
40038 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
40039 return 0;
40040
40041 already_queued:
40042 - fscache_stat(&fscache_n_stores_again);
40043 + fscache_stat_unchecked(&fscache_n_stores_again);
40044 already_pending:
40045 spin_unlock(&cookie->stores_lock);
40046 spin_unlock(&object->lock);
40047 spin_unlock(&cookie->lock);
40048 radix_tree_preload_end();
40049 kfree(op);
40050 - fscache_stat(&fscache_n_stores_ok);
40051 + fscache_stat_unchecked(&fscache_n_stores_ok);
40052 _leave(" = 0");
40053 return 0;
40054
40055 @@ -886,14 +886,14 @@ nobufs:
40056 spin_unlock(&cookie->lock);
40057 radix_tree_preload_end();
40058 kfree(op);
40059 - fscache_stat(&fscache_n_stores_nobufs);
40060 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
40061 _leave(" = -ENOBUFS");
40062 return -ENOBUFS;
40063
40064 nomem_free:
40065 kfree(op);
40066 nomem:
40067 - fscache_stat(&fscache_n_stores_oom);
40068 + fscache_stat_unchecked(&fscache_n_stores_oom);
40069 _leave(" = -ENOMEM");
40070 return -ENOMEM;
40071 }
40072 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
40073 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40074 ASSERTCMP(page, !=, NULL);
40075
40076 - fscache_stat(&fscache_n_uncaches);
40077 + fscache_stat_unchecked(&fscache_n_uncaches);
40078
40079 /* cache withdrawal may beat us to it */
40080 if (!PageFsCache(page))
40081 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
40082 unsigned long loop;
40083
40084 #ifdef CONFIG_FSCACHE_STATS
40085 - atomic_add(pagevec->nr, &fscache_n_marks);
40086 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40087 #endif
40088
40089 for (loop = 0; loop < pagevec->nr; loop++) {
40090 diff -urNp linux-2.6.32.42/fs/fscache/stats.c linux-2.6.32.42/fs/fscache/stats.c
40091 --- linux-2.6.32.42/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
40092 +++ linux-2.6.32.42/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
40093 @@ -18,95 +18,95 @@
40094 /*
40095 * operation counters
40096 */
40097 -atomic_t fscache_n_op_pend;
40098 -atomic_t fscache_n_op_run;
40099 -atomic_t fscache_n_op_enqueue;
40100 -atomic_t fscache_n_op_requeue;
40101 -atomic_t fscache_n_op_deferred_release;
40102 -atomic_t fscache_n_op_release;
40103 -atomic_t fscache_n_op_gc;
40104 -atomic_t fscache_n_op_cancelled;
40105 -atomic_t fscache_n_op_rejected;
40106 -
40107 -atomic_t fscache_n_attr_changed;
40108 -atomic_t fscache_n_attr_changed_ok;
40109 -atomic_t fscache_n_attr_changed_nobufs;
40110 -atomic_t fscache_n_attr_changed_nomem;
40111 -atomic_t fscache_n_attr_changed_calls;
40112 -
40113 -atomic_t fscache_n_allocs;
40114 -atomic_t fscache_n_allocs_ok;
40115 -atomic_t fscache_n_allocs_wait;
40116 -atomic_t fscache_n_allocs_nobufs;
40117 -atomic_t fscache_n_allocs_intr;
40118 -atomic_t fscache_n_allocs_object_dead;
40119 -atomic_t fscache_n_alloc_ops;
40120 -atomic_t fscache_n_alloc_op_waits;
40121 -
40122 -atomic_t fscache_n_retrievals;
40123 -atomic_t fscache_n_retrievals_ok;
40124 -atomic_t fscache_n_retrievals_wait;
40125 -atomic_t fscache_n_retrievals_nodata;
40126 -atomic_t fscache_n_retrievals_nobufs;
40127 -atomic_t fscache_n_retrievals_intr;
40128 -atomic_t fscache_n_retrievals_nomem;
40129 -atomic_t fscache_n_retrievals_object_dead;
40130 -atomic_t fscache_n_retrieval_ops;
40131 -atomic_t fscache_n_retrieval_op_waits;
40132 -
40133 -atomic_t fscache_n_stores;
40134 -atomic_t fscache_n_stores_ok;
40135 -atomic_t fscache_n_stores_again;
40136 -atomic_t fscache_n_stores_nobufs;
40137 -atomic_t fscache_n_stores_oom;
40138 -atomic_t fscache_n_store_ops;
40139 -atomic_t fscache_n_store_calls;
40140 -atomic_t fscache_n_store_pages;
40141 -atomic_t fscache_n_store_radix_deletes;
40142 -atomic_t fscache_n_store_pages_over_limit;
40143 -
40144 -atomic_t fscache_n_store_vmscan_not_storing;
40145 -atomic_t fscache_n_store_vmscan_gone;
40146 -atomic_t fscache_n_store_vmscan_busy;
40147 -atomic_t fscache_n_store_vmscan_cancelled;
40148 -
40149 -atomic_t fscache_n_marks;
40150 -atomic_t fscache_n_uncaches;
40151 -
40152 -atomic_t fscache_n_acquires;
40153 -atomic_t fscache_n_acquires_null;
40154 -atomic_t fscache_n_acquires_no_cache;
40155 -atomic_t fscache_n_acquires_ok;
40156 -atomic_t fscache_n_acquires_nobufs;
40157 -atomic_t fscache_n_acquires_oom;
40158 -
40159 -atomic_t fscache_n_updates;
40160 -atomic_t fscache_n_updates_null;
40161 -atomic_t fscache_n_updates_run;
40162 -
40163 -atomic_t fscache_n_relinquishes;
40164 -atomic_t fscache_n_relinquishes_null;
40165 -atomic_t fscache_n_relinquishes_waitcrt;
40166 -atomic_t fscache_n_relinquishes_retire;
40167 -
40168 -atomic_t fscache_n_cookie_index;
40169 -atomic_t fscache_n_cookie_data;
40170 -atomic_t fscache_n_cookie_special;
40171 -
40172 -atomic_t fscache_n_object_alloc;
40173 -atomic_t fscache_n_object_no_alloc;
40174 -atomic_t fscache_n_object_lookups;
40175 -atomic_t fscache_n_object_lookups_negative;
40176 -atomic_t fscache_n_object_lookups_positive;
40177 -atomic_t fscache_n_object_lookups_timed_out;
40178 -atomic_t fscache_n_object_created;
40179 -atomic_t fscache_n_object_avail;
40180 -atomic_t fscache_n_object_dead;
40181 -
40182 -atomic_t fscache_n_checkaux_none;
40183 -atomic_t fscache_n_checkaux_okay;
40184 -atomic_t fscache_n_checkaux_update;
40185 -atomic_t fscache_n_checkaux_obsolete;
40186 +atomic_unchecked_t fscache_n_op_pend;
40187 +atomic_unchecked_t fscache_n_op_run;
40188 +atomic_unchecked_t fscache_n_op_enqueue;
40189 +atomic_unchecked_t fscache_n_op_requeue;
40190 +atomic_unchecked_t fscache_n_op_deferred_release;
40191 +atomic_unchecked_t fscache_n_op_release;
40192 +atomic_unchecked_t fscache_n_op_gc;
40193 +atomic_unchecked_t fscache_n_op_cancelled;
40194 +atomic_unchecked_t fscache_n_op_rejected;
40195 +
40196 +atomic_unchecked_t fscache_n_attr_changed;
40197 +atomic_unchecked_t fscache_n_attr_changed_ok;
40198 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
40199 +atomic_unchecked_t fscache_n_attr_changed_nomem;
40200 +atomic_unchecked_t fscache_n_attr_changed_calls;
40201 +
40202 +atomic_unchecked_t fscache_n_allocs;
40203 +atomic_unchecked_t fscache_n_allocs_ok;
40204 +atomic_unchecked_t fscache_n_allocs_wait;
40205 +atomic_unchecked_t fscache_n_allocs_nobufs;
40206 +atomic_unchecked_t fscache_n_allocs_intr;
40207 +atomic_unchecked_t fscache_n_allocs_object_dead;
40208 +atomic_unchecked_t fscache_n_alloc_ops;
40209 +atomic_unchecked_t fscache_n_alloc_op_waits;
40210 +
40211 +atomic_unchecked_t fscache_n_retrievals;
40212 +atomic_unchecked_t fscache_n_retrievals_ok;
40213 +atomic_unchecked_t fscache_n_retrievals_wait;
40214 +atomic_unchecked_t fscache_n_retrievals_nodata;
40215 +atomic_unchecked_t fscache_n_retrievals_nobufs;
40216 +atomic_unchecked_t fscache_n_retrievals_intr;
40217 +atomic_unchecked_t fscache_n_retrievals_nomem;
40218 +atomic_unchecked_t fscache_n_retrievals_object_dead;
40219 +atomic_unchecked_t fscache_n_retrieval_ops;
40220 +atomic_unchecked_t fscache_n_retrieval_op_waits;
40221 +
40222 +atomic_unchecked_t fscache_n_stores;
40223 +atomic_unchecked_t fscache_n_stores_ok;
40224 +atomic_unchecked_t fscache_n_stores_again;
40225 +atomic_unchecked_t fscache_n_stores_nobufs;
40226 +atomic_unchecked_t fscache_n_stores_oom;
40227 +atomic_unchecked_t fscache_n_store_ops;
40228 +atomic_unchecked_t fscache_n_store_calls;
40229 +atomic_unchecked_t fscache_n_store_pages;
40230 +atomic_unchecked_t fscache_n_store_radix_deletes;
40231 +atomic_unchecked_t fscache_n_store_pages_over_limit;
40232 +
40233 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40234 +atomic_unchecked_t fscache_n_store_vmscan_gone;
40235 +atomic_unchecked_t fscache_n_store_vmscan_busy;
40236 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40237 +
40238 +atomic_unchecked_t fscache_n_marks;
40239 +atomic_unchecked_t fscache_n_uncaches;
40240 +
40241 +atomic_unchecked_t fscache_n_acquires;
40242 +atomic_unchecked_t fscache_n_acquires_null;
40243 +atomic_unchecked_t fscache_n_acquires_no_cache;
40244 +atomic_unchecked_t fscache_n_acquires_ok;
40245 +atomic_unchecked_t fscache_n_acquires_nobufs;
40246 +atomic_unchecked_t fscache_n_acquires_oom;
40247 +
40248 +atomic_unchecked_t fscache_n_updates;
40249 +atomic_unchecked_t fscache_n_updates_null;
40250 +atomic_unchecked_t fscache_n_updates_run;
40251 +
40252 +atomic_unchecked_t fscache_n_relinquishes;
40253 +atomic_unchecked_t fscache_n_relinquishes_null;
40254 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40255 +atomic_unchecked_t fscache_n_relinquishes_retire;
40256 +
40257 +atomic_unchecked_t fscache_n_cookie_index;
40258 +atomic_unchecked_t fscache_n_cookie_data;
40259 +atomic_unchecked_t fscache_n_cookie_special;
40260 +
40261 +atomic_unchecked_t fscache_n_object_alloc;
40262 +atomic_unchecked_t fscache_n_object_no_alloc;
40263 +atomic_unchecked_t fscache_n_object_lookups;
40264 +atomic_unchecked_t fscache_n_object_lookups_negative;
40265 +atomic_unchecked_t fscache_n_object_lookups_positive;
40266 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
40267 +atomic_unchecked_t fscache_n_object_created;
40268 +atomic_unchecked_t fscache_n_object_avail;
40269 +atomic_unchecked_t fscache_n_object_dead;
40270 +
40271 +atomic_unchecked_t fscache_n_checkaux_none;
40272 +atomic_unchecked_t fscache_n_checkaux_okay;
40273 +atomic_unchecked_t fscache_n_checkaux_update;
40274 +atomic_unchecked_t fscache_n_checkaux_obsolete;
40275
40276 atomic_t fscache_n_cop_alloc_object;
40277 atomic_t fscache_n_cop_lookup_object;
40278 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40279 seq_puts(m, "FS-Cache statistics\n");
40280
40281 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40282 - atomic_read(&fscache_n_cookie_index),
40283 - atomic_read(&fscache_n_cookie_data),
40284 - atomic_read(&fscache_n_cookie_special));
40285 + atomic_read_unchecked(&fscache_n_cookie_index),
40286 + atomic_read_unchecked(&fscache_n_cookie_data),
40287 + atomic_read_unchecked(&fscache_n_cookie_special));
40288
40289 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40290 - atomic_read(&fscache_n_object_alloc),
40291 - atomic_read(&fscache_n_object_no_alloc),
40292 - atomic_read(&fscache_n_object_avail),
40293 - atomic_read(&fscache_n_object_dead));
40294 + atomic_read_unchecked(&fscache_n_object_alloc),
40295 + atomic_read_unchecked(&fscache_n_object_no_alloc),
40296 + atomic_read_unchecked(&fscache_n_object_avail),
40297 + atomic_read_unchecked(&fscache_n_object_dead));
40298 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40299 - atomic_read(&fscache_n_checkaux_none),
40300 - atomic_read(&fscache_n_checkaux_okay),
40301 - atomic_read(&fscache_n_checkaux_update),
40302 - atomic_read(&fscache_n_checkaux_obsolete));
40303 + atomic_read_unchecked(&fscache_n_checkaux_none),
40304 + atomic_read_unchecked(&fscache_n_checkaux_okay),
40305 + atomic_read_unchecked(&fscache_n_checkaux_update),
40306 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40307
40308 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40309 - atomic_read(&fscache_n_marks),
40310 - atomic_read(&fscache_n_uncaches));
40311 + atomic_read_unchecked(&fscache_n_marks),
40312 + atomic_read_unchecked(&fscache_n_uncaches));
40313
40314 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40315 " oom=%u\n",
40316 - atomic_read(&fscache_n_acquires),
40317 - atomic_read(&fscache_n_acquires_null),
40318 - atomic_read(&fscache_n_acquires_no_cache),
40319 - atomic_read(&fscache_n_acquires_ok),
40320 - atomic_read(&fscache_n_acquires_nobufs),
40321 - atomic_read(&fscache_n_acquires_oom));
40322 + atomic_read_unchecked(&fscache_n_acquires),
40323 + atomic_read_unchecked(&fscache_n_acquires_null),
40324 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
40325 + atomic_read_unchecked(&fscache_n_acquires_ok),
40326 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
40327 + atomic_read_unchecked(&fscache_n_acquires_oom));
40328
40329 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40330 - atomic_read(&fscache_n_object_lookups),
40331 - atomic_read(&fscache_n_object_lookups_negative),
40332 - atomic_read(&fscache_n_object_lookups_positive),
40333 - atomic_read(&fscache_n_object_lookups_timed_out),
40334 - atomic_read(&fscache_n_object_created));
40335 + atomic_read_unchecked(&fscache_n_object_lookups),
40336 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
40337 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
40338 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
40339 + atomic_read_unchecked(&fscache_n_object_created));
40340
40341 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40342 - atomic_read(&fscache_n_updates),
40343 - atomic_read(&fscache_n_updates_null),
40344 - atomic_read(&fscache_n_updates_run));
40345 + atomic_read_unchecked(&fscache_n_updates),
40346 + atomic_read_unchecked(&fscache_n_updates_null),
40347 + atomic_read_unchecked(&fscache_n_updates_run));
40348
40349 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40350 - atomic_read(&fscache_n_relinquishes),
40351 - atomic_read(&fscache_n_relinquishes_null),
40352 - atomic_read(&fscache_n_relinquishes_waitcrt),
40353 - atomic_read(&fscache_n_relinquishes_retire));
40354 + atomic_read_unchecked(&fscache_n_relinquishes),
40355 + atomic_read_unchecked(&fscache_n_relinquishes_null),
40356 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40357 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
40358
40359 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40360 - atomic_read(&fscache_n_attr_changed),
40361 - atomic_read(&fscache_n_attr_changed_ok),
40362 - atomic_read(&fscache_n_attr_changed_nobufs),
40363 - atomic_read(&fscache_n_attr_changed_nomem),
40364 - atomic_read(&fscache_n_attr_changed_calls));
40365 + atomic_read_unchecked(&fscache_n_attr_changed),
40366 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
40367 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40368 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40369 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
40370
40371 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40372 - atomic_read(&fscache_n_allocs),
40373 - atomic_read(&fscache_n_allocs_ok),
40374 - atomic_read(&fscache_n_allocs_wait),
40375 - atomic_read(&fscache_n_allocs_nobufs),
40376 - atomic_read(&fscache_n_allocs_intr));
40377 + atomic_read_unchecked(&fscache_n_allocs),
40378 + atomic_read_unchecked(&fscache_n_allocs_ok),
40379 + atomic_read_unchecked(&fscache_n_allocs_wait),
40380 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
40381 + atomic_read_unchecked(&fscache_n_allocs_intr));
40382 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40383 - atomic_read(&fscache_n_alloc_ops),
40384 - atomic_read(&fscache_n_alloc_op_waits),
40385 - atomic_read(&fscache_n_allocs_object_dead));
40386 + atomic_read_unchecked(&fscache_n_alloc_ops),
40387 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
40388 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
40389
40390 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40391 " int=%u oom=%u\n",
40392 - atomic_read(&fscache_n_retrievals),
40393 - atomic_read(&fscache_n_retrievals_ok),
40394 - atomic_read(&fscache_n_retrievals_wait),
40395 - atomic_read(&fscache_n_retrievals_nodata),
40396 - atomic_read(&fscache_n_retrievals_nobufs),
40397 - atomic_read(&fscache_n_retrievals_intr),
40398 - atomic_read(&fscache_n_retrievals_nomem));
40399 + atomic_read_unchecked(&fscache_n_retrievals),
40400 + atomic_read_unchecked(&fscache_n_retrievals_ok),
40401 + atomic_read_unchecked(&fscache_n_retrievals_wait),
40402 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
40403 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40404 + atomic_read_unchecked(&fscache_n_retrievals_intr),
40405 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
40406 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40407 - atomic_read(&fscache_n_retrieval_ops),
40408 - atomic_read(&fscache_n_retrieval_op_waits),
40409 - atomic_read(&fscache_n_retrievals_object_dead));
40410 + atomic_read_unchecked(&fscache_n_retrieval_ops),
40411 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40412 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40413
40414 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40415 - atomic_read(&fscache_n_stores),
40416 - atomic_read(&fscache_n_stores_ok),
40417 - atomic_read(&fscache_n_stores_again),
40418 - atomic_read(&fscache_n_stores_nobufs),
40419 - atomic_read(&fscache_n_stores_oom));
40420 + atomic_read_unchecked(&fscache_n_stores),
40421 + atomic_read_unchecked(&fscache_n_stores_ok),
40422 + atomic_read_unchecked(&fscache_n_stores_again),
40423 + atomic_read_unchecked(&fscache_n_stores_nobufs),
40424 + atomic_read_unchecked(&fscache_n_stores_oom));
40425 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40426 - atomic_read(&fscache_n_store_ops),
40427 - atomic_read(&fscache_n_store_calls),
40428 - atomic_read(&fscache_n_store_pages),
40429 - atomic_read(&fscache_n_store_radix_deletes),
40430 - atomic_read(&fscache_n_store_pages_over_limit));
40431 + atomic_read_unchecked(&fscache_n_store_ops),
40432 + atomic_read_unchecked(&fscache_n_store_calls),
40433 + atomic_read_unchecked(&fscache_n_store_pages),
40434 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
40435 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40436
40437 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40438 - atomic_read(&fscache_n_store_vmscan_not_storing),
40439 - atomic_read(&fscache_n_store_vmscan_gone),
40440 - atomic_read(&fscache_n_store_vmscan_busy),
40441 - atomic_read(&fscache_n_store_vmscan_cancelled));
40442 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40443 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40444 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40445 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40446
40447 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40448 - atomic_read(&fscache_n_op_pend),
40449 - atomic_read(&fscache_n_op_run),
40450 - atomic_read(&fscache_n_op_enqueue),
40451 - atomic_read(&fscache_n_op_cancelled),
40452 - atomic_read(&fscache_n_op_rejected));
40453 + atomic_read_unchecked(&fscache_n_op_pend),
40454 + atomic_read_unchecked(&fscache_n_op_run),
40455 + atomic_read_unchecked(&fscache_n_op_enqueue),
40456 + atomic_read_unchecked(&fscache_n_op_cancelled),
40457 + atomic_read_unchecked(&fscache_n_op_rejected));
40458 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40459 - atomic_read(&fscache_n_op_deferred_release),
40460 - atomic_read(&fscache_n_op_release),
40461 - atomic_read(&fscache_n_op_gc));
40462 + atomic_read_unchecked(&fscache_n_op_deferred_release),
40463 + atomic_read_unchecked(&fscache_n_op_release),
40464 + atomic_read_unchecked(&fscache_n_op_gc));
40465
40466 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40467 atomic_read(&fscache_n_cop_alloc_object),
40468 diff -urNp linux-2.6.32.42/fs/fs_struct.c linux-2.6.32.42/fs/fs_struct.c
40469 --- linux-2.6.32.42/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
40470 +++ linux-2.6.32.42/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
40471 @@ -4,6 +4,7 @@
40472 #include <linux/path.h>
40473 #include <linux/slab.h>
40474 #include <linux/fs_struct.h>
40475 +#include <linux/grsecurity.h>
40476
40477 /*
40478 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
40479 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
40480 old_root = fs->root;
40481 fs->root = *path;
40482 path_get(path);
40483 + gr_set_chroot_entries(current, path);
40484 write_unlock(&fs->lock);
40485 if (old_root.dentry)
40486 path_put(&old_root);
40487 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
40488 && fs->root.mnt == old_root->mnt) {
40489 path_get(new_root);
40490 fs->root = *new_root;
40491 + gr_set_chroot_entries(p, new_root);
40492 count++;
40493 }
40494 if (fs->pwd.dentry == old_root->dentry
40495 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
40496 task_lock(tsk);
40497 write_lock(&fs->lock);
40498 tsk->fs = NULL;
40499 - kill = !--fs->users;
40500 + gr_clear_chroot_entries(tsk);
40501 + kill = !atomic_dec_return(&fs->users);
40502 write_unlock(&fs->lock);
40503 task_unlock(tsk);
40504 if (kill)
40505 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
40506 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40507 /* We don't need to lock fs - think why ;-) */
40508 if (fs) {
40509 - fs->users = 1;
40510 + atomic_set(&fs->users, 1);
40511 fs->in_exec = 0;
40512 rwlock_init(&fs->lock);
40513 fs->umask = old->umask;
40514 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
40515
40516 task_lock(current);
40517 write_lock(&fs->lock);
40518 - kill = !--fs->users;
40519 + kill = !atomic_dec_return(&fs->users);
40520 current->fs = new_fs;
40521 + gr_set_chroot_entries(current, &new_fs->root);
40522 write_unlock(&fs->lock);
40523 task_unlock(current);
40524
40525 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
40526
40527 /* to be mentioned only in INIT_TASK */
40528 struct fs_struct init_fs = {
40529 - .users = 1,
40530 + .users = ATOMIC_INIT(1),
40531 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
40532 .umask = 0022,
40533 };
40534 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
40535 task_lock(current);
40536
40537 write_lock(&init_fs.lock);
40538 - init_fs.users++;
40539 + atomic_inc(&init_fs.users);
40540 write_unlock(&init_fs.lock);
40541
40542 write_lock(&fs->lock);
40543 current->fs = &init_fs;
40544 - kill = !--fs->users;
40545 + gr_set_chroot_entries(current, &current->fs->root);
40546 + kill = !atomic_dec_return(&fs->users);
40547 write_unlock(&fs->lock);
40548
40549 task_unlock(current);
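
Note: the fs/fs_struct.c hunks above convert the plain integer fs->users reference count to an atomic_t, so decrement-and-test no longer needs to happen under the write lock alone, and they hook gr_set_chroot_entries()/gr_clear_chroot_entries() wherever a task's root changes. A rough sketch of the refcount conversion pattern follows; the struct and function names are hypothetical stand-ins, not kernel symbols.

#include <asm/atomic.h>		/* 2.6.32 location of the atomic ops */
#include <linux/slab.h>

struct fs_struct_example {		/* stand-in for struct fs_struct */
	atomic_t users;			/* was: int users; */
	int umask;
};

static struct fs_struct_example init_example = {
	.users = ATOMIC_INIT(1),	/* was: .users = 1, */
	.umask = 0022,
};

static void put_example(struct fs_struct_example *fs)
{
	/* "kill = !--fs->users" becomes an atomic decrement-and-test */
	if (!atomic_dec_return(&fs->users))
		kfree(fs);
}
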
40550 diff -urNp linux-2.6.32.42/fs/fuse/cuse.c linux-2.6.32.42/fs/fuse/cuse.c
40551 --- linux-2.6.32.42/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
40552 +++ linux-2.6.32.42/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
40553 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
40554 return rc;
40555 }
40556
40557 -static struct file_operations cuse_channel_fops; /* initialized during init */
40558 -
40559 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
40560 + .owner = THIS_MODULE,
40561 + .llseek = no_llseek,
40562 + .read = do_sync_read,
40563 + .aio_read = fuse_dev_read,
40564 + .write = do_sync_write,
40565 + .aio_write = fuse_dev_write,
40566 + .poll = fuse_dev_poll,
40567 + .open = cuse_channel_open,
40568 + .release = cuse_channel_release,
40569 + .fasync = fuse_dev_fasync,
40570 +};
40571
40572 /**************************************************************************
40573 * Misc stuff and module initializatiion
40574 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
40575 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
40576 INIT_LIST_HEAD(&cuse_conntbl[i]);
40577
40578 - /* inherit and extend fuse_dev_operations */
40579 - cuse_channel_fops = fuse_dev_operations;
40580 - cuse_channel_fops.owner = THIS_MODULE;
40581 - cuse_channel_fops.open = cuse_channel_open;
40582 - cuse_channel_fops.release = cuse_channel_release;
40583 -
40584 cuse_class = class_create(THIS_MODULE, "cuse");
40585 if (IS_ERR(cuse_class))
40586 return PTR_ERR(cuse_class);
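
Note: the fs/fuse/cuse.c hunk above replaces a writable file_operations that used to be populated at init time (copy fuse_dev_operations, then patch .open and .release) with a fully const static initializer, so the table can live in read-only data. That is also why fuse_dev_read, fuse_dev_write, fuse_dev_poll and fuse_dev_fasync are un-staticed and exported in the fs/fuse/dev.c hunks below. A generic sketch of the constification pattern, with made-up demo_* names:

#include <linux/fs.h>
#include <linux/module.h>

/* Before: writable ops, filled in at runtime, overwritable forever after. */
static struct file_operations demo_fops;	/* memcpy'd and edited in init */

/* After: const and fully spelled out; every borrowed callback must be
 * visible at compile time, hence the new EXPORT_SYMBOL_GPL entries. */
static int demo_open(struct inode *inode, struct file *file) { return 0; }
static int demo_release(struct inode *inode, struct file *file) { return 0; }

static const struct file_operations demo_fops_const = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.release = demo_release,
};
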
40587 diff -urNp linux-2.6.32.42/fs/fuse/dev.c linux-2.6.32.42/fs/fuse/dev.c
40588 --- linux-2.6.32.42/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
40589 +++ linux-2.6.32.42/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
40590 @@ -745,7 +745,7 @@ __releases(&fc->lock)
40591 * request_end(). Otherwise add it to the processing list, and set
40592 * the 'sent' flag.
40593 */
40594 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40595 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40596 unsigned long nr_segs, loff_t pos)
40597 {
40598 int err;
40599 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
40600 spin_unlock(&fc->lock);
40601 return err;
40602 }
40603 +EXPORT_SYMBOL_GPL(fuse_dev_read);
40604
40605 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40606 struct fuse_copy_state *cs)
40607 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40608 {
40609 struct fuse_notify_inval_entry_out outarg;
40610 int err = -EINVAL;
40611 - char buf[FUSE_NAME_MAX+1];
40612 + char *buf = NULL;
40613 struct qstr name;
40614
40615 if (size < sizeof(outarg))
40616 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40617 if (outarg.namelen > FUSE_NAME_MAX)
40618 goto err;
40619
40620 + err = -ENOMEM;
40621 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40622 + if (!buf)
40623 + goto err;
40624 +
40625 name.name = buf;
40626 name.len = outarg.namelen;
40627 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40628 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40629
40630 down_read(&fc->killsb);
40631 err = -ENOENT;
40632 - if (!fc->sb)
40633 - goto err_unlock;
40634 -
40635 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40636 -
40637 -err_unlock:
40638 + if (fc->sb)
40639 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40640 up_read(&fc->killsb);
40641 + kfree(buf);
40642 return err;
40643
40644 err:
40645 fuse_copy_finish(cs);
40646 + kfree(buf);
40647 return err;
40648 }
40649
40650 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40651 * it from the list and copy the rest of the buffer to the request.
40652 * The request is finished by calling request_end()
40653 */
40654 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40655 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40656 unsigned long nr_segs, loff_t pos)
40657 {
40658 int err;
40659 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40660 fuse_copy_finish(&cs);
40661 return err;
40662 }
40663 +EXPORT_SYMBOL_GPL(fuse_dev_write);
40664
40665 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40666 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40667 {
40668 unsigned mask = POLLOUT | POLLWRNORM;
40669 struct fuse_conn *fc = fuse_get_conn(file);
40670 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40671
40672 return mask;
40673 }
40674 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
40675
40676 /*
40677 * Abort all requests on the given list (pending or processing)
40678 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40679 }
40680 EXPORT_SYMBOL_GPL(fuse_dev_release);
40681
40682 -static int fuse_dev_fasync(int fd, struct file *file, int on)
40683 +int fuse_dev_fasync(int fd, struct file *file, int on)
40684 {
40685 struct fuse_conn *fc = fuse_get_conn(file);
40686 if (!fc)
40687 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40688 /* No locking - fasync_helper does its own locking */
40689 return fasync_helper(fd, file, on, &fc->fasync);
40690 }
40691 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40692
40693 const struct file_operations fuse_dev_operations = {
40694 .owner = THIS_MODULE,
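
Note: besides exporting the device helpers for cuse, the fs/fuse/dev.c hunk moves fuse_notify_inval_entry()'s name buffer off the stack: FUSE_NAME_MAX+1 bytes (over 1 KiB) is a large automatic array for a kernel stack, so it becomes a kmalloc()'d buffer that every exit path frees. A condensed sketch of the resulting shape; fuse_copy_one() and FUSE_NAME_MAX are real fuse symbols, the wrapper itself is made up for illustration.

#include <linux/slab.h>

static int copy_name_example(struct fuse_copy_state *cs, unsigned namelen)
{
	char *buf;
	int err;

	if (namelen > FUSE_NAME_MAX)
		return -EINVAL;

	buf = kmalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);	/* was: char buf[FUSE_NAME_MAX+1]; */
	if (!buf)
		return -ENOMEM;

	err = fuse_copy_one(cs, buf, namelen + 1);	/* name plus trailing NUL */
	kfree(buf);					/* freed on success and on error */
	return err;
}
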
40695 diff -urNp linux-2.6.32.42/fs/fuse/dir.c linux-2.6.32.42/fs/fuse/dir.c
40696 --- linux-2.6.32.42/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40697 +++ linux-2.6.32.42/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40698 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40699 return link;
40700 }
40701
40702 -static void free_link(char *link)
40703 +static void free_link(const char *link)
40704 {
40705 if (!IS_ERR(link))
40706 free_page((unsigned long) link);
40707 diff -urNp linux-2.6.32.42/fs/fuse/fuse_i.h linux-2.6.32.42/fs/fuse/fuse_i.h
40708 --- linux-2.6.32.42/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40709 +++ linux-2.6.32.42/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40710 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40711
40712 extern const struct dentry_operations fuse_dentry_operations;
40713
40714 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40715 + unsigned long nr_segs, loff_t pos);
40716 +
40717 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40718 + unsigned long nr_segs, loff_t pos);
40719 +
40720 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40721 +
40722 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40723 +
40724 /**
40725 * Inode to nodeid comparison.
40726 */
40727 diff -urNp linux-2.6.32.42/fs/gfs2/ops_inode.c linux-2.6.32.42/fs/gfs2/ops_inode.c
40728 --- linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40729 +++ linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40730 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40731 unsigned int x;
40732 int error;
40733
40734 + pax_track_stack();
40735 +
40736 if (ndentry->d_inode) {
40737 nip = GFS2_I(ndentry->d_inode);
40738 if (ip == nip)
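
Note: this is the first of several hunks in this part of the patch (gfs2, hfsplus, jbd, jffs2, lockd) that drop a pax_track_stack() call at the top of functions with unusually large stack frames; under PaX's stack tracking the call records how deep the frame reaches so the kernel stack can be sanitized later. A schematic example follows; the no-op fallback shown when the feature is disabled is an assumption for illustration, the real declaration comes from elsewhere in this patch.

#include <linux/string.h>

#ifndef CONFIG_PAX_MEMORY_STACKLEAK
#define pax_track_stack() do {} while (0)	/* assumed no-op fallback */
#endif

static int big_frame_example(void)
{
	char scratch[1024];		/* large automatic buffer */

	pax_track_stack();		/* note the new stack depth first */
	memset(scratch, 0, sizeof(scratch));
	return scratch[0];
}
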
40739 diff -urNp linux-2.6.32.42/fs/gfs2/sys.c linux-2.6.32.42/fs/gfs2/sys.c
40740 --- linux-2.6.32.42/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40741 +++ linux-2.6.32.42/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40742 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40743 return a->store ? a->store(sdp, buf, len) : len;
40744 }
40745
40746 -static struct sysfs_ops gfs2_attr_ops = {
40747 +static const struct sysfs_ops gfs2_attr_ops = {
40748 .show = gfs2_attr_show,
40749 .store = gfs2_attr_store,
40750 };
40751 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40752 return 0;
40753 }
40754
40755 -static struct kset_uevent_ops gfs2_uevent_ops = {
40756 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40757 .uevent = gfs2_uevent,
40758 };
40759
40760 diff -urNp linux-2.6.32.42/fs/hfsplus/catalog.c linux-2.6.32.42/fs/hfsplus/catalog.c
40761 --- linux-2.6.32.42/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40762 +++ linux-2.6.32.42/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40763 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40764 int err;
40765 u16 type;
40766
40767 + pax_track_stack();
40768 +
40769 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40770 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40771 if (err)
40772 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40773 int entry_size;
40774 int err;
40775
40776 + pax_track_stack();
40777 +
40778 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40779 sb = dir->i_sb;
40780 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40781 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40782 int entry_size, type;
40783 int err = 0;
40784
40785 + pax_track_stack();
40786 +
40787 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40788 dst_dir->i_ino, dst_name->name);
40789 sb = src_dir->i_sb;
40790 diff -urNp linux-2.6.32.42/fs/hfsplus/dir.c linux-2.6.32.42/fs/hfsplus/dir.c
40791 --- linux-2.6.32.42/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40792 +++ linux-2.6.32.42/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40793 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40794 struct hfsplus_readdir_data *rd;
40795 u16 type;
40796
40797 + pax_track_stack();
40798 +
40799 if (filp->f_pos >= inode->i_size)
40800 return 0;
40801
40802 diff -urNp linux-2.6.32.42/fs/hfsplus/inode.c linux-2.6.32.42/fs/hfsplus/inode.c
40803 --- linux-2.6.32.42/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40804 +++ linux-2.6.32.42/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40805 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40806 int res = 0;
40807 u16 type;
40808
40809 + pax_track_stack();
40810 +
40811 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40812
40813 HFSPLUS_I(inode).dev = 0;
40814 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40815 struct hfs_find_data fd;
40816 hfsplus_cat_entry entry;
40817
40818 + pax_track_stack();
40819 +
40820 if (HFSPLUS_IS_RSRC(inode))
40821 main_inode = HFSPLUS_I(inode).rsrc_inode;
40822
40823 diff -urNp linux-2.6.32.42/fs/hfsplus/ioctl.c linux-2.6.32.42/fs/hfsplus/ioctl.c
40824 --- linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40825 +++ linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40826 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40827 struct hfsplus_cat_file *file;
40828 int res;
40829
40830 + pax_track_stack();
40831 +
40832 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40833 return -EOPNOTSUPP;
40834
40835 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40836 struct hfsplus_cat_file *file;
40837 ssize_t res = 0;
40838
40839 + pax_track_stack();
40840 +
40841 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40842 return -EOPNOTSUPP;
40843
40844 diff -urNp linux-2.6.32.42/fs/hfsplus/super.c linux-2.6.32.42/fs/hfsplus/super.c
40845 --- linux-2.6.32.42/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40846 +++ linux-2.6.32.42/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40847 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40848 struct nls_table *nls = NULL;
40849 int err = -EINVAL;
40850
40851 + pax_track_stack();
40852 +
40853 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40854 if (!sbi)
40855 return -ENOMEM;
40856 diff -urNp linux-2.6.32.42/fs/hugetlbfs/inode.c linux-2.6.32.42/fs/hugetlbfs/inode.c
40857 --- linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40858 +++ linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40859 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40860 .kill_sb = kill_litter_super,
40861 };
40862
40863 -static struct vfsmount *hugetlbfs_vfsmount;
40864 +struct vfsmount *hugetlbfs_vfsmount;
40865
40866 static int can_do_hugetlb_shm(void)
40867 {
40868 diff -urNp linux-2.6.32.42/fs/ioctl.c linux-2.6.32.42/fs/ioctl.c
40869 --- linux-2.6.32.42/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40870 +++ linux-2.6.32.42/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40871 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40872 u64 phys, u64 len, u32 flags)
40873 {
40874 struct fiemap_extent extent;
40875 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
40876 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40877
40878 /* only count the extents */
40879 if (fieinfo->fi_extents_max == 0) {
40880 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40881
40882 fieinfo.fi_flags = fiemap.fm_flags;
40883 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40884 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40885 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40886
40887 if (fiemap.fm_extent_count != 0 &&
40888 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40889 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40890 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40891 fiemap.fm_flags = fieinfo.fi_flags;
40892 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40893 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40894 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40895 error = -EFAULT;
40896
40897 return error;
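
Note: the fs/ioctl.c fiemap hunks only add __user address-space annotations (plus one __force cast) so that sparse can verify fi_extents_start and the ioctl argument are handled as userland pointers; the generated code is unchanged. A small sketch of the idiom, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/fiemap.h>
#include <linux/uaccess.h>

static int copy_extent_example(void __user *arg,
			       const struct fiemap_extent *src)
{
	struct fiemap_extent __user *dest = arg;	/* user-space destination */

	if (copy_to_user(dest, src, sizeof(*src)))
		return -EFAULT;
	return 0;
}
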
40898 diff -urNp linux-2.6.32.42/fs/jbd/checkpoint.c linux-2.6.32.42/fs/jbd/checkpoint.c
40899 --- linux-2.6.32.42/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40900 +++ linux-2.6.32.42/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40901 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40902 tid_t this_tid;
40903 int result;
40904
40905 + pax_track_stack();
40906 +
40907 jbd_debug(1, "Start checkpoint\n");
40908
40909 /*
40910 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rtime.c linux-2.6.32.42/fs/jffs2/compr_rtime.c
40911 --- linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40912 +++ linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40913 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40914 int outpos = 0;
40915 int pos=0;
40916
40917 + pax_track_stack();
40918 +
40919 memset(positions,0,sizeof(positions));
40920
40921 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40922 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40923 int outpos = 0;
40924 int pos=0;
40925
40926 + pax_track_stack();
40927 +
40928 memset(positions,0,sizeof(positions));
40929
40930 while (outpos<destlen) {
40931 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rubin.c linux-2.6.32.42/fs/jffs2/compr_rubin.c
40932 --- linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40933 +++ linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40934 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40935 int ret;
40936 uint32_t mysrclen, mydstlen;
40937
40938 + pax_track_stack();
40939 +
40940 mysrclen = *sourcelen;
40941 mydstlen = *dstlen - 8;
40942
40943 diff -urNp linux-2.6.32.42/fs/jffs2/erase.c linux-2.6.32.42/fs/jffs2/erase.c
40944 --- linux-2.6.32.42/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40945 +++ linux-2.6.32.42/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40946 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40947 struct jffs2_unknown_node marker = {
40948 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40949 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40950 - .totlen = cpu_to_je32(c->cleanmarker_size)
40951 + .totlen = cpu_to_je32(c->cleanmarker_size),
40952 + .hdr_crc = cpu_to_je32(0)
40953 };
40954
40955 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40956 diff -urNp linux-2.6.32.42/fs/jffs2/wbuf.c linux-2.6.32.42/fs/jffs2/wbuf.c
40957 --- linux-2.6.32.42/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40958 +++ linux-2.6.32.42/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40959 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40960 {
40961 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40962 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40963 - .totlen = constant_cpu_to_je32(8)
40964 + .totlen = constant_cpu_to_je32(8),
40965 + .hdr_crc = constant_cpu_to_je32(0)
40966 };
40967
40968 /*
40969 diff -urNp linux-2.6.32.42/fs/jffs2/xattr.c linux-2.6.32.42/fs/jffs2/xattr.c
40970 --- linux-2.6.32.42/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40971 +++ linux-2.6.32.42/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40972 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40973
40974 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40975
40976 + pax_track_stack();
40977 +
40978 /* Phase.1 : Merge same xref */
40979 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40980 xref_tmphash[i] = NULL;
40981 diff -urNp linux-2.6.32.42/fs/jfs/super.c linux-2.6.32.42/fs/jfs/super.c
40982 --- linux-2.6.32.42/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
40983 +++ linux-2.6.32.42/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
40984 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
40985
40986 jfs_inode_cachep =
40987 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40988 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40989 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40990 init_once);
40991 if (jfs_inode_cachep == NULL)
40992 return -ENOMEM;
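
Note: the fs/jfs/super.c hunk adds SLAB_USERCOPY when creating the jfs_ip inode cache. Under the USERCOPY hardening added elsewhere in this patch only slabs carrying this flag may be copied to or from userland, and jfs inode objects presumably need it because parts of the in-memory inode are handed straight to copy_to_user(). A sketch of flagging a cache, with hypothetical example_* names:

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;

static void example_init_once(void *obj)
{
	/* one-time object construction would go here */
}

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_ip",
			256 /* object size */, 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_USERCOPY,
			example_init_once);
	return example_cachep ? 0 : -ENOMEM;
}
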
40993 diff -urNp linux-2.6.32.42/fs/Kconfig.binfmt linux-2.6.32.42/fs/Kconfig.binfmt
40994 --- linux-2.6.32.42/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40995 +++ linux-2.6.32.42/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40996 @@ -86,7 +86,7 @@ config HAVE_AOUT
40997
40998 config BINFMT_AOUT
40999 tristate "Kernel support for a.out and ECOFF binaries"
41000 - depends on HAVE_AOUT
41001 + depends on HAVE_AOUT && BROKEN
41002 ---help---
41003 A.out (Assembler.OUTput) is a set of formats for libraries and
41004 executables used in the earliest versions of UNIX. Linux used
41005 diff -urNp linux-2.6.32.42/fs/libfs.c linux-2.6.32.42/fs/libfs.c
41006 --- linux-2.6.32.42/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
41007 +++ linux-2.6.32.42/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
41008 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
41009
41010 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
41011 struct dentry *next;
41012 + char d_name[sizeof(next->d_iname)];
41013 + const unsigned char *name;
41014 +
41015 next = list_entry(p, struct dentry, d_u.d_child);
41016 if (d_unhashed(next) || !next->d_inode)
41017 continue;
41018
41019 spin_unlock(&dcache_lock);
41020 - if (filldir(dirent, next->d_name.name,
41021 + name = next->d_name.name;
41022 + if (name == next->d_iname) {
41023 + memcpy(d_name, name, next->d_name.len);
41024 + name = d_name;
41025 + }
41026 + if (filldir(dirent, name,
41027 next->d_name.len, filp->f_pos,
41028 next->d_inode->i_ino,
41029 dt_type(next->d_inode)) < 0)
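
Note: the fs/libfs.c hunk makes dcache_readdir() copy names that are stored inline in dentry->d_iname into an on-stack buffer before handing them to filldir(), most likely so the bytes that eventually reach copy_to_user() come from the stack rather than from the dentry slab, which the USERCOPY checks would not whitelist; out-of-line names keep their own allocation and pass through untouched. A compressed sketch of the idea, assuming the 2.6.32 dentry layout (d_iname holds short names inline):

#include <linux/dcache.h>
#include <linux/string.h>

/* Sketch only: hand back either the original name or a stack snapshot. */
static void snapshot_name_example(struct dentry *de, char *scratch,
				  const unsigned char **name)
{
	*name = de->d_name.name;
	if (*name == de->d_iname) {			/* short name lives inside the dentry */
		memcpy(scratch, *name, de->d_name.len);	/* length is passed separately to filldir() */
		*name = scratch;			/* hand out the stack copy instead */
	}
}
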
41030 diff -urNp linux-2.6.32.42/fs/lockd/clntproc.c linux-2.6.32.42/fs/lockd/clntproc.c
41031 --- linux-2.6.32.42/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
41032 +++ linux-2.6.32.42/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
41033 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41034 /*
41035 * Cookie counter for NLM requests
41036 */
41037 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41038 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41039
41040 void nlmclnt_next_cookie(struct nlm_cookie *c)
41041 {
41042 - u32 cookie = atomic_inc_return(&nlm_cookie);
41043 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41044
41045 memcpy(c->data, &cookie, 4);
41046 c->len=4;
41047 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41048 struct nlm_rqst reqst, *req;
41049 int status;
41050
41051 + pax_track_stack();
41052 +
41053 req = &reqst;
41054 memset(req, 0, sizeof(*req));
41055 locks_init_lock(&req->a_args.lock.fl);
41056 diff -urNp linux-2.6.32.42/fs/lockd/svc.c linux-2.6.32.42/fs/lockd/svc.c
41057 --- linux-2.6.32.42/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
41058 +++ linux-2.6.32.42/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
41059 @@ -43,7 +43,7 @@
41060
41061 static struct svc_program nlmsvc_program;
41062
41063 -struct nlmsvc_binding * nlmsvc_ops;
41064 +const struct nlmsvc_binding * nlmsvc_ops;
41065 EXPORT_SYMBOL_GPL(nlmsvc_ops);
41066
41067 static DEFINE_MUTEX(nlmsvc_mutex);
41068 diff -urNp linux-2.6.32.42/fs/locks.c linux-2.6.32.42/fs/locks.c
41069 --- linux-2.6.32.42/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
41070 +++ linux-2.6.32.42/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
41071 @@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
41072 return;
41073
41074 if (filp->f_op && filp->f_op->flock) {
41075 - struct file_lock fl = {
41076 + struct file_lock flock = {
41077 .fl_pid = current->tgid,
41078 .fl_file = filp,
41079 .fl_flags = FL_FLOCK,
41080 .fl_type = F_UNLCK,
41081 .fl_end = OFFSET_MAX,
41082 };
41083 - filp->f_op->flock(filp, F_SETLKW, &fl);
41084 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
41085 - fl.fl_ops->fl_release_private(&fl);
41086 + filp->f_op->flock(filp, F_SETLKW, &flock);
41087 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
41088 + flock.fl_ops->fl_release_private(&flock);
41089 }
41090
41091 lock_kernel();
41092 diff -urNp linux-2.6.32.42/fs/namei.c linux-2.6.32.42/fs/namei.c
41093 --- linux-2.6.32.42/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
41094 +++ linux-2.6.32.42/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
41095 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
41096 return ret;
41097
41098 /*
41099 - * Read/write DACs are always overridable.
41100 - * Executable DACs are overridable if at least one exec bit is set.
41101 - */
41102 - if (!(mask & MAY_EXEC) || execute_ok(inode))
41103 - if (capable(CAP_DAC_OVERRIDE))
41104 - return 0;
41105 -
41106 - /*
41107 * Searching includes executable on directories, else just read.
41108 */
41109 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41110 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
41111 if (capable(CAP_DAC_READ_SEARCH))
41112 return 0;
41113
41114 + /*
41115 + * Read/write DACs are always overridable.
41116 + * Executable DACs are overridable if at least one exec bit is set.
41117 + */
41118 + if (!(mask & MAY_EXEC) || execute_ok(inode))
41119 + if (capable(CAP_DAC_OVERRIDE))
41120 + return 0;
41121 +
41122 return -EACCES;
41123 }
41124
41125 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
41126 if (!ret)
41127 goto ok;
41128
41129 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
41130 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
41131 + capable(CAP_DAC_OVERRIDE))
41132 goto ok;
41133
41134 return ret;
41135 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
41136 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
41137 error = PTR_ERR(cookie);
41138 if (!IS_ERR(cookie)) {
41139 - char *s = nd_get_link(nd);
41140 + const char *s = nd_get_link(nd);
41141 error = 0;
41142 if (s)
41143 error = __vfs_follow_link(nd, s);
41144 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
41145 err = security_inode_follow_link(path->dentry, nd);
41146 if (err)
41147 goto loop;
41148 +
41149 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
41150 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
41151 + err = -EACCES;
41152 + goto loop;
41153 + }
41154 +
41155 current->link_count++;
41156 current->total_link_count++;
41157 nd->depth++;
41158 @@ -1016,11 +1024,18 @@ return_reval:
41159 break;
41160 }
41161 return_base:
41162 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
41163 + path_put(&nd->path);
41164 + return -ENOENT;
41165 + }
41166 return 0;
41167 out_dput:
41168 path_put_conditional(&next, nd);
41169 break;
41170 }
41171 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41172 + err = -ENOENT;
41173 +
41174 path_put(&nd->path);
41175 return_err:
41176 return err;
41177 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
41178 int retval = path_init(dfd, name, flags, nd);
41179 if (!retval)
41180 retval = path_walk(name, nd);
41181 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
41182 - nd->path.dentry->d_inode))
41183 - audit_inode(name, nd->path.dentry);
41184 +
41185 + if (likely(!retval)) {
41186 + if (nd->path.dentry && nd->path.dentry->d_inode) {
41187 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41188 + retval = -ENOENT;
41189 + if (!audit_dummy_context())
41190 + audit_inode(name, nd->path.dentry);
41191 + }
41192 + }
41193 if (nd->root.mnt) {
41194 path_put(&nd->root);
41195 nd->root.mnt = NULL;
41196 }
41197 +
41198 return retval;
41199 }
41200
41201 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
41202 if (error)
41203 goto err_out;
41204
41205 +
41206 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41207 + error = -EPERM;
41208 + goto err_out;
41209 + }
41210 + if (gr_handle_rawio(inode)) {
41211 + error = -EPERM;
41212 + goto err_out;
41213 + }
41214 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
41215 + error = -EACCES;
41216 + goto err_out;
41217 + }
41218 +
41219 if (flag & O_TRUNC) {
41220 error = get_write_access(inode);
41221 if (error)
41222 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
41223 int error;
41224 struct dentry *dir = nd->path.dentry;
41225
41226 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
41227 + error = -EACCES;
41228 + goto out_unlock;
41229 + }
41230 +
41231 if (!IS_POSIXACL(dir->d_inode))
41232 mode &= ~current_umask();
41233 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
41234 if (error)
41235 goto out_unlock;
41236 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
41237 + if (!error)
41238 + gr_handle_create(path->dentry, nd->path.mnt);
41239 out_unlock:
41240 mutex_unlock(&dir->d_inode->i_mutex);
41241 dput(nd->path.dentry);
41242 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
41243 &nd, flag);
41244 if (error)
41245 return ERR_PTR(error);
41246 +
41247 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
41248 + error = -EPERM;
41249 + goto exit;
41250 + }
41251 +
41252 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
41253 + error = -EPERM;
41254 + goto exit;
41255 + }
41256 +
41257 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
41258 + error = -EACCES;
41259 + goto exit;
41260 + }
41261 +
41262 goto ok;
41263 }
41264
41265 @@ -1795,6 +1854,14 @@ do_last:
41266 /*
41267 * It already exists.
41268 */
41269 +
41270 + /* only check if O_CREAT is specified, all other checks need
41271 + to go into may_open */
41272 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
41273 + error = -EACCES;
41274 + goto exit_mutex_unlock;
41275 + }
41276 +
41277 mutex_unlock(&dir->d_inode->i_mutex);
41278 audit_inode(pathname, path.dentry);
41279
41280 @@ -1887,6 +1954,13 @@ do_link:
41281 error = security_inode_follow_link(path.dentry, &nd);
41282 if (error)
41283 goto exit_dput;
41284 +
41285 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
41286 + path.dentry, nd.path.mnt)) {
41287 + error = -EACCES;
41288 + goto exit_dput;
41289 + }
41290 +
41291 error = __do_follow_link(&path, &nd);
41292 if (error) {
41293 /* Does someone understand code flow here? Or it is only
41294 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41295 error = may_mknod(mode);
41296 if (error)
41297 goto out_dput;
41298 +
41299 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41300 + error = -EPERM;
41301 + goto out_dput;
41302 + }
41303 +
41304 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41305 + error = -EACCES;
41306 + goto out_dput;
41307 + }
41308 +
41309 error = mnt_want_write(nd.path.mnt);
41310 if (error)
41311 goto out_dput;
41312 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41313 }
41314 out_drop_write:
41315 mnt_drop_write(nd.path.mnt);
41316 +
41317 + if (!error)
41318 + gr_handle_create(dentry, nd.path.mnt);
41319 out_dput:
41320 dput(dentry);
41321 out_unlock:
41322 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41323 if (IS_ERR(dentry))
41324 goto out_unlock;
41325
41326 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41327 + error = -EACCES;
41328 + goto out_dput;
41329 + }
41330 +
41331 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41332 mode &= ~current_umask();
41333 error = mnt_want_write(nd.path.mnt);
41334 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41335 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41336 out_drop_write:
41337 mnt_drop_write(nd.path.mnt);
41338 +
41339 + if (!error)
41340 + gr_handle_create(dentry, nd.path.mnt);
41341 +
41342 out_dput:
41343 dput(dentry);
41344 out_unlock:
41345 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
41346 char * name;
41347 struct dentry *dentry;
41348 struct nameidata nd;
41349 + ino_t saved_ino = 0;
41350 + dev_t saved_dev = 0;
41351
41352 error = user_path_parent(dfd, pathname, &nd, &name);
41353 if (error)
41354 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
41355 error = PTR_ERR(dentry);
41356 if (IS_ERR(dentry))
41357 goto exit2;
41358 +
41359 + if (dentry->d_inode != NULL) {
41360 + if (dentry->d_inode->i_nlink <= 1) {
41361 + saved_ino = dentry->d_inode->i_ino;
41362 + saved_dev = gr_get_dev_from_dentry(dentry);
41363 + }
41364 +
41365 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41366 + error = -EACCES;
41367 + goto exit3;
41368 + }
41369 + }
41370 +
41371 error = mnt_want_write(nd.path.mnt);
41372 if (error)
41373 goto exit3;
41374 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
41375 if (error)
41376 goto exit4;
41377 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41378 + if (!error && (saved_dev || saved_ino))
41379 + gr_handle_delete(saved_ino, saved_dev);
41380 exit4:
41381 mnt_drop_write(nd.path.mnt);
41382 exit3:
41383 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
41384 struct dentry *dentry;
41385 struct nameidata nd;
41386 struct inode *inode = NULL;
41387 + ino_t saved_ino = 0;
41388 + dev_t saved_dev = 0;
41389
41390 error = user_path_parent(dfd, pathname, &nd, &name);
41391 if (error)
41392 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
41393 if (nd.last.name[nd.last.len])
41394 goto slashes;
41395 inode = dentry->d_inode;
41396 - if (inode)
41397 + if (inode) {
41398 + if (inode->i_nlink <= 1) {
41399 + saved_ino = inode->i_ino;
41400 + saved_dev = gr_get_dev_from_dentry(dentry);
41401 + }
41402 +
41403 atomic_inc(&inode->i_count);
41404 +
41405 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41406 + error = -EACCES;
41407 + goto exit2;
41408 + }
41409 + }
41410 error = mnt_want_write(nd.path.mnt);
41411 if (error)
41412 goto exit2;
41413 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
41414 if (error)
41415 goto exit3;
41416 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41417 + if (!error && (saved_ino || saved_dev))
41418 + gr_handle_delete(saved_ino, saved_dev);
41419 exit3:
41420 mnt_drop_write(nd.path.mnt);
41421 exit2:
41422 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41423 if (IS_ERR(dentry))
41424 goto out_unlock;
41425
41426 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41427 + error = -EACCES;
41428 + goto out_dput;
41429 + }
41430 +
41431 error = mnt_want_write(nd.path.mnt);
41432 if (error)
41433 goto out_dput;
41434 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41435 if (error)
41436 goto out_drop_write;
41437 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41438 + if (!error)
41439 + gr_handle_create(dentry, nd.path.mnt);
41440 out_drop_write:
41441 mnt_drop_write(nd.path.mnt);
41442 out_dput:
41443 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41444 error = PTR_ERR(new_dentry);
41445 if (IS_ERR(new_dentry))
41446 goto out_unlock;
41447 +
41448 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41449 + old_path.dentry->d_inode,
41450 + old_path.dentry->d_inode->i_mode, to)) {
41451 + error = -EACCES;
41452 + goto out_dput;
41453 + }
41454 +
41455 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41456 + old_path.dentry, old_path.mnt, to)) {
41457 + error = -EACCES;
41458 + goto out_dput;
41459 + }
41460 +
41461 error = mnt_want_write(nd.path.mnt);
41462 if (error)
41463 goto out_dput;
41464 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41465 if (error)
41466 goto out_drop_write;
41467 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41468 + if (!error)
41469 + gr_handle_create(new_dentry, nd.path.mnt);
41470 out_drop_write:
41471 mnt_drop_write(nd.path.mnt);
41472 out_dput:
41473 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41474 char *to;
41475 int error;
41476
41477 + pax_track_stack();
41478 +
41479 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41480 if (error)
41481 goto exit;
41482 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41483 if (new_dentry == trap)
41484 goto exit5;
41485
41486 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41487 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
41488 + to);
41489 + if (error)
41490 + goto exit5;
41491 +
41492 error = mnt_want_write(oldnd.path.mnt);
41493 if (error)
41494 goto exit5;
41495 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41496 goto exit6;
41497 error = vfs_rename(old_dir->d_inode, old_dentry,
41498 new_dir->d_inode, new_dentry);
41499 + if (!error)
41500 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41501 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41502 exit6:
41503 mnt_drop_write(oldnd.path.mnt);
41504 exit5:
41505 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
41506
41507 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41508 {
41509 + char tmpbuf[64];
41510 + const char *newlink;
41511 int len;
41512
41513 len = PTR_ERR(link);
41514 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
41515 len = strlen(link);
41516 if (len > (unsigned) buflen)
41517 len = buflen;
41518 - if (copy_to_user(buffer, link, len))
41519 +
41520 + if (len < sizeof(tmpbuf)) {
41521 + memcpy(tmpbuf, link, len);
41522 + newlink = tmpbuf;
41523 + } else
41524 + newlink = link;
41525 +
41526 + if (copy_to_user(buffer, newlink, len))
41527 len = -EFAULT;
41528 out:
41529 return len;
41530 diff -urNp linux-2.6.32.42/fs/namespace.c linux-2.6.32.42/fs/namespace.c
41531 --- linux-2.6.32.42/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
41532 +++ linux-2.6.32.42/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
41533 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
41534 if (!(sb->s_flags & MS_RDONLY))
41535 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41536 up_write(&sb->s_umount);
41537 +
41538 + gr_log_remount(mnt->mnt_devname, retval);
41539 +
41540 return retval;
41541 }
41542
41543 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
41544 security_sb_umount_busy(mnt);
41545 up_write(&namespace_sem);
41546 release_mounts(&umount_list);
41547 +
41548 + gr_log_unmount(mnt->mnt_devname, retval);
41549 +
41550 return retval;
41551 }
41552
41553 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
41554 if (retval)
41555 goto dput_out;
41556
41557 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41558 + retval = -EPERM;
41559 + goto dput_out;
41560 + }
41561 +
41562 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41563 + retval = -EPERM;
41564 + goto dput_out;
41565 + }
41566 +
41567 if (flags & MS_REMOUNT)
41568 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41569 data_page);
41570 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
41571 dev_name, data_page);
41572 dput_out:
41573 path_put(&path);
41574 +
41575 + gr_log_mount(dev_name, dir_name, retval);
41576 +
41577 return retval;
41578 }
41579
41580 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
41581 goto out1;
41582 }
41583
41584 + if (gr_handle_chroot_pivot()) {
41585 + error = -EPERM;
41586 + path_put(&old);
41587 + goto out1;
41588 + }
41589 +
41590 read_lock(&current->fs->lock);
41591 root = current->fs->root;
41592 path_get(&current->fs->root);
41593 diff -urNp linux-2.6.32.42/fs/ncpfs/dir.c linux-2.6.32.42/fs/ncpfs/dir.c
41594 --- linux-2.6.32.42/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41595 +++ linux-2.6.32.42/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
41596 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
41597 int res, val = 0, len;
41598 __u8 __name[NCP_MAXPATHLEN + 1];
41599
41600 + pax_track_stack();
41601 +
41602 parent = dget_parent(dentry);
41603 dir = parent->d_inode;
41604
41605 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41606 int error, res, len;
41607 __u8 __name[NCP_MAXPATHLEN + 1];
41608
41609 + pax_track_stack();
41610 +
41611 lock_kernel();
41612 error = -EIO;
41613 if (!ncp_conn_valid(server))
41614 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41615 int error, result, len;
41616 int opmode;
41617 __u8 __name[NCP_MAXPATHLEN + 1];
41618 -
41619 +
41620 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41621 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41622
41623 + pax_track_stack();
41624 +
41625 error = -EIO;
41626 lock_kernel();
41627 if (!ncp_conn_valid(server))
41628 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41629 int error, len;
41630 __u8 __name[NCP_MAXPATHLEN + 1];
41631
41632 + pax_track_stack();
41633 +
41634 DPRINTK("ncp_mkdir: making %s/%s\n",
41635 dentry->d_parent->d_name.name, dentry->d_name.name);
41636
41637 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41638 if (!ncp_conn_valid(server))
41639 goto out;
41640
41641 + pax_track_stack();
41642 +
41643 ncp_age_dentry(server, dentry);
41644 len = sizeof(__name);
41645 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41646 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41647 int old_len, new_len;
41648 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41649
41650 + pax_track_stack();
41651 +
41652 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41653 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41654 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41655 diff -urNp linux-2.6.32.42/fs/ncpfs/inode.c linux-2.6.32.42/fs/ncpfs/inode.c
41656 --- linux-2.6.32.42/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41657 +++ linux-2.6.32.42/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41658 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41659 #endif
41660 struct ncp_entry_info finfo;
41661
41662 + pax_track_stack();
41663 +
41664 data.wdog_pid = NULL;
41665 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41666 if (!server)
41667 diff -urNp linux-2.6.32.42/fs/nfs/inode.c linux-2.6.32.42/fs/nfs/inode.c
41668 --- linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41669 +++ linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
41670 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41671 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41672 }
41673
41674 -static atomic_long_t nfs_attr_generation_counter;
41675 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41676
41677 static unsigned long nfs_read_attr_generation_counter(void)
41678 {
41679 - return atomic_long_read(&nfs_attr_generation_counter);
41680 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41681 }
41682
41683 unsigned long nfs_inc_attr_generation_counter(void)
41684 {
41685 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41686 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41687 }
41688
41689 void nfs_fattr_init(struct nfs_fattr *fattr)
41690 diff -urNp linux-2.6.32.42/fs/nfsd/lockd.c linux-2.6.32.42/fs/nfsd/lockd.c
41691 --- linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41692 +++ linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41693 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41694 fput(filp);
41695 }
41696
41697 -static struct nlmsvc_binding nfsd_nlm_ops = {
41698 +static const struct nlmsvc_binding nfsd_nlm_ops = {
41699 .fopen = nlm_fopen, /* open file for locking */
41700 .fclose = nlm_fclose, /* close file */
41701 };
41702 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4state.c linux-2.6.32.42/fs/nfsd/nfs4state.c
41703 --- linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41704 +++ linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41705 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41706 unsigned int cmd;
41707 int err;
41708
41709 + pax_track_stack();
41710 +
41711 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41712 (long long) lock->lk_offset,
41713 (long long) lock->lk_length);
41714 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4xdr.c linux-2.6.32.42/fs/nfsd/nfs4xdr.c
41715 --- linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41716 +++ linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41717 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41718 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41719 u32 minorversion = resp->cstate.minorversion;
41720
41721 + pax_track_stack();
41722 +
41723 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41724 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41725 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41726 diff -urNp linux-2.6.32.42/fs/nfsd/vfs.c linux-2.6.32.42/fs/nfsd/vfs.c
41727 --- linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41728 +++ linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41729 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41730 } else {
41731 oldfs = get_fs();
41732 set_fs(KERNEL_DS);
41733 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41734 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41735 set_fs(oldfs);
41736 }
41737
41738 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41739
41740 /* Write the data. */
41741 oldfs = get_fs(); set_fs(KERNEL_DS);
41742 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41743 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41744 set_fs(oldfs);
41745 if (host_err < 0)
41746 goto out_nfserr;
41747 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41748 */
41749
41750 oldfs = get_fs(); set_fs(KERNEL_DS);
41751 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41752 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41753 set_fs(oldfs);
41754
41755 if (host_err < 0)
41756 diff -urNp linux-2.6.32.42/fs/nilfs2/ioctl.c linux-2.6.32.42/fs/nilfs2/ioctl.c
41757 --- linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41758 +++ linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41759 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41760 unsigned int cmd, void __user *argp)
41761 {
41762 struct nilfs_argv argv[5];
41763 - const static size_t argsz[5] = {
41764 + static const size_t argsz[5] = {
41765 sizeof(struct nilfs_vdesc),
41766 sizeof(struct nilfs_period),
41767 sizeof(__u64),
41768 diff -urNp linux-2.6.32.42/fs/notify/dnotify/dnotify.c linux-2.6.32.42/fs/notify/dnotify/dnotify.c
41769 --- linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41770 +++ linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41771 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41772 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41773 }
41774
41775 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41776 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41777 .handle_event = dnotify_handle_event,
41778 .should_send_event = dnotify_should_send_event,
41779 .free_group_priv = NULL,
41780 diff -urNp linux-2.6.32.42/fs/notify/notification.c linux-2.6.32.42/fs/notify/notification.c
41781 --- linux-2.6.32.42/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41782 +++ linux-2.6.32.42/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41783 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41784 * get set to 0 so it will never get 'freed'
41785 */
41786 static struct fsnotify_event q_overflow_event;
41787 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41788 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41789
41790 /**
41791 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41792 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41793 */
41794 u32 fsnotify_get_cookie(void)
41795 {
41796 - return atomic_inc_return(&fsnotify_sync_cookie);
41797 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41798 }
41799 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41800
41801 diff -urNp linux-2.6.32.42/fs/ntfs/dir.c linux-2.6.32.42/fs/ntfs/dir.c
41802 --- linux-2.6.32.42/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41803 +++ linux-2.6.32.42/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41804 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
41805 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41806 ~(s64)(ndir->itype.index.block_size - 1)));
41807 /* Bounds checks. */
41808 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41809 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41810 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41811 "inode 0x%lx or driver bug.", vdir->i_ino);
41812 goto err_out;
41813 diff -urNp linux-2.6.32.42/fs/ntfs/file.c linux-2.6.32.42/fs/ntfs/file.c
41814 --- linux-2.6.32.42/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41815 +++ linux-2.6.32.42/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41816 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41817 #endif /* NTFS_RW */
41818 };
41819
41820 -const struct file_operations ntfs_empty_file_ops = {};
41821 +const struct file_operations ntfs_empty_file_ops __read_only;
41822
41823 -const struct inode_operations ntfs_empty_inode_ops = {};
41824 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41825 diff -urNp linux-2.6.32.42/fs/ocfs2/cluster/masklog.c linux-2.6.32.42/fs/ocfs2/cluster/masklog.c
41826 --- linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41827 +++ linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41828 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41829 return mlog_mask_store(mlog_attr->mask, buf, count);
41830 }
41831
41832 -static struct sysfs_ops mlog_attr_ops = {
41833 +static const struct sysfs_ops mlog_attr_ops = {
41834 .show = mlog_show,
41835 .store = mlog_store,
41836 };
41837 diff -urNp linux-2.6.32.42/fs/ocfs2/localalloc.c linux-2.6.32.42/fs/ocfs2/localalloc.c
41838 --- linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41839 +++ linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41840 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41841 goto bail;
41842 }
41843
41844 - atomic_inc(&osb->alloc_stats.moves);
41845 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41846
41847 status = 0;
41848 bail:
41849 diff -urNp linux-2.6.32.42/fs/ocfs2/namei.c linux-2.6.32.42/fs/ocfs2/namei.c
41850 --- linux-2.6.32.42/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41851 +++ linux-2.6.32.42/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41852 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41853 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41854 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41855
41856 + pax_track_stack();
41857 +
41858 /* At some point it might be nice to break this function up a
41859 * bit. */
41860
41861 diff -urNp linux-2.6.32.42/fs/ocfs2/ocfs2.h linux-2.6.32.42/fs/ocfs2/ocfs2.h
41862 --- linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41863 +++ linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41864 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
41865
41866 struct ocfs2_alloc_stats
41867 {
41868 - atomic_t moves;
41869 - atomic_t local_data;
41870 - atomic_t bitmap_data;
41871 - atomic_t bg_allocs;
41872 - atomic_t bg_extends;
41873 + atomic_unchecked_t moves;
41874 + atomic_unchecked_t local_data;
41875 + atomic_unchecked_t bitmap_data;
41876 + atomic_unchecked_t bg_allocs;
41877 + atomic_unchecked_t bg_extends;
41878 };
41879
41880 enum ocfs2_local_alloc_state
41881 diff -urNp linux-2.6.32.42/fs/ocfs2/suballoc.c linux-2.6.32.42/fs/ocfs2/suballoc.c
41882 --- linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41883 +++ linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41884 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41885 mlog_errno(status);
41886 goto bail;
41887 }
41888 - atomic_inc(&osb->alloc_stats.bg_extends);
41889 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41890
41891 /* You should never ask for this much metadata */
41892 BUG_ON(bits_wanted >
41893 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41894 mlog_errno(status);
41895 goto bail;
41896 }
41897 - atomic_inc(&osb->alloc_stats.bg_allocs);
41898 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41899
41900 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41901 ac->ac_bits_given += (*num_bits);
41902 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41903 mlog_errno(status);
41904 goto bail;
41905 }
41906 - atomic_inc(&osb->alloc_stats.bg_allocs);
41907 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41908
41909 BUG_ON(num_bits != 1);
41910
41911 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41912 cluster_start,
41913 num_clusters);
41914 if (!status)
41915 - atomic_inc(&osb->alloc_stats.local_data);
41916 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41917 } else {
41918 if (min_clusters > (osb->bitmap_cpg - 1)) {
41919 /* The only paths asking for contiguousness
41920 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41921 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41922 bg_blkno,
41923 bg_bit_off);
41924 - atomic_inc(&osb->alloc_stats.bitmap_data);
41925 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41926 }
41927 }
41928 if (status < 0) {
41929 diff -urNp linux-2.6.32.42/fs/ocfs2/super.c linux-2.6.32.42/fs/ocfs2/super.c
41930 --- linux-2.6.32.42/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41931 +++ linux-2.6.32.42/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41932 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41933 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41934 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41935 "Stats",
41936 - atomic_read(&osb->alloc_stats.bitmap_data),
41937 - atomic_read(&osb->alloc_stats.local_data),
41938 - atomic_read(&osb->alloc_stats.bg_allocs),
41939 - atomic_read(&osb->alloc_stats.moves),
41940 - atomic_read(&osb->alloc_stats.bg_extends));
41941 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41942 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41943 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41944 + atomic_read_unchecked(&osb->alloc_stats.moves),
41945 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41946
41947 out += snprintf(buf + out, len - out,
41948 "%10s => State: %u Descriptor: %llu Size: %u bits "
41949 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41950 spin_lock_init(&osb->osb_xattr_lock);
41951 ocfs2_init_inode_steal_slot(osb);
41952
41953 - atomic_set(&osb->alloc_stats.moves, 0);
41954 - atomic_set(&osb->alloc_stats.local_data, 0);
41955 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41956 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41957 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41958 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41959 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41960 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41961 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41962 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41963
41964 /* Copy the blockcheck stats from the superblock probe */
41965 osb->osb_ecc_stats = *stats;
41966 diff -urNp linux-2.6.32.42/fs/open.c linux-2.6.32.42/fs/open.c
41967 --- linux-2.6.32.42/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41968 +++ linux-2.6.32.42/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41969 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41970 error = locks_verify_truncate(inode, NULL, length);
41971 if (!error)
41972 error = security_path_truncate(&path, length, 0);
41973 +
41974 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41975 + error = -EACCES;
41976 +
41977 if (!error) {
41978 vfs_dq_init(inode);
41979 error = do_truncate(path.dentry, length, 0, NULL);
41980 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41981 if (__mnt_is_readonly(path.mnt))
41982 res = -EROFS;
41983
41984 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41985 + res = -EACCES;
41986 +
41987 out_path_release:
41988 path_put(&path);
41989 out:
41990 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41991 if (error)
41992 goto dput_and_out;
41993
41994 + gr_log_chdir(path.dentry, path.mnt);
41995 +
41996 set_fs_pwd(current->fs, &path);
41997
41998 dput_and_out:
41999 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
42000 goto out_putf;
42001
42002 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
42003 +
42004 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
42005 + error = -EPERM;
42006 +
42007 + if (!error)
42008 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
42009 +
42010 if (!error)
42011 set_fs_pwd(current->fs, &file->f_path);
42012 out_putf:
42013 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
42014 if (!capable(CAP_SYS_CHROOT))
42015 goto dput_and_out;
42016
42017 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
42018 + goto dput_and_out;
42019 +
42020 + if (gr_handle_chroot_caps(&path)) {
42021 + error = -ENOMEM;
42022 + goto dput_and_out;
42023 + }
42024 +
42025 set_fs_root(current->fs, &path);
42026 +
42027 + gr_handle_chroot_chdir(&path);
42028 +
42029 error = 0;
42030 dput_and_out:
42031 path_put(&path);
42032 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42033 err = mnt_want_write_file(file);
42034 if (err)
42035 goto out_putf;
42036 +
42037 mutex_lock(&inode->i_mutex);
42038 +
42039 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
42040 + err = -EACCES;
42041 + goto out_unlock;
42042 + }
42043 +
42044 if (mode == (mode_t) -1)
42045 mode = inode->i_mode;
42046 +
42047 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
42048 + err = -EPERM;
42049 + goto out_unlock;
42050 + }
42051 +
42052 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42053 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42054 err = notify_change(dentry, &newattrs);
42055 +
42056 +out_unlock:
42057 mutex_unlock(&inode->i_mutex);
42058 mnt_drop_write(file->f_path.mnt);
42059 out_putf:
42060 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42061 error = mnt_want_write(path.mnt);
42062 if (error)
42063 goto dput_and_out;
42064 +
42065 mutex_lock(&inode->i_mutex);
42066 +
42067 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42068 + error = -EACCES;
42069 + goto out_unlock;
42070 + }
42071 +
42072 if (mode == (mode_t) -1)
42073 mode = inode->i_mode;
42074 +
42075 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42076 + error = -EACCES;
42077 + goto out_unlock;
42078 + }
42079 +
42080 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42081 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42082 error = notify_change(path.dentry, &newattrs);
42083 +
42084 +out_unlock:
42085 mutex_unlock(&inode->i_mutex);
42086 mnt_drop_write(path.mnt);
42087 dput_and_out:
42088 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
42089 return sys_fchmodat(AT_FDCWD, filename, mode);
42090 }
42091
42092 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
42093 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
42094 {
42095 struct inode *inode = dentry->d_inode;
42096 int error;
42097 struct iattr newattrs;
42098
42099 + if (!gr_acl_handle_chown(dentry, mnt))
42100 + return -EACCES;
42101 +
42102 newattrs.ia_valid = ATTR_CTIME;
42103 if (user != (uid_t) -1) {
42104 newattrs.ia_valid |= ATTR_UID;
42105 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
42106 error = mnt_want_write(path.mnt);
42107 if (error)
42108 goto out_release;
42109 - error = chown_common(path.dentry, user, group);
42110 + error = chown_common(path.dentry, user, group, path.mnt);
42111 mnt_drop_write(path.mnt);
42112 out_release:
42113 path_put(&path);
42114 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
42115 error = mnt_want_write(path.mnt);
42116 if (error)
42117 goto out_release;
42118 - error = chown_common(path.dentry, user, group);
42119 + error = chown_common(path.dentry, user, group, path.mnt);
42120 mnt_drop_write(path.mnt);
42121 out_release:
42122 path_put(&path);
42123 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
42124 error = mnt_want_write(path.mnt);
42125 if (error)
42126 goto out_release;
42127 - error = chown_common(path.dentry, user, group);
42128 + error = chown_common(path.dentry, user, group, path.mnt);
42129 mnt_drop_write(path.mnt);
42130 out_release:
42131 path_put(&path);
42132 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
42133 goto out_fput;
42134 dentry = file->f_path.dentry;
42135 audit_inode(NULL, dentry);
42136 - error = chown_common(dentry, user, group);
42137 + error = chown_common(dentry, user, group, file->f_path.mnt);
42138 mnt_drop_write(file->f_path.mnt);
42139 out_fput:
42140 fput(file);
42141 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
42142 if (!IS_ERR(tmp)) {
42143 fd = get_unused_fd_flags(flags);
42144 if (fd >= 0) {
42145 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
42146 + struct file *f;
42147 + /* don't allow to be set by userland */
42148 + flags &= ~FMODE_GREXEC;
42149 + f = do_filp_open(dfd, tmp, flags, mode, 0);
42150 if (IS_ERR(f)) {
42151 put_unused_fd(fd);
42152 fd = PTR_ERR(f);
42153 diff -urNp linux-2.6.32.42/fs/partitions/ldm.c linux-2.6.32.42/fs/partitions/ldm.c
42154 --- linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
42155 +++ linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
42156 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42157 ldm_error ("A VBLK claims to have %d parts.", num);
42158 return false;
42159 }
42160 +
42161 if (rec >= num) {
42162 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42163 return false;
42164 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42165 goto found;
42166 }
42167
42168 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42169 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42170 if (!f) {
42171 ldm_crit ("Out of memory.");
42172 return false;
42173 diff -urNp linux-2.6.32.42/fs/partitions/mac.c linux-2.6.32.42/fs/partitions/mac.c
42174 --- linux-2.6.32.42/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
42175 +++ linux-2.6.32.42/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
42176 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
42177 return 0; /* not a MacOS disk */
42178 }
42179 blocks_in_map = be32_to_cpu(part->map_count);
42180 + printk(" [mac]");
42181 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
42182 put_dev_sector(sect);
42183 return 0;
42184 }
42185 - printk(" [mac]");
42186 for (slot = 1; slot <= blocks_in_map; ++slot) {
42187 int pos = slot * secsize;
42188 put_dev_sector(sect);
42189 diff -urNp linux-2.6.32.42/fs/pipe.c linux-2.6.32.42/fs/pipe.c
42190 --- linux-2.6.32.42/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
42191 +++ linux-2.6.32.42/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
42192 @@ -401,9 +401,9 @@ redo:
42193 }
42194 if (bufs) /* More to do? */
42195 continue;
42196 - if (!pipe->writers)
42197 + if (!atomic_read(&pipe->writers))
42198 break;
42199 - if (!pipe->waiting_writers) {
42200 + if (!atomic_read(&pipe->waiting_writers)) {
42201 /* syscall merging: Usually we must not sleep
42202 * if O_NONBLOCK is set, or if we got some data.
42203 * But if a writer sleeps in kernel space, then
42204 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
42205 mutex_lock(&inode->i_mutex);
42206 pipe = inode->i_pipe;
42207
42208 - if (!pipe->readers) {
42209 + if (!atomic_read(&pipe->readers)) {
42210 send_sig(SIGPIPE, current, 0);
42211 ret = -EPIPE;
42212 goto out;
42213 @@ -511,7 +511,7 @@ redo1:
42214 for (;;) {
42215 int bufs;
42216
42217 - if (!pipe->readers) {
42218 + if (!atomic_read(&pipe->readers)) {
42219 send_sig(SIGPIPE, current, 0);
42220 if (!ret)
42221 ret = -EPIPE;
42222 @@ -597,9 +597,9 @@ redo2:
42223 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42224 do_wakeup = 0;
42225 }
42226 - pipe->waiting_writers++;
42227 + atomic_inc(&pipe->waiting_writers);
42228 pipe_wait(pipe);
42229 - pipe->waiting_writers--;
42230 + atomic_dec(&pipe->waiting_writers);
42231 }
42232 out:
42233 mutex_unlock(&inode->i_mutex);
42234 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
42235 mask = 0;
42236 if (filp->f_mode & FMODE_READ) {
42237 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42238 - if (!pipe->writers && filp->f_version != pipe->w_counter)
42239 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42240 mask |= POLLHUP;
42241 }
42242
42243 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
42244 * Most Unices do not set POLLERR for FIFOs but on Linux they
42245 * behave exactly like pipes for poll().
42246 */
42247 - if (!pipe->readers)
42248 + if (!atomic_read(&pipe->readers))
42249 mask |= POLLERR;
42250 }
42251
42252 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
42253
42254 mutex_lock(&inode->i_mutex);
42255 pipe = inode->i_pipe;
42256 - pipe->readers -= decr;
42257 - pipe->writers -= decw;
42258 + atomic_sub(decr, &pipe->readers);
42259 + atomic_sub(decw, &pipe->writers);
42260
42261 - if (!pipe->readers && !pipe->writers) {
42262 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42263 free_pipe_info(inode);
42264 } else {
42265 wake_up_interruptible_sync(&pipe->wait);
42266 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
42267
42268 if (inode->i_pipe) {
42269 ret = 0;
42270 - inode->i_pipe->readers++;
42271 + atomic_inc(&inode->i_pipe->readers);
42272 }
42273
42274 mutex_unlock(&inode->i_mutex);
42275 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
42276
42277 if (inode->i_pipe) {
42278 ret = 0;
42279 - inode->i_pipe->writers++;
42280 + atomic_inc(&inode->i_pipe->writers);
42281 }
42282
42283 mutex_unlock(&inode->i_mutex);
42284 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
42285 if (inode->i_pipe) {
42286 ret = 0;
42287 if (filp->f_mode & FMODE_READ)
42288 - inode->i_pipe->readers++;
42289 + atomic_inc(&inode->i_pipe->readers);
42290 if (filp->f_mode & FMODE_WRITE)
42291 - inode->i_pipe->writers++;
42292 + atomic_inc(&inode->i_pipe->writers);
42293 }
42294
42295 mutex_unlock(&inode->i_mutex);
42296 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
42297 inode->i_pipe = NULL;
42298 }
42299
42300 -static struct vfsmount *pipe_mnt __read_mostly;
42301 +struct vfsmount *pipe_mnt __read_mostly;
42302 static int pipefs_delete_dentry(struct dentry *dentry)
42303 {
42304 /*
42305 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
42306 goto fail_iput;
42307 inode->i_pipe = pipe;
42308
42309 - pipe->readers = pipe->writers = 1;
42310 + atomic_set(&pipe->readers, 1);
42311 + atomic_set(&pipe->writers, 1);
42312 inode->i_fop = &rdwr_pipefifo_fops;
42313
42314 /*
42315 diff -urNp linux-2.6.32.42/fs/proc/array.c linux-2.6.32.42/fs/proc/array.c
42316 --- linux-2.6.32.42/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
42317 +++ linux-2.6.32.42/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
42318 @@ -60,6 +60,7 @@
42319 #include <linux/tty.h>
42320 #include <linux/string.h>
42321 #include <linux/mman.h>
42322 +#include <linux/grsecurity.h>
42323 #include <linux/proc_fs.h>
42324 #include <linux/ioport.h>
42325 #include <linux/uaccess.h>
42326 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
42327 p->nivcsw);
42328 }
42329
42330 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42331 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
42332 +{
42333 + if (p->mm)
42334 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42335 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42336 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42337 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42338 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42339 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42340 + else
42341 + seq_printf(m, "PaX:\t-----\n");
42342 +}
42343 +#endif
42344 +
42345 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42346 struct pid *pid, struct task_struct *task)
42347 {
42348 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
42349 task_cap(m, task);
42350 cpuset_task_status_allowed(m, task);
42351 task_context_switch_counts(m, task);
42352 +
42353 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42354 + task_pax(m, task);
42355 +#endif
42356 +
42357 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42358 + task_grsec_rbac(m, task);
42359 +#endif
42360 +
42361 return 0;
42362 }
42363
42364 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42365 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42366 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42367 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42368 +#endif
42369 +
42370 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42371 struct pid *pid, struct task_struct *task, int whole)
42372 {
42373 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
42374 cputime_t cutime, cstime, utime, stime;
42375 cputime_t cgtime, gtime;
42376 unsigned long rsslim = 0;
42377 - char tcomm[sizeof(task->comm)];
42378 + char tcomm[sizeof(task->comm)] = { 0 };
42379 unsigned long flags;
42380
42381 + pax_track_stack();
42382 +
42383 state = *get_task_state(task);
42384 vsize = eip = esp = 0;
42385 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42386 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
42387 gtime = task_gtime(task);
42388 }
42389
42390 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42391 + if (PAX_RAND_FLAGS(mm)) {
42392 + eip = 0;
42393 + esp = 0;
42394 + wchan = 0;
42395 + }
42396 +#endif
42397 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42398 + wchan = 0;
42399 + eip =0;
42400 + esp =0;
42401 +#endif
42402 +
42403 /* scale priority and nice values from timeslices to -20..20 */
42404 /* to make it look like a "normal" Unix priority/nice value */
42405 priority = task_prio(task);
42406 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
42407 vsize,
42408 mm ? get_mm_rss(mm) : 0,
42409 rsslim,
42410 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42411 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42412 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42413 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42414 +#else
42415 mm ? (permitted ? mm->start_code : 1) : 0,
42416 mm ? (permitted ? mm->end_code : 1) : 0,
42417 (permitted && mm) ? mm->start_stack : 0,
42418 +#endif
42419 esp,
42420 eip,
42421 /* The signal information here is obsolete.
42422 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
42423
42424 return 0;
42425 }
42426 +
42427 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42428 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42429 +{
42430 + u32 curr_ip = 0;
42431 + unsigned long flags;
42432 +
42433 + if (lock_task_sighand(task, &flags)) {
42434 + curr_ip = task->signal->curr_ip;
42435 + unlock_task_sighand(task, &flags);
42436 + }
42437 +
42438 + return sprintf(buffer, "%pI4\n", &curr_ip);
42439 +}
42440 +#endif
42441 diff -urNp linux-2.6.32.42/fs/proc/base.c linux-2.6.32.42/fs/proc/base.c
42442 --- linux-2.6.32.42/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
42443 +++ linux-2.6.32.42/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
42444 @@ -102,6 +102,22 @@ struct pid_entry {
42445 union proc_op op;
42446 };
42447
42448 +struct getdents_callback {
42449 + struct linux_dirent __user * current_dir;
42450 + struct linux_dirent __user * previous;
42451 + struct file * file;
42452 + int count;
42453 + int error;
42454 +};
42455 +
42456 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42457 + loff_t offset, u64 ino, unsigned int d_type)
42458 +{
42459 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
42460 + buf->error = -EINVAL;
42461 + return 0;
42462 +}
42463 +
42464 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42465 .name = (NAME), \
42466 .len = sizeof(NAME) - 1, \
42467 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
42468 if (task == current)
42469 return 0;
42470
42471 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42472 + return -EPERM;
42473 +
42474 /*
42475 * If current is actively ptrace'ing, and would also be
42476 * permitted to freshly attach with ptrace now, permit it.
42477 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
42478 if (!mm->arg_end)
42479 goto out_mm; /* Shh! No looking before we're done */
42480
42481 + if (gr_acl_handle_procpidmem(task))
42482 + goto out_mm;
42483 +
42484 len = mm->arg_end - mm->arg_start;
42485
42486 if (len > PAGE_SIZE)
42487 @@ -287,12 +309,28 @@ out:
42488 return res;
42489 }
42490
42491 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42492 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42493 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42494 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42495 +#endif
42496 +
42497 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42498 {
42499 int res = 0;
42500 struct mm_struct *mm = get_task_mm(task);
42501 if (mm) {
42502 unsigned int nwords = 0;
42503 +
42504 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42505 + /* allow if we're currently ptracing this task */
42506 + if (PAX_RAND_FLAGS(mm) &&
42507 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42508 + mmput(mm);
42509 + return res;
42510 + }
42511 +#endif
42512 +
42513 do {
42514 nwords += 2;
42515 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42516 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
42517 }
42518
42519
42520 -#ifdef CONFIG_KALLSYMS
42521 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42522 /*
42523 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42524 * Returns the resolved symbol. If that fails, simply return the address.
42525 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
42526 }
42527 #endif /* CONFIG_KALLSYMS */
42528
42529 -#ifdef CONFIG_STACKTRACE
42530 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42531
42532 #define MAX_STACK_TRACE_DEPTH 64
42533
42534 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
42535 return count;
42536 }
42537
42538 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42539 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42540 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42541 {
42542 long nr;
42543 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
42544 /************************************************************************/
42545
42546 /* permission checks */
42547 -static int proc_fd_access_allowed(struct inode *inode)
42548 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42549 {
42550 struct task_struct *task;
42551 int allowed = 0;
42552 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
42553 */
42554 task = get_proc_task(inode);
42555 if (task) {
42556 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42557 + if (log)
42558 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42559 + else
42560 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42561 put_task_struct(task);
42562 }
42563 return allowed;
42564 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
42565 if (!task)
42566 goto out_no_task;
42567
42568 + if (gr_acl_handle_procpidmem(task))
42569 + goto out;
42570 +
42571 if (!ptrace_may_access(task, PTRACE_MODE_READ))
42572 goto out;
42573
42574 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
42575 path_put(&nd->path);
42576
42577 /* Are we allowed to snoop on the tasks file descriptors? */
42578 - if (!proc_fd_access_allowed(inode))
42579 + if (!proc_fd_access_allowed(inode,0))
42580 goto out;
42581
42582 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42583 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
42584 struct path path;
42585
42586 /* Are we allowed to snoop on the tasks file descriptors? */
42587 - if (!proc_fd_access_allowed(inode))
42588 - goto out;
42589 + /* logging this is needed for learning on chromium to work properly,
42590 + but we don't want to flood the logs from 'ps' which does a readlink
42591 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42592 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
42593 + */
42594 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42595 + if (!proc_fd_access_allowed(inode,0))
42596 + goto out;
42597 + } else {
42598 + if (!proc_fd_access_allowed(inode,1))
42599 + goto out;
42600 + }
42601
42602 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42603 if (error)
42604 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
42605 rcu_read_lock();
42606 cred = __task_cred(task);
42607 inode->i_uid = cred->euid;
42608 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42609 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42610 +#else
42611 inode->i_gid = cred->egid;
42612 +#endif
42613 rcu_read_unlock();
42614 }
42615 security_task_to_inode(task, inode);
42616 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42617 struct inode *inode = dentry->d_inode;
42618 struct task_struct *task;
42619 const struct cred *cred;
42620 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42621 + const struct cred *tmpcred = current_cred();
42622 +#endif
42623
42624 generic_fillattr(inode, stat);
42625
42626 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42627 stat->uid = 0;
42628 stat->gid = 0;
42629 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42630 +
42631 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42632 + rcu_read_unlock();
42633 + return -ENOENT;
42634 + }
42635 +
42636 if (task) {
42637 + cred = __task_cred(task);
42638 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42639 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42640 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42641 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42642 +#endif
42643 + ) {
42644 +#endif
42645 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42646 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42647 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42648 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42649 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42650 +#endif
42651 task_dumpable(task)) {
42652 - cred = __task_cred(task);
42653 stat->uid = cred->euid;
42654 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42655 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42656 +#else
42657 stat->gid = cred->egid;
42658 +#endif
42659 }
42660 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42661 + } else {
42662 + rcu_read_unlock();
42663 + return -ENOENT;
42664 + }
42665 +#endif
42666 }
42667 rcu_read_unlock();
42668 return 0;
42669 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42670
42671 if (task) {
42672 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42673 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42674 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42675 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42676 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42677 +#endif
42678 task_dumpable(task)) {
42679 rcu_read_lock();
42680 cred = __task_cred(task);
42681 inode->i_uid = cred->euid;
42682 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42683 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42684 +#else
42685 inode->i_gid = cred->egid;
42686 +#endif
42687 rcu_read_unlock();
42688 } else {
42689 inode->i_uid = 0;
42690 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42691 int fd = proc_fd(inode);
42692
42693 if (task) {
42694 - files = get_files_struct(task);
42695 + if (!gr_acl_handle_procpidmem(task))
42696 + files = get_files_struct(task);
42697 put_task_struct(task);
42698 }
42699 if (files) {
42700 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
42701 static int proc_fd_permission(struct inode *inode, int mask)
42702 {
42703 int rv;
42704 + struct task_struct *task;
42705
42706 rv = generic_permission(inode, mask, NULL);
42707 - if (rv == 0)
42708 - return 0;
42709 +
42710 if (task_pid(current) == proc_pid(inode))
42711 rv = 0;
42712 +
42713 + task = get_proc_task(inode);
42714 + if (task == NULL)
42715 + return rv;
42716 +
42717 + if (gr_acl_handle_procpidmem(task))
42718 + rv = -EACCES;
42719 +
42720 + put_task_struct(task);
42721 +
42722 return rv;
42723 }
42724
42725 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42726 if (!task)
42727 goto out_no_task;
42728
42729 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42730 + goto out;
42731 +
42732 /*
42733 * Yes, it does not scale. And it should not. Don't add
42734 * new entries into /proc/<tgid>/ without very good reasons.
42735 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42736 if (!task)
42737 goto out_no_task;
42738
42739 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42740 + goto out;
42741 +
42742 ret = 0;
42743 i = filp->f_pos;
42744 switch (i) {
42745 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42746 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42747 void *cookie)
42748 {
42749 - char *s = nd_get_link(nd);
42750 + const char *s = nd_get_link(nd);
42751 if (!IS_ERR(s))
42752 __putname(s);
42753 }
42754 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42755 #ifdef CONFIG_SCHED_DEBUG
42756 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42757 #endif
42758 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42759 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42760 INF("syscall", S_IRUSR, proc_pid_syscall),
42761 #endif
42762 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42763 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42764 #ifdef CONFIG_SECURITY
42765 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42766 #endif
42767 -#ifdef CONFIG_KALLSYMS
42768 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42769 INF("wchan", S_IRUGO, proc_pid_wchan),
42770 #endif
42771 -#ifdef CONFIG_STACKTRACE
42772 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42773 ONE("stack", S_IRUSR, proc_pid_stack),
42774 #endif
42775 #ifdef CONFIG_SCHEDSTATS
42776 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42777 #ifdef CONFIG_TASK_IO_ACCOUNTING
42778 INF("io", S_IRUGO, proc_tgid_io_accounting),
42779 #endif
42780 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42781 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42782 +#endif
42783 };
42784
42785 static int proc_tgid_base_readdir(struct file * filp,
42786 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42787 if (!inode)
42788 goto out;
42789
42790 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42791 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42792 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42793 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42794 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42795 +#else
42796 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42797 +#endif
42798 inode->i_op = &proc_tgid_base_inode_operations;
42799 inode->i_fop = &proc_tgid_base_operations;
42800 inode->i_flags|=S_IMMUTABLE;
42801 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42802 if (!task)
42803 goto out;
42804
42805 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42806 + goto out_put_task;
42807 +
42808 result = proc_pid_instantiate(dir, dentry, task, NULL);
42809 +out_put_task:
42810 put_task_struct(task);
42811 out:
42812 return result;
42813 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42814 {
42815 unsigned int nr;
42816 struct task_struct *reaper;
42817 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42818 + const struct cred *tmpcred = current_cred();
42819 + const struct cred *itercred;
42820 +#endif
42821 + filldir_t __filldir = filldir;
42822 struct tgid_iter iter;
42823 struct pid_namespace *ns;
42824
42825 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
42826 for (iter = next_tgid(ns, iter);
42827 iter.task;
42828 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42829 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42830 + rcu_read_lock();
42831 + itercred = __task_cred(iter.task);
42832 +#endif
42833 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42834 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42835 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42836 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42837 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42838 +#endif
42839 + )
42840 +#endif
42841 + )
42842 + __filldir = &gr_fake_filldir;
42843 + else
42844 + __filldir = filldir;
42845 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42846 + rcu_read_unlock();
42847 +#endif
42848 filp->f_pos = iter.tgid + TGID_OFFSET;
42849 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42850 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42851 put_task_struct(iter.task);
42852 goto out;
42853 }
42854 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
42855 #ifdef CONFIG_SCHED_DEBUG
42856 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42857 #endif
42858 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42859 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42860 INF("syscall", S_IRUSR, proc_pid_syscall),
42861 #endif
42862 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42863 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
42864 #ifdef CONFIG_SECURITY
42865 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42866 #endif
42867 -#ifdef CONFIG_KALLSYMS
42868 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42869 INF("wchan", S_IRUGO, proc_pid_wchan),
42870 #endif
42871 -#ifdef CONFIG_STACKTRACE
42872 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42873 ONE("stack", S_IRUSR, proc_pid_stack),
42874 #endif
42875 #ifdef CONFIG_SCHEDSTATS
42876 diff -urNp linux-2.6.32.42/fs/proc/cmdline.c linux-2.6.32.42/fs/proc/cmdline.c
42877 --- linux-2.6.32.42/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42878 +++ linux-2.6.32.42/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42879 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42880
42881 static int __init proc_cmdline_init(void)
42882 {
42883 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42884 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42885 +#else
42886 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42887 +#endif
42888 return 0;
42889 }
42890 module_init(proc_cmdline_init);
42891 diff -urNp linux-2.6.32.42/fs/proc/devices.c linux-2.6.32.42/fs/proc/devices.c
42892 --- linux-2.6.32.42/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42893 +++ linux-2.6.32.42/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42894 @@ -64,7 +64,11 @@ static const struct file_operations proc
42895
42896 static int __init proc_devices_init(void)
42897 {
42898 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42899 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42900 +#else
42901 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42902 +#endif
42903 return 0;
42904 }
42905 module_init(proc_devices_init);
42906 diff -urNp linux-2.6.32.42/fs/proc/inode.c linux-2.6.32.42/fs/proc/inode.c
42907 --- linux-2.6.32.42/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42908 +++ linux-2.6.32.42/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42909 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42910 if (de->mode) {
42911 inode->i_mode = de->mode;
42912 inode->i_uid = de->uid;
42913 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42914 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42915 +#else
42916 inode->i_gid = de->gid;
42917 +#endif
42918 }
42919 if (de->size)
42920 inode->i_size = de->size;
42921 diff -urNp linux-2.6.32.42/fs/proc/internal.h linux-2.6.32.42/fs/proc/internal.h
42922 --- linux-2.6.32.42/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42923 +++ linux-2.6.32.42/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42924 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42925 struct pid *pid, struct task_struct *task);
42926 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42927 struct pid *pid, struct task_struct *task);
42928 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42929 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42930 +#endif
42931 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42932
42933 extern const struct file_operations proc_maps_operations;
42934 diff -urNp linux-2.6.32.42/fs/proc/Kconfig linux-2.6.32.42/fs/proc/Kconfig
42935 --- linux-2.6.32.42/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42936 +++ linux-2.6.32.42/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42937 @@ -30,12 +30,12 @@ config PROC_FS
42938
42939 config PROC_KCORE
42940 bool "/proc/kcore support" if !ARM
42941 - depends on PROC_FS && MMU
42942 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42943
42944 config PROC_VMCORE
42945 bool "/proc/vmcore support (EXPERIMENTAL)"
42946 - depends on PROC_FS && CRASH_DUMP
42947 - default y
42948 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42949 + default n
42950 help
42951 Exports the dump image of crashed kernel in ELF format.
42952
42953 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42954 limited in memory.
42955
42956 config PROC_PAGE_MONITOR
42957 - default y
42958 - depends on PROC_FS && MMU
42959 + default n
42960 + depends on PROC_FS && MMU && !GRKERNSEC
42961 bool "Enable /proc page monitoring" if EMBEDDED
42962 help
42963 Various /proc files exist to monitor process memory utilization:
42964 diff -urNp linux-2.6.32.42/fs/proc/kcore.c linux-2.6.32.42/fs/proc/kcore.c
42965 --- linux-2.6.32.42/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42966 +++ linux-2.6.32.42/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42967 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42968 off_t offset = 0;
42969 struct kcore_list *m;
42970
42971 + pax_track_stack();
42972 +
42973 /* setup ELF header */
42974 elf = (struct elfhdr *) bufp;
42975 bufp += sizeof(struct elfhdr);
42976 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42977 * the addresses in the elf_phdr on our list.
42978 */
42979 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42980 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42981 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42982 + if (tsz > buflen)
42983 tsz = buflen;
42984 -
42985 +
42986 while (buflen) {
42987 struct kcore_list *m;
42988
42989 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42990 kfree(elf_buf);
42991 } else {
42992 if (kern_addr_valid(start)) {
42993 - unsigned long n;
42994 + char *elf_buf;
42995 + mm_segment_t oldfs;
42996
42997 - n = copy_to_user(buffer, (char *)start, tsz);
42998 - /*
42999 - * We cannot distingush between fault on source
43000 - * and fault on destination. When this happens
43001 - * we clear too and hope it will trigger the
43002 - * EFAULT again.
43003 - */
43004 - if (n) {
43005 - if (clear_user(buffer + tsz - n,
43006 - n))
43007 + elf_buf = kmalloc(tsz, GFP_KERNEL);
43008 + if (!elf_buf)
43009 + return -ENOMEM;
43010 + oldfs = get_fs();
43011 + set_fs(KERNEL_DS);
43012 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
43013 + set_fs(oldfs);
43014 + if (copy_to_user(buffer, elf_buf, tsz)) {
43015 + kfree(elf_buf);
43016 return -EFAULT;
43017 + }
43018 }
43019 + set_fs(oldfs);
43020 + kfree(elf_buf);
43021 } else {
43022 if (clear_user(buffer, tsz))
43023 return -EFAULT;
43024 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
43025
43026 static int open_kcore(struct inode *inode, struct file *filp)
43027 {
43028 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
43029 + return -EPERM;
43030 +#endif
43031 if (!capable(CAP_SYS_RAWIO))
43032 return -EPERM;
43033 if (kcore_need_update)
43034 diff -urNp linux-2.6.32.42/fs/proc/meminfo.c linux-2.6.32.42/fs/proc/meminfo.c
43035 --- linux-2.6.32.42/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
43036 +++ linux-2.6.32.42/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
43037 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
43038 unsigned long pages[NR_LRU_LISTS];
43039 int lru;
43040
43041 + pax_track_stack();
43042 +
43043 /*
43044 * display in kilobytes.
43045 */
43046 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
43047 vmi.used >> 10,
43048 vmi.largest_chunk >> 10
43049 #ifdef CONFIG_MEMORY_FAILURE
43050 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
43051 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
43052 #endif
43053 );
43054
43055 diff -urNp linux-2.6.32.42/fs/proc/nommu.c linux-2.6.32.42/fs/proc/nommu.c
43056 --- linux-2.6.32.42/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
43057 +++ linux-2.6.32.42/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
43058 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
43059 if (len < 1)
43060 len = 1;
43061 seq_printf(m, "%*c", len, ' ');
43062 - seq_path(m, &file->f_path, "");
43063 + seq_path(m, &file->f_path, "\n\\");
43064 }
43065
43066 seq_putc(m, '\n');
43067 diff -urNp linux-2.6.32.42/fs/proc/proc_net.c linux-2.6.32.42/fs/proc/proc_net.c
43068 --- linux-2.6.32.42/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
43069 +++ linux-2.6.32.42/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
43070 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
43071 struct task_struct *task;
43072 struct nsproxy *ns;
43073 struct net *net = NULL;
43074 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43075 + const struct cred *cred = current_cred();
43076 +#endif
43077 +
43078 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43079 + if (cred->fsuid)
43080 + return net;
43081 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43082 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
43083 + return net;
43084 +#endif
43085
43086 rcu_read_lock();
43087 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43088 diff -urNp linux-2.6.32.42/fs/proc/proc_sysctl.c linux-2.6.32.42/fs/proc/proc_sysctl.c
43089 --- linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
43090 +++ linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
43091 @@ -7,6 +7,8 @@
43092 #include <linux/security.h>
43093 #include "internal.h"
43094
43095 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43096 +
43097 static const struct dentry_operations proc_sys_dentry_operations;
43098 static const struct file_operations proc_sys_file_operations;
43099 static const struct inode_operations proc_sys_inode_operations;
43100 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
43101 if (!p)
43102 goto out;
43103
43104 + if (gr_handle_sysctl(p, MAY_EXEC))
43105 + goto out;
43106 +
43107 err = ERR_PTR(-ENOMEM);
43108 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43109 if (h)
43110 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
43111 if (*pos < file->f_pos)
43112 continue;
43113
43114 + if (gr_handle_sysctl(table, 0))
43115 + continue;
43116 +
43117 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43118 if (res)
43119 return res;
43120 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
43121 if (IS_ERR(head))
43122 return PTR_ERR(head);
43123
43124 + if (table && gr_handle_sysctl(table, MAY_EXEC))
43125 + return -ENOENT;
43126 +
43127 generic_fillattr(inode, stat);
43128 if (table)
43129 stat->mode = (stat->mode & S_IFMT) | table->mode;
43130 diff -urNp linux-2.6.32.42/fs/proc/root.c linux-2.6.32.42/fs/proc/root.c
43131 --- linux-2.6.32.42/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
43132 +++ linux-2.6.32.42/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
43133 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
43134 #ifdef CONFIG_PROC_DEVICETREE
43135 proc_device_tree_init();
43136 #endif
43137 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43138 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43139 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43140 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43141 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43142 +#endif
43143 +#else
43144 proc_mkdir("bus", NULL);
43145 +#endif
43146 proc_sys_init();
43147 }
43148
43149 diff -urNp linux-2.6.32.42/fs/proc/task_mmu.c linux-2.6.32.42/fs/proc/task_mmu.c
43150 --- linux-2.6.32.42/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
43151 +++ linux-2.6.32.42/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
43152 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
43153 "VmStk:\t%8lu kB\n"
43154 "VmExe:\t%8lu kB\n"
43155 "VmLib:\t%8lu kB\n"
43156 - "VmPTE:\t%8lu kB\n",
43157 - hiwater_vm << (PAGE_SHIFT-10),
43158 + "VmPTE:\t%8lu kB\n"
43159 +
43160 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43161 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43162 +#endif
43163 +
43164 + ,hiwater_vm << (PAGE_SHIFT-10),
43165 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43166 mm->locked_vm << (PAGE_SHIFT-10),
43167 hiwater_rss << (PAGE_SHIFT-10),
43168 total_rss << (PAGE_SHIFT-10),
43169 data << (PAGE_SHIFT-10),
43170 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43171 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
43172 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
43173 +
43174 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43175 + , mm->context.user_cs_base, mm->context.user_cs_limit
43176 +#endif
43177 +
43178 + );
43179 }
43180
43181 unsigned long task_vsize(struct mm_struct *mm)
43182 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
43183 struct proc_maps_private *priv = m->private;
43184 struct vm_area_struct *vma = v;
43185
43186 - vma_stop(priv, vma);
43187 + if (!IS_ERR(vma))
43188 + vma_stop(priv, vma);
43189 if (priv->task)
43190 put_task_struct(priv->task);
43191 }
43192 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
43193 return ret;
43194 }
43195
43196 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43197 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43198 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
43199 + _mm->pax_flags & MF_PAX_SEGMEXEC))
43200 +#endif
43201 +
43202 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43203 {
43204 struct mm_struct *mm = vma->vm_mm;
43205 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
43206 int flags = vma->vm_flags;
43207 unsigned long ino = 0;
43208 unsigned long long pgoff = 0;
43209 - unsigned long start;
43210 dev_t dev = 0;
43211 int len;
43212
43213 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
43214 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43215 }
43216
43217 - /* We don't show the stack guard page in /proc/maps */
43218 - start = vma->vm_start;
43219 - if (vma->vm_flags & VM_GROWSDOWN)
43220 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
43221 - start += PAGE_SIZE;
43222 -
43223 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43224 - start,
43225 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43226 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
43227 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
43228 +#else
43229 + vma->vm_start,
43230 vma->vm_end,
43231 +#endif
43232 flags & VM_READ ? 'r' : '-',
43233 flags & VM_WRITE ? 'w' : '-',
43234 flags & VM_EXEC ? 'x' : '-',
43235 flags & VM_MAYSHARE ? 's' : 'p',
43236 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43237 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43238 +#else
43239 pgoff,
43240 +#endif
43241 MAJOR(dev), MINOR(dev), ino, &len);
43242
43243 /*
43244 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
43245 */
43246 if (file) {
43247 pad_len_spaces(m, len);
43248 - seq_path(m, &file->f_path, "\n");
43249 + seq_path(m, &file->f_path, "\n\\");
43250 } else {
43251 const char *name = arch_vma_name(vma);
43252 if (!name) {
43253 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
43254 if (vma->vm_start <= mm->brk &&
43255 vma->vm_end >= mm->start_brk) {
43256 name = "[heap]";
43257 - } else if (vma->vm_start <= mm->start_stack &&
43258 - vma->vm_end >= mm->start_stack) {
43259 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43260 + (vma->vm_start <= mm->start_stack &&
43261 + vma->vm_end >= mm->start_stack)) {
43262 name = "[stack]";
43263 }
43264 } else {
43265 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
43266 };
43267
43268 memset(&mss, 0, sizeof mss);
43269 - mss.vma = vma;
43270 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43271 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43272 +
43273 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43274 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43275 +#endif
43276 + mss.vma = vma;
43277 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43278 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43279 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43280 + }
43281 +#endif
43282
43283 show_map_vma(m, vma);
43284
43285 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
43286 "Swap: %8lu kB\n"
43287 "KernelPageSize: %8lu kB\n"
43288 "MMUPageSize: %8lu kB\n",
43289 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43290 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43291 +#else
43292 (vma->vm_end - vma->vm_start) >> 10,
43293 +#endif
43294 mss.resident >> 10,
43295 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43296 mss.shared_clean >> 10,
43297 diff -urNp linux-2.6.32.42/fs/proc/task_nommu.c linux-2.6.32.42/fs/proc/task_nommu.c
43298 --- linux-2.6.32.42/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
43299 +++ linux-2.6.32.42/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
43300 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
43301 else
43302 bytes += kobjsize(mm);
43303
43304 - if (current->fs && current->fs->users > 1)
43305 + if (current->fs && atomic_read(&current->fs->users) > 1)
43306 sbytes += kobjsize(current->fs);
43307 else
43308 bytes += kobjsize(current->fs);
43309 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
43310 if (len < 1)
43311 len = 1;
43312 seq_printf(m, "%*c", len, ' ');
43313 - seq_path(m, &file->f_path, "");
43314 + seq_path(m, &file->f_path, "\n\\");
43315 }
43316
43317 seq_putc(m, '\n');
43318 diff -urNp linux-2.6.32.42/fs/readdir.c linux-2.6.32.42/fs/readdir.c
43319 --- linux-2.6.32.42/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
43320 +++ linux-2.6.32.42/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
43321 @@ -16,6 +16,7 @@
43322 #include <linux/security.h>
43323 #include <linux/syscalls.h>
43324 #include <linux/unistd.h>
43325 +#include <linux/namei.h>
43326
43327 #include <asm/uaccess.h>
43328
43329 @@ -67,6 +68,7 @@ struct old_linux_dirent {
43330
43331 struct readdir_callback {
43332 struct old_linux_dirent __user * dirent;
43333 + struct file * file;
43334 int result;
43335 };
43336
43337 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43338 buf->result = -EOVERFLOW;
43339 return -EOVERFLOW;
43340 }
43341 +
43342 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43343 + return 0;
43344 +
43345 buf->result++;
43346 dirent = buf->dirent;
43347 if (!access_ok(VERIFY_WRITE, dirent,
43348 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43349
43350 buf.result = 0;
43351 buf.dirent = dirent;
43352 + buf.file = file;
43353
43354 error = vfs_readdir(file, fillonedir, &buf);
43355 if (buf.result)
43356 @@ -142,6 +149,7 @@ struct linux_dirent {
43357 struct getdents_callback {
43358 struct linux_dirent __user * current_dir;
43359 struct linux_dirent __user * previous;
43360 + struct file * file;
43361 int count;
43362 int error;
43363 };
43364 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
43365 buf->error = -EOVERFLOW;
43366 return -EOVERFLOW;
43367 }
43368 +
43369 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43370 + return 0;
43371 +
43372 dirent = buf->previous;
43373 if (dirent) {
43374 if (__put_user(offset, &dirent->d_off))
43375 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43376 buf.previous = NULL;
43377 buf.count = count;
43378 buf.error = 0;
43379 + buf.file = file;
43380
43381 error = vfs_readdir(file, filldir, &buf);
43382 if (error >= 0)
43383 @@ -228,6 +241,7 @@ out:
43384 struct getdents_callback64 {
43385 struct linux_dirent64 __user * current_dir;
43386 struct linux_dirent64 __user * previous;
43387 + struct file *file;
43388 int count;
43389 int error;
43390 };
43391 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
43392 buf->error = -EINVAL; /* only used if we fail.. */
43393 if (reclen > buf->count)
43394 return -EINVAL;
43395 +
43396 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43397 + return 0;
43398 +
43399 dirent = buf->previous;
43400 if (dirent) {
43401 if (__put_user(offset, &dirent->d_off))
43402 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43403
43404 buf.current_dir = dirent;
43405 buf.previous = NULL;
43406 + buf.file = file;
43407 buf.count = count;
43408 buf.error = 0;
43409
43410 diff -urNp linux-2.6.32.42/fs/reiserfs/dir.c linux-2.6.32.42/fs/reiserfs/dir.c
43411 --- linux-2.6.32.42/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43412 +++ linux-2.6.32.42/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43413 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43414 struct reiserfs_dir_entry de;
43415 int ret = 0;
43416
43417 + pax_track_stack();
43418 +
43419 reiserfs_write_lock(inode->i_sb);
43420
43421 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43422 diff -urNp linux-2.6.32.42/fs/reiserfs/do_balan.c linux-2.6.32.42/fs/reiserfs/do_balan.c
43423 --- linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
43424 +++ linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
43425 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
43426 return;
43427 }
43428
43429 - atomic_inc(&(fs_generation(tb->tb_sb)));
43430 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43431 do_balance_starts(tb);
43432
43433 /* balance leaf returns 0 except if combining L R and S into
43434 diff -urNp linux-2.6.32.42/fs/reiserfs/item_ops.c linux-2.6.32.42/fs/reiserfs/item_ops.c
43435 --- linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
43436 +++ linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
43437 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
43438 vi->vi_index, vi->vi_type, vi->vi_ih);
43439 }
43440
43441 -static struct item_operations stat_data_ops = {
43442 +static const struct item_operations stat_data_ops = {
43443 .bytes_number = sd_bytes_number,
43444 .decrement_key = sd_decrement_key,
43445 .is_left_mergeable = sd_is_left_mergeable,
43446 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
43447 vi->vi_index, vi->vi_type, vi->vi_ih);
43448 }
43449
43450 -static struct item_operations direct_ops = {
43451 +static const struct item_operations direct_ops = {
43452 .bytes_number = direct_bytes_number,
43453 .decrement_key = direct_decrement_key,
43454 .is_left_mergeable = direct_is_left_mergeable,
43455 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
43456 vi->vi_index, vi->vi_type, vi->vi_ih);
43457 }
43458
43459 -static struct item_operations indirect_ops = {
43460 +static const struct item_operations indirect_ops = {
43461 .bytes_number = indirect_bytes_number,
43462 .decrement_key = indirect_decrement_key,
43463 .is_left_mergeable = indirect_is_left_mergeable,
43464 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
43465 printk("\n");
43466 }
43467
43468 -static struct item_operations direntry_ops = {
43469 +static const struct item_operations direntry_ops = {
43470 .bytes_number = direntry_bytes_number,
43471 .decrement_key = direntry_decrement_key,
43472 .is_left_mergeable = direntry_is_left_mergeable,
43473 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
43474 "Invalid item type observed, run fsck ASAP");
43475 }
43476
43477 -static struct item_operations errcatch_ops = {
43478 +static const struct item_operations errcatch_ops = {
43479 errcatch_bytes_number,
43480 errcatch_decrement_key,
43481 errcatch_is_left_mergeable,
43482 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
43483 #error Item types must use disk-format assigned values.
43484 #endif
43485
43486 -struct item_operations *item_ops[TYPE_ANY + 1] = {
43487 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
43488 &stat_data_ops,
43489 &indirect_ops,
43490 &direct_ops,
43491 diff -urNp linux-2.6.32.42/fs/reiserfs/journal.c linux-2.6.32.42/fs/reiserfs/journal.c
43492 --- linux-2.6.32.42/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
43493 +++ linux-2.6.32.42/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
43494 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
43495 struct buffer_head *bh;
43496 int i, j;
43497
43498 + pax_track_stack();
43499 +
43500 bh = __getblk(dev, block, bufsize);
43501 if (buffer_uptodate(bh))
43502 return (bh);
43503 diff -urNp linux-2.6.32.42/fs/reiserfs/namei.c linux-2.6.32.42/fs/reiserfs/namei.c
43504 --- linux-2.6.32.42/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
43505 +++ linux-2.6.32.42/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
43506 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
43507 unsigned long savelink = 1;
43508 struct timespec ctime;
43509
43510 + pax_track_stack();
43511 +
43512 /* three balancings: (1) old name removal, (2) new name insertion
43513 and (3) maybe "save" link insertion
43514 stat data updates: (1) old directory,
43515 diff -urNp linux-2.6.32.42/fs/reiserfs/procfs.c linux-2.6.32.42/fs/reiserfs/procfs.c
43516 --- linux-2.6.32.42/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
43517 +++ linux-2.6.32.42/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
43518 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
43519 "SMALL_TAILS " : "NO_TAILS ",
43520 replay_only(sb) ? "REPLAY_ONLY " : "",
43521 convert_reiserfs(sb) ? "CONV " : "",
43522 - atomic_read(&r->s_generation_counter),
43523 + atomic_read_unchecked(&r->s_generation_counter),
43524 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43525 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43526 SF(s_good_search_by_key_reada), SF(s_bmaps),
43527 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
43528 struct journal_params *jp = &rs->s_v1.s_journal;
43529 char b[BDEVNAME_SIZE];
43530
43531 + pax_track_stack();
43532 +
43533 seq_printf(m, /* on-disk fields */
43534 "jp_journal_1st_block: \t%i\n"
43535 "jp_journal_dev: \t%s[%x]\n"
43536 diff -urNp linux-2.6.32.42/fs/reiserfs/stree.c linux-2.6.32.42/fs/reiserfs/stree.c
43537 --- linux-2.6.32.42/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
43538 +++ linux-2.6.32.42/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
43539 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
43540 int iter = 0;
43541 #endif
43542
43543 + pax_track_stack();
43544 +
43545 BUG_ON(!th->t_trans_id);
43546
43547 init_tb_struct(th, &s_del_balance, sb, path,
43548 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
43549 int retval;
43550 int quota_cut_bytes = 0;
43551
43552 + pax_track_stack();
43553 +
43554 BUG_ON(!th->t_trans_id);
43555
43556 le_key2cpu_key(&cpu_key, key);
43557 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
43558 int quota_cut_bytes;
43559 loff_t tail_pos = 0;
43560
43561 + pax_track_stack();
43562 +
43563 BUG_ON(!th->t_trans_id);
43564
43565 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43566 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
43567 int retval;
43568 int fs_gen;
43569
43570 + pax_track_stack();
43571 +
43572 BUG_ON(!th->t_trans_id);
43573
43574 fs_gen = get_generation(inode->i_sb);
43575 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
43576 int fs_gen = 0;
43577 int quota_bytes = 0;
43578
43579 + pax_track_stack();
43580 +
43581 BUG_ON(!th->t_trans_id);
43582
43583 if (inode) { /* Do we count quotas for item? */
43584 diff -urNp linux-2.6.32.42/fs/reiserfs/super.c linux-2.6.32.42/fs/reiserfs/super.c
43585 --- linux-2.6.32.42/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
43586 +++ linux-2.6.32.42/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
43587 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
43588 {.option_name = NULL}
43589 };
43590
43591 + pax_track_stack();
43592 +
43593 *blocks = 0;
43594 if (!options || !*options)
43595 /* use default configuration: create tails, journaling on, no
43596 diff -urNp linux-2.6.32.42/fs/select.c linux-2.6.32.42/fs/select.c
43597 --- linux-2.6.32.42/fs/select.c 2011-03-27 14:31:47.000000000 -0400
43598 +++ linux-2.6.32.42/fs/select.c 2011-05-16 21:46:57.000000000 -0400
43599 @@ -20,6 +20,7 @@
43600 #include <linux/module.h>
43601 #include <linux/slab.h>
43602 #include <linux/poll.h>
43603 +#include <linux/security.h>
43604 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43605 #include <linux/file.h>
43606 #include <linux/fdtable.h>
43607 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43608 int retval, i, timed_out = 0;
43609 unsigned long slack = 0;
43610
43611 + pax_track_stack();
43612 +
43613 rcu_read_lock();
43614 retval = max_select_fd(n, fds);
43615 rcu_read_unlock();
43616 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43617 /* Allocate small arguments on the stack to save memory and be faster */
43618 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43619
43620 + pax_track_stack();
43621 +
43622 ret = -EINVAL;
43623 if (n < 0)
43624 goto out_nofds;
43625 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43626 struct poll_list *walk = head;
43627 unsigned long todo = nfds;
43628
43629 + pax_track_stack();
43630 +
43631 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43632 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43633 return -EINVAL;
43634
43635 diff -urNp linux-2.6.32.42/fs/seq_file.c linux-2.6.32.42/fs/seq_file.c
43636 --- linux-2.6.32.42/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43637 +++ linux-2.6.32.42/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43638 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43639 return 0;
43640 }
43641 if (!m->buf) {
43642 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43643 + m->size = PAGE_SIZE;
43644 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43645 if (!m->buf)
43646 return -ENOMEM;
43647 }
43648 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43649 Eoverflow:
43650 m->op->stop(m, p);
43651 kfree(m->buf);
43652 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43653 + m->size <<= 1;
43654 + m->buf = kmalloc(m->size, GFP_KERNEL);
43655 return !m->buf ? -ENOMEM : -EAGAIN;
43656 }
43657
43658 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43659 m->version = file->f_version;
43660 /* grab buffer if we didn't have one */
43661 if (!m->buf) {
43662 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43663 + m->size = PAGE_SIZE;
43664 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43665 if (!m->buf)
43666 goto Enomem;
43667 }
43668 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43669 goto Fill;
43670 m->op->stop(m, p);
43671 kfree(m->buf);
43672 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43673 + m->size <<= 1;
43674 + m->buf = kmalloc(m->size, GFP_KERNEL);
43675 if (!m->buf)
43676 goto Enomem;
43677 m->count = 0;
43678 diff -urNp linux-2.6.32.42/fs/smbfs/symlink.c linux-2.6.32.42/fs/smbfs/symlink.c
43679 --- linux-2.6.32.42/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43680 +++ linux-2.6.32.42/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43681 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43682
43683 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43684 {
43685 - char *s = nd_get_link(nd);
43686 + const char *s = nd_get_link(nd);
43687 if (!IS_ERR(s))
43688 __putname(s);
43689 }
43690 diff -urNp linux-2.6.32.42/fs/splice.c linux-2.6.32.42/fs/splice.c
43691 --- linux-2.6.32.42/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43692 +++ linux-2.6.32.42/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43693 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43694 pipe_lock(pipe);
43695
43696 for (;;) {
43697 - if (!pipe->readers) {
43698 + if (!atomic_read(&pipe->readers)) {
43699 send_sig(SIGPIPE, current, 0);
43700 if (!ret)
43701 ret = -EPIPE;
43702 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43703 do_wakeup = 0;
43704 }
43705
43706 - pipe->waiting_writers++;
43707 + atomic_inc(&pipe->waiting_writers);
43708 pipe_wait(pipe);
43709 - pipe->waiting_writers--;
43710 + atomic_dec(&pipe->waiting_writers);
43711 }
43712
43713 pipe_unlock(pipe);
43714 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43715 .spd_release = spd_release_page,
43716 };
43717
43718 + pax_track_stack();
43719 +
43720 index = *ppos >> PAGE_CACHE_SHIFT;
43721 loff = *ppos & ~PAGE_CACHE_MASK;
43722 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43723 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43724 old_fs = get_fs();
43725 set_fs(get_ds());
43726 /* The cast to a user pointer is valid due to the set_fs() */
43727 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43728 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43729 set_fs(old_fs);
43730
43731 return res;
43732 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43733 old_fs = get_fs();
43734 set_fs(get_ds());
43735 /* The cast to a user pointer is valid due to the set_fs() */
43736 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43737 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43738 set_fs(old_fs);
43739
43740 return res;
43741 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43742 .spd_release = spd_release_page,
43743 };
43744
43745 + pax_track_stack();
43746 +
43747 index = *ppos >> PAGE_CACHE_SHIFT;
43748 offset = *ppos & ~PAGE_CACHE_MASK;
43749 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43750 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43751 goto err;
43752
43753 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43754 - vec[i].iov_base = (void __user *) page_address(page);
43755 + vec[i].iov_base = (__force void __user *) page_address(page);
43756 vec[i].iov_len = this_len;
43757 pages[i] = page;
43758 spd.nr_pages++;
43759 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43760 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43761 {
43762 while (!pipe->nrbufs) {
43763 - if (!pipe->writers)
43764 + if (!atomic_read(&pipe->writers))
43765 return 0;
43766
43767 - if (!pipe->waiting_writers && sd->num_spliced)
43768 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43769 return 0;
43770
43771 if (sd->flags & SPLICE_F_NONBLOCK)
43772 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43773 * out of the pipe right after the splice_to_pipe(). So set
43774 * PIPE_READERS appropriately.
43775 */
43776 - pipe->readers = 1;
43777 + atomic_set(&pipe->readers, 1);
43778
43779 current->splice_pipe = pipe;
43780 }
43781 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43782 .spd_release = spd_release_page,
43783 };
43784
43785 + pax_track_stack();
43786 +
43787 pipe = pipe_info(file->f_path.dentry->d_inode);
43788 if (!pipe)
43789 return -EBADF;
43790 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43791 ret = -ERESTARTSYS;
43792 break;
43793 }
43794 - if (!pipe->writers)
43795 + if (!atomic_read(&pipe->writers))
43796 break;
43797 - if (!pipe->waiting_writers) {
43798 + if (!atomic_read(&pipe->waiting_writers)) {
43799 if (flags & SPLICE_F_NONBLOCK) {
43800 ret = -EAGAIN;
43801 break;
43802 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43803 pipe_lock(pipe);
43804
43805 while (pipe->nrbufs >= PIPE_BUFFERS) {
43806 - if (!pipe->readers) {
43807 + if (!atomic_read(&pipe->readers)) {
43808 send_sig(SIGPIPE, current, 0);
43809 ret = -EPIPE;
43810 break;
43811 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43812 ret = -ERESTARTSYS;
43813 break;
43814 }
43815 - pipe->waiting_writers++;
43816 + atomic_inc(&pipe->waiting_writers);
43817 pipe_wait(pipe);
43818 - pipe->waiting_writers--;
43819 + atomic_dec(&pipe->waiting_writers);
43820 }
43821
43822 pipe_unlock(pipe);
43823 @@ -1785,14 +1791,14 @@ retry:
43824 pipe_double_lock(ipipe, opipe);
43825
43826 do {
43827 - if (!opipe->readers) {
43828 + if (!atomic_read(&opipe->readers)) {
43829 send_sig(SIGPIPE, current, 0);
43830 if (!ret)
43831 ret = -EPIPE;
43832 break;
43833 }
43834
43835 - if (!ipipe->nrbufs && !ipipe->writers)
43836 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43837 break;
43838
43839 /*
43840 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43841 pipe_double_lock(ipipe, opipe);
43842
43843 do {
43844 - if (!opipe->readers) {
43845 + if (!atomic_read(&opipe->readers)) {
43846 send_sig(SIGPIPE, current, 0);
43847 if (!ret)
43848 ret = -EPIPE;
43849 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43850 * return EAGAIN if we have the potential of some data in the
43851 * future, otherwise just return 0
43852 */
43853 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43854 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43855 ret = -EAGAIN;
43856
43857 pipe_unlock(ipipe);
43858 diff -urNp linux-2.6.32.42/fs/sysfs/file.c linux-2.6.32.42/fs/sysfs/file.c
43859 --- linux-2.6.32.42/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43860 +++ linux-2.6.32.42/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43861 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43862
43863 struct sysfs_open_dirent {
43864 atomic_t refcnt;
43865 - atomic_t event;
43866 + atomic_unchecked_t event;
43867 wait_queue_head_t poll;
43868 struct list_head buffers; /* goes through sysfs_buffer.list */
43869 };
43870 @@ -53,7 +53,7 @@ struct sysfs_buffer {
43871 size_t count;
43872 loff_t pos;
43873 char * page;
43874 - struct sysfs_ops * ops;
43875 + const struct sysfs_ops * ops;
43876 struct mutex mutex;
43877 int needs_read_fill;
43878 int event;
43879 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43880 {
43881 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43882 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43883 - struct sysfs_ops * ops = buffer->ops;
43884 + const struct sysfs_ops * ops = buffer->ops;
43885 int ret = 0;
43886 ssize_t count;
43887
43888 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43889 if (!sysfs_get_active_two(attr_sd))
43890 return -ENODEV;
43891
43892 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43893 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43894 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43895
43896 sysfs_put_active_two(attr_sd);
43897 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43898 {
43899 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43900 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43901 - struct sysfs_ops * ops = buffer->ops;
43902 + const struct sysfs_ops * ops = buffer->ops;
43903 int rc;
43904
43905 /* need attr_sd for attr and ops, its parent for kobj */
43906 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43907 return -ENOMEM;
43908
43909 atomic_set(&new_od->refcnt, 0);
43910 - atomic_set(&new_od->event, 1);
43911 + atomic_set_unchecked(&new_od->event, 1);
43912 init_waitqueue_head(&new_od->poll);
43913 INIT_LIST_HEAD(&new_od->buffers);
43914 goto retry;
43915 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43916 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43917 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43918 struct sysfs_buffer *buffer;
43919 - struct sysfs_ops *ops;
43920 + const struct sysfs_ops *ops;
43921 int error = -EACCES;
43922 char *p;
43923
43924 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43925
43926 sysfs_put_active_two(attr_sd);
43927
43928 - if (buffer->event != atomic_read(&od->event))
43929 + if (buffer->event != atomic_read_unchecked(&od->event))
43930 goto trigger;
43931
43932 return DEFAULT_POLLMASK;
43933 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43934
43935 od = sd->s_attr.open;
43936 if (od) {
43937 - atomic_inc(&od->event);
43938 + atomic_inc_unchecked(&od->event);
43939 wake_up_interruptible(&od->poll);
43940 }
43941
43942 diff -urNp linux-2.6.32.42/fs/sysfs/mount.c linux-2.6.32.42/fs/sysfs/mount.c
43943 --- linux-2.6.32.42/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43944 +++ linux-2.6.32.42/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43945 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43946 .s_name = "",
43947 .s_count = ATOMIC_INIT(1),
43948 .s_flags = SYSFS_DIR,
43949 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43950 + .s_mode = S_IFDIR | S_IRWXU,
43951 +#else
43952 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43953 +#endif
43954 .s_ino = 1,
43955 };
43956
43957 diff -urNp linux-2.6.32.42/fs/sysfs/symlink.c linux-2.6.32.42/fs/sysfs/symlink.c
43958 --- linux-2.6.32.42/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43959 +++ linux-2.6.32.42/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43960 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43961
43962 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43963 {
43964 - char *page = nd_get_link(nd);
43965 + const char *page = nd_get_link(nd);
43966 if (!IS_ERR(page))
43967 free_page((unsigned long)page);
43968 }
43969 diff -urNp linux-2.6.32.42/fs/udf/balloc.c linux-2.6.32.42/fs/udf/balloc.c
43970 --- linux-2.6.32.42/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43971 +++ linux-2.6.32.42/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43972 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43973
43974 mutex_lock(&sbi->s_alloc_mutex);
43975 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43976 - if (bloc->logicalBlockNum < 0 ||
43977 - (bloc->logicalBlockNum + count) >
43978 - partmap->s_partition_len) {
43979 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43980 udf_debug("%d < %d || %d + %d > %d\n",
43981 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43982 count, partmap->s_partition_len);
43983 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43984
43985 mutex_lock(&sbi->s_alloc_mutex);
43986 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43987 - if (bloc->logicalBlockNum < 0 ||
43988 - (bloc->logicalBlockNum + count) >
43989 - partmap->s_partition_len) {
43990 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43991 udf_debug("%d < %d || %d + %d > %d\n",
43992 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43993 partmap->s_partition_len);
43994 diff -urNp linux-2.6.32.42/fs/udf/inode.c linux-2.6.32.42/fs/udf/inode.c
43995 --- linux-2.6.32.42/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43996 +++ linux-2.6.32.42/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43997 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43998 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43999 int lastblock = 0;
44000
44001 + pax_track_stack();
44002 +
44003 prev_epos.offset = udf_file_entry_alloc_offset(inode);
44004 prev_epos.block = iinfo->i_location;
44005 prev_epos.bh = NULL;
44006 diff -urNp linux-2.6.32.42/fs/udf/misc.c linux-2.6.32.42/fs/udf/misc.c
44007 --- linux-2.6.32.42/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
44008 +++ linux-2.6.32.42/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
44009 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
44010
44011 u8 udf_tag_checksum(const struct tag *t)
44012 {
44013 - u8 *data = (u8 *)t;
44014 + const u8 *data = (const u8 *)t;
44015 u8 checksum = 0;
44016 int i;
44017 for (i = 0; i < sizeof(struct tag); ++i)
44018 diff -urNp linux-2.6.32.42/fs/utimes.c linux-2.6.32.42/fs/utimes.c
44019 --- linux-2.6.32.42/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
44020 +++ linux-2.6.32.42/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
44021 @@ -1,6 +1,7 @@
44022 #include <linux/compiler.h>
44023 #include <linux/file.h>
44024 #include <linux/fs.h>
44025 +#include <linux/security.h>
44026 #include <linux/linkage.h>
44027 #include <linux/mount.h>
44028 #include <linux/namei.h>
44029 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
44030 goto mnt_drop_write_and_out;
44031 }
44032 }
44033 +
44034 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
44035 + error = -EACCES;
44036 + goto mnt_drop_write_and_out;
44037 + }
44038 +
44039 mutex_lock(&inode->i_mutex);
44040 error = notify_change(path->dentry, &newattrs);
44041 mutex_unlock(&inode->i_mutex);
44042 diff -urNp linux-2.6.32.42/fs/xattr_acl.c linux-2.6.32.42/fs/xattr_acl.c
44043 --- linux-2.6.32.42/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
44044 +++ linux-2.6.32.42/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
44045 @@ -17,8 +17,8 @@
44046 struct posix_acl *
44047 posix_acl_from_xattr(const void *value, size_t size)
44048 {
44049 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
44050 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
44051 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
44052 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
44053 int count;
44054 struct posix_acl *acl;
44055 struct posix_acl_entry *acl_e;
44056 diff -urNp linux-2.6.32.42/fs/xattr.c linux-2.6.32.42/fs/xattr.c
44057 --- linux-2.6.32.42/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
44058 +++ linux-2.6.32.42/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
44059 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
44060 * Extended attribute SET operations
44061 */
44062 static long
44063 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
44064 +setxattr(struct path *path, const char __user *name, const void __user *value,
44065 size_t size, int flags)
44066 {
44067 int error;
44068 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
44069 return PTR_ERR(kvalue);
44070 }
44071
44072 - error = vfs_setxattr(d, kname, kvalue, size, flags);
44073 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
44074 + error = -EACCES;
44075 + goto out;
44076 + }
44077 +
44078 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
44079 +out:
44080 kfree(kvalue);
44081 return error;
44082 }
44083 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
44084 return error;
44085 error = mnt_want_write(path.mnt);
44086 if (!error) {
44087 - error = setxattr(path.dentry, name, value, size, flags);
44088 + error = setxattr(&path, name, value, size, flags);
44089 mnt_drop_write(path.mnt);
44090 }
44091 path_put(&path);
44092 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
44093 return error;
44094 error = mnt_want_write(path.mnt);
44095 if (!error) {
44096 - error = setxattr(path.dentry, name, value, size, flags);
44097 + error = setxattr(&path, name, value, size, flags);
44098 mnt_drop_write(path.mnt);
44099 }
44100 path_put(&path);
44101 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
44102 const void __user *,value, size_t, size, int, flags)
44103 {
44104 struct file *f;
44105 - struct dentry *dentry;
44106 int error = -EBADF;
44107
44108 f = fget(fd);
44109 if (!f)
44110 return error;
44111 - dentry = f->f_path.dentry;
44112 - audit_inode(NULL, dentry);
44113 + audit_inode(NULL, f->f_path.dentry);
44114 error = mnt_want_write_file(f);
44115 if (!error) {
44116 - error = setxattr(dentry, name, value, size, flags);
44117 + error = setxattr(&f->f_path, name, value, size, flags);
44118 mnt_drop_write(f->f_path.mnt);
44119 }
44120 fput(f);
44121 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c
44122 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
44123 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
44124 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
44125 xfs_fsop_geom_t fsgeo;
44126 int error;
44127
44128 + memset(&fsgeo, 0, sizeof(fsgeo));
44129 error = xfs_fs_geometry(mp, &fsgeo, 3);
44130 if (error)
44131 return -error;
44132 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c
44133 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
44134 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
44135 @@ -134,7 +134,7 @@ xfs_find_handle(
44136 }
44137
44138 error = -EFAULT;
44139 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
44140 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
44141 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
44142 goto out_put;
44143
44144 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
44145 if (IS_ERR(dentry))
44146 return PTR_ERR(dentry);
44147
44148 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
44149 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
44150 if (!kbuf)
44151 goto out_dput;
44152
44153 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
44154 xfs_mount_t *mp,
44155 void __user *arg)
44156 {
44157 - xfs_fsop_geom_t fsgeo;
44158 + xfs_fsop_geom_t fsgeo;
44159 int error;
44160
44161 error = xfs_fs_geometry(mp, &fsgeo, 3);
44162 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c
44163 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
44164 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
44165 @@ -468,7 +468,7 @@ xfs_vn_put_link(
44166 struct nameidata *nd,
44167 void *p)
44168 {
44169 - char *s = nd_get_link(nd);
44170 + const char *s = nd_get_link(nd);
44171
44172 if (!IS_ERR(s))
44173 kfree(s);
44174 diff -urNp linux-2.6.32.42/fs/xfs/xfs_bmap.c linux-2.6.32.42/fs/xfs/xfs_bmap.c
44175 --- linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
44176 +++ linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
44177 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
44178 int nmap,
44179 int ret_nmap);
44180 #else
44181 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
44182 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
44183 #endif /* DEBUG */
44184
44185 #if defined(XFS_RW_TRACE)
44186 diff -urNp linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c
44187 --- linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
44188 +++ linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
44189 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
44190 }
44191
44192 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
44193 - if (filldir(dirent, sfep->name, sfep->namelen,
44194 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
44195 + char name[sfep->namelen];
44196 + memcpy(name, sfep->name, sfep->namelen);
44197 + if (filldir(dirent, name, sfep->namelen,
44198 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
44199 + *offset = off & 0x7fffffff;
44200 + return 0;
44201 + }
44202 + } else if (filldir(dirent, sfep->name, sfep->namelen,
44203 off & 0x7fffffff, ino, DT_UNKNOWN)) {
44204 *offset = off & 0x7fffffff;
44205 return 0;
44206 diff -urNp linux-2.6.32.42/grsecurity/gracl_alloc.c linux-2.6.32.42/grsecurity/gracl_alloc.c
44207 --- linux-2.6.32.42/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44208 +++ linux-2.6.32.42/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
44209 @@ -0,0 +1,105 @@
44210 +#include <linux/kernel.h>
44211 +#include <linux/mm.h>
44212 +#include <linux/slab.h>
44213 +#include <linux/vmalloc.h>
44214 +#include <linux/gracl.h>
44215 +#include <linux/grsecurity.h>
44216 +
44217 +static unsigned long alloc_stack_next = 1;
44218 +static unsigned long alloc_stack_size = 1;
44219 +static void **alloc_stack;
44220 +
44221 +static __inline__ int
44222 +alloc_pop(void)
44223 +{
44224 + if (alloc_stack_next == 1)
44225 + return 0;
44226 +
44227 + kfree(alloc_stack[alloc_stack_next - 2]);
44228 +
44229 + alloc_stack_next--;
44230 +
44231 + return 1;
44232 +}
44233 +
44234 +static __inline__ int
44235 +alloc_push(void *buf)
44236 +{
44237 + if (alloc_stack_next >= alloc_stack_size)
44238 + return 1;
44239 +
44240 + alloc_stack[alloc_stack_next - 1] = buf;
44241 +
44242 + alloc_stack_next++;
44243 +
44244 + return 0;
44245 +}
44246 +
44247 +void *
44248 +acl_alloc(unsigned long len)
44249 +{
44250 + void *ret = NULL;
44251 +
44252 + if (!len || len > PAGE_SIZE)
44253 + goto out;
44254 +
44255 + ret = kmalloc(len, GFP_KERNEL);
44256 +
44257 + if (ret) {
44258 + if (alloc_push(ret)) {
44259 + kfree(ret);
44260 + ret = NULL;
44261 + }
44262 + }
44263 +
44264 +out:
44265 + return ret;
44266 +}
44267 +
44268 +void *
44269 +acl_alloc_num(unsigned long num, unsigned long len)
44270 +{
44271 + if (!len || (num > (PAGE_SIZE / len)))
44272 + return NULL;
44273 +
44274 + return acl_alloc(num * len);
44275 +}
44276 +
44277 +void
44278 +acl_free_all(void)
44279 +{
44280 + if (gr_acl_is_enabled() || !alloc_stack)
44281 + return;
44282 +
44283 + while (alloc_pop()) ;
44284 +
44285 + if (alloc_stack) {
44286 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44287 + kfree(alloc_stack);
44288 + else
44289 + vfree(alloc_stack);
44290 + }
44291 +
44292 + alloc_stack = NULL;
44293 + alloc_stack_size = 1;
44294 + alloc_stack_next = 1;
44295 +
44296 + return;
44297 +}
44298 +
44299 +int
44300 +acl_alloc_stack_init(unsigned long size)
44301 +{
44302 + if ((size * sizeof (void *)) <= PAGE_SIZE)
44303 + alloc_stack =
44304 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44305 + else
44306 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
44307 +
44308 + alloc_stack_size = size;
44309 +
44310 + if (!alloc_stack)
44311 + return 0;
44312 + else
44313 + return 1;
44314 +}
44315 diff -urNp linux-2.6.32.42/grsecurity/gracl.c linux-2.6.32.42/grsecurity/gracl.c
44316 --- linux-2.6.32.42/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44317 +++ linux-2.6.32.42/grsecurity/gracl.c 2011-06-11 16:24:26.000000000 -0400
44318 @@ -0,0 +1,4085 @@
44319 +#include <linux/kernel.h>
44320 +#include <linux/module.h>
44321 +#include <linux/sched.h>
44322 +#include <linux/mm.h>
44323 +#include <linux/file.h>
44324 +#include <linux/fs.h>
44325 +#include <linux/namei.h>
44326 +#include <linux/mount.h>
44327 +#include <linux/tty.h>
44328 +#include <linux/proc_fs.h>
44329 +#include <linux/smp_lock.h>
44330 +#include <linux/slab.h>
44331 +#include <linux/vmalloc.h>
44332 +#include <linux/types.h>
44333 +#include <linux/sysctl.h>
44334 +#include <linux/netdevice.h>
44335 +#include <linux/ptrace.h>
44336 +#include <linux/gracl.h>
44337 +#include <linux/gralloc.h>
44338 +#include <linux/grsecurity.h>
44339 +#include <linux/grinternal.h>
44340 +#include <linux/pid_namespace.h>
44341 +#include <linux/fdtable.h>
44342 +#include <linux/percpu.h>
44343 +
44344 +#include <asm/uaccess.h>
44345 +#include <asm/errno.h>
44346 +#include <asm/mman.h>
44347 +
44348 +static struct acl_role_db acl_role_set;
44349 +static struct name_db name_set;
44350 +static struct inodev_db inodev_set;
44351 +
44352 +/* for keeping track of userspace pointers used for subjects, so we
44353 + can share references in the kernel as well
44354 +*/
44355 +
44356 +static struct dentry *real_root;
44357 +static struct vfsmount *real_root_mnt;
44358 +
44359 +static struct acl_subj_map_db subj_map_set;
44360 +
44361 +static struct acl_role_label *default_role;
44362 +
44363 +static struct acl_role_label *role_list;
44364 +
44365 +static u16 acl_sp_role_value;
44366 +
44367 +extern char *gr_shared_page[4];
44368 +static DEFINE_MUTEX(gr_dev_mutex);
44369 +DEFINE_RWLOCK(gr_inode_lock);
44370 +
44371 +struct gr_arg *gr_usermode;
44372 +
44373 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
44374 +
44375 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44376 +extern void gr_clear_learn_entries(void);
44377 +
44378 +#ifdef CONFIG_GRKERNSEC_RESLOG
44379 +extern void gr_log_resource(const struct task_struct *task,
44380 + const int res, const unsigned long wanted, const int gt);
44381 +#endif
44382 +
44383 +unsigned char *gr_system_salt;
44384 +unsigned char *gr_system_sum;
44385 +
44386 +static struct sprole_pw **acl_special_roles = NULL;
44387 +static __u16 num_sprole_pws = 0;
44388 +
44389 +static struct acl_role_label *kernel_role = NULL;
44390 +
44391 +static unsigned int gr_auth_attempts = 0;
44392 +static unsigned long gr_auth_expires = 0UL;
44393 +
44394 +#ifdef CONFIG_NET
44395 +extern struct vfsmount *sock_mnt;
44396 +#endif
44397 +extern struct vfsmount *pipe_mnt;
44398 +extern struct vfsmount *shm_mnt;
44399 +#ifdef CONFIG_HUGETLBFS
44400 +extern struct vfsmount *hugetlbfs_vfsmount;
44401 +#endif
44402 +
44403 +static struct acl_object_label *fakefs_obj_rw;
44404 +static struct acl_object_label *fakefs_obj_rwx;
44405 +
44406 +extern int gr_init_uidset(void);
44407 +extern void gr_free_uidset(void);
44408 +extern void gr_remove_uid(uid_t uid);
44409 +extern int gr_find_uid(uid_t uid);
44410 +
44411 +__inline__ int
44412 +gr_acl_is_enabled(void)
44413 +{
44414 + return (gr_status & GR_READY);
44415 +}
44416 +
44417 +#ifdef CONFIG_BTRFS_FS
44418 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44419 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44420 +#endif
44421 +
44422 +static inline dev_t __get_dev(const struct dentry *dentry)
44423 +{
44424 +#ifdef CONFIG_BTRFS_FS
44425 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44426 + return get_btrfs_dev_from_inode(dentry->d_inode);
44427 + else
44428 +#endif
44429 + return dentry->d_inode->i_sb->s_dev;
44430 +}
44431 +
44432 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44433 +{
44434 + return __get_dev(dentry);
44435 +}
44436 +
44437 +static char gr_task_roletype_to_char(struct task_struct *task)
44438 +{
44439 + switch (task->role->roletype &
44440 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44441 + GR_ROLE_SPECIAL)) {
44442 + case GR_ROLE_DEFAULT:
44443 + return 'D';
44444 + case GR_ROLE_USER:
44445 + return 'U';
44446 + case GR_ROLE_GROUP:
44447 + return 'G';
44448 + case GR_ROLE_SPECIAL:
44449 + return 'S';
44450 + }
44451 +
44452 + return 'X';
44453 +}
44454 +
44455 +char gr_roletype_to_char(void)
44456 +{
44457 + return gr_task_roletype_to_char(current);
44458 +}
44459 +
44460 +__inline__ int
44461 +gr_acl_tpe_check(void)
44462 +{
44463 + if (unlikely(!(gr_status & GR_READY)))
44464 + return 0;
44465 + if (current->role->roletype & GR_ROLE_TPE)
44466 + return 1;
44467 + else
44468 + return 0;
44469 +}
44470 +
44471 +int
44472 +gr_handle_rawio(const struct inode *inode)
44473 +{
44474 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44475 + if (inode && S_ISBLK(inode->i_mode) &&
44476 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44477 + !capable(CAP_SYS_RAWIO))
44478 + return 1;
44479 +#endif
44480 + return 0;
44481 +}
44482 +
44483 +static int
44484 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44485 +{
44486 + if (likely(lena != lenb))
44487 + return 0;
44488 +
44489 + return !memcmp(a, b, lena);
44490 +}
44491 +
44492 +/* this must be called with vfsmount_lock and dcache_lock held */
44493 +
44494 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44495 + struct dentry *root, struct vfsmount *rootmnt,
44496 + char *buffer, int buflen)
44497 +{
44498 + char * end = buffer+buflen;
44499 + char * retval;
44500 + int namelen;
44501 +
44502 + *--end = '\0';
44503 + buflen--;
44504 +
44505 + if (buflen < 1)
44506 + goto Elong;
44507 + /* Get '/' right */
44508 + retval = end-1;
44509 + *retval = '/';
44510 +
44511 + for (;;) {
44512 + struct dentry * parent;
44513 +
44514 + if (dentry == root && vfsmnt == rootmnt)
44515 + break;
44516 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44517 + /* Global root? */
44518 + if (vfsmnt->mnt_parent == vfsmnt)
44519 + goto global_root;
44520 + dentry = vfsmnt->mnt_mountpoint;
44521 + vfsmnt = vfsmnt->mnt_parent;
44522 + continue;
44523 + }
44524 + parent = dentry->d_parent;
44525 + prefetch(parent);
44526 + namelen = dentry->d_name.len;
44527 + buflen -= namelen + 1;
44528 + if (buflen < 0)
44529 + goto Elong;
44530 + end -= namelen;
44531 + memcpy(end, dentry->d_name.name, namelen);
44532 + *--end = '/';
44533 + retval = end;
44534 + dentry = parent;
44535 + }
44536 +
44537 +out:
44538 + return retval;
44539 +
44540 +global_root:
44541 + namelen = dentry->d_name.len;
44542 + buflen -= namelen;
44543 + if (buflen < 0)
44544 + goto Elong;
44545 + retval -= namelen-1; /* hit the slash */
44546 + memcpy(retval, dentry->d_name.name, namelen);
44547 + goto out;
44548 +Elong:
44549 + retval = ERR_PTR(-ENAMETOOLONG);
44550 + goto out;
44551 +}
44552 +
44553 +static char *
44554 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44555 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
44556 +{
44557 + char *retval;
44558 +
44559 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
44560 + if (unlikely(IS_ERR(retval)))
44561 + retval = strcpy(buf, "<path too long>");
44562 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44563 + retval[1] = '\0';
44564 +
44565 + return retval;
44566 +}
44567 +
44568 +static char *
44569 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44570 + char *buf, int buflen)
44571 +{
44572 + char *res;
44573 +
44574 + /* we can use real_root, real_root_mnt, because this is only called
44575 + by the RBAC system */
44576 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
44577 +
44578 + return res;
44579 +}
44580 +
44581 +static char *
44582 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44583 + char *buf, int buflen)
44584 +{
44585 + char *res;
44586 + struct dentry *root;
44587 + struct vfsmount *rootmnt;
44588 + struct task_struct *reaper = &init_task;
44589 +
44590 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
44591 + read_lock(&reaper->fs->lock);
44592 + root = dget(reaper->fs->root.dentry);
44593 + rootmnt = mntget(reaper->fs->root.mnt);
44594 + read_unlock(&reaper->fs->lock);
44595 +
44596 + spin_lock(&dcache_lock);
44597 + spin_lock(&vfsmount_lock);
44598 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
44599 + spin_unlock(&vfsmount_lock);
44600 + spin_unlock(&dcache_lock);
44601 +
44602 + dput(root);
44603 + mntput(rootmnt);
44604 + return res;
44605 +}
44606 +
44607 +static char *
44608 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44609 +{
44610 + char *ret;
44611 + spin_lock(&dcache_lock);
44612 + spin_lock(&vfsmount_lock);
44613 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44614 + PAGE_SIZE);
44615 + spin_unlock(&vfsmount_lock);
44616 + spin_unlock(&dcache_lock);
44617 + return ret;
44618 +}
44619 +
44620 +char *
44621 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44622 +{
44623 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44624 + PAGE_SIZE);
44625 +}
44626 +
44627 +char *
44628 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44629 +{
44630 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44631 + PAGE_SIZE);
44632 +}
44633 +
44634 +char *
44635 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44636 +{
44637 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44638 + PAGE_SIZE);
44639 +}
44640 +
44641 +char *
44642 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44643 +{
44644 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44645 + PAGE_SIZE);
44646 +}
44647 +
44648 +char *
44649 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44650 +{
44651 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44652 + PAGE_SIZE);
44653 +}
44654 +
44655 +__inline__ __u32
44656 +to_gr_audit(const __u32 reqmode)
44657 +{
44658 + /* masks off auditable permission flags, then shifts them to create
44659 + auditing flags, and adds the special case of append auditing if
44660 + we're requesting write */
44661 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44662 +}
44663 +
44664 +struct acl_subject_label *
44665 +lookup_subject_map(const struct acl_subject_label *userp)
44666 +{
44667 + unsigned int index = shash(userp, subj_map_set.s_size);
44668 + struct subject_map *match;
44669 +
44670 + match = subj_map_set.s_hash[index];
44671 +
44672 + while (match && match->user != userp)
44673 + match = match->next;
44674 +
44675 + if (match != NULL)
44676 + return match->kernel;
44677 + else
44678 + return NULL;
44679 +}
44680 +
44681 +static void
44682 +insert_subj_map_entry(struct subject_map *subjmap)
44683 +{
44684 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44685 + struct subject_map **curr;
44686 +
44687 + subjmap->prev = NULL;
44688 +
44689 + curr = &subj_map_set.s_hash[index];
44690 + if (*curr != NULL)
44691 + (*curr)->prev = subjmap;
44692 +
44693 + subjmap->next = *curr;
44694 + *curr = subjmap;
44695 +
44696 + return;
44697 +}
44698 +
44699 +static struct acl_role_label *
44700 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44701 + const gid_t gid)
44702 +{
44703 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44704 + struct acl_role_label *match;
44705 + struct role_allowed_ip *ipp;
44706 + unsigned int x;
44707 + u32 curr_ip = task->signal->curr_ip;
44708 +
44709 + task->signal->saved_ip = curr_ip;
44710 +
44711 + match = acl_role_set.r_hash[index];
44712 +
44713 + while (match) {
44714 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44715 + for (x = 0; x < match->domain_child_num; x++) {
44716 + if (match->domain_children[x] == uid)
44717 + goto found;
44718 + }
44719 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44720 + break;
44721 + match = match->next;
44722 + }
44723 +found:
44724 + if (match == NULL) {
44725 + try_group:
44726 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44727 + match = acl_role_set.r_hash[index];
44728 +
44729 + while (match) {
44730 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44731 + for (x = 0; x < match->domain_child_num; x++) {
44732 + if (match->domain_children[x] == gid)
44733 + goto found2;
44734 + }
44735 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44736 + break;
44737 + match = match->next;
44738 + }
44739 +found2:
44740 + if (match == NULL)
44741 + match = default_role;
44742 + if (match->allowed_ips == NULL)
44743 + return match;
44744 + else {
44745 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44746 + if (likely
44747 + ((ntohl(curr_ip) & ipp->netmask) ==
44748 + (ntohl(ipp->addr) & ipp->netmask)))
44749 + return match;
44750 + }
44751 + match = default_role;
44752 + }
44753 + } else if (match->allowed_ips == NULL) {
44754 + return match;
44755 + } else {
44756 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44757 + if (likely
44758 + ((ntohl(curr_ip) & ipp->netmask) ==
44759 + (ntohl(ipp->addr) & ipp->netmask)))
44760 + return match;
44761 + }
44762 + goto try_group;
44763 + }
44764 +
44765 + return match;
44766 +}
44767 +
44768 +struct acl_subject_label *
44769 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44770 + const struct acl_role_label *role)
44771 +{
44772 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44773 + struct acl_subject_label *match;
44774 +
44775 + match = role->subj_hash[index];
44776 +
44777 + while (match && (match->inode != ino || match->device != dev ||
44778 + (match->mode & GR_DELETED))) {
44779 + match = match->next;
44780 + }
44781 +
44782 + if (match && !(match->mode & GR_DELETED))
44783 + return match;
44784 + else
44785 + return NULL;
44786 +}
44787 +
44788 +struct acl_subject_label *
44789 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44790 + const struct acl_role_label *role)
44791 +{
44792 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44793 + struct acl_subject_label *match;
44794 +
44795 + match = role->subj_hash[index];
44796 +
44797 + while (match && (match->inode != ino || match->device != dev ||
44798 + !(match->mode & GR_DELETED))) {
44799 + match = match->next;
44800 + }
44801 +
44802 + if (match && (match->mode & GR_DELETED))
44803 + return match;
44804 + else
44805 + return NULL;
44806 +}
44807 +
44808 +static struct acl_object_label *
44809 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44810 + const struct acl_subject_label *subj)
44811 +{
44812 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44813 + struct acl_object_label *match;
44814 +
44815 + match = subj->obj_hash[index];
44816 +
44817 + while (match && (match->inode != ino || match->device != dev ||
44818 + (match->mode & GR_DELETED))) {
44819 + match = match->next;
44820 + }
44821 +
44822 + if (match && !(match->mode & GR_DELETED))
44823 + return match;
44824 + else
44825 + return NULL;
44826 +}
44827 +
44828 +static struct acl_object_label *
44829 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44830 + const struct acl_subject_label *subj)
44831 +{
44832 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44833 + struct acl_object_label *match;
44834 +
44835 + match = subj->obj_hash[index];
44836 +
44837 + while (match && (match->inode != ino || match->device != dev ||
44838 + !(match->mode & GR_DELETED))) {
44839 + match = match->next;
44840 + }
44841 +
44842 + if (match && (match->mode & GR_DELETED))
44843 + return match;
44844 +
44845 + match = subj->obj_hash[index];
44846 +
44847 + while (match && (match->inode != ino || match->device != dev ||
44848 + (match->mode & GR_DELETED))) {
44849 + match = match->next;
44850 + }
44851 +
44852 + if (match && !(match->mode & GR_DELETED))
44853 + return match;
44854 + else
44855 + return NULL;
44856 +}
44857 +
44858 +static struct name_entry *
44859 +lookup_name_entry(const char *name)
44860 +{
44861 + unsigned int len = strlen(name);
44862 + unsigned int key = full_name_hash(name, len);
44863 + unsigned int index = key % name_set.n_size;
44864 + struct name_entry *match;
44865 +
44866 + match = name_set.n_hash[index];
44867 +
44868 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44869 + match = match->next;
44870 +
44871 + return match;
44872 +}
44873 +
44874 +static struct name_entry *
44875 +lookup_name_entry_create(const char *name)
44876 +{
44877 + unsigned int len = strlen(name);
44878 + unsigned int key = full_name_hash(name, len);
44879 + unsigned int index = key % name_set.n_size;
44880 + struct name_entry *match;
44881 +
44882 + match = name_set.n_hash[index];
44883 +
44884 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44885 + !match->deleted))
44886 + match = match->next;
44887 +
44888 + if (match && match->deleted)
44889 + return match;
44890 +
44891 + match = name_set.n_hash[index];
44892 +
44893 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44894 + match->deleted))
44895 + match = match->next;
44896 +
44897 + if (match && !match->deleted)
44898 + return match;
44899 + else
44900 + return NULL;
44901 +}
44902 +
44903 +static struct inodev_entry *
44904 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44905 +{
44906 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44907 + struct inodev_entry *match;
44908 +
44909 + match = inodev_set.i_hash[index];
44910 +
44911 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44912 + match = match->next;
44913 +
44914 + return match;
44915 +}
44916 +
44917 +static void
44918 +insert_inodev_entry(struct inodev_entry *entry)
44919 +{
44920 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44921 + inodev_set.i_size);
44922 + struct inodev_entry **curr;
44923 +
44924 + entry->prev = NULL;
44925 +
44926 + curr = &inodev_set.i_hash[index];
44927 + if (*curr != NULL)
44928 + (*curr)->prev = entry;
44929 +
44930 + entry->next = *curr;
44931 + *curr = entry;
44932 +
44933 + return;
44934 +}
44935 +
44936 +static void
44937 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44938 +{
44939 + unsigned int index =
44940 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44941 + struct acl_role_label **curr;
44942 + struct acl_role_label *tmp;
44943 +
44944 + curr = &acl_role_set.r_hash[index];
44945 +
44946 + /* if this role was already inserted due to domains and already has
44947 + a role attached in the same hash bucket, then we need to
44948 + combine the two bucket chains
44949 + */
44950 + if (role->next) {
44951 + tmp = role->next;
44952 + while (tmp->next)
44953 + tmp = tmp->next;
44954 + tmp->next = *curr;
44955 + } else
44956 + role->next = *curr;
44957 + *curr = role;
44958 +
44959 + return;
44960 +}
44961 +
44962 +static void
44963 +insert_acl_role_label(struct acl_role_label *role)
44964 +{
44965 + int i;
44966 +
44967 + if (role_list == NULL) {
44968 + role_list = role;
44969 + role->prev = NULL;
44970 + } else {
44971 + role->prev = role_list;
44972 + role_list = role;
44973 + }
44974 +
44975 + /* used for hash chains */
44976 + role->next = NULL;
44977 +
44978 + if (role->roletype & GR_ROLE_DOMAIN) {
44979 + for (i = 0; i < role->domain_child_num; i++)
44980 + __insert_acl_role_label(role, role->domain_children[i]);
44981 + } else
44982 + __insert_acl_role_label(role, role->uidgid);
44983 +}
44984 +
44985 +static int
44986 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44987 +{
44988 + struct name_entry **curr, *nentry;
44989 + struct inodev_entry *ientry;
44990 + unsigned int len = strlen(name);
44991 + unsigned int key = full_name_hash(name, len);
44992 + unsigned int index = key % name_set.n_size;
44993 +
44994 + curr = &name_set.n_hash[index];
44995 +
44996 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44997 + curr = &((*curr)->next);
44998 +
44999 + if (*curr != NULL)
45000 + return 1;
45001 +
45002 + nentry = acl_alloc(sizeof (struct name_entry));
45003 + if (nentry == NULL)
45004 + return 0;
45005 + ientry = acl_alloc(sizeof (struct inodev_entry));
45006 + if (ientry == NULL)
45007 + return 0;
45008 + ientry->nentry = nentry;
45009 +
45010 + nentry->key = key;
45011 + nentry->name = name;
45012 + nentry->inode = inode;
45013 + nentry->device = device;
45014 + nentry->len = len;
45015 + nentry->deleted = deleted;
45016 +
45017 + nentry->prev = NULL;
45018 + curr = &name_set.n_hash[index];
45019 + if (*curr != NULL)
45020 + (*curr)->prev = nentry;
45021 + nentry->next = *curr;
45022 + *curr = nentry;
45023 +
45024 + /* insert us into the table searchable by inode/dev */
45025 + insert_inodev_entry(ientry);
45026 +
45027 + return 1;
45028 +}
45029 +
45030 +static void
45031 +insert_acl_obj_label(struct acl_object_label *obj,
45032 + struct acl_subject_label *subj)
45033 +{
45034 + unsigned int index =
45035 + fhash(obj->inode, obj->device, subj->obj_hash_size);
45036 + struct acl_object_label **curr;
45037 +
45038 +
45039 + obj->prev = NULL;
45040 +
45041 + curr = &subj->obj_hash[index];
45042 + if (*curr != NULL)
45043 + (*curr)->prev = obj;
45044 +
45045 + obj->next = *curr;
45046 + *curr = obj;
45047 +
45048 + return;
45049 +}
45050 +
45051 +static void
45052 +insert_acl_subj_label(struct acl_subject_label *obj,
45053 + struct acl_role_label *role)
45054 +{
45055 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
45056 + struct acl_subject_label **curr;
45057 +
45058 + obj->prev = NULL;
45059 +
45060 + curr = &role->subj_hash[index];
45061 + if (*curr != NULL)
45062 + (*curr)->prev = obj;
45063 +
45064 + obj->next = *curr;
45065 + *curr = obj;
45066 +
45067 + return;
45068 +}
45069 +
45070 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
45071 +
45072 +static void *
45073 +create_table(__u32 * len, int elementsize)
45074 +{
45075 + unsigned int table_sizes[] = {
45076 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
45077 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
45078 + 4194301, 8388593, 16777213, 33554393, 67108859
45079 + };
45080 + void *newtable = NULL;
45081 + unsigned int pwr = 0;
45082 +
45083 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
45084 + table_sizes[pwr] <= *len)
45085 + pwr++;
45086 +
45087 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
45088 + return newtable;
45089 +
45090 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
45091 + newtable =
45092 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
45093 + else
45094 + newtable = vmalloc(table_sizes[pwr] * elementsize);
45095 +
45096 + *len = table_sizes[pwr];
45097 +
45098 + return newtable;
45099 +}
45100 +
45101 +static int
45102 +init_variables(const struct gr_arg *arg)
45103 +{
45104 + struct task_struct *reaper = &init_task;
45105 + unsigned int stacksize;
45106 +
45107 + subj_map_set.s_size = arg->role_db.num_subjects;
45108 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
45109 + name_set.n_size = arg->role_db.num_objects;
45110 + inodev_set.i_size = arg->role_db.num_objects;
45111 +
45112 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
45113 + !name_set.n_size || !inodev_set.i_size)
45114 + return 1;
45115 +
45116 + if (!gr_init_uidset())
45117 + return 1;
45118 +
45119 + /* set up the stack that holds allocation info */
45120 +
45121 + stacksize = arg->role_db.num_pointers + 5;
45122 +
45123 + if (!acl_alloc_stack_init(stacksize))
45124 + return 1;
45125 +
45126 + /* grab reference for the real root dentry and vfsmount */
45127 + read_lock(&reaper->fs->lock);
45128 + real_root = dget(reaper->fs->root.dentry);
45129 + real_root_mnt = mntget(reaper->fs->root.mnt);
45130 + read_unlock(&reaper->fs->lock);
45131 +
45132 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45133 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
45134 +#endif
45135 +
45136 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
45137 + if (fakefs_obj_rw == NULL)
45138 + return 1;
45139 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
45140 +
45141 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
45142 + if (fakefs_obj_rwx == NULL)
45143 + return 1;
45144 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
45145 +
45146 + subj_map_set.s_hash =
45147 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
45148 + acl_role_set.r_hash =
45149 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
45150 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
45151 + inodev_set.i_hash =
45152 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
45153 +
45154 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
45155 + !name_set.n_hash || !inodev_set.i_hash)
45156 + return 1;
45157 +
45158 + memset(subj_map_set.s_hash, 0,
45159 + sizeof(struct subject_map *) * subj_map_set.s_size);
45160 + memset(acl_role_set.r_hash, 0,
45161 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
45162 + memset(name_set.n_hash, 0,
45163 + sizeof (struct name_entry *) * name_set.n_size);
45164 + memset(inodev_set.i_hash, 0,
45165 + sizeof (struct inodev_entry *) * inodev_set.i_size);
45166 +
45167 + return 0;
45168 +}
45169 +
45170 +/* free information not needed after startup;
45171 + currently this contains the user->kernel pointer mappings for subjects
45172 +*/
45173 +
45174 +static void
45175 +free_init_variables(void)
45176 +{
45177 + __u32 i;
45178 +
45179 + if (subj_map_set.s_hash) {
45180 + for (i = 0; i < subj_map_set.s_size; i++) {
45181 + if (subj_map_set.s_hash[i]) {
45182 + kfree(subj_map_set.s_hash[i]);
45183 + subj_map_set.s_hash[i] = NULL;
45184 + }
45185 + }
45186 +
45187 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
45188 + PAGE_SIZE)
45189 + kfree(subj_map_set.s_hash);
45190 + else
45191 + vfree(subj_map_set.s_hash);
45192 + }
45193 +
45194 + return;
45195 +}
45196 +
45197 +static void
45198 +free_variables(void)
45199 +{
45200 + struct acl_subject_label *s;
45201 + struct acl_role_label *r;
45202 + struct task_struct *task, *task2;
45203 + unsigned int x;
45204 +
45205 + gr_clear_learn_entries();
45206 +
45207 + read_lock(&tasklist_lock);
45208 + do_each_thread(task2, task) {
45209 + task->acl_sp_role = 0;
45210 + task->acl_role_id = 0;
45211 + task->acl = NULL;
45212 + task->role = NULL;
45213 + } while_each_thread(task2, task);
45214 + read_unlock(&tasklist_lock);
45215 +
45216 + /* release the reference to the real root dentry and vfsmount */
45217 + if (real_root)
45218 + dput(real_root);
45219 + real_root = NULL;
45220 + if (real_root_mnt)
45221 + mntput(real_root_mnt);
45222 + real_root_mnt = NULL;
45223 +
45224 + /* free all object hash tables */
45225 +
45226 + FOR_EACH_ROLE_START(r)
45227 + if (r->subj_hash == NULL)
45228 + goto next_role;
45229 + FOR_EACH_SUBJECT_START(r, s, x)
45230 + if (s->obj_hash == NULL)
45231 + break;
45232 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45233 + kfree(s->obj_hash);
45234 + else
45235 + vfree(s->obj_hash);
45236 + FOR_EACH_SUBJECT_END(s, x)
45237 + FOR_EACH_NESTED_SUBJECT_START(r, s)
45238 + if (s->obj_hash == NULL)
45239 + break;
45240 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45241 + kfree(s->obj_hash);
45242 + else
45243 + vfree(s->obj_hash);
45244 + FOR_EACH_NESTED_SUBJECT_END(s)
45245 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45246 + kfree(r->subj_hash);
45247 + else
45248 + vfree(r->subj_hash);
45249 + r->subj_hash = NULL;
45250 +next_role:
45251 + FOR_EACH_ROLE_END(r)
45252 +
45253 + acl_free_all();
45254 +
45255 + if (acl_role_set.r_hash) {
45256 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45257 + PAGE_SIZE)
45258 + kfree(acl_role_set.r_hash);
45259 + else
45260 + vfree(acl_role_set.r_hash);
45261 + }
45262 + if (name_set.n_hash) {
45263 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
45264 + PAGE_SIZE)
45265 + kfree(name_set.n_hash);
45266 + else
45267 + vfree(name_set.n_hash);
45268 + }
45269 +
45270 + if (inodev_set.i_hash) {
45271 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45272 + PAGE_SIZE)
45273 + kfree(inodev_set.i_hash);
45274 + else
45275 + vfree(inodev_set.i_hash);
45276 + }
45277 +
45278 + gr_free_uidset();
45279 +
45280 + memset(&name_set, 0, sizeof (struct name_db));
45281 + memset(&inodev_set, 0, sizeof (struct inodev_db));
45282 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45283 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45284 +
45285 + default_role = NULL;
45286 + role_list = NULL;
45287 +
45288 + return;
45289 +}
45290 +
45291 +static __u32
45292 +count_user_objs(struct acl_object_label *userp)
45293 +{
45294 + struct acl_object_label o_tmp;
45295 + __u32 num = 0;
45296 +
45297 + while (userp) {
45298 + if (copy_from_user(&o_tmp, userp,
45299 + sizeof (struct acl_object_label)))
45300 + break;
45301 +
45302 + userp = o_tmp.prev;
45303 + num++;
45304 + }
45305 +
45306 + return num;
45307 +}
45308 +
45309 +static struct acl_subject_label *
45310 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45311 +
45312 +static int
45313 +copy_user_glob(struct acl_object_label *obj)
45314 +{
45315 + struct acl_object_label *g_tmp, **guser;
45316 + unsigned int len;
45317 + char *tmp;
45318 +
45319 + if (obj->globbed == NULL)
45320 + return 0;
45321 +
45322 + guser = &obj->globbed;
45323 + while (*guser) {
45324 + g_tmp = (struct acl_object_label *)
45325 + acl_alloc(sizeof (struct acl_object_label));
45326 + if (g_tmp == NULL)
45327 + return -ENOMEM;
45328 +
45329 + if (copy_from_user(g_tmp, *guser,
45330 + sizeof (struct acl_object_label)))
45331 + return -EFAULT;
45332 +
45333 + len = strnlen_user(g_tmp->filename, PATH_MAX);
45334 +
45335 + if (!len || len >= PATH_MAX)
45336 + return -EINVAL;
45337 +
45338 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45339 + return -ENOMEM;
45340 +
45341 + if (copy_from_user(tmp, g_tmp->filename, len))
45342 + return -EFAULT;
45343 + tmp[len-1] = '\0';
45344 + g_tmp->filename = tmp;
45345 +
45346 + *guser = g_tmp;
45347 + guser = &(g_tmp->next);
45348 + }
45349 +
45350 + return 0;
45351 +}
45352 +
45353 +static int
45354 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45355 + struct acl_role_label *role)
45356 +{
45357 + struct acl_object_label *o_tmp;
45358 + unsigned int len;
45359 + int ret;
45360 + char *tmp;
45361 +
45362 + while (userp) {
45363 + if ((o_tmp = (struct acl_object_label *)
45364 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
45365 + return -ENOMEM;
45366 +
45367 + if (copy_from_user(o_tmp, userp,
45368 + sizeof (struct acl_object_label)))
45369 + return -EFAULT;
45370 +
45371 + userp = o_tmp->prev;
45372 +
45373 + len = strnlen_user(o_tmp->filename, PATH_MAX);
45374 +
45375 + if (!len || len >= PATH_MAX)
45376 + return -EINVAL;
45377 +
45378 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45379 + return -ENOMEM;
45380 +
45381 + if (copy_from_user(tmp, o_tmp->filename, len))
45382 + return -EFAULT;
45383 + tmp[len-1] = '\0';
45384 + o_tmp->filename = tmp;
45385 +
45386 + insert_acl_obj_label(o_tmp, subj);
45387 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45388 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45389 + return -ENOMEM;
45390 +
45391 + ret = copy_user_glob(o_tmp);
45392 + if (ret)
45393 + return ret;
45394 +
45395 + if (o_tmp->nested) {
45396 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45397 + if (IS_ERR(o_tmp->nested))
45398 + return PTR_ERR(o_tmp->nested);
45399 +
45400 + /* insert into nested subject list */
45401 + o_tmp->nested->next = role->hash->first;
45402 + role->hash->first = o_tmp->nested;
45403 + }
45404 + }
45405 +
45406 + return 0;
45407 +}
45408 +
45409 +static __u32
45410 +count_user_subjs(struct acl_subject_label *userp)
45411 +{
45412 + struct acl_subject_label s_tmp;
45413 + __u32 num = 0;
45414 +
45415 + while (userp) {
45416 + if (copy_from_user(&s_tmp, userp,
45417 + sizeof (struct acl_subject_label)))
45418 + break;
45419 +
45420 + userp = s_tmp.prev;
45421 + /* do not count nested subjects against this count, since
45422 + they are not included in the hash table, but are
45423 + attached to objects. We have already counted
45424 + the subjects in userspace for the allocation
45425 + stack
45426 + */
45427 + if (!(s_tmp.mode & GR_NESTED))
45428 + num++;
45429 + }
45430 +
45431 + return num;
45432 +}
45433 +
45434 +static int
45435 +copy_user_allowedips(struct acl_role_label *rolep)
45436 +{
45437 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45438 +
45439 + ruserip = rolep->allowed_ips;
45440 +
45441 + while (ruserip) {
45442 + rlast = rtmp;
45443 +
45444 + if ((rtmp = (struct role_allowed_ip *)
45445 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45446 + return -ENOMEM;
45447 +
45448 + if (copy_from_user(rtmp, ruserip,
45449 + sizeof (struct role_allowed_ip)))
45450 + return -EFAULT;
45451 +
45452 + ruserip = rtmp->prev;
45453 +
45454 + if (!rlast) {
45455 + rtmp->prev = NULL;
45456 + rolep->allowed_ips = rtmp;
45457 + } else {
45458 + rlast->next = rtmp;
45459 + rtmp->prev = rlast;
45460 + }
45461 +
45462 + if (!ruserip)
45463 + rtmp->next = NULL;
45464 + }
45465 +
45466 + return 0;
45467 +}
45468 +
45469 +static int
45470 +copy_user_transitions(struct acl_role_label *rolep)
45471 +{
45472 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
45473 +
45474 + unsigned int len;
45475 + char *tmp;
45476 +
45477 + rusertp = rolep->transitions;
45478 +
45479 + while (rusertp) {
45480 + rlast = rtmp;
45481 +
45482 + if ((rtmp = (struct role_transition *)
45483 + acl_alloc(sizeof (struct role_transition))) == NULL)
45484 + return -ENOMEM;
45485 +
45486 + if (copy_from_user(rtmp, rusertp,
45487 + sizeof (struct role_transition)))
45488 + return -EFAULT;
45489 +
45490 + rusertp = rtmp->prev;
45491 +
45492 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45493 +
45494 + if (!len || len >= GR_SPROLE_LEN)
45495 + return -EINVAL;
45496 +
45497 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45498 + return -ENOMEM;
45499 +
45500 + if (copy_from_user(tmp, rtmp->rolename, len))
45501 + return -EFAULT;
45502 + tmp[len-1] = '\0';
45503 + rtmp->rolename = tmp;
45504 +
45505 + if (!rlast) {
45506 + rtmp->prev = NULL;
45507 + rolep->transitions = rtmp;
45508 + } else {
45509 + rlast->next = rtmp;
45510 + rtmp->prev = rlast;
45511 + }
45512 +
45513 + if (!rusertp)
45514 + rtmp->next = NULL;
45515 + }
45516 +
45517 + return 0;
45518 +}
45519 +
45520 +static struct acl_subject_label *
45521 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45522 +{
45523 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45524 + unsigned int len;
45525 + char *tmp;
45526 + __u32 num_objs;
45527 + struct acl_ip_label **i_tmp, *i_utmp2;
45528 + struct gr_hash_struct ghash;
45529 + struct subject_map *subjmap;
45530 + unsigned int i_num;
45531 + int err;
45532 +
45533 + s_tmp = lookup_subject_map(userp);
45534 +
45535 + /* we've already copied this subject into the kernel, just return
45536 + the reference to it, and don't copy it over again
45537 + */
45538 + if (s_tmp)
45539 + return(s_tmp);
45540 +
45541 + if ((s_tmp = (struct acl_subject_label *)
45542 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45543 + return ERR_PTR(-ENOMEM);
45544 +
45545 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45546 + if (subjmap == NULL)
45547 + return ERR_PTR(-ENOMEM);
45548 +
45549 + subjmap->user = userp;
45550 + subjmap->kernel = s_tmp;
45551 + insert_subj_map_entry(subjmap);
45552 +
45553 + if (copy_from_user(s_tmp, userp,
45554 + sizeof (struct acl_subject_label)))
45555 + return ERR_PTR(-EFAULT);
45556 +
45557 + len = strnlen_user(s_tmp->filename, PATH_MAX);
45558 +
45559 + if (!len || len >= PATH_MAX)
45560 + return ERR_PTR(-EINVAL);
45561 +
45562 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45563 + return ERR_PTR(-ENOMEM);
45564 +
45565 + if (copy_from_user(tmp, s_tmp->filename, len))
45566 + return ERR_PTR(-EFAULT);
45567 + tmp[len-1] = '\0';
45568 + s_tmp->filename = tmp;
45569 +
45570 + if (!strcmp(s_tmp->filename, "/"))
45571 + role->root_label = s_tmp;
45572 +
45573 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45574 + return ERR_PTR(-EFAULT);
45575 +
45576 + /* copy user and group transition tables */
45577 +
45578 + if (s_tmp->user_trans_num) {
45579 + uid_t *uidlist;
45580 +
45581 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45582 + if (uidlist == NULL)
45583 + return ERR_PTR(-ENOMEM);
45584 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45585 + return ERR_PTR(-EFAULT);
45586 +
45587 + s_tmp->user_transitions = uidlist;
45588 + }
45589 +
45590 + if (s_tmp->group_trans_num) {
45591 + gid_t *gidlist;
45592 +
45593 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45594 + if (gidlist == NULL)
45595 + return ERR_PTR(-ENOMEM);
45596 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45597 + return ERR_PTR(-EFAULT);
45598 +
45599 + s_tmp->group_transitions = gidlist;
45600 + }
45601 +
45602 + /* set up object hash table */
45603 + num_objs = count_user_objs(ghash.first);
45604 +
45605 + s_tmp->obj_hash_size = num_objs;
45606 + s_tmp->obj_hash =
45607 + (struct acl_object_label **)
45608 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45609 +
45610 + if (!s_tmp->obj_hash)
45611 + return ERR_PTR(-ENOMEM);
45612 +
45613 + memset(s_tmp->obj_hash, 0,
45614 + s_tmp->obj_hash_size *
45615 + sizeof (struct acl_object_label *));
45616 +
45617 + /* add in objects */
45618 + err = copy_user_objs(ghash.first, s_tmp, role);
45619 +
45620 + if (err)
45621 + return ERR_PTR(err);
45622 +
45623 + /* set pointer for parent subject */
45624 + if (s_tmp->parent_subject) {
45625 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45626 +
45627 + if (IS_ERR(s_tmp2))
45628 + return s_tmp2;
45629 +
45630 + s_tmp->parent_subject = s_tmp2;
45631 + }
45632 +
45633 + /* add in ip acls */
45634 +
45635 + if (!s_tmp->ip_num) {
45636 + s_tmp->ips = NULL;
45637 + goto insert;
45638 + }
45639 +
45640 + i_tmp =
45641 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45642 + sizeof (struct acl_ip_label *));
45643 +
45644 + if (!i_tmp)
45645 + return ERR_PTR(-ENOMEM);
45646 +
45647 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45648 + *(i_tmp + i_num) =
45649 + (struct acl_ip_label *)
45650 + acl_alloc(sizeof (struct acl_ip_label));
45651 + if (!*(i_tmp + i_num))
45652 + return ERR_PTR(-ENOMEM);
45653 +
45654 + if (copy_from_user
45655 + (&i_utmp2, s_tmp->ips + i_num,
45656 + sizeof (struct acl_ip_label *)))
45657 + return ERR_PTR(-EFAULT);
45658 +
45659 + if (copy_from_user
45660 + (*(i_tmp + i_num), i_utmp2,
45661 + sizeof (struct acl_ip_label)))
45662 + return ERR_PTR(-EFAULT);
45663 +
45664 + if ((*(i_tmp + i_num))->iface == NULL)
45665 + continue;
45666 +
45667 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45668 + if (!len || len >= IFNAMSIZ)
45669 + return ERR_PTR(-EINVAL);
45670 + tmp = acl_alloc(len);
45671 + if (tmp == NULL)
45672 + return ERR_PTR(-ENOMEM);
45673 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45674 + return ERR_PTR(-EFAULT);
45675 + (*(i_tmp + i_num))->iface = tmp;
45676 + }
45677 +
45678 + s_tmp->ips = i_tmp;
45679 +
45680 +insert:
45681 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45682 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45683 + return ERR_PTR(-ENOMEM);
45684 +
45685 + return s_tmp;
45686 +}
45687 +
45688 +static int
45689 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45690 +{
45691 + struct acl_subject_label s_pre;
45692 + struct acl_subject_label * ret;
45693 + int err;
45694 +
45695 + while (userp) {
45696 + if (copy_from_user(&s_pre, userp,
45697 + sizeof (struct acl_subject_label)))
45698 + return -EFAULT;
45699 +
45700 + /* do not add nested subjects here, add
45701 + while parsing objects
45702 + */
45703 +
45704 + if (s_pre.mode & GR_NESTED) {
45705 + userp = s_pre.prev;
45706 + continue;
45707 + }
45708 +
45709 + ret = do_copy_user_subj(userp, role);
45710 +
45711 + err = PTR_ERR(ret);
45712 + if (IS_ERR(ret))
45713 + return err;
45714 +
45715 + insert_acl_subj_label(ret, role);
45716 +
45717 + userp = s_pre.prev;
45718 + }
45719 +
45720 + return 0;
45721 +}
45722 +
45723 +static int
45724 +copy_user_acl(struct gr_arg *arg)
45725 +{
45726 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45727 + struct sprole_pw *sptmp;
45728 + struct gr_hash_struct *ghash;
45729 + uid_t *domainlist;
45730 + unsigned int r_num;
45731 + unsigned int len;
45732 + char *tmp;
45733 + int err = 0;
45734 + __u16 i;
45735 + __u32 num_subjs;
45736 +
45737 + /* we need a default and kernel role */
45738 + if (arg->role_db.num_roles < 2)
45739 + return -EINVAL;
45740 +
45741 + /* copy special role authentication info from userspace */
45742 +
45743 + num_sprole_pws = arg->num_sprole_pws;
45744 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45745 +
45746 + if (!acl_special_roles) {
45747 + err = -ENOMEM;
45748 + goto cleanup;
45749 + }
45750 +
45751 + for (i = 0; i < num_sprole_pws; i++) {
45752 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45753 + if (!sptmp) {
45754 + err = -ENOMEM;
45755 + goto cleanup;
45756 + }
45757 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45758 + sizeof (struct sprole_pw))) {
45759 + err = -EFAULT;
45760 + goto cleanup;
45761 + }
45762 +
45763 + len =
45764 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45765 +
45766 + if (!len || len >= GR_SPROLE_LEN) {
45767 + err = -EINVAL;
45768 + goto cleanup;
45769 + }
45770 +
45771 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45772 + err = -ENOMEM;
45773 + goto cleanup;
45774 + }
45775 +
45776 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45777 + err = -EFAULT;
45778 + goto cleanup;
45779 + }
45780 + tmp[len-1] = '\0';
45781 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45782 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45783 +#endif
45784 + sptmp->rolename = tmp;
45785 + acl_special_roles[i] = sptmp;
45786 + }
45787 +
45788 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45789 +
45790 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45791 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45792 +
45793 + if (!r_tmp) {
45794 + err = -ENOMEM;
45795 + goto cleanup;
45796 + }
45797 +
45798 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45799 + sizeof (struct acl_role_label *))) {
45800 + err = -EFAULT;
45801 + goto cleanup;
45802 + }
45803 +
45804 + if (copy_from_user(r_tmp, r_utmp2,
45805 + sizeof (struct acl_role_label))) {
45806 + err = -EFAULT;
45807 + goto cleanup;
45808 + }
45809 +
45810 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45811 +
45812 + if (!len || len >= PATH_MAX) {
45813 + err = -EINVAL;
45814 + goto cleanup;
45815 + }
45816 +
45817 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45818 + err = -ENOMEM;
45819 + goto cleanup;
45820 + }
45821 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45822 + err = -EFAULT;
45823 + goto cleanup;
45824 + }
45825 + tmp[len-1] = '\0';
45826 + r_tmp->rolename = tmp;
45827 +
45828 + if (!strcmp(r_tmp->rolename, "default")
45829 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45830 + default_role = r_tmp;
45831 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45832 + kernel_role = r_tmp;
45833 + }
45834 +
45835 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45836 + err = -ENOMEM;
45837 + goto cleanup;
45838 + }
45839 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45840 + err = -EFAULT;
45841 + goto cleanup;
45842 + }
45843 +
45844 + r_tmp->hash = ghash;
45845 +
45846 + num_subjs = count_user_subjs(r_tmp->hash->first);
45847 +
45848 + r_tmp->subj_hash_size = num_subjs;
45849 + r_tmp->subj_hash =
45850 + (struct acl_subject_label **)
45851 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45852 +
45853 + if (!r_tmp->subj_hash) {
45854 + err = -ENOMEM;
45855 + goto cleanup;
45856 + }
45857 +
45858 + err = copy_user_allowedips(r_tmp);
45859 + if (err)
45860 + goto cleanup;
45861 +
45862 + /* copy domain info */
45863 + if (r_tmp->domain_children != NULL) {
45864 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45865 + if (domainlist == NULL) {
45866 + err = -ENOMEM;
45867 + goto cleanup;
45868 + }
45869 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45870 + err = -EFAULT;
45871 + goto cleanup;
45872 + }
45873 + r_tmp->domain_children = domainlist;
45874 + }
45875 +
45876 + err = copy_user_transitions(r_tmp);
45877 + if (err)
45878 + goto cleanup;
45879 +
45880 + memset(r_tmp->subj_hash, 0,
45881 + r_tmp->subj_hash_size *
45882 + sizeof (struct acl_subject_label *));
45883 +
45884 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45885 +
45886 + if (err)
45887 + goto cleanup;
45888 +
45889 + /* set nested subject list to null */
45890 + r_tmp->hash->first = NULL;
45891 +
45892 + insert_acl_role_label(r_tmp);
45893 + }
45894 +
45895 + goto return_err;
45896 + cleanup:
45897 + free_variables();
45898 + return_err:
45899 + return err;
45900 +
45901 +}
45902 +
45903 +static int
45904 +gracl_init(struct gr_arg *args)
45905 +{
45906 + int error = 0;
45907 +
45908 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45909 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45910 +
45911 + if (init_variables(args)) {
45912 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45913 + error = -ENOMEM;
45914 + free_variables();
45915 + goto out;
45916 + }
45917 +
45918 + error = copy_user_acl(args);
45919 + free_init_variables();
45920 + if (error) {
45921 + free_variables();
45922 + goto out;
45923 + }
45924 +
45925 + if ((error = gr_set_acls(0))) {
45926 + free_variables();
45927 + goto out;
45928 + }
45929 +
45930 + pax_open_kernel();
45931 + gr_status |= GR_READY;
45932 + pax_close_kernel();
45933 +
45934 + out:
45935 + return error;
45936 +}
45937 +
45938 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
45939 +
45940 +static int
45941 +glob_match(const char *p, const char *n)
45942 +{
45943 + char c;
45944 +
45945 + while ((c = *p++) != '\0') {
45946 + switch (c) {
45947 + case '?':
45948 + if (*n == '\0')
45949 + return 1;
45950 + else if (*n == '/')
45951 + return 1;
45952 + break;
45953 + case '\\':
45954 + if (*n != c)
45955 + return 1;
45956 + break;
45957 + case '*':
45958 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45959 + if (*n == '/')
45960 + return 1;
45961 + else if (c == '?') {
45962 + if (*n == '\0')
45963 + return 1;
45964 + else
45965 + ++n;
45966 + }
45967 + }
45968 + if (c == '\0') {
45969 + return 0;
45970 + } else {
45971 + const char *endp;
45972 +
45973 + if ((endp = strchr(n, '/')) == NULL)
45974 + endp = n + strlen(n);
45975 +
45976 + if (c == '[') {
45977 + for (--p; n < endp; ++n)
45978 + if (!glob_match(p, n))
45979 + return 0;
45980 + } else if (c == '/') {
45981 + while (*n != '\0' && *n != '/')
45982 + ++n;
45983 + if (*n == '/' && !glob_match(p, n + 1))
45984 + return 0;
45985 + } else {
45986 + for (--p; n < endp; ++n)
45987 + if (*n == c && !glob_match(p, n))
45988 + return 0;
45989 + }
45990 +
45991 + return 1;
45992 + }
45993 + case '[':
45994 + {
45995 + int not;
45996 + char cold;
45997 +
45998 + if (*n == '\0' || *n == '/')
45999 + return 1;
46000 +
46001 + not = (*p == '!' || *p == '^');
46002 + if (not)
46003 + ++p;
46004 +
46005 + c = *p++;
46006 + for (;;) {
46007 + unsigned char fn = (unsigned char)*n;
46008 +
46009 + if (c == '\0')
46010 + return 1;
46011 + else {
46012 + if (c == fn)
46013 + goto matched;
46014 + cold = c;
46015 + c = *p++;
46016 +
46017 + if (c == '-' && *p != ']') {
46018 + unsigned char cend = *p++;
46019 +
46020 + if (cend == '\0')
46021 + return 1;
46022 +
46023 + if (cold <= fn && fn <= cend)
46024 + goto matched;
46025 +
46026 + c = *p++;
46027 + }
46028 + }
46029 +
46030 + if (c == ']')
46031 + break;
46032 + }
46033 + if (!not)
46034 + return 1;
46035 + break;
46036 + matched:
46037 + while (c != ']') {
46038 + if (c == '\0')
46039 + return 1;
46040 +
46041 + c = *p++;
46042 + }
46043 + if (not)
46044 + return 1;
46045 + }
46046 + break;
46047 + default:
46048 + if (c != *n)
46049 + return 1;
46050 + }
46051 +
46052 + ++n;
46053 + }
46054 +
46055 + if (*n == '\0')
46056 + return 0;
46057 +
46058 + if (*n == '/')
46059 + return 0;
46060 +
46061 + return 1;
46062 +}
46063 +
46064 +static struct acl_object_label *
46065 +chk_glob_label(struct acl_object_label *globbed,
46066 + struct dentry *dentry, struct vfsmount *mnt, char **path)
46067 +{
46068 + struct acl_object_label *tmp;
46069 +
46070 + if (*path == NULL)
46071 + *path = gr_to_filename_nolock(dentry, mnt);
46072 +
46073 + tmp = globbed;
46074 +
46075 + while (tmp) {
46076 + if (!glob_match(tmp->filename, *path))
46077 + return tmp;
46078 + tmp = tmp->next;
46079 + }
46080 +
46081 + return NULL;
46082 +}
46083 +
46084 +static struct acl_object_label *
46085 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46086 + const ino_t curr_ino, const dev_t curr_dev,
46087 + const struct acl_subject_label *subj, char **path, const int checkglob)
46088 +{
46089 + struct acl_subject_label *tmpsubj;
46090 + struct acl_object_label *retval;
46091 + struct acl_object_label *retval2;
46092 +
46093 + tmpsubj = (struct acl_subject_label *) subj;
46094 + read_lock(&gr_inode_lock);
46095 + do {
46096 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
46097 + if (retval) {
46098 + if (checkglob && retval->globbed) {
46099 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
46100 + (struct vfsmount *)orig_mnt, path);
46101 + if (retval2)
46102 + retval = retval2;
46103 + }
46104 + break;
46105 + }
46106 + } while ((tmpsubj = tmpsubj->parent_subject));
46107 + read_unlock(&gr_inode_lock);
46108 +
46109 + return retval;
46110 +}
46111 +
46112 +static __inline__ struct acl_object_label *
46113 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46114 + const struct dentry *curr_dentry,
46115 + const struct acl_subject_label *subj, char **path, const int checkglob)
46116 +{
46117 + int newglob = checkglob;
46118 +
46119 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
46120 + as we don't want a / * rule to match instead of the / object
46121 + don't do this for create lookups that call this function though, since they're looking up
46122 + on the parent and thus need globbing checks on all paths
46123 + */
46124 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
46125 + newglob = GR_NO_GLOB;
46126 +
46127 + return __full_lookup(orig_dentry, orig_mnt,
46128 + curr_dentry->d_inode->i_ino,
46129 + __get_dev(curr_dentry), subj, path, newglob);
46130 +}
46131 +
46132 +static struct acl_object_label *
46133 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46134 + const struct acl_subject_label *subj, char *path, const int checkglob)
46135 +{
46136 + struct dentry *dentry = (struct dentry *) l_dentry;
46137 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46138 + struct acl_object_label *retval;
46139 +
46140 + spin_lock(&dcache_lock);
46141 + spin_lock(&vfsmount_lock);
46142 +
46143 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
46144 +#ifdef CONFIG_NET
46145 + mnt == sock_mnt ||
46146 +#endif
46147 +#ifdef CONFIG_HUGETLBFS
46148 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
46149 +#endif
46150 + /* ignore Eric Biederman */
46151 + IS_PRIVATE(l_dentry->d_inode))) {
46152 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
46153 + goto out;
46154 + }
46155 +
46156 + for (;;) {
46157 + if (dentry == real_root && mnt == real_root_mnt)
46158 + break;
46159 +
46160 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46161 + if (mnt->mnt_parent == mnt)
46162 + break;
46163 +
46164 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46165 + if (retval != NULL)
46166 + goto out;
46167 +
46168 + dentry = mnt->mnt_mountpoint;
46169 + mnt = mnt->mnt_parent;
46170 + continue;
46171 + }
46172 +
46173 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46174 + if (retval != NULL)
46175 + goto out;
46176 +
46177 + dentry = dentry->d_parent;
46178 + }
46179 +
46180 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46181 +
46182 + if (retval == NULL)
46183 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
46184 +out:
46185 + spin_unlock(&vfsmount_lock);
46186 + spin_unlock(&dcache_lock);
46187 +
46188 + BUG_ON(retval == NULL);
46189 +
46190 + return retval;
46191 +}
46192 +
46193 +static __inline__ struct acl_object_label *
46194 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46195 + const struct acl_subject_label *subj)
46196 +{
46197 + char *path = NULL;
46198 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46199 +}
46200 +
46201 +static __inline__ struct acl_object_label *
46202 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46203 + const struct acl_subject_label *subj)
46204 +{
46205 + char *path = NULL;
46206 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46207 +}
46208 +
46209 +static __inline__ struct acl_object_label *
46210 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46211 + const struct acl_subject_label *subj, char *path)
46212 +{
46213 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46214 +}
46215 +
46216 +static struct acl_subject_label *
46217 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46218 + const struct acl_role_label *role)
46219 +{
46220 + struct dentry *dentry = (struct dentry *) l_dentry;
46221 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46222 + struct acl_subject_label *retval;
46223 +
46224 + spin_lock(&dcache_lock);
46225 + spin_lock(&vfsmount_lock);
46226 +
46227 + for (;;) {
46228 + if (dentry == real_root && mnt == real_root_mnt)
46229 + break;
46230 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46231 + if (mnt->mnt_parent == mnt)
46232 + break;
46233 +
46234 + read_lock(&gr_inode_lock);
46235 + retval =
46236 + lookup_acl_subj_label(dentry->d_inode->i_ino,
46237 + __get_dev(dentry), role);
46238 + read_unlock(&gr_inode_lock);
46239 + if (retval != NULL)
46240 + goto out;
46241 +
46242 + dentry = mnt->mnt_mountpoint;
46243 + mnt = mnt->mnt_parent;
46244 + continue;
46245 + }
46246 +
46247 + read_lock(&gr_inode_lock);
46248 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46249 + __get_dev(dentry), role);
46250 + read_unlock(&gr_inode_lock);
46251 + if (retval != NULL)
46252 + goto out;
46253 +
46254 + dentry = dentry->d_parent;
46255 + }
46256 +
46257 + read_lock(&gr_inode_lock);
46258 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46259 + __get_dev(dentry), role);
46260 + read_unlock(&gr_inode_lock);
46261 +
46262 + if (unlikely(retval == NULL)) {
46263 + read_lock(&gr_inode_lock);
46264 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
46265 + __get_dev(real_root), role);
46266 + read_unlock(&gr_inode_lock);
46267 + }
46268 +out:
46269 + spin_unlock(&vfsmount_lock);
46270 + spin_unlock(&dcache_lock);
46271 +
46272 + BUG_ON(retval == NULL);
46273 +
46274 + return retval;
46275 +}
46276 +
46277 +static void
46278 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46279 +{
46280 + struct task_struct *task = current;
46281 + const struct cred *cred = current_cred();
46282 +
46283 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46284 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46285 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46286 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46287 +
46288 + return;
46289 +}
46290 +
46291 +static void
46292 +gr_log_learn_sysctl(const char *path, const __u32 mode)
46293 +{
46294 + struct task_struct *task = current;
46295 + const struct cred *cred = current_cred();
46296 +
46297 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46298 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46299 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46300 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46301 +
46302 + return;
46303 +}
46304 +
46305 +static void
46306 +gr_log_learn_id_change(const char type, const unsigned int real,
46307 + const unsigned int effective, const unsigned int fs)
46308 +{
46309 + struct task_struct *task = current;
46310 + const struct cred *cred = current_cred();
46311 +
46312 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46313 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46314 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46315 + type, real, effective, fs, &task->signal->saved_ip);
46316 +
46317 + return;
46318 +}
46319 +
46320 +__u32
46321 +gr_check_link(const struct dentry * new_dentry,
46322 + const struct dentry * parent_dentry,
46323 + const struct vfsmount * parent_mnt,
46324 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46325 +{
46326 + struct acl_object_label *obj;
46327 + __u32 oldmode, newmode;
46328 + __u32 needmode;
46329 +
46330 + if (unlikely(!(gr_status & GR_READY)))
46331 + return (GR_CREATE | GR_LINK);
46332 +
46333 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46334 + oldmode = obj->mode;
46335 +
46336 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46337 + oldmode |= (GR_CREATE | GR_LINK);
46338 +
46339 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46340 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46341 + needmode |= GR_SETID | GR_AUDIT_SETID;
46342 +
46343 + newmode =
46344 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46345 + oldmode | needmode);
46346 +
46347 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46348 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46349 + GR_INHERIT | GR_AUDIT_INHERIT);
46350 +
46351 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46352 + goto bad;
46353 +
46354 + if ((oldmode & needmode) != needmode)
46355 + goto bad;
46356 +
46357 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46358 + if ((newmode & needmode) != needmode)
46359 + goto bad;
46360 +
46361 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46362 + return newmode;
46363 +bad:
46364 + needmode = oldmode;
46365 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46366 + needmode |= GR_SETID;
46367 +
46368 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46369 + gr_log_learn(old_dentry, old_mnt, needmode);
46370 + return (GR_CREATE | GR_LINK);
46371 + } else if (newmode & GR_SUPPRESS)
46372 + return GR_SUPPRESS;
46373 + else
46374 + return 0;
46375 +}
46376 +
46377 +__u32
46378 +gr_search_file(const struct dentry * dentry, const __u32 mode,
46379 + const struct vfsmount * mnt)
46380 +{
46381 + __u32 retval = mode;
46382 + struct acl_subject_label *curracl;
46383 + struct acl_object_label *currobj;
46384 +
46385 + if (unlikely(!(gr_status & GR_READY)))
46386 + return (mode & ~GR_AUDITS);
46387 +
46388 + curracl = current->acl;
46389 +
46390 + currobj = chk_obj_label(dentry, mnt, curracl);
46391 + retval = currobj->mode & mode;
46392 +
46393 + /* if we're opening a specified transfer file for writing
46394 + (e.g. /dev/initctl), then transfer our role to init
46395 + */
46396 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46397 + current->role->roletype & GR_ROLE_PERSIST)) {
46398 + struct task_struct *task = init_pid_ns.child_reaper;
46399 +
46400 + if (task->role != current->role) {
46401 + task->acl_sp_role = 0;
46402 + task->acl_role_id = current->acl_role_id;
46403 + task->role = current->role;
46404 + rcu_read_lock();
46405 + read_lock(&grsec_exec_file_lock);
46406 + gr_apply_subject_to_task(task);
46407 + read_unlock(&grsec_exec_file_lock);
46408 + rcu_read_unlock();
46409 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46410 + }
46411 + }
46412 +
46413 + if (unlikely
46414 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46415 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46416 + __u32 new_mode = mode;
46417 +
46418 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46419 +
46420 + retval = new_mode;
46421 +
46422 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46423 + new_mode |= GR_INHERIT;
46424 +
46425 + if (!(mode & GR_NOLEARN))
46426 + gr_log_learn(dentry, mnt, new_mode);
46427 + }
46428 +
46429 + return retval;
46430 +}
46431 +
46432 +__u32
46433 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46434 + const struct vfsmount * mnt, const __u32 mode)
46435 +{
46436 + struct name_entry *match;
46437 + struct acl_object_label *matchpo;
46438 + struct acl_subject_label *curracl;
46439 + char *path;
46440 + __u32 retval;
46441 +
46442 + if (unlikely(!(gr_status & GR_READY)))
46443 + return (mode & ~GR_AUDITS);
46444 +
46445 + preempt_disable();
46446 + path = gr_to_filename_rbac(new_dentry, mnt);
46447 + match = lookup_name_entry_create(path);
46448 +
46449 + if (!match)
46450 + goto check_parent;
46451 +
46452 + curracl = current->acl;
46453 +
46454 + read_lock(&gr_inode_lock);
46455 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46456 + read_unlock(&gr_inode_lock);
46457 +
46458 + if (matchpo) {
46459 + if ((matchpo->mode & mode) !=
46460 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
46461 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46462 + __u32 new_mode = mode;
46463 +
46464 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46465 +
46466 + gr_log_learn(new_dentry, mnt, new_mode);
46467 +
46468 + preempt_enable();
46469 + return new_mode;
46470 + }
46471 + preempt_enable();
46472 + return (matchpo->mode & mode);
46473 + }
46474 +
46475 + check_parent:
46476 + curracl = current->acl;
46477 +
46478 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46479 + retval = matchpo->mode & mode;
46480 +
46481 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46482 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46483 + __u32 new_mode = mode;
46484 +
46485 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46486 +
46487 + gr_log_learn(new_dentry, mnt, new_mode);
46488 + preempt_enable();
46489 + return new_mode;
46490 + }
46491 +
46492 + preempt_enable();
46493 + return retval;
46494 +}
46495 +
46496 +int
46497 +gr_check_hidden_task(const struct task_struct *task)
46498 +{
46499 + if (unlikely(!(gr_status & GR_READY)))
46500 + return 0;
46501 +
46502 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46503 + return 1;
46504 +
46505 + return 0;
46506 +}
46507 +
46508 +int
46509 +gr_check_protected_task(const struct task_struct *task)
46510 +{
46511 + if (unlikely(!(gr_status & GR_READY) || !task))
46512 + return 0;
46513 +
46514 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46515 + task->acl != current->acl)
46516 + return 1;
46517 +
46518 + return 0;
46519 +}
46520 +
46521 +int
46522 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46523 +{
46524 + struct task_struct *p;
46525 + int ret = 0;
46526 +
46527 + if (unlikely(!(gr_status & GR_READY) || !pid))
46528 + return ret;
46529 +
46530 + read_lock(&tasklist_lock);
46531 + do_each_pid_task(pid, type, p) {
46532 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46533 + p->acl != current->acl) {
46534 + ret = 1;
46535 + goto out;
46536 + }
46537 + } while_each_pid_task(pid, type, p);
46538 +out:
46539 + read_unlock(&tasklist_lock);
46540 +
46541 + return ret;
46542 +}
46543 +
46544 +void
46545 +gr_copy_label(struct task_struct *tsk)
46546 +{
46547 + tsk->signal->used_accept = 0;
46548 + tsk->acl_sp_role = 0;
46549 + tsk->acl_role_id = current->acl_role_id;
46550 + tsk->acl = current->acl;
46551 + tsk->role = current->role;
46552 + tsk->signal->curr_ip = current->signal->curr_ip;
46553 + tsk->signal->saved_ip = current->signal->saved_ip;
46554 + if (current->exec_file)
46555 + get_file(current->exec_file);
46556 + tsk->exec_file = current->exec_file;
46557 + tsk->is_writable = current->is_writable;
46558 + if (unlikely(current->signal->used_accept)) {
46559 + current->signal->curr_ip = 0;
46560 + current->signal->saved_ip = 0;
46561 + }
46562 +
46563 + return;
46564 +}
46565 +
46566 +static void
46567 +gr_set_proc_res(struct task_struct *task)
46568 +{
46569 + struct acl_subject_label *proc;
46570 + unsigned short i;
46571 +
46572 + proc = task->acl;
46573 +
46574 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46575 + return;
46576 +
46577 + for (i = 0; i < RLIM_NLIMITS; i++) {
46578 + if (!(proc->resmask & (1 << i)))
46579 + continue;
46580 +
46581 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46582 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46583 + }
46584 +
46585 + return;
46586 +}
46587 +
46588 +extern int __gr_process_user_ban(struct user_struct *user);
46589 +
46590 +int
46591 +gr_check_user_change(int real, int effective, int fs)
46592 +{
46593 + unsigned int i;
46594 + __u16 num;
46595 + uid_t *uidlist;
46596 + int curuid;
46597 + int realok = 0;
46598 + int effectiveok = 0;
46599 + int fsok = 0;
46600 +
46601 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46602 + struct user_struct *user;
46603 +
46604 + if (real == -1)
46605 + goto skipit;
46606 +
46607 + user = find_user(real);
46608 + if (user == NULL)
46609 + goto skipit;
46610 +
46611 + if (__gr_process_user_ban(user)) {
46612 + /* for find_user */
46613 + free_uid(user);
46614 + return 1;
46615 + }
46616 +
46617 + /* for find_user */
46618 + free_uid(user);
46619 +
46620 +skipit:
46621 +#endif
46622 +
46623 + if (unlikely(!(gr_status & GR_READY)))
46624 + return 0;
46625 +
46626 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46627 + gr_log_learn_id_change('u', real, effective, fs);
46628 +
46629 + num = current->acl->user_trans_num;
46630 + uidlist = current->acl->user_transitions;
46631 +
46632 + if (uidlist == NULL)
46633 + return 0;
46634 +
46635 + if (real == -1)
46636 + realok = 1;
46637 + if (effective == -1)
46638 + effectiveok = 1;
46639 + if (fs == -1)
46640 + fsok = 1;
46641 +
46642 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
46643 + for (i = 0; i < num; i++) {
46644 + curuid = (int)uidlist[i];
46645 + if (real == curuid)
46646 + realok = 1;
46647 + if (effective == curuid)
46648 + effectiveok = 1;
46649 + if (fs == curuid)
46650 + fsok = 1;
46651 + }
46652 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
46653 + for (i = 0; i < num; i++) {
46654 + curuid = (int)uidlist[i];
46655 + if (real == curuid)
46656 + break;
46657 + if (effective == curuid)
46658 + break;
46659 + if (fs == curuid)
46660 + break;
46661 + }
46662 + /* not in deny list */
46663 + if (i == num) {
46664 + realok = 1;
46665 + effectiveok = 1;
46666 + fsok = 1;
46667 + }
46668 + }
46669 +
46670 + if (realok && effectiveok && fsok)
46671 + return 0;
46672 + else {
46673 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46674 + return 1;
46675 + }
46676 +}
46677 +
46678 +int
46679 +gr_check_group_change(int real, int effective, int fs)
46680 +{
46681 + unsigned int i;
46682 + __u16 num;
46683 + gid_t *gidlist;
46684 + int curgid;
46685 + int realok = 0;
46686 + int effectiveok = 0;
46687 + int fsok = 0;
46688 +
46689 + if (unlikely(!(gr_status & GR_READY)))
46690 + return 0;
46691 +
46692 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46693 + gr_log_learn_id_change('g', real, effective, fs);
46694 +
46695 + num = current->acl->group_trans_num;
46696 + gidlist = current->acl->group_transitions;
46697 +
46698 + if (gidlist == NULL)
46699 + return 0;
46700 +
46701 + if (real == -1)
46702 + realok = 1;
46703 + if (effective == -1)
46704 + effectiveok = 1;
46705 + if (fs == -1)
46706 + fsok = 1;
46707 +
46708 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46709 + for (i = 0; i < num; i++) {
46710 + curgid = (int)gidlist[i];
46711 + if (real == curgid)
46712 + realok = 1;
46713 + if (effective == curgid)
46714 + effectiveok = 1;
46715 + if (fs == curgid)
46716 + fsok = 1;
46717 + }
46718 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46719 + for (i = 0; i < num; i++) {
46720 + curgid = (int)gidlist[i];
46721 + if (real == curgid)
46722 + break;
46723 + if (effective == curgid)
46724 + break;
46725 + if (fs == curgid)
46726 + break;
46727 + }
46728 + /* not in deny list */
46729 + if (i == num) {
46730 + realok = 1;
46731 + effectiveok = 1;
46732 + fsok = 1;
46733 + }
46734 + }
46735 +
46736 + if (realok && effectiveok && fsok)
46737 + return 0;
46738 + else {
46739 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46740 + return 1;
46741 + }
46742 +}
46743 +
46744 +void
46745 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46746 +{
46747 + struct acl_role_label *role = task->role;
46748 + struct acl_subject_label *subj = NULL;
46749 + struct acl_object_label *obj;
46750 + struct file *filp;
46751 +
46752 + if (unlikely(!(gr_status & GR_READY)))
46753 + return;
46754 +
46755 + filp = task->exec_file;
46756 +
46757 + /* kernel process, we'll give them the kernel role */
46758 + if (unlikely(!filp)) {
46759 + task->role = kernel_role;
46760 + task->acl = kernel_role->root_label;
46761 + return;
46762 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46763 + role = lookup_acl_role_label(task, uid, gid);
46764 +
46765 +	/* perform the subject lookup in the possibly new role;
46766 +	   we can use this result below in the case where role == task->role
46767 +	 */
46768 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46769 +
46770 +	/* if we changed uid/gid but ended up in the same role,
46771 +	   and we are using inheritance, don't lose the inherited
46772 +	   subject: if the current subject differs from what a
46773 +	   normal lookup would return, we arrived here via
46774 +	   inheritance, so don't lose that subject
46775 +	 */
46776 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46777 + (subj == task->acl)))
46778 + task->acl = subj;
46779 +
46780 + task->role = role;
46781 +
46782 + task->is_writable = 0;
46783 +
46784 + /* ignore additional mmap checks for processes that are writable
46785 + by the default ACL */
46786 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46787 + if (unlikely(obj->mode & GR_WRITE))
46788 + task->is_writable = 1;
46789 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46790 + if (unlikely(obj->mode & GR_WRITE))
46791 + task->is_writable = 1;
46792 +
46793 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46794 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46795 +#endif
46796 +
46797 + gr_set_proc_res(task);
46798 +
46799 + return;
46800 +}
46801 +
46802 +int
46803 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46804 + const int unsafe_share)
46805 +{
46806 + struct task_struct *task = current;
46807 + struct acl_subject_label *newacl;
46808 + struct acl_object_label *obj;
46809 + __u32 retmode;
46810 +
46811 + if (unlikely(!(gr_status & GR_READY)))
46812 + return 0;
46813 +
46814 + newacl = chk_subj_label(dentry, mnt, task->role);
46815 +
46816 + task_lock(task);
46817 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46818 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46819 + !(task->role->roletype & GR_ROLE_GOD) &&
46820 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46821 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46822 + task_unlock(task);
46823 + if (unsafe_share)
46824 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46825 + else
46826 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46827 + return -EACCES;
46828 + }
46829 + task_unlock(task);
46830 +
46831 + obj = chk_obj_label(dentry, mnt, task->acl);
46832 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46833 +
46834 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46835 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46836 + if (obj->nested)
46837 + task->acl = obj->nested;
46838 + else
46839 + task->acl = newacl;
46840 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46841 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46842 +
46843 + task->is_writable = 0;
46844 +
46845 + /* ignore additional mmap checks for processes that are writable
46846 + by the default ACL */
46847 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46848 + if (unlikely(obj->mode & GR_WRITE))
46849 + task->is_writable = 1;
46850 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46851 + if (unlikely(obj->mode & GR_WRITE))
46852 + task->is_writable = 1;
46853 +
46854 + gr_set_proc_res(task);
46855 +
46856 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46857 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46858 +#endif
46859 + return 0;
46860 +}
46861 +
46862 +/* always called with valid inodev ptr */
46863 +static void
46864 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46865 +{
46866 + struct acl_object_label *matchpo;
46867 + struct acl_subject_label *matchps;
46868 + struct acl_subject_label *subj;
46869 + struct acl_role_label *role;
46870 + unsigned int x;
46871 +
46872 + FOR_EACH_ROLE_START(role)
46873 + FOR_EACH_SUBJECT_START(role, subj, x)
46874 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46875 + matchpo->mode |= GR_DELETED;
46876 + FOR_EACH_SUBJECT_END(subj,x)
46877 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46878 + if (subj->inode == ino && subj->device == dev)
46879 + subj->mode |= GR_DELETED;
46880 + FOR_EACH_NESTED_SUBJECT_END(subj)
46881 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46882 + matchps->mode |= GR_DELETED;
46883 + FOR_EACH_ROLE_END(role)
46884 +
46885 + inodev->nentry->deleted = 1;
46886 +
46887 + return;
46888 +}
46889 +
46890 +void
46891 +gr_handle_delete(const ino_t ino, const dev_t dev)
46892 +{
46893 + struct inodev_entry *inodev;
46894 +
46895 + if (unlikely(!(gr_status & GR_READY)))
46896 + return;
46897 +
46898 + write_lock(&gr_inode_lock);
46899 + inodev = lookup_inodev_entry(ino, dev);
46900 + if (inodev != NULL)
46901 + do_handle_delete(inodev, ino, dev);
46902 + write_unlock(&gr_inode_lock);
46903 +
46904 + return;
46905 +}
46906 +
46907 +static void
46908 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46909 + const ino_t newinode, const dev_t newdevice,
46910 + struct acl_subject_label *subj)
46911 +{
46912 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46913 + struct acl_object_label *match;
46914 +
46915 + match = subj->obj_hash[index];
46916 +
46917 + while (match && (match->inode != oldinode ||
46918 + match->device != olddevice ||
46919 + !(match->mode & GR_DELETED)))
46920 + match = match->next;
46921 +
46922 + if (match && (match->inode == oldinode)
46923 + && (match->device == olddevice)
46924 + && (match->mode & GR_DELETED)) {
46925 + if (match->prev == NULL) {
46926 + subj->obj_hash[index] = match->next;
46927 + if (match->next != NULL)
46928 + match->next->prev = NULL;
46929 + } else {
46930 + match->prev->next = match->next;
46931 + if (match->next != NULL)
46932 + match->next->prev = match->prev;
46933 + }
46934 + match->prev = NULL;
46935 + match->next = NULL;
46936 + match->inode = newinode;
46937 + match->device = newdevice;
46938 + match->mode &= ~GR_DELETED;
46939 +
46940 + insert_acl_obj_label(match, subj);
46941 + }
46942 +
46943 + return;
46944 +}
46945 +
46946 +static void
46947 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46948 + const ino_t newinode, const dev_t newdevice,
46949 + struct acl_role_label *role)
46950 +{
46951 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46952 + struct acl_subject_label *match;
46953 +
46954 + match = role->subj_hash[index];
46955 +
46956 + while (match && (match->inode != oldinode ||
46957 + match->device != olddevice ||
46958 + !(match->mode & GR_DELETED)))
46959 + match = match->next;
46960 +
46961 + if (match && (match->inode == oldinode)
46962 + && (match->device == olddevice)
46963 + && (match->mode & GR_DELETED)) {
46964 + if (match->prev == NULL) {
46965 + role->subj_hash[index] = match->next;
46966 + if (match->next != NULL)
46967 + match->next->prev = NULL;
46968 + } else {
46969 + match->prev->next = match->next;
46970 + if (match->next != NULL)
46971 + match->next->prev = match->prev;
46972 + }
46973 + match->prev = NULL;
46974 + match->next = NULL;
46975 + match->inode = newinode;
46976 + match->device = newdevice;
46977 + match->mode &= ~GR_DELETED;
46978 +
46979 + insert_acl_subj_label(match, role);
46980 + }
46981 +
46982 + return;
46983 +}
46984 +
46985 +static void
46986 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46987 + const ino_t newinode, const dev_t newdevice)
46988 +{
46989 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46990 + struct inodev_entry *match;
46991 +
46992 + match = inodev_set.i_hash[index];
46993 +
46994 + while (match && (match->nentry->inode != oldinode ||
46995 + match->nentry->device != olddevice || !match->nentry->deleted))
46996 + match = match->next;
46997 +
46998 + if (match && (match->nentry->inode == oldinode)
46999 + && (match->nentry->device == olddevice) &&
47000 + match->nentry->deleted) {
47001 + if (match->prev == NULL) {
47002 + inodev_set.i_hash[index] = match->next;
47003 + if (match->next != NULL)
47004 + match->next->prev = NULL;
47005 + } else {
47006 + match->prev->next = match->next;
47007 + if (match->next != NULL)
47008 + match->next->prev = match->prev;
47009 + }
47010 + match->prev = NULL;
47011 + match->next = NULL;
47012 + match->nentry->inode = newinode;
47013 + match->nentry->device = newdevice;
47014 + match->nentry->deleted = 0;
47015 +
47016 + insert_inodev_entry(match);
47017 + }
47018 +
47019 + return;
47020 +}
47021 +
47022 +static void
47023 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
47024 + const struct vfsmount *mnt)
47025 +{
47026 + struct acl_subject_label *subj;
47027 + struct acl_role_label *role;
47028 + unsigned int x;
47029 + ino_t inode = dentry->d_inode->i_ino;
47030 + dev_t dev = __get_dev(dentry);
47031 +
47032 + FOR_EACH_ROLE_START(role)
47033 + update_acl_subj_label(matchn->inode, matchn->device,
47034 + inode, dev, role);
47035 +
47036 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
47037 + if ((subj->inode == inode) && (subj->device == dev)) {
47038 + subj->inode = inode;
47039 + subj->device = dev;
47040 + }
47041 + FOR_EACH_NESTED_SUBJECT_END(subj)
47042 + FOR_EACH_SUBJECT_START(role, subj, x)
47043 + update_acl_obj_label(matchn->inode, matchn->device,
47044 + inode, dev, subj);
47045 + FOR_EACH_SUBJECT_END(subj,x)
47046 + FOR_EACH_ROLE_END(role)
47047 +
47048 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
47049 +
47050 + return;
47051 +}
47052 +
47053 +void
47054 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47055 +{
47056 + struct name_entry *matchn;
47057 +
47058 + if (unlikely(!(gr_status & GR_READY)))
47059 + return;
47060 +
47061 + preempt_disable();
47062 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
47063 +
47064 + if (unlikely((unsigned long)matchn)) {
47065 + write_lock(&gr_inode_lock);
47066 + do_handle_create(matchn, dentry, mnt);
47067 + write_unlock(&gr_inode_lock);
47068 + }
47069 + preempt_enable();
47070 +
47071 + return;
47072 +}
47073 +
47074 +void
47075 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47076 + struct dentry *old_dentry,
47077 + struct dentry *new_dentry,
47078 + struct vfsmount *mnt, const __u8 replace)
47079 +{
47080 + struct name_entry *matchn;
47081 + struct inodev_entry *inodev;
47082 + ino_t oldinode = old_dentry->d_inode->i_ino;
47083 + dev_t olddev = __get_dev(old_dentry);
47084 +
47085 +	/* vfs_rename swaps the name and parent link for old_dentry and
47086 +	   new_dentry;
47087 +	   at this point, old_dentry has the new name, parent link, and inode
47088 +	   for the renamed file;
47089 +	   if a file is being replaced by a rename, new_dentry has the inode
47090 +	   and name for the replaced file
47091 +	*/
47092 +
47093 + if (unlikely(!(gr_status & GR_READY)))
47094 + return;
47095 +
47096 + preempt_disable();
47097 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
47098 +
47099 + /* we wouldn't have to check d_inode if it weren't for
47100 + NFS silly-renaming
47101 + */
47102 +
47103 + write_lock(&gr_inode_lock);
47104 + if (unlikely(replace && new_dentry->d_inode)) {
47105 + ino_t newinode = new_dentry->d_inode->i_ino;
47106 + dev_t newdev = __get_dev(new_dentry);
47107 + inodev = lookup_inodev_entry(newinode, newdev);
47108 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
47109 + do_handle_delete(inodev, newinode, newdev);
47110 + }
47111 +
47112 + inodev = lookup_inodev_entry(oldinode, olddev);
47113 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
47114 + do_handle_delete(inodev, oldinode, olddev);
47115 +
47116 + if (unlikely((unsigned long)matchn))
47117 + do_handle_create(matchn, old_dentry, mnt);
47118 +
47119 + write_unlock(&gr_inode_lock);
47120 + preempt_enable();
47121 +
47122 + return;
47123 +}
47124 +
47125 +static int
47126 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
47127 + unsigned char **sum)
47128 +{
47129 + struct acl_role_label *r;
47130 + struct role_allowed_ip *ipp;
47131 + struct role_transition *trans;
47132 + unsigned int i;
47133 + int found = 0;
47134 + u32 curr_ip = current->signal->curr_ip;
47135 +
47136 + current->signal->saved_ip = curr_ip;
47137 +
47138 + /* check transition table */
47139 +
47140 + for (trans = current->role->transitions; trans; trans = trans->next) {
47141 + if (!strcmp(rolename, trans->rolename)) {
47142 + found = 1;
47143 + break;
47144 + }
47145 + }
47146 +
47147 + if (!found)
47148 + return 0;
47149 +
47150 + /* handle special roles that do not require authentication
47151 + and check ip */
47152 +
47153 + FOR_EACH_ROLE_START(r)
47154 + if (!strcmp(rolename, r->rolename) &&
47155 + (r->roletype & GR_ROLE_SPECIAL)) {
47156 + found = 0;
47157 + if (r->allowed_ips != NULL) {
47158 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
47159 + if ((ntohl(curr_ip) & ipp->netmask) ==
47160 + (ntohl(ipp->addr) & ipp->netmask))
47161 + found = 1;
47162 + }
47163 + } else
47164 + found = 2;
47165 + if (!found)
47166 + return 0;
47167 +
47168 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
47169 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
47170 + *salt = NULL;
47171 + *sum = NULL;
47172 + return 1;
47173 + }
47174 + }
47175 + FOR_EACH_ROLE_END(r)
47176 +
47177 + for (i = 0; i < num_sprole_pws; i++) {
47178 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
47179 + *salt = acl_special_roles[i]->salt;
47180 + *sum = acl_special_roles[i]->sum;
47181 + return 1;
47182 + }
47183 + }
47184 +
47185 + return 0;
47186 +}
47187 +
47188 +static void
47189 +assign_special_role(char *rolename)
47190 +{
47191 + struct acl_object_label *obj;
47192 + struct acl_role_label *r;
47193 + struct acl_role_label *assigned = NULL;
47194 + struct task_struct *tsk;
47195 + struct file *filp;
47196 +
47197 + FOR_EACH_ROLE_START(r)
47198 + if (!strcmp(rolename, r->rolename) &&
47199 + (r->roletype & GR_ROLE_SPECIAL)) {
47200 + assigned = r;
47201 + break;
47202 + }
47203 + FOR_EACH_ROLE_END(r)
47204 +
47205 + if (!assigned)
47206 + return;
47207 +
47208 + read_lock(&tasklist_lock);
47209 + read_lock(&grsec_exec_file_lock);
47210 +
47211 + tsk = current->real_parent;
47212 + if (tsk == NULL)
47213 + goto out_unlock;
47214 +
47215 + filp = tsk->exec_file;
47216 + if (filp == NULL)
47217 + goto out_unlock;
47218 +
47219 + tsk->is_writable = 0;
47220 +
47221 + tsk->acl_sp_role = 1;
47222 + tsk->acl_role_id = ++acl_sp_role_value;
47223 + tsk->role = assigned;
47224 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47225 +
47226 + /* ignore additional mmap checks for processes that are writable
47227 + by the default ACL */
47228 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47229 + if (unlikely(obj->mode & GR_WRITE))
47230 + tsk->is_writable = 1;
47231 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47232 + if (unlikely(obj->mode & GR_WRITE))
47233 + tsk->is_writable = 1;
47234 +
47235 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47236 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47237 +#endif
47238 +
47239 +out_unlock:
47240 + read_unlock(&grsec_exec_file_lock);
47241 + read_unlock(&tasklist_lock);
47242 + return;
47243 +}
47244 +
47245 +int gr_check_secure_terminal(struct task_struct *task)
47246 +{
47247 + struct task_struct *p, *p2, *p3;
47248 + struct files_struct *files;
47249 + struct fdtable *fdt;
47250 + struct file *our_file = NULL, *file;
47251 + int i;
47252 +
47253 + if (task->signal->tty == NULL)
47254 + return 1;
47255 +
47256 + files = get_files_struct(task);
47257 + if (files != NULL) {
47258 + rcu_read_lock();
47259 + fdt = files_fdtable(files);
47260 + for (i=0; i < fdt->max_fds; i++) {
47261 + file = fcheck_files(files, i);
47262 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47263 + get_file(file);
47264 + our_file = file;
47265 + }
47266 + }
47267 + rcu_read_unlock();
47268 + put_files_struct(files);
47269 + }
47270 +
47271 + if (our_file == NULL)
47272 + return 1;
47273 +
47274 + read_lock(&tasklist_lock);
47275 + do_each_thread(p2, p) {
47276 + files = get_files_struct(p);
47277 + if (files == NULL ||
47278 + (p->signal && p->signal->tty == task->signal->tty)) {
47279 + if (files != NULL)
47280 + put_files_struct(files);
47281 + continue;
47282 + }
47283 + rcu_read_lock();
47284 + fdt = files_fdtable(files);
47285 + for (i=0; i < fdt->max_fds; i++) {
47286 + file = fcheck_files(files, i);
47287 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47288 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47289 + p3 = task;
47290 + while (p3->pid > 0) {
47291 + if (p3 == p)
47292 + break;
47293 + p3 = p3->real_parent;
47294 + }
47295 + if (p3 == p)
47296 + break;
47297 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47298 + gr_handle_alertkill(p);
47299 + rcu_read_unlock();
47300 + put_files_struct(files);
47301 + read_unlock(&tasklist_lock);
47302 + fput(our_file);
47303 + return 0;
47304 + }
47305 + }
47306 + rcu_read_unlock();
47307 + put_files_struct(files);
47308 + } while_each_thread(p2, p);
47309 + read_unlock(&tasklist_lock);
47310 +
47311 + fput(our_file);
47312 + return 1;
47313 +}
47314 +
47315 +ssize_t
47316 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47317 +{
47318 + struct gr_arg_wrapper uwrap;
47319 + unsigned char *sprole_salt = NULL;
47320 + unsigned char *sprole_sum = NULL;
47321 + int error = sizeof (struct gr_arg_wrapper);
47322 + int error2 = 0;
47323 +
47324 + mutex_lock(&gr_dev_mutex);
47325 +
47326 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47327 + error = -EPERM;
47328 + goto out;
47329 + }
47330 +
47331 + if (count != sizeof (struct gr_arg_wrapper)) {
47332 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47333 + error = -EINVAL;
47334 + goto out;
47335 + }
47336 +
47337 +
47338 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47339 + gr_auth_expires = 0;
47340 + gr_auth_attempts = 0;
47341 + }
47342 +
47343 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47344 + error = -EFAULT;
47345 + goto out;
47346 + }
47347 +
47348 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47349 + error = -EINVAL;
47350 + goto out;
47351 + }
47352 +
47353 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47354 + error = -EFAULT;
47355 + goto out;
47356 + }
47357 +
47358 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47359 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47360 + time_after(gr_auth_expires, get_seconds())) {
47361 + error = -EBUSY;
47362 + goto out;
47363 + }
47364 +
47365 +	/* if a non-root user is trying to do anything other than use a
47366 +	   special role, do not attempt authentication and do not count
47367 +	   it towards authentication locking
47368 +	 */
47369 +
47370 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47371 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47372 + current_uid()) {
47373 + error = -EPERM;
47374 + goto out;
47375 + }
47376 +
47377 + /* ensure pw and special role name are null terminated */
47378 +
47379 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47380 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47381 +
47382 +	/* Okay.
47383 +	 * We have enough of the argument structure (we have yet to
47384 +	 * copy_from_user the tables themselves). Copy the tables
47385 +	 * only if we need them, i.e. for loading operations. */
47386 +
47387 + switch (gr_usermode->mode) {
47388 + case GR_STATUS:
47389 + if (gr_status & GR_READY) {
47390 + error = 1;
47391 + if (!gr_check_secure_terminal(current))
47392 + error = 3;
47393 + } else
47394 + error = 2;
47395 + goto out;
47396 + case GR_SHUTDOWN:
47397 + if ((gr_status & GR_READY)
47398 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47399 + pax_open_kernel();
47400 + gr_status &= ~GR_READY;
47401 + pax_close_kernel();
47402 +
47403 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47404 + free_variables();
47405 + memset(gr_usermode, 0, sizeof (struct gr_arg));
47406 + memset(gr_system_salt, 0, GR_SALT_LEN);
47407 + memset(gr_system_sum, 0, GR_SHA_LEN);
47408 + } else if (gr_status & GR_READY) {
47409 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47410 + error = -EPERM;
47411 + } else {
47412 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47413 + error = -EAGAIN;
47414 + }
47415 + break;
47416 + case GR_ENABLE:
47417 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47418 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47419 + else {
47420 + if (gr_status & GR_READY)
47421 + error = -EAGAIN;
47422 + else
47423 + error = error2;
47424 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47425 + }
47426 + break;
47427 + case GR_RELOAD:
47428 + if (!(gr_status & GR_READY)) {
47429 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47430 + error = -EAGAIN;
47431 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47432 + lock_kernel();
47433 +
47434 + pax_open_kernel();
47435 + gr_status &= ~GR_READY;
47436 + pax_close_kernel();
47437 +
47438 + free_variables();
47439 + if (!(error2 = gracl_init(gr_usermode))) {
47440 + unlock_kernel();
47441 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47442 + } else {
47443 + unlock_kernel();
47444 + error = error2;
47445 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47446 + }
47447 + } else {
47448 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47449 + error = -EPERM;
47450 + }
47451 + break;
47452 + case GR_SEGVMOD:
47453 + if (unlikely(!(gr_status & GR_READY))) {
47454 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47455 + error = -EAGAIN;
47456 + break;
47457 + }
47458 +
47459 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47460 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47461 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47462 + struct acl_subject_label *segvacl;
47463 + segvacl =
47464 + lookup_acl_subj_label(gr_usermode->segv_inode,
47465 + gr_usermode->segv_device,
47466 + current->role);
47467 + if (segvacl) {
47468 + segvacl->crashes = 0;
47469 + segvacl->expires = 0;
47470 + }
47471 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47472 + gr_remove_uid(gr_usermode->segv_uid);
47473 + }
47474 + } else {
47475 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47476 + error = -EPERM;
47477 + }
47478 + break;
47479 + case GR_SPROLE:
47480 + case GR_SPROLEPAM:
47481 + if (unlikely(!(gr_status & GR_READY))) {
47482 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47483 + error = -EAGAIN;
47484 + break;
47485 + }
47486 +
47487 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47488 + current->role->expires = 0;
47489 + current->role->auth_attempts = 0;
47490 + }
47491 +
47492 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47493 + time_after(current->role->expires, get_seconds())) {
47494 + error = -EBUSY;
47495 + goto out;
47496 + }
47497 +
47498 + if (lookup_special_role_auth
47499 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47500 + && ((!sprole_salt && !sprole_sum)
47501 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47502 + char *p = "";
47503 + assign_special_role(gr_usermode->sp_role);
47504 + read_lock(&tasklist_lock);
47505 + if (current->real_parent)
47506 + p = current->real_parent->role->rolename;
47507 + read_unlock(&tasklist_lock);
47508 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47509 + p, acl_sp_role_value);
47510 + } else {
47511 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47512 + error = -EPERM;
47513 + if(!(current->role->auth_attempts++))
47514 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47515 +
47516 + goto out;
47517 + }
47518 + break;
47519 + case GR_UNSPROLE:
47520 + if (unlikely(!(gr_status & GR_READY))) {
47521 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47522 + error = -EAGAIN;
47523 + break;
47524 + }
47525 +
47526 + if (current->role->roletype & GR_ROLE_SPECIAL) {
47527 + char *p = "";
47528 + int i = 0;
47529 +
47530 + read_lock(&tasklist_lock);
47531 + if (current->real_parent) {
47532 + p = current->real_parent->role->rolename;
47533 + i = current->real_parent->acl_role_id;
47534 + }
47535 + read_unlock(&tasklist_lock);
47536 +
47537 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47538 + gr_set_acls(1);
47539 + } else {
47540 + error = -EPERM;
47541 + goto out;
47542 + }
47543 + break;
47544 + default:
47545 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47546 + error = -EINVAL;
47547 + break;
47548 + }
47549 +
47550 + if (error != -EPERM)
47551 + goto out;
47552 +
47553 + if(!(gr_auth_attempts++))
47554 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47555 +
47556 + out:
47557 + mutex_unlock(&gr_dev_mutex);
47558 + return error;
47559 +}
47560 +
47561 +/* must be called with
47562 + rcu_read_lock();
47563 + read_lock(&tasklist_lock);
47564 + read_lock(&grsec_exec_file_lock);
47565 +*/
47566 +int gr_apply_subject_to_task(struct task_struct *task)
47567 +{
47568 + struct acl_object_label *obj;
47569 + char *tmpname;
47570 + struct acl_subject_label *tmpsubj;
47571 + struct file *filp;
47572 + struct name_entry *nmatch;
47573 +
47574 + filp = task->exec_file;
47575 + if (filp == NULL)
47576 + return 0;
47577 +
47578 +	/* the following applies the correct subject to binaries
47579 +	   that were already running when the RBAC system was
47580 +	   enabled, but have been replaced or deleted since
47581 +	   they were executed
47582 +	   -----
47583 +	   when the RBAC system starts, the inode/dev taken
47584 +	   from exec_file will be one the RBAC system
47585 +	   is unaware of; it only knows the inode/dev
47586 +	   of the file currently on disk, or the absence
47587 +	   of it.
47588 +	*/
47589 + preempt_disable();
47590 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47591 +
47592 + nmatch = lookup_name_entry(tmpname);
47593 + preempt_enable();
47594 + tmpsubj = NULL;
47595 + if (nmatch) {
47596 + if (nmatch->deleted)
47597 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47598 + else
47599 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47600 + if (tmpsubj != NULL)
47601 + task->acl = tmpsubj;
47602 + }
47603 + if (tmpsubj == NULL)
47604 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47605 + task->role);
47606 + if (task->acl) {
47607 + struct acl_subject_label *curr;
47608 + curr = task->acl;
47609 +
47610 + task->is_writable = 0;
47611 + /* ignore additional mmap checks for processes that are writable
47612 + by the default ACL */
47613 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47614 + if (unlikely(obj->mode & GR_WRITE))
47615 + task->is_writable = 1;
47616 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47617 + if (unlikely(obj->mode & GR_WRITE))
47618 + task->is_writable = 1;
47619 +
47620 + gr_set_proc_res(task);
47621 +
47622 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47623 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47624 +#endif
47625 + } else {
47626 + return 1;
47627 + }
47628 +
47629 + return 0;
47630 +}
47631 +
47632 +int
47633 +gr_set_acls(const int type)
47634 +{
47635 + struct task_struct *task, *task2;
47636 + struct acl_role_label *role = current->role;
47637 + __u16 acl_role_id = current->acl_role_id;
47638 + const struct cred *cred;
47639 + int ret;
47640 +
47641 + rcu_read_lock();
47642 + read_lock(&tasklist_lock);
47643 + read_lock(&grsec_exec_file_lock);
47644 + do_each_thread(task2, task) {
47645 + /* check to see if we're called from the exit handler,
47646 + if so, only replace ACLs that have inherited the admin
47647 + ACL */
47648 +
47649 + if (type && (task->role != role ||
47650 + task->acl_role_id != acl_role_id))
47651 + continue;
47652 +
47653 + task->acl_role_id = 0;
47654 + task->acl_sp_role = 0;
47655 +
47656 + if (task->exec_file) {
47657 + cred = __task_cred(task);
47658 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47659 +
47660 + ret = gr_apply_subject_to_task(task);
47661 + if (ret) {
47662 + read_unlock(&grsec_exec_file_lock);
47663 + read_unlock(&tasklist_lock);
47664 + rcu_read_unlock();
47665 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47666 + return ret;
47667 + }
47668 + } else {
47669 + // it's a kernel process
47670 + task->role = kernel_role;
47671 + task->acl = kernel_role->root_label;
47672 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47673 + task->acl->mode &= ~GR_PROCFIND;
47674 +#endif
47675 + }
47676 + } while_each_thread(task2, task);
47677 + read_unlock(&grsec_exec_file_lock);
47678 + read_unlock(&tasklist_lock);
47679 + rcu_read_unlock();
47680 +
47681 + return 0;
47682 +}
47683 +
47684 +void
47685 +gr_learn_resource(const struct task_struct *task,
47686 + const int res, const unsigned long wanted, const int gt)
47687 +{
47688 + struct acl_subject_label *acl;
47689 + const struct cred *cred;
47690 +
47691 + if (unlikely((gr_status & GR_READY) &&
47692 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47693 + goto skip_reslog;
47694 +
47695 +#ifdef CONFIG_GRKERNSEC_RESLOG
47696 + gr_log_resource(task, res, wanted, gt);
47697 +#endif
47698 + skip_reslog:
47699 +
47700 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47701 + return;
47702 +
47703 + acl = task->acl;
47704 +
47705 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47706 + !(acl->resmask & (1 << (unsigned short) res))))
47707 + return;
47708 +
47709 + if (wanted >= acl->res[res].rlim_cur) {
47710 + unsigned long res_add;
47711 +
47712 + res_add = wanted;
47713 + switch (res) {
47714 + case RLIMIT_CPU:
47715 + res_add += GR_RLIM_CPU_BUMP;
47716 + break;
47717 + case RLIMIT_FSIZE:
47718 + res_add += GR_RLIM_FSIZE_BUMP;
47719 + break;
47720 + case RLIMIT_DATA:
47721 + res_add += GR_RLIM_DATA_BUMP;
47722 + break;
47723 + case RLIMIT_STACK:
47724 + res_add += GR_RLIM_STACK_BUMP;
47725 + break;
47726 + case RLIMIT_CORE:
47727 + res_add += GR_RLIM_CORE_BUMP;
47728 + break;
47729 + case RLIMIT_RSS:
47730 + res_add += GR_RLIM_RSS_BUMP;
47731 + break;
47732 + case RLIMIT_NPROC:
47733 + res_add += GR_RLIM_NPROC_BUMP;
47734 + break;
47735 + case RLIMIT_NOFILE:
47736 + res_add += GR_RLIM_NOFILE_BUMP;
47737 + break;
47738 + case RLIMIT_MEMLOCK:
47739 + res_add += GR_RLIM_MEMLOCK_BUMP;
47740 + break;
47741 + case RLIMIT_AS:
47742 + res_add += GR_RLIM_AS_BUMP;
47743 + break;
47744 + case RLIMIT_LOCKS:
47745 + res_add += GR_RLIM_LOCKS_BUMP;
47746 + break;
47747 + case RLIMIT_SIGPENDING:
47748 + res_add += GR_RLIM_SIGPENDING_BUMP;
47749 + break;
47750 + case RLIMIT_MSGQUEUE:
47751 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47752 + break;
47753 + case RLIMIT_NICE:
47754 + res_add += GR_RLIM_NICE_BUMP;
47755 + break;
47756 + case RLIMIT_RTPRIO:
47757 + res_add += GR_RLIM_RTPRIO_BUMP;
47758 + break;
47759 + case RLIMIT_RTTIME:
47760 + res_add += GR_RLIM_RTTIME_BUMP;
47761 + break;
47762 + }
47763 +
47764 + acl->res[res].rlim_cur = res_add;
47765 +
47766 + if (wanted > acl->res[res].rlim_max)
47767 + acl->res[res].rlim_max = res_add;
47768 +
47769 + /* only log the subject filename, since resource logging is supported for
47770 + single-subject learning only */
47771 + rcu_read_lock();
47772 + cred = __task_cred(task);
47773 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47774 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47775 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47776 + "", (unsigned long) res, &task->signal->saved_ip);
47777 + rcu_read_unlock();
47778 + }
47779 +
47780 + return;
47781 +}
47782 +
47783 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47784 +void
47785 +pax_set_initial_flags(struct linux_binprm *bprm)
47786 +{
47787 + struct task_struct *task = current;
47788 + struct acl_subject_label *proc;
47789 + unsigned long flags;
47790 +
47791 + if (unlikely(!(gr_status & GR_READY)))
47792 + return;
47793 +
47794 + flags = pax_get_flags(task);
47795 +
47796 + proc = task->acl;
47797 +
47798 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47799 + flags &= ~MF_PAX_PAGEEXEC;
47800 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47801 + flags &= ~MF_PAX_SEGMEXEC;
47802 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47803 + flags &= ~MF_PAX_RANDMMAP;
47804 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47805 + flags &= ~MF_PAX_EMUTRAMP;
47806 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47807 + flags &= ~MF_PAX_MPROTECT;
47808 +
47809 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47810 + flags |= MF_PAX_PAGEEXEC;
47811 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47812 + flags |= MF_PAX_SEGMEXEC;
47813 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47814 + flags |= MF_PAX_RANDMMAP;
47815 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47816 + flags |= MF_PAX_EMUTRAMP;
47817 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47818 + flags |= MF_PAX_MPROTECT;
47819 +
47820 + pax_set_flags(task, flags);
47821 +
47822 + return;
47823 +}
47824 +#endif
47825 +
47826 +#ifdef CONFIG_SYSCTL
47827 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47828 + system to save 35kb of memory */
47829 +
47830 +/* we modify the passed in filename, but adjust it back before returning */
47831 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47832 +{
47833 + struct name_entry *nmatch;
47834 + char *p, *lastp = NULL;
47835 + struct acl_object_label *obj = NULL, *tmp;
47836 + struct acl_subject_label *tmpsubj;
47837 + char c = '\0';
47838 +
47839 + read_lock(&gr_inode_lock);
47840 +
47841 + p = name + len - 1;
47842 + do {
47843 + nmatch = lookup_name_entry(name);
47844 + if (lastp != NULL)
47845 + *lastp = c;
47846 +
47847 + if (nmatch == NULL)
47848 + goto next_component;
47849 + tmpsubj = current->acl;
47850 + do {
47851 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47852 + if (obj != NULL) {
47853 + tmp = obj->globbed;
47854 + while (tmp) {
47855 + if (!glob_match(tmp->filename, name)) {
47856 + obj = tmp;
47857 + goto found_obj;
47858 + }
47859 + tmp = tmp->next;
47860 + }
47861 + goto found_obj;
47862 + }
47863 + } while ((tmpsubj = tmpsubj->parent_subject));
47864 +next_component:
47865 + /* end case */
47866 + if (p == name)
47867 + break;
47868 +
47869 + while (*p != '/')
47870 + p--;
47871 + if (p == name)
47872 + lastp = p + 1;
47873 + else {
47874 + lastp = p;
47875 + p--;
47876 + }
47877 + c = *lastp;
47878 + *lastp = '\0';
47879 + } while (1);
47880 +found_obj:
47881 + read_unlock(&gr_inode_lock);
47882 + /* obj returned will always be non-null */
47883 + return obj;
47884 +}
47885 +
47886 +/* returns 0 when allowing, non-zero on error
47887 + op of 0 is used for readdir, so we don't log the names of hidden files
47888 +*/
47889 +__u32
47890 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47891 +{
47892 + ctl_table *tmp;
47893 + const char *proc_sys = "/proc/sys";
47894 + char *path;
47895 + struct acl_object_label *obj;
47896 + unsigned short len = 0, pos = 0, depth = 0, i;
47897 + __u32 err = 0;
47898 + __u32 mode = 0;
47899 +
47900 + if (unlikely(!(gr_status & GR_READY)))
47901 + return 0;
47902 +
47903 + /* for now, ignore operations on non-sysctl entries if it's not a
47904 + readdir*/
47905 + if (table->child != NULL && op != 0)
47906 + return 0;
47907 +
47908 + mode |= GR_FIND;
47909 + /* it's only a read if it's an entry, read on dirs is for readdir */
47910 + if (op & MAY_READ)
47911 + mode |= GR_READ;
47912 + if (op & MAY_WRITE)
47913 + mode |= GR_WRITE;
47914 +
47915 + preempt_disable();
47916 +
47917 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47918 +
47919 + /* it's only a read/write if it's an actual entry, not a dir
47920 + (which are opened for readdir)
47921 + */
47922 +
47923 + /* convert the requested sysctl entry into a pathname */
47924 +
47925 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47926 + len += strlen(tmp->procname);
47927 + len++;
47928 + depth++;
47929 + }
47930 +
47931 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47932 + /* deny */
47933 + goto out;
47934 + }
47935 +
47936 + memset(path, 0, PAGE_SIZE);
47937 +
47938 + memcpy(path, proc_sys, strlen(proc_sys));
47939 +
47940 + pos += strlen(proc_sys);
47941 +
47942 + for (; depth > 0; depth--) {
47943 + path[pos] = '/';
47944 + pos++;
47945 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47946 + if (depth == i) {
47947 + memcpy(path + pos, tmp->procname,
47948 + strlen(tmp->procname));
47949 + pos += strlen(tmp->procname);
47950 + }
47951 + i++;
47952 + }
47953 + }
47954 +
47955 + obj = gr_lookup_by_name(path, pos);
47956 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47957 +
47958 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47959 + ((err & mode) != mode))) {
47960 + __u32 new_mode = mode;
47961 +
47962 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47963 +
47964 + err = 0;
47965 + gr_log_learn_sysctl(path, new_mode);
47966 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47967 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47968 + err = -ENOENT;
47969 + } else if (!(err & GR_FIND)) {
47970 + err = -ENOENT;
47971 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47972 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47973 + path, (mode & GR_READ) ? " reading" : "",
47974 + (mode & GR_WRITE) ? " writing" : "");
47975 + err = -EACCES;
47976 + } else if ((err & mode) != mode) {
47977 + err = -EACCES;
47978 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47979 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47980 + path, (mode & GR_READ) ? " reading" : "",
47981 + (mode & GR_WRITE) ? " writing" : "");
47982 + err = 0;
47983 + } else
47984 + err = 0;
47985 +
47986 + out:
47987 + preempt_enable();
47988 +
47989 + return err;
47990 +}
47991 +#endif
47992 +
47993 +int
47994 +gr_handle_proc_ptrace(struct task_struct *task)
47995 +{
47996 + struct file *filp;
47997 + struct task_struct *tmp = task;
47998 + struct task_struct *curtemp = current;
47999 + __u32 retmode;
48000 +
48001 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48002 + if (unlikely(!(gr_status & GR_READY)))
48003 + return 0;
48004 +#endif
48005 +
48006 + read_lock(&tasklist_lock);
48007 + read_lock(&grsec_exec_file_lock);
48008 + filp = task->exec_file;
48009 +
48010 + while (tmp->pid > 0) {
48011 + if (tmp == curtemp)
48012 + break;
48013 + tmp = tmp->real_parent;
48014 + }
48015 +
48016 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48017 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
48018 + read_unlock(&grsec_exec_file_lock);
48019 + read_unlock(&tasklist_lock);
48020 + return 1;
48021 + }
48022 +
48023 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48024 + if (!(gr_status & GR_READY)) {
48025 + read_unlock(&grsec_exec_file_lock);
48026 + read_unlock(&tasklist_lock);
48027 + return 0;
48028 + }
48029 +#endif
48030 +
48031 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
48032 + read_unlock(&grsec_exec_file_lock);
48033 + read_unlock(&tasklist_lock);
48034 +
48035 + if (retmode & GR_NOPTRACE)
48036 + return 1;
48037 +
48038 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
48039 + && (current->acl != task->acl || (current->acl != current->role->root_label
48040 + && current->pid != task->pid)))
48041 + return 1;
48042 +
48043 + return 0;
48044 +}
48045 +
48046 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
48047 +{
48048 + if (unlikely(!(gr_status & GR_READY)))
48049 + return;
48050 +
48051 + if (!(current->role->roletype & GR_ROLE_GOD))
48052 + return;
48053 +
48054 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
48055 + p->role->rolename, gr_task_roletype_to_char(p),
48056 + p->acl->filename);
48057 +}
48058 +
48059 +int
48060 +gr_handle_ptrace(struct task_struct *task, const long request)
48061 +{
48062 + struct task_struct *tmp = task;
48063 + struct task_struct *curtemp = current;
48064 + __u32 retmode;
48065 +
48066 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48067 + if (unlikely(!(gr_status & GR_READY)))
48068 + return 0;
48069 +#endif
48070 +
48071 + read_lock(&tasklist_lock);
48072 + while (tmp->pid > 0) {
48073 + if (tmp == curtemp)
48074 + break;
48075 + tmp = tmp->real_parent;
48076 + }
48077 +
48078 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48079 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
48080 + read_unlock(&tasklist_lock);
48081 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48082 + return 1;
48083 + }
48084 + read_unlock(&tasklist_lock);
48085 +
48086 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48087 + if (!(gr_status & GR_READY))
48088 + return 0;
48089 +#endif
48090 +
48091 + read_lock(&grsec_exec_file_lock);
48092 + if (unlikely(!task->exec_file)) {
48093 + read_unlock(&grsec_exec_file_lock);
48094 + return 0;
48095 + }
48096 +
48097 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
48098 + read_unlock(&grsec_exec_file_lock);
48099 +
48100 + if (retmode & GR_NOPTRACE) {
48101 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48102 + return 1;
48103 + }
48104 +
48105 + if (retmode & GR_PTRACERD) {
48106 + switch (request) {
48107 + case PTRACE_POKETEXT:
48108 + case PTRACE_POKEDATA:
48109 + case PTRACE_POKEUSR:
48110 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
48111 + case PTRACE_SETREGS:
48112 + case PTRACE_SETFPREGS:
48113 +#endif
48114 +#ifdef CONFIG_X86
48115 + case PTRACE_SETFPXREGS:
48116 +#endif
48117 +#ifdef CONFIG_ALTIVEC
48118 + case PTRACE_SETVRREGS:
48119 +#endif
48120 + return 1;
48121 + default:
48122 + return 0;
48123 + }
48124 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
48125 + !(current->role->roletype & GR_ROLE_GOD) &&
48126 + (current->acl != task->acl)) {
48127 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48128 + return 1;
48129 + }
48130 +
48131 + return 0;
48132 +}
48133 +
48134 +static int is_writable_mmap(const struct file *filp)
48135 +{
48136 + struct task_struct *task = current;
48137 + struct acl_object_label *obj, *obj2;
48138 +
48139 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
48140 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
48141 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48142 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
48143 + task->role->root_label);
48144 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
48145 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
48146 + return 1;
48147 + }
48148 + }
48149 + return 0;
48150 +}
48151 +
48152 +int
48153 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
48154 +{
48155 + __u32 mode;
48156 +
48157 + if (unlikely(!file || !(prot & PROT_EXEC)))
48158 + return 1;
48159 +
48160 + if (is_writable_mmap(file))
48161 + return 0;
48162 +
48163 + mode =
48164 + gr_search_file(file->f_path.dentry,
48165 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48166 + file->f_path.mnt);
48167 +
48168 + if (!gr_tpe_allow(file))
48169 + return 0;
48170 +
48171 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48172 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48173 + return 0;
48174 + } else if (unlikely(!(mode & GR_EXEC))) {
48175 + return 0;
48176 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48177 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48178 + return 1;
48179 + }
48180 +
48181 + return 1;
48182 +}
48183 +
48184 +int
48185 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
48186 +{
48187 + __u32 mode;
48188 +
48189 + if (unlikely(!file || !(prot & PROT_EXEC)))
48190 + return 1;
48191 +
48192 + if (is_writable_mmap(file))
48193 + return 0;
48194 +
48195 + mode =
48196 + gr_search_file(file->f_path.dentry,
48197 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48198 + file->f_path.mnt);
48199 +
48200 + if (!gr_tpe_allow(file))
48201 + return 0;
48202 +
48203 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48204 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48205 + return 0;
48206 + } else if (unlikely(!(mode & GR_EXEC))) {
48207 + return 0;
48208 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48209 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48210 + return 1;
48211 + }
48212 +
48213 + return 1;
48214 +}
48215 +
48216 +void
48217 +gr_acl_handle_psacct(struct task_struct *task, const long code)
48218 +{
48219 + unsigned long runtime;
48220 + unsigned long cputime;
48221 + unsigned int wday, cday;
48222 + __u8 whr, chr;
48223 + __u8 wmin, cmin;
48224 + __u8 wsec, csec;
48225 + struct timespec timeval;
48226 +
48227 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48228 + !(task->acl->mode & GR_PROCACCT)))
48229 + return;
48230 +
48231 + do_posix_clock_monotonic_gettime(&timeval);
48232 + runtime = timeval.tv_sec - task->start_time.tv_sec;
48233 + wday = runtime / (3600 * 24);
48234 + runtime -= wday * (3600 * 24);
48235 + whr = runtime / 3600;
48236 + runtime -= whr * 3600;
48237 + wmin = runtime / 60;
48238 + runtime -= wmin * 60;
48239 + wsec = runtime;
48240 +
48241 + cputime = (task->utime + task->stime) / HZ;
48242 + cday = cputime / (3600 * 24);
48243 + cputime -= cday * (3600 * 24);
48244 + chr = cputime / 3600;
48245 + cputime -= chr * 3600;
48246 + cmin = cputime / 60;
48247 + cputime -= cmin * 60;
48248 + csec = cputime;
48249 +
48250 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48251 +
48252 + return;
48253 +}
48254 +
48255 +void gr_set_kernel_label(struct task_struct *task)
48256 +{
48257 + if (gr_status & GR_READY) {
48258 + task->role = kernel_role;
48259 + task->acl = kernel_role->root_label;
48260 + }
48261 + return;
48262 +}
48263 +
48264 +#ifdef CONFIG_TASKSTATS
48265 +int gr_is_taskstats_denied(int pid)
48266 +{
48267 + struct task_struct *task;
48268 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48269 + const struct cred *cred;
48270 +#endif
48271 + int ret = 0;
48272 +
48273 + /* restrict taskstats viewing to un-chrooted root users
48274 + who have the 'view' subject flag if the RBAC system is enabled
48275 + */
48276 +
48277 + rcu_read_lock();
48278 + read_lock(&tasklist_lock);
48279 + task = find_task_by_vpid(pid);
48280 + if (task) {
48281 +#ifdef CONFIG_GRKERNSEC_CHROOT
48282 + if (proc_is_chrooted(task))
48283 + ret = -EACCES;
48284 +#endif
48285 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48286 + cred = __task_cred(task);
48287 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48288 + if (cred->uid != 0)
48289 + ret = -EACCES;
48290 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48291 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48292 + ret = -EACCES;
48293 +#endif
48294 +#endif
48295 + if (gr_status & GR_READY) {
48296 + if (!(task->acl->mode & GR_VIEW))
48297 + ret = -EACCES;
48298 + }
48299 + } else
48300 + ret = -ENOENT;
48301 +
48302 + read_unlock(&tasklist_lock);
48303 + rcu_read_unlock();
48304 +
48305 + return ret;
48306 +}
48307 +#endif
48308 +
48309 +/* AUXV entries are filled via a descendant of search_binary_handler
48310 + after we've already applied the subject for the target
48311 +*/
48312 +int gr_acl_enable_at_secure(void)
48313 +{
48314 + if (unlikely(!(gr_status & GR_READY)))
48315 + return 0;
48316 +
48317 + if (current->acl->mode & GR_ATSECURE)
48318 + return 1;
48319 +
48320 + return 0;
48321 +}
48322 +
48323 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48324 +{
48325 + struct task_struct *task = current;
48326 + struct dentry *dentry = file->f_path.dentry;
48327 + struct vfsmount *mnt = file->f_path.mnt;
48328 + struct acl_object_label *obj, *tmp;
48329 + struct acl_subject_label *subj;
48330 + unsigned int bufsize;
48331 + int is_not_root;
48332 + char *path;
48333 + dev_t dev = __get_dev(dentry);
48334 +
48335 + if (unlikely(!(gr_status & GR_READY)))
48336 + return 1;
48337 +
48338 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48339 + return 1;
48340 +
48341 + /* ignore Eric Biederman */
48342 + if (IS_PRIVATE(dentry->d_inode))
48343 + return 1;
48344 +
48345 + subj = task->acl;
48346 + do {
48347 + obj = lookup_acl_obj_label(ino, dev, subj);
48348 + if (obj != NULL)
48349 + return (obj->mode & GR_FIND) ? 1 : 0;
48350 + } while ((subj = subj->parent_subject));
48351 +
48352 + /* this is purely an optimization since we're looking for an object
48353 + for the directory we're doing a readdir on
48354 + if it's possible for any globbed object to match the entry we're
48355 + filling into the directory, then the object we find here will be
48356 + an anchor point with attached globbed objects
48357 + */
48358 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48359 + if (obj->globbed == NULL)
48360 + return (obj->mode & GR_FIND) ? 1 : 0;
48361 +
48362 + is_not_root = ((obj->filename[0] == '/') &&
48363 + (obj->filename[1] == '\0')) ? 0 : 1;
48364 + bufsize = PAGE_SIZE - namelen - is_not_root;
48365 +
48366 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
48367 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48368 + return 1;
48369 +
48370 + preempt_disable();
48371 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48372 + bufsize);
48373 +
48374 + bufsize = strlen(path);
48375 +
48376 + /* if base is "/", don't append an additional slash */
48377 + if (is_not_root)
48378 + *(path + bufsize) = '/';
48379 + memcpy(path + bufsize + is_not_root, name, namelen);
48380 + *(path + bufsize + namelen + is_not_root) = '\0';
48381 +
48382 + tmp = obj->globbed;
48383 + while (tmp) {
48384 + if (!glob_match(tmp->filename, path)) {
48385 + preempt_enable();
48386 + return (tmp->mode & GR_FIND) ? 1 : 0;
48387 + }
48388 + tmp = tmp->next;
48389 + }
48390 + preempt_enable();
48391 + return (obj->mode & GR_FIND) ? 1 : 0;
48392 +}
48393 +
48394 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48395 +EXPORT_SYMBOL(gr_acl_is_enabled);
48396 +#endif
48397 +EXPORT_SYMBOL(gr_learn_resource);
48398 +EXPORT_SYMBOL(gr_set_kernel_label);
48399 +#ifdef CONFIG_SECURITY
48400 +EXPORT_SYMBOL(gr_check_user_change);
48401 +EXPORT_SYMBOL(gr_check_group_change);
48402 +#endif
48403 +
48404 diff -urNp linux-2.6.32.42/grsecurity/gracl_cap.c linux-2.6.32.42/grsecurity/gracl_cap.c
48405 --- linux-2.6.32.42/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48406 +++ linux-2.6.32.42/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
48407 @@ -0,0 +1,138 @@
48408 +#include <linux/kernel.h>
48409 +#include <linux/module.h>
48410 +#include <linux/sched.h>
48411 +#include <linux/gracl.h>
48412 +#include <linux/grsecurity.h>
48413 +#include <linux/grinternal.h>
48414 +
48415 +static const char *captab_log[] = {
48416 + "CAP_CHOWN",
48417 + "CAP_DAC_OVERRIDE",
48418 + "CAP_DAC_READ_SEARCH",
48419 + "CAP_FOWNER",
48420 + "CAP_FSETID",
48421 + "CAP_KILL",
48422 + "CAP_SETGID",
48423 + "CAP_SETUID",
48424 + "CAP_SETPCAP",
48425 + "CAP_LINUX_IMMUTABLE",
48426 + "CAP_NET_BIND_SERVICE",
48427 + "CAP_NET_BROADCAST",
48428 + "CAP_NET_ADMIN",
48429 + "CAP_NET_RAW",
48430 + "CAP_IPC_LOCK",
48431 + "CAP_IPC_OWNER",
48432 + "CAP_SYS_MODULE",
48433 + "CAP_SYS_RAWIO",
48434 + "CAP_SYS_CHROOT",
48435 + "CAP_SYS_PTRACE",
48436 + "CAP_SYS_PACCT",
48437 + "CAP_SYS_ADMIN",
48438 + "CAP_SYS_BOOT",
48439 + "CAP_SYS_NICE",
48440 + "CAP_SYS_RESOURCE",
48441 + "CAP_SYS_TIME",
48442 + "CAP_SYS_TTY_CONFIG",
48443 + "CAP_MKNOD",
48444 + "CAP_LEASE",
48445 + "CAP_AUDIT_WRITE",
48446 + "CAP_AUDIT_CONTROL",
48447 + "CAP_SETFCAP",
48448 + "CAP_MAC_OVERRIDE",
48449 + "CAP_MAC_ADMIN"
48450 +};
48451 +
48452 +EXPORT_SYMBOL(gr_is_capable);
48453 +EXPORT_SYMBOL(gr_is_capable_nolog);
48454 +
48455 +int
48456 +gr_is_capable(const int cap)
48457 +{
48458 + struct task_struct *task = current;
48459 + const struct cred *cred = current_cred();
48460 + struct acl_subject_label *curracl;
48461 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48462 + kernel_cap_t cap_audit = __cap_empty_set;
48463 +
48464 + if (!gr_acl_is_enabled())
48465 + return 1;
48466 +
48467 + curracl = task->acl;
48468 +
48469 + cap_drop = curracl->cap_lower;
48470 + cap_mask = curracl->cap_mask;
48471 + cap_audit = curracl->cap_invert_audit;
48472 +
48473 + while ((curracl = curracl->parent_subject)) {
48474 + /* if the cap isn't specified in the current computed mask but is specified in the
48475 + current level subject, and is lowered in the current level subject, then add
48476 + it to the set of dropped capabilities
48477 + otherwise, add the current level subject's mask to the current computed mask
48478 + */
48479 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48480 + cap_raise(cap_mask, cap);
48481 + if (cap_raised(curracl->cap_lower, cap))
48482 + cap_raise(cap_drop, cap);
48483 + if (cap_raised(curracl->cap_invert_audit, cap))
48484 + cap_raise(cap_audit, cap);
48485 + }
48486 + }
48487 +
48488 + if (!cap_raised(cap_drop, cap)) {
48489 + if (cap_raised(cap_audit, cap))
48490 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48491 + return 1;
48492 + }
48493 +
48494 + curracl = task->acl;
48495 +
48496 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48497 + && cap_raised(cred->cap_effective, cap)) {
48498 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48499 + task->role->roletype, cred->uid,
48500 + cred->gid, task->exec_file ?
48501 + gr_to_filename(task->exec_file->f_path.dentry,
48502 + task->exec_file->f_path.mnt) : curracl->filename,
48503 + curracl->filename, 0UL,
48504 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48505 + return 1;
48506 + }
48507 +
48508 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48509 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48510 + return 0;
48511 +}
48512 +
48513 +int
48514 +gr_is_capable_nolog(const int cap)
48515 +{
48516 + struct acl_subject_label *curracl;
48517 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48518 +
48519 + if (!gr_acl_is_enabled())
48520 + return 1;
48521 +
48522 + curracl = current->acl;
48523 +
48524 + cap_drop = curracl->cap_lower;
48525 + cap_mask = curracl->cap_mask;
48526 +
48527 + while ((curracl = curracl->parent_subject)) {
48528 + /* if the cap isn't specified in the current computed mask but is specified in the
48529 + current level subject, and is lowered in the current level subject, then add
48530 + it to the set of dropped capabilities
48531 + otherwise, add the current level subject's mask to the current computed mask
48532 + */
48533 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48534 + cap_raise(cap_mask, cap);
48535 + if (cap_raised(curracl->cap_lower, cap))
48536 + cap_raise(cap_drop, cap);
48537 + }
48538 + }
48539 +
48540 + if (!cap_raised(cap_drop, cap))
48541 + return 1;
48542 +
48543 + return 0;
48544 +}
48545 +
48546 diff -urNp linux-2.6.32.42/grsecurity/gracl_fs.c linux-2.6.32.42/grsecurity/gracl_fs.c
48547 --- linux-2.6.32.42/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48548 +++ linux-2.6.32.42/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
48549 @@ -0,0 +1,431 @@
48550 +#include <linux/kernel.h>
48551 +#include <linux/sched.h>
48552 +#include <linux/types.h>
48553 +#include <linux/fs.h>
48554 +#include <linux/file.h>
48555 +#include <linux/stat.h>
48556 +#include <linux/grsecurity.h>
48557 +#include <linux/grinternal.h>
48558 +#include <linux/gracl.h>
48559 +
48560 +__u32
48561 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48562 + const struct vfsmount * mnt)
48563 +{
48564 + __u32 mode;
48565 +
48566 + if (unlikely(!dentry->d_inode))
48567 + return GR_FIND;
48568 +
48569 + mode =
48570 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48571 +
48572 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48573 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48574 + return mode;
48575 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48576 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48577 + return 0;
48578 + } else if (unlikely(!(mode & GR_FIND)))
48579 + return 0;
48580 +
48581 + return GR_FIND;
48582 +}
48583 +
48584 +__u32
48585 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48586 + const int fmode)
48587 +{
48588 + __u32 reqmode = GR_FIND;
48589 + __u32 mode;
48590 +
48591 + if (unlikely(!dentry->d_inode))
48592 + return reqmode;
48593 +
48594 + if (unlikely(fmode & O_APPEND))
48595 + reqmode |= GR_APPEND;
48596 + else if (unlikely(fmode & FMODE_WRITE))
48597 + reqmode |= GR_WRITE;
48598 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48599 + reqmode |= GR_READ;
48600 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
48601 + reqmode &= ~GR_READ;
48602 + mode =
48603 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48604 + mnt);
48605 +
48606 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48607 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48608 + reqmode & GR_READ ? " reading" : "",
48609 + reqmode & GR_WRITE ? " writing" : reqmode &
48610 + GR_APPEND ? " appending" : "");
48611 + return reqmode;
48612 + } else
48613 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48614 + {
48615 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48616 + reqmode & GR_READ ? " reading" : "",
48617 + reqmode & GR_WRITE ? " writing" : reqmode &
48618 + GR_APPEND ? " appending" : "");
48619 + return 0;
48620 + } else if (unlikely((mode & reqmode) != reqmode))
48621 + return 0;
48622 +
48623 + return reqmode;
48624 +}
48625 +
48626 +__u32
48627 +gr_acl_handle_creat(const struct dentry * dentry,
48628 + const struct dentry * p_dentry,
48629 + const struct vfsmount * p_mnt, const int fmode,
48630 + const int imode)
48631 +{
48632 + __u32 reqmode = GR_WRITE | GR_CREATE;
48633 + __u32 mode;
48634 +
48635 + if (unlikely(fmode & O_APPEND))
48636 + reqmode |= GR_APPEND;
48637 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48638 + reqmode |= GR_READ;
48639 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48640 + reqmode |= GR_SETID;
48641 +
48642 + mode =
48643 + gr_check_create(dentry, p_dentry, p_mnt,
48644 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48645 +
48646 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48647 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48648 + reqmode & GR_READ ? " reading" : "",
48649 + reqmode & GR_WRITE ? " writing" : reqmode &
48650 + GR_APPEND ? " appending" : "");
48651 + return reqmode;
48652 + } else
48653 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48654 + {
48655 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48656 + reqmode & GR_READ ? " reading" : "",
48657 + reqmode & GR_WRITE ? " writing" : reqmode &
48658 + GR_APPEND ? " appending" : "");
48659 + return 0;
48660 + } else if (unlikely((mode & reqmode) != reqmode))
48661 + return 0;
48662 +
48663 + return reqmode;
48664 +}
48665 +
48666 +__u32
48667 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48668 + const int fmode)
48669 +{
48670 + __u32 mode, reqmode = GR_FIND;
48671 +
48672 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48673 + reqmode |= GR_EXEC;
48674 + if (fmode & S_IWOTH)
48675 + reqmode |= GR_WRITE;
48676 + if (fmode & S_IROTH)
48677 + reqmode |= GR_READ;
48678 +
48679 + mode =
48680 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48681 + mnt);
48682 +
48683 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48684 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48685 + reqmode & GR_READ ? " reading" : "",
48686 + reqmode & GR_WRITE ? " writing" : "",
48687 + reqmode & GR_EXEC ? " executing" : "");
48688 + return reqmode;
48689 + } else
48690 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48691 + {
48692 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48693 + reqmode & GR_READ ? " reading" : "",
48694 + reqmode & GR_WRITE ? " writing" : "",
48695 + reqmode & GR_EXEC ? " executing" : "");
48696 + return 0;
48697 + } else if (unlikely((mode & reqmode) != reqmode))
48698 + return 0;
48699 +
48700 + return reqmode;
48701 +}
48702 +
48703 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48704 +{
48705 + __u32 mode;
48706 +
48707 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48708 +
48709 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48710 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48711 + return mode;
48712 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48713 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48714 + return 0;
48715 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48716 + return 0;
48717 +
48718 + return (reqmode);
48719 +}
48720 +
48721 +__u32
48722 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48723 +{
48724 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48725 +}
48726 +
48727 +__u32
48728 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48729 +{
48730 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48731 +}
48732 +
48733 +__u32
48734 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48735 +{
48736 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48737 +}
48738 +
48739 +__u32
48740 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48741 +{
48742 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48743 +}
48744 +
48745 +__u32
48746 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48747 + mode_t mode)
48748 +{
48749 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48750 + return 1;
48751 +
48752 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48753 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48754 + GR_FCHMOD_ACL_MSG);
48755 + } else {
48756 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48757 + }
48758 +}
48759 +
48760 +__u32
48761 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48762 + mode_t mode)
48763 +{
48764 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48765 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48766 + GR_CHMOD_ACL_MSG);
48767 + } else {
48768 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48769 + }
48770 +}
48771 +
48772 +__u32
48773 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48774 +{
48775 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48776 +}
48777 +
48778 +__u32
48779 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48780 +{
48781 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48782 +}
48783 +
48784 +__u32
48785 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48786 +{
48787 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48788 +}
48789 +
48790 +__u32
48791 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48792 +{
48793 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48794 + GR_UNIXCONNECT_ACL_MSG);
48795 +}
48796 +
48797 +/* hardlinks require at minimum create permission,
48798 + any additional privilege required is based on the
48799 + privilege of the file being linked to
48800 +*/
48801 +__u32
48802 +gr_acl_handle_link(const struct dentry * new_dentry,
48803 + const struct dentry * parent_dentry,
48804 + const struct vfsmount * parent_mnt,
48805 + const struct dentry * old_dentry,
48806 + const struct vfsmount * old_mnt, const char *to)
48807 +{
48808 + __u32 mode;
48809 + __u32 needmode = GR_CREATE | GR_LINK;
48810 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48811 +
48812 + mode =
48813 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48814 + old_mnt);
48815 +
48816 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48817 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48818 + return mode;
48819 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48820 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48821 + return 0;
48822 + } else if (unlikely((mode & needmode) != needmode))
48823 + return 0;
48824 +
48825 + return 1;
48826 +}
48827 +
48828 +__u32
48829 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48830 + const struct dentry * parent_dentry,
48831 + const struct vfsmount * parent_mnt, const char *from)
48832 +{
48833 + __u32 needmode = GR_WRITE | GR_CREATE;
48834 + __u32 mode;
48835 +
48836 + mode =
48837 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48838 + GR_CREATE | GR_AUDIT_CREATE |
48839 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48840 +
48841 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48842 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48843 + return mode;
48844 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48845 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48846 + return 0;
48847 + } else if (unlikely((mode & needmode) != needmode))
48848 + return 0;
48849 +
48850 + return (GR_WRITE | GR_CREATE);
48851 +}
48852 +
48853 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48854 +{
48855 + __u32 mode;
48856 +
48857 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48858 +
48859 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48860 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48861 + return mode;
48862 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48863 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48864 + return 0;
48865 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48866 + return 0;
48867 +
48868 + return (reqmode);
48869 +}
48870 +
48871 +__u32
48872 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48873 + const struct dentry * parent_dentry,
48874 + const struct vfsmount * parent_mnt,
48875 + const int mode)
48876 +{
48877 + __u32 reqmode = GR_WRITE | GR_CREATE;
48878 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48879 + reqmode |= GR_SETID;
48880 +
48881 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48882 + reqmode, GR_MKNOD_ACL_MSG);
48883 +}
48884 +
48885 +__u32
48886 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48887 + const struct dentry *parent_dentry,
48888 + const struct vfsmount *parent_mnt)
48889 +{
48890 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48891 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48892 +}
48893 +
48894 +#define RENAME_CHECK_SUCCESS(old, new) \
48895 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48896 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48897 +
48898 +int
48899 +gr_acl_handle_rename(struct dentry *new_dentry,
48900 + struct dentry *parent_dentry,
48901 + const struct vfsmount *parent_mnt,
48902 + struct dentry *old_dentry,
48903 + struct inode *old_parent_inode,
48904 + struct vfsmount *old_mnt, const char *newname)
48905 +{
48906 + __u32 comp1, comp2;
48907 + int error = 0;
48908 +
48909 + if (unlikely(!gr_acl_is_enabled()))
48910 + return 0;
48911 +
48912 + if (!new_dentry->d_inode) {
48913 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48914 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48915 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48916 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48917 + GR_DELETE | GR_AUDIT_DELETE |
48918 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48919 + GR_SUPPRESS, old_mnt);
48920 + } else {
48921 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48922 + GR_CREATE | GR_DELETE |
48923 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48924 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48925 + GR_SUPPRESS, parent_mnt);
48926 + comp2 =
48927 + gr_search_file(old_dentry,
48928 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48929 + GR_DELETE | GR_AUDIT_DELETE |
48930 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48931 + }
48932 +
48933 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48934 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48935 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48936 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48937 + && !(comp2 & GR_SUPPRESS)) {
48938 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48939 + error = -EACCES;
48940 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48941 + error = -EACCES;
48942 +
48943 + return error;
48944 +}
48945 +
48946 +void
48947 +gr_acl_handle_exit(void)
48948 +{
48949 + u16 id;
48950 + char *rolename;
48951 + struct file *exec_file;
48952 +
48953 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48954 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48955 + id = current->acl_role_id;
48956 + rolename = current->role->rolename;
48957 + gr_set_acls(1);
48958 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48959 + }
48960 +
48961 + write_lock(&grsec_exec_file_lock);
48962 + exec_file = current->exec_file;
48963 + current->exec_file = NULL;
48964 + write_unlock(&grsec_exec_file_lock);
48965 +
48966 + if (exec_file)
48967 + fput(exec_file);
48968 +}
48969 +
48970 +int
48971 +gr_acl_handle_procpidmem(const struct task_struct *task)
48972 +{
48973 + if (unlikely(!gr_acl_is_enabled()))
48974 + return 0;
48975 +
48976 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48977 + return -EACCES;
48978 +
48979 + return 0;
48980 +}
48981 diff -urNp linux-2.6.32.42/grsecurity/gracl_ip.c linux-2.6.32.42/grsecurity/gracl_ip.c
48982 --- linux-2.6.32.42/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48983 +++ linux-2.6.32.42/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48984 @@ -0,0 +1,382 @@
48985 +#include <linux/kernel.h>
48986 +#include <asm/uaccess.h>
48987 +#include <asm/errno.h>
48988 +#include <net/sock.h>
48989 +#include <linux/file.h>
48990 +#include <linux/fs.h>
48991 +#include <linux/net.h>
48992 +#include <linux/in.h>
48993 +#include <linux/skbuff.h>
48994 +#include <linux/ip.h>
48995 +#include <linux/udp.h>
48996 +#include <linux/smp_lock.h>
48997 +#include <linux/types.h>
48998 +#include <linux/sched.h>
48999 +#include <linux/netdevice.h>
49000 +#include <linux/inetdevice.h>
49001 +#include <linux/gracl.h>
49002 +#include <linux/grsecurity.h>
49003 +#include <linux/grinternal.h>
49004 +
49005 +#define GR_BIND 0x01
49006 +#define GR_CONNECT 0x02
49007 +#define GR_INVERT 0x04
49008 +#define GR_BINDOVERRIDE 0x08
49009 +#define GR_CONNECTOVERRIDE 0x10
49010 +#define GR_SOCK_FAMILY 0x20
49011 +
49012 +static const char * gr_protocols[IPPROTO_MAX] = {
49013 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
49014 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
49015 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
49016 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
49017 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
49018 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
49019 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
49020 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
49021 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
49022 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
49023 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
49024 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
49025 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
49026 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
49027 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
49028 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
49029 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
49030 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
49031 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
49032 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
49033 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
49034 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
49035 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
49036 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
49037 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
49038 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
49039 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
49040 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
49041 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
49042 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
49043 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
49044 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
49045 + };
49046 +
49047 +static const char * gr_socktypes[SOCK_MAX] = {
49048 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
49049 + "unknown:7", "unknown:8", "unknown:9", "packet"
49050 + };
49051 +
49052 +static const char * gr_sockfamilies[AF_MAX+1] = {
49053 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
49054 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
49055 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
49056 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
49057 + };
49058 +
49059 +const char *
49060 +gr_proto_to_name(unsigned char proto)
49061 +{
49062 + return gr_protocols[proto];
49063 +}
49064 +
49065 +const char *
49066 +gr_socktype_to_name(unsigned char type)
49067 +{
49068 + return gr_socktypes[type];
49069 +}
49070 +
49071 +const char *
49072 +gr_sockfamily_to_name(unsigned char family)
49073 +{
49074 + return gr_sockfamilies[family];
49075 +}
49076 +
49077 +int
49078 +gr_search_socket(const int domain, const int type, const int protocol)
49079 +{
49080 + struct acl_subject_label *curr;
49081 + const struct cred *cred = current_cred();
49082 +
49083 + if (unlikely(!gr_acl_is_enabled()))
49084 + goto exit;
49085 +
49086 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
49087 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
49088 + goto exit; // let the kernel handle it
49089 +
49090 + curr = current->acl;
49091 +
49092 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
49093 + /* the family is allowed, if this is PF_INET allow it only if
49094 + the extra sock type/protocol checks pass */
49095 + if (domain == PF_INET)
49096 + goto inet_check;
49097 + goto exit;
49098 + } else {
49099 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49100 + __u32 fakeip = 0;
49101 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49102 + current->role->roletype, cred->uid,
49103 + cred->gid, current->exec_file ?
49104 + gr_to_filename(current->exec_file->f_path.dentry,
49105 + current->exec_file->f_path.mnt) :
49106 + curr->filename, curr->filename,
49107 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
49108 + &current->signal->saved_ip);
49109 + goto exit;
49110 + }
49111 + goto exit_fail;
49112 + }
49113 +
49114 +inet_check:
49115 + /* the rest of this checking is for IPv4 only */
49116 + if (!curr->ips)
49117 + goto exit;
49118 +
49119 + if ((curr->ip_type & (1 << type)) &&
49120 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
49121 + goto exit;
49122 +
49123 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49124 +		/* we don't place acls on raw sockets, and sometimes
49125 + dgram/ip sockets are opened for ioctl and not
49126 + bind/connect, so we'll fake a bind learn log */
49127 + if (type == SOCK_RAW || type == SOCK_PACKET) {
49128 + __u32 fakeip = 0;
49129 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49130 + current->role->roletype, cred->uid,
49131 + cred->gid, current->exec_file ?
49132 + gr_to_filename(current->exec_file->f_path.dentry,
49133 + current->exec_file->f_path.mnt) :
49134 + curr->filename, curr->filename,
49135 + &fakeip, 0, type,
49136 + protocol, GR_CONNECT, &current->signal->saved_ip);
49137 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
49138 + __u32 fakeip = 0;
49139 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49140 + current->role->roletype, cred->uid,
49141 + cred->gid, current->exec_file ?
49142 + gr_to_filename(current->exec_file->f_path.dentry,
49143 + current->exec_file->f_path.mnt) :
49144 + curr->filename, curr->filename,
49145 + &fakeip, 0, type,
49146 + protocol, GR_BIND, &current->signal->saved_ip);
49147 + }
49148 + /* we'll log when they use connect or bind */
49149 + goto exit;
49150 + }
49151 +
49152 +exit_fail:
49153 + if (domain == PF_INET)
49154 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
49155 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
49156 + else
49157 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
49158 + gr_socktype_to_name(type), protocol);
49159 +
49160 + return 0;
49161 +exit:
49162 + return 1;
49163 +}
49164 +
49165 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
49166 +{
49167 + if ((ip->mode & mode) &&
49168 + (ip_port >= ip->low) &&
49169 + (ip_port <= ip->high) &&
49170 + ((ntohl(ip_addr) & our_netmask) ==
49171 + (ntohl(our_addr) & our_netmask))
49172 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
49173 + && (ip->type & (1 << type))) {
49174 + if (ip->mode & GR_INVERT)
49175 + return 2; // specifically denied
49176 + else
49177 + return 1; // allowed
49178 + }
49179 +
49180 + return 0; // not specifically allowed, may continue parsing
49181 +}
49182 +
49183 +static int
49184 +gr_search_connectbind(const int full_mode, struct sock *sk,
49185 + struct sockaddr_in *addr, const int type)
49186 +{
49187 + char iface[IFNAMSIZ] = {0};
49188 + struct acl_subject_label *curr;
49189 + struct acl_ip_label *ip;
49190 + struct inet_sock *isk;
49191 + struct net_device *dev;
49192 + struct in_device *idev;
49193 + unsigned long i;
49194 + int ret;
49195 + int mode = full_mode & (GR_BIND | GR_CONNECT);
49196 + __u32 ip_addr = 0;
49197 + __u32 our_addr;
49198 + __u32 our_netmask;
49199 + char *p;
49200 + __u16 ip_port = 0;
49201 + const struct cred *cred = current_cred();
49202 +
49203 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49204 + return 0;
49205 +
49206 + curr = current->acl;
49207 + isk = inet_sk(sk);
49208 +
49209 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49210 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49211 + addr->sin_addr.s_addr = curr->inaddr_any_override;
49212 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49213 + struct sockaddr_in saddr;
49214 + int err;
49215 +
49216 + saddr.sin_family = AF_INET;
49217 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
49218 + saddr.sin_port = isk->sport;
49219 +
49220 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49221 + if (err)
49222 + return err;
49223 +
49224 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49225 + if (err)
49226 + return err;
49227 + }
49228 +
49229 + if (!curr->ips)
49230 + return 0;
49231 +
49232 + ip_addr = addr->sin_addr.s_addr;
49233 + ip_port = ntohs(addr->sin_port);
49234 +
49235 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49236 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49237 + current->role->roletype, cred->uid,
49238 + cred->gid, current->exec_file ?
49239 + gr_to_filename(current->exec_file->f_path.dentry,
49240 + current->exec_file->f_path.mnt) :
49241 + curr->filename, curr->filename,
49242 + &ip_addr, ip_port, type,
49243 + sk->sk_protocol, mode, &current->signal->saved_ip);
49244 + return 0;
49245 + }
49246 +
49247 + for (i = 0; i < curr->ip_num; i++) {
49248 + ip = *(curr->ips + i);
49249 + if (ip->iface != NULL) {
49250 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
49251 + p = strchr(iface, ':');
49252 + if (p != NULL)
49253 + *p = '\0';
49254 + dev = dev_get_by_name(sock_net(sk), iface);
49255 + if (dev == NULL)
49256 + continue;
49257 + idev = in_dev_get(dev);
49258 + if (idev == NULL) {
49259 + dev_put(dev);
49260 + continue;
49261 + }
49262 + rcu_read_lock();
49263 + for_ifa(idev) {
49264 + if (!strcmp(ip->iface, ifa->ifa_label)) {
49265 + our_addr = ifa->ifa_address;
49266 + our_netmask = 0xffffffff;
49267 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49268 + if (ret == 1) {
49269 + rcu_read_unlock();
49270 + in_dev_put(idev);
49271 + dev_put(dev);
49272 + return 0;
49273 + } else if (ret == 2) {
49274 + rcu_read_unlock();
49275 + in_dev_put(idev);
49276 + dev_put(dev);
49277 + goto denied;
49278 + }
49279 + }
49280 + } endfor_ifa(idev);
49281 + rcu_read_unlock();
49282 + in_dev_put(idev);
49283 + dev_put(dev);
49284 + } else {
49285 + our_addr = ip->addr;
49286 + our_netmask = ip->netmask;
49287 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49288 + if (ret == 1)
49289 + return 0;
49290 + else if (ret == 2)
49291 + goto denied;
49292 + }
49293 + }
49294 +
49295 +denied:
49296 + if (mode == GR_BIND)
49297 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49298 + else if (mode == GR_CONNECT)
49299 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49300 +
49301 + return -EACCES;
49302 +}
49303 +
49304 +int
49305 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49306 +{
49307 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49308 +}
49309 +
49310 +int
49311 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49312 +{
49313 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49314 +}
49315 +
49316 +int gr_search_listen(struct socket *sock)
49317 +{
49318 + struct sock *sk = sock->sk;
49319 + struct sockaddr_in addr;
49320 +
49321 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49322 + addr.sin_port = inet_sk(sk)->sport;
49323 +
49324 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49325 +}
49326 +
49327 +int gr_search_accept(struct socket *sock)
49328 +{
49329 + struct sock *sk = sock->sk;
49330 + struct sockaddr_in addr;
49331 +
49332 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49333 + addr.sin_port = inet_sk(sk)->sport;
49334 +
49335 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49336 +}
49337 +
49338 +int
49339 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49340 +{
49341 + if (addr)
49342 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49343 + else {
49344 + struct sockaddr_in sin;
49345 + const struct inet_sock *inet = inet_sk(sk);
49346 +
49347 + sin.sin_addr.s_addr = inet->daddr;
49348 + sin.sin_port = inet->dport;
49349 +
49350 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49351 + }
49352 +}
49353 +
49354 +int
49355 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49356 +{
49357 + struct sockaddr_in sin;
49358 +
49359 + if (unlikely(skb->len < sizeof (struct udphdr)))
49360 + return 0; // skip this packet
49361 +
49362 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49363 + sin.sin_port = udp_hdr(skb)->source;
49364 +
49365 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49366 +}
49367 diff -urNp linux-2.6.32.42/grsecurity/gracl_learn.c linux-2.6.32.42/grsecurity/gracl_learn.c
49368 --- linux-2.6.32.42/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49369 +++ linux-2.6.32.42/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
49370 @@ -0,0 +1,211 @@
49371 +#include <linux/kernel.h>
49372 +#include <linux/mm.h>
49373 +#include <linux/sched.h>
49374 +#include <linux/poll.h>
49375 +#include <linux/smp_lock.h>
49376 +#include <linux/string.h>
49377 +#include <linux/file.h>
49378 +#include <linux/types.h>
49379 +#include <linux/vmalloc.h>
49380 +#include <linux/grinternal.h>
49381 +
49382 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49383 + size_t count, loff_t *ppos);
49384 +extern int gr_acl_is_enabled(void);
49385 +
49386 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49387 +static int gr_learn_attached;
49388 +
49389 +/* use a 512k buffer */
49390 +#define LEARN_BUFFER_SIZE (512 * 1024)
49391 +
49392 +static DEFINE_SPINLOCK(gr_learn_lock);
49393 +static DEFINE_MUTEX(gr_learn_user_mutex);
49394 +
49395 +/* we need to maintain two buffers, so that the kernel context of grlearn
49396 + uses a semaphore around the userspace copying, and the other kernel contexts
49397 + use a spinlock when copying into the buffer, since they cannot sleep
49398 +*/
49399 +static char *learn_buffer;
49400 +static char *learn_buffer_user;
49401 +static int learn_buffer_len;
49402 +static int learn_buffer_user_len;
49403 +
49404 +static ssize_t
49405 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49406 +{
49407 + DECLARE_WAITQUEUE(wait, current);
49408 + ssize_t retval = 0;
49409 +
49410 + add_wait_queue(&learn_wait, &wait);
49411 + set_current_state(TASK_INTERRUPTIBLE);
49412 + do {
49413 + mutex_lock(&gr_learn_user_mutex);
49414 + spin_lock(&gr_learn_lock);
49415 + if (learn_buffer_len)
49416 + break;
49417 + spin_unlock(&gr_learn_lock);
49418 + mutex_unlock(&gr_learn_user_mutex);
49419 + if (file->f_flags & O_NONBLOCK) {
49420 + retval = -EAGAIN;
49421 + goto out;
49422 + }
49423 + if (signal_pending(current)) {
49424 + retval = -ERESTARTSYS;
49425 + goto out;
49426 + }
49427 +
49428 + schedule();
49429 + } while (1);
49430 +
49431 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49432 + learn_buffer_user_len = learn_buffer_len;
49433 + retval = learn_buffer_len;
49434 + learn_buffer_len = 0;
49435 +
49436 + spin_unlock(&gr_learn_lock);
49437 +
49438 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49439 + retval = -EFAULT;
49440 +
49441 + mutex_unlock(&gr_learn_user_mutex);
49442 +out:
49443 + set_current_state(TASK_RUNNING);
49444 + remove_wait_queue(&learn_wait, &wait);
49445 + return retval;
49446 +}
49447 +
49448 +static unsigned int
49449 +poll_learn(struct file * file, poll_table * wait)
49450 +{
49451 + poll_wait(file, &learn_wait, wait);
49452 +
49453 + if (learn_buffer_len)
49454 + return (POLLIN | POLLRDNORM);
49455 +
49456 + return 0;
49457 +}
49458 +
49459 +void
49460 +gr_clear_learn_entries(void)
49461 +{
49462 + char *tmp;
49463 +
49464 + mutex_lock(&gr_learn_user_mutex);
49465 + if (learn_buffer != NULL) {
49466 + spin_lock(&gr_learn_lock);
49467 + tmp = learn_buffer;
49468 + learn_buffer = NULL;
49469 + spin_unlock(&gr_learn_lock);
49470 +		vfree(tmp);
49471 + }
49472 + if (learn_buffer_user != NULL) {
49473 + vfree(learn_buffer_user);
49474 + learn_buffer_user = NULL;
49475 + }
49476 + learn_buffer_len = 0;
49477 + mutex_unlock(&gr_learn_user_mutex);
49478 +
49479 + return;
49480 +}
49481 +
49482 +void
49483 +gr_add_learn_entry(const char *fmt, ...)
49484 +{
49485 + va_list args;
49486 + unsigned int len;
49487 +
49488 + if (!gr_learn_attached)
49489 + return;
49490 +
49491 + spin_lock(&gr_learn_lock);
49492 +
49493 + /* leave a gap at the end so we know when it's "full" but don't have to
49494 + compute the exact length of the string we're trying to append
49495 + */
49496 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49497 + spin_unlock(&gr_learn_lock);
49498 + wake_up_interruptible(&learn_wait);
49499 + return;
49500 + }
49501 + if (learn_buffer == NULL) {
49502 + spin_unlock(&gr_learn_lock);
49503 + return;
49504 + }
49505 +
49506 + va_start(args, fmt);
49507 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49508 + va_end(args);
49509 +
49510 + learn_buffer_len += len + 1;
49511 +
49512 + spin_unlock(&gr_learn_lock);
49513 + wake_up_interruptible(&learn_wait);
49514 +
49515 + return;
49516 +}
49517 +
49518 +static int
49519 +open_learn(struct inode *inode, struct file *file)
49520 +{
49521 + if (file->f_mode & FMODE_READ && gr_learn_attached)
49522 + return -EBUSY;
49523 + if (file->f_mode & FMODE_READ) {
49524 + int retval = 0;
49525 + mutex_lock(&gr_learn_user_mutex);
49526 + if (learn_buffer == NULL)
49527 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49528 + if (learn_buffer_user == NULL)
49529 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49530 + if (learn_buffer == NULL) {
49531 + retval = -ENOMEM;
49532 + goto out_error;
49533 + }
49534 + if (learn_buffer_user == NULL) {
49535 + retval = -ENOMEM;
49536 + goto out_error;
49537 + }
49538 + learn_buffer_len = 0;
49539 + learn_buffer_user_len = 0;
49540 + gr_learn_attached = 1;
49541 +out_error:
49542 + mutex_unlock(&gr_learn_user_mutex);
49543 + return retval;
49544 + }
49545 + return 0;
49546 +}
49547 +
49548 +static int
49549 +close_learn(struct inode *inode, struct file *file)
49550 +{
49551 + char *tmp;
49552 +
49553 + if (file->f_mode & FMODE_READ) {
49554 + mutex_lock(&gr_learn_user_mutex);
49555 + if (learn_buffer != NULL) {
49556 + spin_lock(&gr_learn_lock);
49557 + tmp = learn_buffer;
49558 + learn_buffer = NULL;
49559 + spin_unlock(&gr_learn_lock);
49560 + vfree(tmp);
49561 + }
49562 + if (learn_buffer_user != NULL) {
49563 + vfree(learn_buffer_user);
49564 + learn_buffer_user = NULL;
49565 + }
49566 + learn_buffer_len = 0;
49567 + learn_buffer_user_len = 0;
49568 + gr_learn_attached = 0;
49569 + mutex_unlock(&gr_learn_user_mutex);
49570 + }
49571 +
49572 + return 0;
49573 +}
49574 +
49575 +const struct file_operations grsec_fops = {
49576 + .read = read_learn,
49577 + .write = write_grsec_handler,
49578 + .open = open_learn,
49579 + .release = close_learn,
49580 + .poll = poll_learn,
49581 +};
49582 diff -urNp linux-2.6.32.42/grsecurity/gracl_res.c linux-2.6.32.42/grsecurity/gracl_res.c
49583 --- linux-2.6.32.42/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49584 +++ linux-2.6.32.42/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
49585 @@ -0,0 +1,67 @@
49586 +#include <linux/kernel.h>
49587 +#include <linux/sched.h>
49588 +#include <linux/gracl.h>
49589 +#include <linux/grinternal.h>
49590 +
49591 +static const char *restab_log[] = {
49592 + [RLIMIT_CPU] = "RLIMIT_CPU",
49593 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49594 + [RLIMIT_DATA] = "RLIMIT_DATA",
49595 + [RLIMIT_STACK] = "RLIMIT_STACK",
49596 + [RLIMIT_CORE] = "RLIMIT_CORE",
49597 + [RLIMIT_RSS] = "RLIMIT_RSS",
49598 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
49599 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49600 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49601 + [RLIMIT_AS] = "RLIMIT_AS",
49602 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49603 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49604 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49605 + [RLIMIT_NICE] = "RLIMIT_NICE",
49606 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49607 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49608 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49609 +};
49610 +
49611 +void
49612 +gr_log_resource(const struct task_struct *task,
49613 + const int res, const unsigned long wanted, const int gt)
49614 +{
49615 + const struct cred *cred;
49616 + unsigned long rlim;
49617 +
49618 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49619 + return;
49620 +
49621 + // not yet supported resource
49622 + if (unlikely(!restab_log[res]))
49623 + return;
49624 +
49625 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49626 + rlim = task->signal->rlim[res].rlim_max;
49627 + else
49628 + rlim = task->signal->rlim[res].rlim_cur;
49629 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49630 + return;
49631 +
49632 + rcu_read_lock();
49633 + cred = __task_cred(task);
49634 +
49635 + if (res == RLIMIT_NPROC &&
49636 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49637 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49638 + goto out_rcu_unlock;
49639 + else if (res == RLIMIT_MEMLOCK &&
49640 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49641 + goto out_rcu_unlock;
49642 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49643 + goto out_rcu_unlock;
49644 + rcu_read_unlock();
49645 +
49646 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49647 +
49648 + return;
49649 +out_rcu_unlock:
49650 + rcu_read_unlock();
49651 + return;
49652 +}
49653 diff -urNp linux-2.6.32.42/grsecurity/gracl_segv.c linux-2.6.32.42/grsecurity/gracl_segv.c
49654 --- linux-2.6.32.42/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49655 +++ linux-2.6.32.42/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49656 @@ -0,0 +1,284 @@
49657 +#include <linux/kernel.h>
49658 +#include <linux/mm.h>
49659 +#include <asm/uaccess.h>
49660 +#include <asm/errno.h>
49661 +#include <asm/mman.h>
49662 +#include <net/sock.h>
49663 +#include <linux/file.h>
49664 +#include <linux/fs.h>
49665 +#include <linux/net.h>
49666 +#include <linux/in.h>
49667 +#include <linux/smp_lock.h>
49668 +#include <linux/slab.h>
49669 +#include <linux/types.h>
49670 +#include <linux/sched.h>
49671 +#include <linux/timer.h>
49672 +#include <linux/gracl.h>
49673 +#include <linux/grsecurity.h>
49674 +#include <linux/grinternal.h>
49675 +
49676 +static struct crash_uid *uid_set;
49677 +static unsigned short uid_used;
49678 +static DEFINE_SPINLOCK(gr_uid_lock);
49679 +extern rwlock_t gr_inode_lock;
49680 +extern struct acl_subject_label *
49681 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49682 + struct acl_role_label *role);
49683 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49684 +
49685 +int
49686 +gr_init_uidset(void)
49687 +{
49688 + uid_set =
49689 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49690 + uid_used = 0;
49691 +
49692 + return uid_set ? 1 : 0;
49693 +}
49694 +
49695 +void
49696 +gr_free_uidset(void)
49697 +{
49698 + if (uid_set)
49699 + kfree(uid_set);
49700 +
49701 + return;
49702 +}
49703 +
49704 +int
49705 +gr_find_uid(const uid_t uid)
49706 +{
49707 + struct crash_uid *tmp = uid_set;
49708 + uid_t buid;
49709 + int low = 0, high = uid_used - 1, mid;
49710 +
49711 + while (high >= low) {
49712 + mid = (low + high) >> 1;
49713 + buid = tmp[mid].uid;
49714 + if (buid == uid)
49715 + return mid;
49716 + if (buid > uid)
49717 + high = mid - 1;
49718 + if (buid < uid)
49719 + low = mid + 1;
49720 + }
49721 +
49722 + return -1;
49723 +}
49724 +
49725 +static __inline__ void
49726 +gr_insertsort(void)
49727 +{
49728 + unsigned short i, j;
49729 + struct crash_uid index;
49730 +
49731 + for (i = 1; i < uid_used; i++) {
49732 + index = uid_set[i];
49733 + j = i;
49734 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49735 + uid_set[j] = uid_set[j - 1];
49736 + j--;
49737 + }
49738 + uid_set[j] = index;
49739 + }
49740 +
49741 + return;
49742 +}
49743 +
49744 +static __inline__ void
49745 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49746 +{
49747 + int loc;
49748 +
49749 + if (uid_used == GR_UIDTABLE_MAX)
49750 + return;
49751 +
49752 + loc = gr_find_uid(uid);
49753 +
49754 + if (loc >= 0) {
49755 + uid_set[loc].expires = expires;
49756 + return;
49757 + }
49758 +
49759 + uid_set[uid_used].uid = uid;
49760 + uid_set[uid_used].expires = expires;
49761 + uid_used++;
49762 +
49763 + gr_insertsort();
49764 +
49765 + return;
49766 +}
49767 +
49768 +void
49769 +gr_remove_uid(const unsigned short loc)
49770 +{
49771 + unsigned short i;
49772 +
49773 + for (i = loc + 1; i < uid_used; i++)
49774 + uid_set[i - 1] = uid_set[i];
49775 +
49776 + uid_used--;
49777 +
49778 + return;
49779 +}
49780 +
49781 +int
49782 +gr_check_crash_uid(const uid_t uid)
49783 +{
49784 + int loc;
49785 + int ret = 0;
49786 +
49787 + if (unlikely(!gr_acl_is_enabled()))
49788 + return 0;
49789 +
49790 + spin_lock(&gr_uid_lock);
49791 + loc = gr_find_uid(uid);
49792 +
49793 + if (loc < 0)
49794 + goto out_unlock;
49795 +
49796 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49797 + gr_remove_uid(loc);
49798 + else
49799 + ret = 1;
49800 +
49801 +out_unlock:
49802 + spin_unlock(&gr_uid_lock);
49803 + return ret;
49804 +}
49805 +
49806 +static __inline__ int
49807 +proc_is_setxid(const struct cred *cred)
49808 +{
49809 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49810 + cred->uid != cred->fsuid)
49811 + return 1;
49812 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49813 + cred->gid != cred->fsgid)
49814 + return 1;
49815 +
49816 + return 0;
49817 +}
49818 +
49819 +void
49820 +gr_handle_crash(struct task_struct *task, const int sig)
49821 +{
49822 + struct acl_subject_label *curr;
49823 + struct acl_subject_label *curr2;
49824 + struct task_struct *tsk, *tsk2;
49825 + const struct cred *cred;
49826 + const struct cred *cred2;
49827 +
49828 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49829 + return;
49830 +
49831 + if (unlikely(!gr_acl_is_enabled()))
49832 + return;
49833 +
49834 + curr = task->acl;
49835 +
49836 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49837 + return;
49838 +
49839 + if (time_before_eq(curr->expires, get_seconds())) {
49840 + curr->expires = 0;
49841 + curr->crashes = 0;
49842 + }
49843 +
49844 + curr->crashes++;
49845 +
49846 + if (!curr->expires)
49847 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49848 +
49849 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49850 + time_after(curr->expires, get_seconds())) {
49851 + rcu_read_lock();
49852 + cred = __task_cred(task);
49853 + if (cred->uid && proc_is_setxid(cred)) {
49854 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49855 + spin_lock(&gr_uid_lock);
49856 + gr_insert_uid(cred->uid, curr->expires);
49857 + spin_unlock(&gr_uid_lock);
49858 + curr->expires = 0;
49859 + curr->crashes = 0;
49860 + read_lock(&tasklist_lock);
49861 + do_each_thread(tsk2, tsk) {
49862 + cred2 = __task_cred(tsk);
49863 + if (tsk != task && cred2->uid == cred->uid)
49864 + gr_fake_force_sig(SIGKILL, tsk);
49865 + } while_each_thread(tsk2, tsk);
49866 + read_unlock(&tasklist_lock);
49867 + } else {
49868 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49869 + read_lock(&tasklist_lock);
49870 + do_each_thread(tsk2, tsk) {
49871 + if (likely(tsk != task)) {
49872 + curr2 = tsk->acl;
49873 +
49874 + if (curr2->device == curr->device &&
49875 + curr2->inode == curr->inode)
49876 + gr_fake_force_sig(SIGKILL, tsk);
49877 + }
49878 + } while_each_thread(tsk2, tsk);
49879 + read_unlock(&tasklist_lock);
49880 + }
49881 + rcu_read_unlock();
49882 + }
49883 +
49884 + return;
49885 +}
49886 +
49887 +int
49888 +gr_check_crash_exec(const struct file *filp)
49889 +{
49890 + struct acl_subject_label *curr;
49891 +
49892 + if (unlikely(!gr_acl_is_enabled()))
49893 + return 0;
49894 +
49895 + read_lock(&gr_inode_lock);
49896 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49897 + filp->f_path.dentry->d_inode->i_sb->s_dev,
49898 + current->role);
49899 + read_unlock(&gr_inode_lock);
49900 +
49901 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49902 + (!curr->crashes && !curr->expires))
49903 + return 0;
49904 +
49905 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49906 + time_after(curr->expires, get_seconds()))
49907 + return 1;
49908 + else if (time_before_eq(curr->expires, get_seconds())) {
49909 + curr->crashes = 0;
49910 + curr->expires = 0;
49911 + }
49912 +
49913 + return 0;
49914 +}
49915 +
49916 +void
49917 +gr_handle_alertkill(struct task_struct *task)
49918 +{
49919 + struct acl_subject_label *curracl;
49920 + __u32 curr_ip;
49921 + struct task_struct *p, *p2;
49922 +
49923 + if (unlikely(!gr_acl_is_enabled()))
49924 + return;
49925 +
49926 + curracl = task->acl;
49927 + curr_ip = task->signal->curr_ip;
49928 +
49929 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49930 + read_lock(&tasklist_lock);
49931 + do_each_thread(p2, p) {
49932 + if (p->signal->curr_ip == curr_ip)
49933 + gr_fake_force_sig(SIGKILL, p);
49934 + } while_each_thread(p2, p);
49935 + read_unlock(&tasklist_lock);
49936 + } else if (curracl->mode & GR_KILLPROC)
49937 + gr_fake_force_sig(SIGKILL, task);
49938 +
49939 + return;
49940 +}
49941 diff -urNp linux-2.6.32.42/grsecurity/gracl_shm.c linux-2.6.32.42/grsecurity/gracl_shm.c
49942 --- linux-2.6.32.42/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49943 +++ linux-2.6.32.42/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49944 @@ -0,0 +1,40 @@
49945 +#include <linux/kernel.h>
49946 +#include <linux/mm.h>
49947 +#include <linux/sched.h>
49948 +#include <linux/file.h>
49949 +#include <linux/ipc.h>
49950 +#include <linux/gracl.h>
49951 +#include <linux/grsecurity.h>
49952 +#include <linux/grinternal.h>
49953 +
49954 +int
49955 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49956 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49957 +{
49958 + struct task_struct *task;
49959 +
49960 + if (!gr_acl_is_enabled())
49961 + return 1;
49962 +
49963 + rcu_read_lock();
49964 + read_lock(&tasklist_lock);
49965 +
49966 + task = find_task_by_vpid(shm_cprid);
49967 +
49968 + if (unlikely(!task))
49969 + task = find_task_by_vpid(shm_lapid);
49970 +
49971 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49972 + (task->pid == shm_lapid)) &&
49973 + (task->acl->mode & GR_PROTSHM) &&
49974 + (task->acl != current->acl))) {
49975 + read_unlock(&tasklist_lock);
49976 + rcu_read_unlock();
49977 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49978 + return 0;
49979 + }
49980 + read_unlock(&tasklist_lock);
49981 + rcu_read_unlock();
49982 +
49983 + return 1;
49984 +}
49985 diff -urNp linux-2.6.32.42/grsecurity/grsec_chdir.c linux-2.6.32.42/grsecurity/grsec_chdir.c
49986 --- linux-2.6.32.42/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49987 +++ linux-2.6.32.42/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49988 @@ -0,0 +1,19 @@
49989 +#include <linux/kernel.h>
49990 +#include <linux/sched.h>
49991 +#include <linux/fs.h>
49992 +#include <linux/file.h>
49993 +#include <linux/grsecurity.h>
49994 +#include <linux/grinternal.h>
49995 +
49996 +void
49997 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49998 +{
49999 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50000 + if ((grsec_enable_chdir && grsec_enable_group &&
50001 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
50002 + !grsec_enable_group)) {
50003 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
50004 + }
50005 +#endif
50006 + return;
50007 +}
50008 diff -urNp linux-2.6.32.42/grsecurity/grsec_chroot.c linux-2.6.32.42/grsecurity/grsec_chroot.c
50009 --- linux-2.6.32.42/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
50010 +++ linux-2.6.32.42/grsecurity/grsec_chroot.c 2011-06-20 19:44:00.000000000 -0400
50011 @@ -0,0 +1,395 @@
50012 +#include <linux/kernel.h>
50013 +#include <linux/module.h>
50014 +#include <linux/sched.h>
50015 +#include <linux/file.h>
50016 +#include <linux/fs.h>
50017 +#include <linux/mount.h>
50018 +#include <linux/types.h>
50019 +#include <linux/pid_namespace.h>
50020 +#include <linux/grsecurity.h>
50021 +#include <linux/grinternal.h>
50022 +
50023 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
50024 +{
50025 +#ifdef CONFIG_GRKERNSEC
50026 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
50027 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
50028 + task->gr_is_chrooted = 1;
50029 + else
50030 + task->gr_is_chrooted = 0;
50031 +
50032 + task->gr_chroot_dentry = path->dentry;
50033 +#endif
50034 + return;
50035 +}
50036 +
50037 +void gr_clear_chroot_entries(struct task_struct *task)
50038 +{
50039 +#ifdef CONFIG_GRKERNSEC
50040 + task->gr_is_chrooted = 0;
50041 + task->gr_chroot_dentry = NULL;
50042 +#endif
50043 + return;
50044 +}
50045 +
50046 +int
50047 +gr_handle_chroot_unix(const pid_t pid)
50048 +{
50049 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50050 + struct pid *spid = NULL;
50051 +
50052 + if (unlikely(!grsec_enable_chroot_unix))
50053 + return 1;
50054 +
50055 + if (likely(!proc_is_chrooted(current)))
50056 + return 1;
50057 +
50058 + rcu_read_lock();
50059 + read_lock(&tasklist_lock);
50060 +
50061 + spid = find_vpid(pid);
50062 + if (spid) {
50063 + struct task_struct *p;
50064 + p = pid_task(spid, PIDTYPE_PID);
50065 + if (unlikely(p && !have_same_root(current, p))) {
50066 + read_unlock(&tasklist_lock);
50067 + rcu_read_unlock();
50068 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
50069 + return 0;
50070 + }
50071 + }
50072 + read_unlock(&tasklist_lock);
50073 + rcu_read_unlock();
50074 +#endif
50075 + return 1;
50076 +}
50077 +
50078 +int
50079 +gr_handle_chroot_nice(void)
50080 +{
50081 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50082 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
50083 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
50084 + return -EPERM;
50085 + }
50086 +#endif
50087 + return 0;
50088 +}
50089 +
50090 +int
50091 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
50092 +{
50093 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50094 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
50095 + && proc_is_chrooted(current)) {
50096 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
50097 + return -EACCES;
50098 + }
50099 +#endif
50100 + return 0;
50101 +}
50102 +
50103 +int
50104 +gr_handle_chroot_rawio(const struct inode *inode)
50105 +{
50106 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50107 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50108 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
50109 + return 1;
50110 +#endif
50111 + return 0;
50112 +}
50113 +
50114 +int
50115 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
50116 +{
50117 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50118 + struct task_struct *p;
50119 + int ret = 0;
50120 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
50121 + return ret;
50122 +
50123 + read_lock(&tasklist_lock);
50124 + do_each_pid_task(pid, type, p) {
50125 + if (!have_same_root(current, p)) {
50126 + ret = 1;
50127 + goto out;
50128 + }
50129 + } while_each_pid_task(pid, type, p);
50130 +out:
50131 + read_unlock(&tasklist_lock);
50132 + return ret;
50133 +#endif
50134 + return 0;
50135 +}
50136 +
50137 +int
50138 +gr_pid_is_chrooted(struct task_struct *p)
50139 +{
50140 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50141 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
50142 + return 0;
50143 +
50144 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
50145 + !have_same_root(current, p)) {
50146 + return 1;
50147 + }
50148 +#endif
50149 + return 0;
50150 +}
50151 +
50152 +EXPORT_SYMBOL(gr_pid_is_chrooted);
50153 +
50154 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
50155 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
50156 +{
50157 + struct dentry *dentry = (struct dentry *)u_dentry;
50158 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
50159 + struct dentry *realroot;
50160 + struct vfsmount *realrootmnt;
50161 + struct dentry *currentroot;
50162 + struct vfsmount *currentmnt;
50163 + struct task_struct *reaper = &init_task;
50164 + int ret = 1;
50165 +
50166 + read_lock(&reaper->fs->lock);
50167 + realrootmnt = mntget(reaper->fs->root.mnt);
50168 + realroot = dget(reaper->fs->root.dentry);
50169 + read_unlock(&reaper->fs->lock);
50170 +
50171 + read_lock(&current->fs->lock);
50172 + currentmnt = mntget(current->fs->root.mnt);
50173 + currentroot = dget(current->fs->root.dentry);
50174 + read_unlock(&current->fs->lock);
50175 +
50176 + spin_lock(&dcache_lock);
50177 + for (;;) {
50178 + if (unlikely((dentry == realroot && mnt == realrootmnt)
50179 + || (dentry == currentroot && mnt == currentmnt)))
50180 + break;
50181 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
50182 + if (mnt->mnt_parent == mnt)
50183 + break;
50184 + dentry = mnt->mnt_mountpoint;
50185 + mnt = mnt->mnt_parent;
50186 + continue;
50187 + }
50188 + dentry = dentry->d_parent;
50189 + }
50190 + spin_unlock(&dcache_lock);
50191 +
50192 + dput(currentroot);
50193 + mntput(currentmnt);
50194 +
50195 + /* access is outside of chroot */
50196 + if (dentry == realroot && mnt == realrootmnt)
50197 + ret = 0;
50198 +
50199 + dput(realroot);
50200 + mntput(realrootmnt);
50201 + return ret;
50202 +}
50203 +#endif
50204 +
50205 +int
50206 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
50207 +{
50208 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50209 + if (!grsec_enable_chroot_fchdir)
50210 + return 1;
50211 +
50212 + if (!proc_is_chrooted(current))
50213 + return 1;
50214 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50215 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50216 + return 0;
50217 + }
50218 +#endif
50219 + return 1;
50220 +}
50221 +
50222 +int
50223 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50224 + const time_t shm_createtime)
50225 +{
50226 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50227 + struct pid *pid = NULL;
50228 + time_t starttime;
50229 +
50230 + if (unlikely(!grsec_enable_chroot_shmat))
50231 + return 1;
50232 +
50233 + if (likely(!proc_is_chrooted(current)))
50234 + return 1;
50235 +
50236 + rcu_read_lock();
50237 + read_lock(&tasklist_lock);
50238 +
50239 + pid = find_vpid(shm_cprid);
50240 + if (pid) {
50241 + struct task_struct *p;
50242 + p = pid_task(pid, PIDTYPE_PID);
50243 + if (p == NULL)
50244 + goto unlock;
50245 + starttime = p->start_time.tv_sec;
50246 + if (unlikely(!have_same_root(current, p) &&
50247 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
50248 + read_unlock(&tasklist_lock);
50249 + rcu_read_unlock();
50250 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50251 + return 0;
50252 + }
50253 + } else {
50254 + pid = find_vpid(shm_lapid);
50255 + if (pid) {
50256 + struct task_struct *p;
50257 + p = pid_task(pid, PIDTYPE_PID);
50258 + if (p == NULL)
50259 + goto unlock;
50260 + if (unlikely(!have_same_root(current, p))) {
50261 + read_unlock(&tasklist_lock);
50262 + rcu_read_unlock();
50263 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50264 + return 0;
50265 + }
50266 + }
50267 + }
50268 +
50269 +unlock:
50270 + read_unlock(&tasklist_lock);
50271 + rcu_read_unlock();
50272 +#endif
50273 + return 1;
50274 +}
50275 +
50276 +void
50277 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50278 +{
50279 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50280 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50281 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50282 +#endif
50283 + return;
50284 +}
50285 +
50286 +int
50287 +gr_handle_chroot_mknod(const struct dentry *dentry,
50288 + const struct vfsmount *mnt, const int mode)
50289 +{
50290 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50291 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50292 + proc_is_chrooted(current)) {
50293 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50294 + return -EPERM;
50295 + }
50296 +#endif
50297 + return 0;
50298 +}
50299 +
50300 +int
50301 +gr_handle_chroot_mount(const struct dentry *dentry,
50302 + const struct vfsmount *mnt, const char *dev_name)
50303 +{
50304 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50305 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50306 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
50307 + return -EPERM;
50308 + }
50309 +#endif
50310 + return 0;
50311 +}
50312 +
50313 +int
50314 +gr_handle_chroot_pivot(void)
50315 +{
50316 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50317 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50318 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50319 + return -EPERM;
50320 + }
50321 +#endif
50322 + return 0;
50323 +}
50324 +
50325 +int
50326 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50327 +{
50328 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50329 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50330 + !gr_is_outside_chroot(dentry, mnt)) {
50331 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50332 + return -EPERM;
50333 + }
50334 +#endif
50335 + return 0;
50336 +}
50337 +
50338 +int
50339 +gr_handle_chroot_caps(struct path *path)
50340 +{
50341 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50342 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
50343 + (init_task.fs->root.dentry != path->dentry) &&
50344 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
50345 +
50346 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50347 + const struct cred *old = current_cred();
50348 + struct cred *new = prepare_creds();
50349 + if (new == NULL)
50350 + return 1;
50351 +
50352 + new->cap_permitted = cap_drop(old->cap_permitted,
50353 + chroot_caps);
50354 + new->cap_inheritable = cap_drop(old->cap_inheritable,
50355 + chroot_caps);
50356 + new->cap_effective = cap_drop(old->cap_effective,
50357 + chroot_caps);
50358 +
50359 + commit_creds(new);
50360 +
50361 + return 0;
50362 + }
50363 +#endif
50364 + return 0;
50365 +}
50366 +
50367 +int
50368 +gr_handle_chroot_sysctl(const int op)
50369 +{
50370 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50371 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
50372 + && (op & MAY_WRITE))
50373 + return -EACCES;
50374 +#endif
50375 + return 0;
50376 +}
50377 +
50378 +void
50379 +gr_handle_chroot_chdir(struct path *path)
50380 +{
50381 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50382 + if (grsec_enable_chroot_chdir)
50383 + set_fs_pwd(current->fs, path);
50384 +#endif
50385 + return;
50386 +}
50387 +
50388 +int
50389 +gr_handle_chroot_chmod(const struct dentry *dentry,
50390 + const struct vfsmount *mnt, const int mode)
50391 +{
50392 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50393 + /* allow chmod +s on directories, but not on files */
50394 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50395 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50396 + proc_is_chrooted(current)) {
50397 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50398 + return -EPERM;
50399 + }
50400 +#endif
50401 + return 0;
50402 +}
50403 +
50404 +#ifdef CONFIG_SECURITY
50405 +EXPORT_SYMBOL(gr_handle_chroot_caps);
50406 +#endif
50407 diff -urNp linux-2.6.32.42/grsecurity/grsec_disabled.c linux-2.6.32.42/grsecurity/grsec_disabled.c
50408 --- linux-2.6.32.42/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50409 +++ linux-2.6.32.42/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
50410 @@ -0,0 +1,447 @@
50411 +#include <linux/kernel.h>
50412 +#include <linux/module.h>
50413 +#include <linux/sched.h>
50414 +#include <linux/file.h>
50415 +#include <linux/fs.h>
50416 +#include <linux/kdev_t.h>
50417 +#include <linux/net.h>
50418 +#include <linux/in.h>
50419 +#include <linux/ip.h>
50420 +#include <linux/skbuff.h>
50421 +#include <linux/sysctl.h>
50422 +
50423 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50424 +void
50425 +pax_set_initial_flags(struct linux_binprm *bprm)
50426 +{
50427 + return;
50428 +}
50429 +#endif
50430 +
50431 +#ifdef CONFIG_SYSCTL
50432 +__u32
50433 +gr_handle_sysctl(const struct ctl_table * table, const int op)
50434 +{
50435 + return 0;
50436 +}
50437 +#endif
50438 +
50439 +#ifdef CONFIG_TASKSTATS
50440 +int gr_is_taskstats_denied(int pid)
50441 +{
50442 + return 0;
50443 +}
50444 +#endif
50445 +
50446 +int
50447 +gr_acl_is_enabled(void)
50448 +{
50449 + return 0;
50450 +}
50451 +
50452 +int
50453 +gr_handle_rawio(const struct inode *inode)
50454 +{
50455 + return 0;
50456 +}
50457 +
50458 +void
50459 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50460 +{
50461 + return;
50462 +}
50463 +
50464 +int
50465 +gr_handle_ptrace(struct task_struct *task, const long request)
50466 +{
50467 + return 0;
50468 +}
50469 +
50470 +int
50471 +gr_handle_proc_ptrace(struct task_struct *task)
50472 +{
50473 + return 0;
50474 +}
50475 +
50476 +void
50477 +gr_learn_resource(const struct task_struct *task,
50478 + const int res, const unsigned long wanted, const int gt)
50479 +{
50480 + return;
50481 +}
50482 +
50483 +int
50484 +gr_set_acls(const int type)
50485 +{
50486 + return 0;
50487 +}
50488 +
50489 +int
50490 +gr_check_hidden_task(const struct task_struct *tsk)
50491 +{
50492 + return 0;
50493 +}
50494 +
50495 +int
50496 +gr_check_protected_task(const struct task_struct *task)
50497 +{
50498 + return 0;
50499 +}
50500 +
50501 +int
50502 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50503 +{
50504 + return 0;
50505 +}
50506 +
50507 +void
50508 +gr_copy_label(struct task_struct *tsk)
50509 +{
50510 + return;
50511 +}
50512 +
50513 +void
50514 +gr_set_pax_flags(struct task_struct *task)
50515 +{
50516 + return;
50517 +}
50518 +
50519 +int
50520 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50521 + const int unsafe_share)
50522 +{
50523 + return 0;
50524 +}
50525 +
50526 +void
50527 +gr_handle_delete(const ino_t ino, const dev_t dev)
50528 +{
50529 + return;
50530 +}
50531 +
50532 +void
50533 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50534 +{
50535 + return;
50536 +}
50537 +
50538 +void
50539 +gr_handle_crash(struct task_struct *task, const int sig)
50540 +{
50541 + return;
50542 +}
50543 +
50544 +int
50545 +gr_check_crash_exec(const struct file *filp)
50546 +{
50547 + return 0;
50548 +}
50549 +
50550 +int
50551 +gr_check_crash_uid(const uid_t uid)
50552 +{
50553 + return 0;
50554 +}
50555 +
50556 +void
50557 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50558 + struct dentry *old_dentry,
50559 + struct dentry *new_dentry,
50560 + struct vfsmount *mnt, const __u8 replace)
50561 +{
50562 + return;
50563 +}
50564 +
50565 +int
50566 +gr_search_socket(const int family, const int type, const int protocol)
50567 +{
50568 + return 1;
50569 +}
50570 +
50571 +int
50572 +gr_search_connectbind(const int mode, const struct socket *sock,
50573 + const struct sockaddr_in *addr)
50574 +{
50575 + return 0;
50576 +}
50577 +
50578 +int
50579 +gr_is_capable(const int cap)
50580 +{
50581 + return 1;
50582 +}
50583 +
50584 +int
50585 +gr_is_capable_nolog(const int cap)
50586 +{
50587 + return 1;
50588 +}
50589 +
50590 +void
50591 +gr_handle_alertkill(struct task_struct *task)
50592 +{
50593 + return;
50594 +}
50595 +
50596 +__u32
50597 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50598 +{
50599 + return 1;
50600 +}
50601 +
50602 +__u32
50603 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50604 + const struct vfsmount * mnt)
50605 +{
50606 + return 1;
50607 +}
50608 +
50609 +__u32
50610 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50611 + const int fmode)
50612 +{
50613 + return 1;
50614 +}
50615 +
50616 +__u32
50617 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50618 +{
50619 + return 1;
50620 +}
50621 +
50622 +__u32
50623 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50624 +{
50625 + return 1;
50626 +}
50627 +
50628 +int
50629 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50630 + unsigned int *vm_flags)
50631 +{
50632 + return 1;
50633 +}
50634 +
50635 +__u32
50636 +gr_acl_handle_truncate(const struct dentry * dentry,
50637 + const struct vfsmount * mnt)
50638 +{
50639 + return 1;
50640 +}
50641 +
50642 +__u32
50643 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50644 +{
50645 + return 1;
50646 +}
50647 +
50648 +__u32
50649 +gr_acl_handle_access(const struct dentry * dentry,
50650 + const struct vfsmount * mnt, const int fmode)
50651 +{
50652 + return 1;
50653 +}
50654 +
50655 +__u32
50656 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50657 + mode_t mode)
50658 +{
50659 + return 1;
50660 +}
50661 +
50662 +__u32
50663 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50664 + mode_t mode)
50665 +{
50666 + return 1;
50667 +}
50668 +
50669 +__u32
50670 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50671 +{
50672 + return 1;
50673 +}
50674 +
50675 +__u32
50676 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50677 +{
50678 + return 1;
50679 +}
50680 +
50681 +void
50682 +grsecurity_init(void)
50683 +{
50684 + return;
50685 +}
50686 +
50687 +__u32
50688 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50689 + const struct dentry * parent_dentry,
50690 + const struct vfsmount * parent_mnt,
50691 + const int mode)
50692 +{
50693 + return 1;
50694 +}
50695 +
50696 +__u32
50697 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50698 + const struct dentry * parent_dentry,
50699 + const struct vfsmount * parent_mnt)
50700 +{
50701 + return 1;
50702 +}
50703 +
50704 +__u32
50705 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50706 + const struct dentry * parent_dentry,
50707 + const struct vfsmount * parent_mnt, const char *from)
50708 +{
50709 + return 1;
50710 +}
50711 +
50712 +__u32
50713 +gr_acl_handle_link(const struct dentry * new_dentry,
50714 + const struct dentry * parent_dentry,
50715 + const struct vfsmount * parent_mnt,
50716 + const struct dentry * old_dentry,
50717 + const struct vfsmount * old_mnt, const char *to)
50718 +{
50719 + return 1;
50720 +}
50721 +
50722 +int
50723 +gr_acl_handle_rename(const struct dentry *new_dentry,
50724 + const struct dentry *parent_dentry,
50725 + const struct vfsmount *parent_mnt,
50726 + const struct dentry *old_dentry,
50727 + const struct inode *old_parent_inode,
50728 + const struct vfsmount *old_mnt, const char *newname)
50729 +{
50730 + return 0;
50731 +}
50732 +
50733 +int
50734 +gr_acl_handle_filldir(const struct file *file, const char *name,
50735 + const int namelen, const ino_t ino)
50736 +{
50737 + return 1;
50738 +}
50739 +
50740 +int
50741 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50742 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50743 +{
50744 + return 1;
50745 +}
50746 +
50747 +int
50748 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50749 +{
50750 + return 0;
50751 +}
50752 +
50753 +int
50754 +gr_search_accept(const struct socket *sock)
50755 +{
50756 + return 0;
50757 +}
50758 +
50759 +int
50760 +gr_search_listen(const struct socket *sock)
50761 +{
50762 + return 0;
50763 +}
50764 +
50765 +int
50766 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50767 +{
50768 + return 0;
50769 +}
50770 +
50771 +__u32
50772 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50773 +{
50774 + return 1;
50775 +}
50776 +
50777 +__u32
50778 +gr_acl_handle_creat(const struct dentry * dentry,
50779 + const struct dentry * p_dentry,
50780 + const struct vfsmount * p_mnt, const int fmode,
50781 + const int imode)
50782 +{
50783 + return 1;
50784 +}
50785 +
50786 +void
50787 +gr_acl_handle_exit(void)
50788 +{
50789 + return;
50790 +}
50791 +
50792 +int
50793 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50794 +{
50795 + return 1;
50796 +}
50797 +
50798 +void
50799 +gr_set_role_label(const uid_t uid, const gid_t gid)
50800 +{
50801 + return;
50802 +}
50803 +
50804 +int
50805 +gr_acl_handle_procpidmem(const struct task_struct *task)
50806 +{
50807 + return 0;
50808 +}
50809 +
50810 +int
50811 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50812 +{
50813 + return 0;
50814 +}
50815 +
50816 +int
50817 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50818 +{
50819 + return 0;
50820 +}
50821 +
50822 +void
50823 +gr_set_kernel_label(struct task_struct *task)
50824 +{
50825 + return;
50826 +}
50827 +
50828 +int
50829 +gr_check_user_change(int real, int effective, int fs)
50830 +{
50831 + return 0;
50832 +}
50833 +
50834 +int
50835 +gr_check_group_change(int real, int effective, int fs)
50836 +{
50837 + return 0;
50838 +}
50839 +
50840 +int gr_acl_enable_at_secure(void)
50841 +{
50842 + return 0;
50843 +}
50844 +
50845 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50846 +{
50847 + return dentry->d_inode->i_sb->s_dev;
50848 +}
50849 +
50850 +EXPORT_SYMBOL(gr_is_capable);
50851 +EXPORT_SYMBOL(gr_is_capable_nolog);
50852 +EXPORT_SYMBOL(gr_learn_resource);
50853 +EXPORT_SYMBOL(gr_set_kernel_label);
50854 +#ifdef CONFIG_SECURITY
50855 +EXPORT_SYMBOL(gr_check_user_change);
50856 +EXPORT_SYMBOL(gr_check_group_change);
50857 +#endif
50858 diff -urNp linux-2.6.32.42/grsecurity/grsec_exec.c linux-2.6.32.42/grsecurity/grsec_exec.c
50859 --- linux-2.6.32.42/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50860 +++ linux-2.6.32.42/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50861 @@ -0,0 +1,148 @@
50862 +#include <linux/kernel.h>
50863 +#include <linux/sched.h>
50864 +#include <linux/file.h>
50865 +#include <linux/binfmts.h>
50866 +#include <linux/smp_lock.h>
50867 +#include <linux/fs.h>
50868 +#include <linux/types.h>
50869 +#include <linux/grdefs.h>
50870 +#include <linux/grinternal.h>
50871 +#include <linux/capability.h>
50872 +#include <linux/compat.h>
50873 +
50874 +#include <asm/uaccess.h>
50875 +
50876 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50877 +static char gr_exec_arg_buf[132];
50878 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50879 +#endif
50880 +
50881 +int
50882 +gr_handle_nproc(void)
50883 +{
50884 +#ifdef CONFIG_GRKERNSEC_EXECVE
50885 + const struct cred *cred = current_cred();
50886 + if (grsec_enable_execve && cred->user &&
50887 + (atomic_read(&cred->user->processes) >
50888 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50889 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50890 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50891 + return -EAGAIN;
50892 + }
50893 +#endif
50894 + return 0;
50895 +}
50896 +
50897 +void
50898 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50899 +{
50900 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50901 + char *grarg = gr_exec_arg_buf;
50902 + unsigned int i, x, execlen = 0;
50903 + char c;
50904 +
50905 + if (!((grsec_enable_execlog && grsec_enable_group &&
50906 + in_group_p(grsec_audit_gid))
50907 + || (grsec_enable_execlog && !grsec_enable_group)))
50908 + return;
50909 +
50910 + mutex_lock(&gr_exec_arg_mutex);
50911 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50912 +
50913 + if (unlikely(argv == NULL))
50914 + goto log;
50915 +
50916 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50917 + const char __user *p;
50918 + unsigned int len;
50919 +
50920 + if (copy_from_user(&p, argv + i, sizeof(p)))
50921 + goto log;
50922 + if (!p)
50923 + goto log;
50924 + len = strnlen_user(p, 128 - execlen);
50925 + if (len > 128 - execlen)
50926 + len = 128 - execlen;
50927 + else if (len > 0)
50928 + len--;
50929 + if (copy_from_user(grarg + execlen, p, len))
50930 + goto log;
50931 +
50932 + /* rewrite unprintable characters */
50933 + for (x = 0; x < len; x++) {
50934 + c = *(grarg + execlen + x);
50935 + if (c < 32 || c > 126)
50936 + *(grarg + execlen + x) = ' ';
50937 + }
50938 +
50939 + execlen += len;
50940 + *(grarg + execlen) = ' ';
50941 + *(grarg + execlen + 1) = '\0';
50942 + execlen++;
50943 + }
50944 +
50945 + log:
50946 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50947 + bprm->file->f_path.mnt, grarg);
50948 + mutex_unlock(&gr_exec_arg_mutex);
50949 +#endif
50950 + return;
50951 +}
50952 +
50953 +#ifdef CONFIG_COMPAT
50954 +void
50955 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50956 +{
50957 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50958 + char *grarg = gr_exec_arg_buf;
50959 + unsigned int i, x, execlen = 0;
50960 + char c;
50961 +
50962 + if (!((grsec_enable_execlog && grsec_enable_group &&
50963 + in_group_p(grsec_audit_gid))
50964 + || (grsec_enable_execlog && !grsec_enable_group)))
50965 + return;
50966 +
50967 + mutex_lock(&gr_exec_arg_mutex);
50968 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50969 +
50970 + if (unlikely(argv == NULL))
50971 + goto log;
50972 +
50973 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50974 + compat_uptr_t p;
50975 + unsigned int len;
50976 +
50977 + if (get_user(p, argv + i))
50978 + goto log;
50979 + len = strnlen_user(compat_ptr(p), 128 - execlen);
50980 + if (len > 128 - execlen)
50981 + len = 128 - execlen;
50982 + else if (len > 0)
50983 + len--;
50984 + else
50985 + goto log;
50986 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50987 + goto log;
50988 +
50989 + /* rewrite unprintable characters */
50990 + for (x = 0; x < len; x++) {
50991 + c = *(grarg + execlen + x);
50992 + if (c < 32 || c > 126)
50993 + *(grarg + execlen + x) = ' ';
50994 + }
50995 +
50996 + execlen += len;
50997 + *(grarg + execlen) = ' ';
50998 + *(grarg + execlen + 1) = '\0';
50999 + execlen++;
51000 + }
51001 +
51002 + log:
51003 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51004 + bprm->file->f_path.mnt, grarg);
51005 + mutex_unlock(&gr_exec_arg_mutex);
51006 +#endif
51007 + return;
51008 +}
51009 +#endif
51010 diff -urNp linux-2.6.32.42/grsecurity/grsec_fifo.c linux-2.6.32.42/grsecurity/grsec_fifo.c
51011 --- linux-2.6.32.42/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
51012 +++ linux-2.6.32.42/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
51013 @@ -0,0 +1,24 @@
51014 +#include <linux/kernel.h>
51015 +#include <linux/sched.h>
51016 +#include <linux/fs.h>
51017 +#include <linux/file.h>
51018 +#include <linux/grinternal.h>
51019 +
51020 +int
51021 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
51022 + const struct dentry *dir, const int flag, const int acc_mode)
51023 +{
51024 +#ifdef CONFIG_GRKERNSEC_FIFO
51025 + const struct cred *cred = current_cred();
51026 +
51027 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
51028 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
51029 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
51030 + (cred->fsuid != dentry->d_inode->i_uid)) {
51031 + if (!inode_permission(dentry->d_inode, acc_mode))
51032 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
51033 + return -EACCES;
51034 + }
51035 +#endif
51036 + return 0;
51037 +}
51038 diff -urNp linux-2.6.32.42/grsecurity/grsec_fork.c linux-2.6.32.42/grsecurity/grsec_fork.c
51039 --- linux-2.6.32.42/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
51040 +++ linux-2.6.32.42/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
51041 @@ -0,0 +1,23 @@
51042 +#include <linux/kernel.h>
51043 +#include <linux/sched.h>
51044 +#include <linux/grsecurity.h>
51045 +#include <linux/grinternal.h>
51046 +#include <linux/errno.h>
51047 +
51048 +void
51049 +gr_log_forkfail(const int retval)
51050 +{
51051 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51052 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
51053 + switch (retval) {
51054 + case -EAGAIN:
51055 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
51056 + break;
51057 + case -ENOMEM:
51058 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
51059 + break;
51060 + }
51061 + }
51062 +#endif
51063 + return;
51064 +}
51065 diff -urNp linux-2.6.32.42/grsecurity/grsec_init.c linux-2.6.32.42/grsecurity/grsec_init.c
51066 --- linux-2.6.32.42/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
51067 +++ linux-2.6.32.42/grsecurity/grsec_init.c 2011-06-29 19:35:26.000000000 -0400
51068 @@ -0,0 +1,274 @@
51069 +#include <linux/kernel.h>
51070 +#include <linux/sched.h>
51071 +#include <linux/mm.h>
51072 +#include <linux/smp_lock.h>
51073 +#include <linux/gracl.h>
51074 +#include <linux/slab.h>
51075 +#include <linux/vmalloc.h>
51076 +#include <linux/percpu.h>
51077 +#include <linux/module.h>
51078 +
51079 +int grsec_enable_brute;
51080 +int grsec_enable_link;
51081 +int grsec_enable_dmesg;
51082 +int grsec_enable_harden_ptrace;
51083 +int grsec_enable_fifo;
51084 +int grsec_enable_execve;
51085 +int grsec_enable_execlog;
51086 +int grsec_enable_signal;
51087 +int grsec_enable_forkfail;
51088 +int grsec_enable_audit_ptrace;
51089 +int grsec_enable_time;
51090 +int grsec_enable_audit_textrel;
51091 +int grsec_enable_group;
51092 +int grsec_audit_gid;
51093 +int grsec_enable_chdir;
51094 +int grsec_enable_mount;
51095 +int grsec_enable_rofs;
51096 +int grsec_enable_chroot_findtask;
51097 +int grsec_enable_chroot_mount;
51098 +int grsec_enable_chroot_shmat;
51099 +int grsec_enable_chroot_fchdir;
51100 +int grsec_enable_chroot_double;
51101 +int grsec_enable_chroot_pivot;
51102 +int grsec_enable_chroot_chdir;
51103 +int grsec_enable_chroot_chmod;
51104 +int grsec_enable_chroot_mknod;
51105 +int grsec_enable_chroot_nice;
51106 +int grsec_enable_chroot_execlog;
51107 +int grsec_enable_chroot_caps;
51108 +int grsec_enable_chroot_sysctl;
51109 +int grsec_enable_chroot_unix;
51110 +int grsec_enable_tpe;
51111 +int grsec_tpe_gid;
51112 +int grsec_enable_blackhole;
51113 +#ifdef CONFIG_IPV6_MODULE
51114 +EXPORT_SYMBOL(grsec_enable_blackhole);
51115 +#endif
51116 +int grsec_lastack_retries;
51117 +int grsec_enable_tpe_all;
51118 +int grsec_enable_tpe_invert;
51119 +int grsec_enable_socket_all;
51120 +int grsec_socket_all_gid;
51121 +int grsec_enable_socket_client;
51122 +int grsec_socket_client_gid;
51123 +int grsec_enable_socket_server;
51124 +int grsec_socket_server_gid;
51125 +int grsec_resource_logging;
51126 +int grsec_disable_privio;
51127 +int grsec_enable_log_rwxmaps;
51128 +int grsec_lock;
51129 +
51130 +DEFINE_SPINLOCK(grsec_alert_lock);
51131 +unsigned long grsec_alert_wtime = 0;
51132 +unsigned long grsec_alert_fyet = 0;
51133 +
51134 +DEFINE_SPINLOCK(grsec_audit_lock);
51135 +
51136 +DEFINE_RWLOCK(grsec_exec_file_lock);
51137 +
51138 +char *gr_shared_page[4];
51139 +
51140 +char *gr_alert_log_fmt;
51141 +char *gr_audit_log_fmt;
51142 +char *gr_alert_log_buf;
51143 +char *gr_audit_log_buf;
51144 +
51145 +extern struct gr_arg *gr_usermode;
51146 +extern unsigned char *gr_system_salt;
51147 +extern unsigned char *gr_system_sum;
51148 +
51149 +void __init
51150 +grsecurity_init(void)
51151 +{
51152 + int j;
51153 + /* create the per-cpu shared pages */
51154 +
51155 +#ifdef CONFIG_X86
51156 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
51157 +#endif
51158 +
51159 + for (j = 0; j < 4; j++) {
51160 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
51161 + if (gr_shared_page[j] == NULL) {
51162 + panic("Unable to allocate grsecurity shared page");
51163 + return;
51164 + }
51165 + }
51166 +
51167 + /* allocate log buffers */
51168 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
51169 + if (!gr_alert_log_fmt) {
51170 + panic("Unable to allocate grsecurity alert log format buffer");
51171 + return;
51172 + }
51173 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
51174 + if (!gr_audit_log_fmt) {
51175 + panic("Unable to allocate grsecurity audit log format buffer");
51176 + return;
51177 + }
51178 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51179 + if (!gr_alert_log_buf) {
51180 + panic("Unable to allocate grsecurity alert log buffer");
51181 + return;
51182 + }
51183 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51184 + if (!gr_audit_log_buf) {
51185 + panic("Unable to allocate grsecurity audit log buffer");
51186 + return;
51187 + }
51188 +
51189 + /* allocate memory for authentication structure */
51190 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
51191 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
51192 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
51193 +
51194 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
51195 + panic("Unable to allocate grsecurity authentication structure");
51196 + return;
51197 + }
51198 +
51199 +
51200 +#ifdef CONFIG_GRKERNSEC_IO
51201 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
51202 + grsec_disable_privio = 1;
51203 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51204 + grsec_disable_privio = 1;
51205 +#else
51206 + grsec_disable_privio = 0;
51207 +#endif
51208 +#endif
51209 +
51210 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51211 + /* for backward compatibility, tpe_invert always defaults to on if
51212 + enabled in the kernel
51213 + */
51214 + grsec_enable_tpe_invert = 1;
51215 +#endif
51216 +
51217 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51218 +#ifndef CONFIG_GRKERNSEC_SYSCTL
51219 + grsec_lock = 1;
51220 +#endif
51221 +
51222 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51223 + grsec_enable_audit_textrel = 1;
51224 +#endif
51225 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51226 + grsec_enable_log_rwxmaps = 1;
51227 +#endif
51228 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51229 + grsec_enable_group = 1;
51230 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
51231 +#endif
51232 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51233 + grsec_enable_chdir = 1;
51234 +#endif
51235 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51236 + grsec_enable_harden_ptrace = 1;
51237 +#endif
51238 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51239 + grsec_enable_mount = 1;
51240 +#endif
51241 +#ifdef CONFIG_GRKERNSEC_LINK
51242 + grsec_enable_link = 1;
51243 +#endif
51244 +#ifdef CONFIG_GRKERNSEC_BRUTE
51245 + grsec_enable_brute = 1;
51246 +#endif
51247 +#ifdef CONFIG_GRKERNSEC_DMESG
51248 + grsec_enable_dmesg = 1;
51249 +#endif
51250 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51251 + grsec_enable_blackhole = 1;
51252 + grsec_lastack_retries = 4;
51253 +#endif
51254 +#ifdef CONFIG_GRKERNSEC_FIFO
51255 + grsec_enable_fifo = 1;
51256 +#endif
51257 +#ifdef CONFIG_GRKERNSEC_EXECVE
51258 + grsec_enable_execve = 1;
51259 +#endif
51260 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51261 + grsec_enable_execlog = 1;
51262 +#endif
51263 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51264 + grsec_enable_signal = 1;
51265 +#endif
51266 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51267 + grsec_enable_forkfail = 1;
51268 +#endif
51269 +#ifdef CONFIG_GRKERNSEC_TIME
51270 + grsec_enable_time = 1;
51271 +#endif
51272 +#ifdef CONFIG_GRKERNSEC_RESLOG
51273 + grsec_resource_logging = 1;
51274 +#endif
51275 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51276 + grsec_enable_chroot_findtask = 1;
51277 +#endif
51278 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51279 + grsec_enable_chroot_unix = 1;
51280 +#endif
51281 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51282 + grsec_enable_chroot_mount = 1;
51283 +#endif
51284 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51285 + grsec_enable_chroot_fchdir = 1;
51286 +#endif
51287 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51288 + grsec_enable_chroot_shmat = 1;
51289 +#endif
51290 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51291 + grsec_enable_audit_ptrace = 1;
51292 +#endif
51293 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51294 + grsec_enable_chroot_double = 1;
51295 +#endif
51296 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51297 + grsec_enable_chroot_pivot = 1;
51298 +#endif
51299 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51300 + grsec_enable_chroot_chdir = 1;
51301 +#endif
51302 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51303 + grsec_enable_chroot_chmod = 1;
51304 +#endif
51305 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51306 + grsec_enable_chroot_mknod = 1;
51307 +#endif
51308 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51309 + grsec_enable_chroot_nice = 1;
51310 +#endif
51311 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51312 + grsec_enable_chroot_execlog = 1;
51313 +#endif
51314 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51315 + grsec_enable_chroot_caps = 1;
51316 +#endif
51317 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51318 + grsec_enable_chroot_sysctl = 1;
51319 +#endif
51320 +#ifdef CONFIG_GRKERNSEC_TPE
51321 + grsec_enable_tpe = 1;
51322 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51323 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
51324 + grsec_enable_tpe_all = 1;
51325 +#endif
51326 +#endif
51327 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51328 + grsec_enable_socket_all = 1;
51329 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51330 +#endif
51331 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51332 + grsec_enable_socket_client = 1;
51333 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51334 +#endif
51335 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51336 + grsec_enable_socket_server = 1;
51337 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51338 +#endif
51339 +#endif
51340 +
51341 + return;
51342 +}
51343 diff -urNp linux-2.6.32.42/grsecurity/grsec_link.c linux-2.6.32.42/grsecurity/grsec_link.c
51344 --- linux-2.6.32.42/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51345 +++ linux-2.6.32.42/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
51346 @@ -0,0 +1,43 @@
51347 +#include <linux/kernel.h>
51348 +#include <linux/sched.h>
51349 +#include <linux/fs.h>
51350 +#include <linux/file.h>
51351 +#include <linux/grinternal.h>
51352 +
51353 +int
51354 +gr_handle_follow_link(const struct inode *parent,
51355 + const struct inode *inode,
51356 + const struct dentry *dentry, const struct vfsmount *mnt)
51357 +{
51358 +#ifdef CONFIG_GRKERNSEC_LINK
51359 + const struct cred *cred = current_cred();
51360 +
51361 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51362 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51363 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51364 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51365 + return -EACCES;
51366 + }
51367 +#endif
51368 + return 0;
51369 +}
51370 +
51371 +int
51372 +gr_handle_hardlink(const struct dentry *dentry,
51373 + const struct vfsmount *mnt,
51374 + struct inode *inode, const int mode, const char *to)
51375 +{
51376 +#ifdef CONFIG_GRKERNSEC_LINK
51377 + const struct cred *cred = current_cred();
51378 +
51379 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51380 + (!S_ISREG(mode) || (mode & S_ISUID) ||
51381 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51382 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51383 + !capable(CAP_FOWNER) && cred->uid) {
51384 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51385 + return -EPERM;
51386 + }
51387 +#endif
51388 + return 0;
51389 +}
51390 diff -urNp linux-2.6.32.42/grsecurity/grsec_log.c linux-2.6.32.42/grsecurity/grsec_log.c
51391 --- linux-2.6.32.42/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51392 +++ linux-2.6.32.42/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
51393 @@ -0,0 +1,310 @@
51394 +#include <linux/kernel.h>
51395 +#include <linux/sched.h>
51396 +#include <linux/file.h>
51397 +#include <linux/tty.h>
51398 +#include <linux/fs.h>
51399 +#include <linux/grinternal.h>
51400 +
51401 +#ifdef CONFIG_TREE_PREEMPT_RCU
51402 +#define DISABLE_PREEMPT() preempt_disable()
51403 +#define ENABLE_PREEMPT() preempt_enable()
51404 +#else
51405 +#define DISABLE_PREEMPT()
51406 +#define ENABLE_PREEMPT()
51407 +#endif
51408 +
51409 +#define BEGIN_LOCKS(x) \
51410 + DISABLE_PREEMPT(); \
51411 + rcu_read_lock(); \
51412 + read_lock(&tasklist_lock); \
51413 + read_lock(&grsec_exec_file_lock); \
51414 + if (x != GR_DO_AUDIT) \
51415 + spin_lock(&grsec_alert_lock); \
51416 + else \
51417 + spin_lock(&grsec_audit_lock)
51418 +
51419 +#define END_LOCKS(x) \
51420 + if (x != GR_DO_AUDIT) \
51421 + spin_unlock(&grsec_alert_lock); \
51422 + else \
51423 + spin_unlock(&grsec_audit_lock); \
51424 + read_unlock(&grsec_exec_file_lock); \
51425 + read_unlock(&tasklist_lock); \
51426 + rcu_read_unlock(); \
51427 + ENABLE_PREEMPT(); \
51428 + if (x == GR_DONT_AUDIT) \
51429 + gr_handle_alertkill(current)
51430 +
51431 +enum {
51432 + FLOODING,
51433 + NO_FLOODING
51434 +};
51435 +
51436 +extern char *gr_alert_log_fmt;
51437 +extern char *gr_audit_log_fmt;
51438 +extern char *gr_alert_log_buf;
51439 +extern char *gr_audit_log_buf;
51440 +
51441 +static int gr_log_start(int audit)
51442 +{
51443 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51444 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51445 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51446 +
51447 + if (audit == GR_DO_AUDIT)
51448 + goto set_fmt;
51449 +
51450 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51451 + grsec_alert_wtime = jiffies;
51452 + grsec_alert_fyet = 0;
51453 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51454 + grsec_alert_fyet++;
51455 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51456 + grsec_alert_wtime = jiffies;
51457 + grsec_alert_fyet++;
51458 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51459 + return FLOODING;
51460 + } else return FLOODING;
51461 +
51462 +set_fmt:
51463 + memset(buf, 0, PAGE_SIZE);
51464 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
51465 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51466 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51467 + } else if (current->signal->curr_ip) {
51468 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51469 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51470 + } else if (gr_acl_is_enabled()) {
51471 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51472 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51473 + } else {
51474 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
51475 + strcpy(buf, fmt);
51476 + }
51477 +
51478 + return NO_FLOODING;
51479 +}
51480 +
51481 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51482 + __attribute__ ((format (printf, 2, 0)));
51483 +
51484 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51485 +{
51486 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51487 + unsigned int len = strlen(buf);
51488 +
51489 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51490 +
51491 + return;
51492 +}
51493 +
51494 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51495 + __attribute__ ((format (printf, 2, 3)));
51496 +
51497 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51498 +{
51499 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51500 + unsigned int len = strlen(buf);
51501 + va_list ap;
51502 +
51503 + va_start(ap, msg);
51504 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51505 + va_end(ap);
51506 +
51507 + return;
51508 +}
51509 +
51510 +static void gr_log_end(int audit)
51511 +{
51512 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51513 + unsigned int len = strlen(buf);
51514 +
51515 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51516 + printk("%s\n", buf);
51517 +
51518 + return;
51519 +}
51520 +
51521 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51522 +{
51523 + int logtype;
51524 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51525 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51526 + void *voidptr = NULL;
51527 + int num1 = 0, num2 = 0;
51528 + unsigned long ulong1 = 0, ulong2 = 0;
51529 + struct dentry *dentry = NULL;
51530 + struct vfsmount *mnt = NULL;
51531 + struct file *file = NULL;
51532 + struct task_struct *task = NULL;
51533 + const struct cred *cred, *pcred;
51534 + va_list ap;
51535 +
51536 + BEGIN_LOCKS(audit);
51537 + logtype = gr_log_start(audit);
51538 + if (logtype == FLOODING) {
51539 + END_LOCKS(audit);
51540 + return;
51541 + }
51542 + va_start(ap, argtypes);
51543 + switch (argtypes) {
51544 + case GR_TTYSNIFF:
51545 + task = va_arg(ap, struct task_struct *);
51546 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51547 + break;
51548 + case GR_SYSCTL_HIDDEN:
51549 + str1 = va_arg(ap, char *);
51550 + gr_log_middle_varargs(audit, msg, result, str1);
51551 + break;
51552 + case GR_RBAC:
51553 + dentry = va_arg(ap, struct dentry *);
51554 + mnt = va_arg(ap, struct vfsmount *);
51555 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51556 + break;
51557 + case GR_RBAC_STR:
51558 + dentry = va_arg(ap, struct dentry *);
51559 + mnt = va_arg(ap, struct vfsmount *);
51560 + str1 = va_arg(ap, char *);
51561 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51562 + break;
51563 + case GR_STR_RBAC:
51564 + str1 = va_arg(ap, char *);
51565 + dentry = va_arg(ap, struct dentry *);
51566 + mnt = va_arg(ap, struct vfsmount *);
51567 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51568 + break;
51569 + case GR_RBAC_MODE2:
51570 + dentry = va_arg(ap, struct dentry *);
51571 + mnt = va_arg(ap, struct vfsmount *);
51572 + str1 = va_arg(ap, char *);
51573 + str2 = va_arg(ap, char *);
51574 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51575 + break;
51576 + case GR_RBAC_MODE3:
51577 + dentry = va_arg(ap, struct dentry *);
51578 + mnt = va_arg(ap, struct vfsmount *);
51579 + str1 = va_arg(ap, char *);
51580 + str2 = va_arg(ap, char *);
51581 + str3 = va_arg(ap, char *);
51582 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51583 + break;
51584 + case GR_FILENAME:
51585 + dentry = va_arg(ap, struct dentry *);
51586 + mnt = va_arg(ap, struct vfsmount *);
51587 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51588 + break;
51589 + case GR_STR_FILENAME:
51590 + str1 = va_arg(ap, char *);
51591 + dentry = va_arg(ap, struct dentry *);
51592 + mnt = va_arg(ap, struct vfsmount *);
51593 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51594 + break;
51595 + case GR_FILENAME_STR:
51596 + dentry = va_arg(ap, struct dentry *);
51597 + mnt = va_arg(ap, struct vfsmount *);
51598 + str1 = va_arg(ap, char *);
51599 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51600 + break;
51601 + case GR_FILENAME_TWO_INT:
51602 + dentry = va_arg(ap, struct dentry *);
51603 + mnt = va_arg(ap, struct vfsmount *);
51604 + num1 = va_arg(ap, int);
51605 + num2 = va_arg(ap, int);
51606 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51607 + break;
51608 + case GR_FILENAME_TWO_INT_STR:
51609 + dentry = va_arg(ap, struct dentry *);
51610 + mnt = va_arg(ap, struct vfsmount *);
51611 + num1 = va_arg(ap, int);
51612 + num2 = va_arg(ap, int);
51613 + str1 = va_arg(ap, char *);
51614 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51615 + break;
51616 + case GR_TEXTREL:
51617 + file = va_arg(ap, struct file *);
51618 + ulong1 = va_arg(ap, unsigned long);
51619 + ulong2 = va_arg(ap, unsigned long);
51620 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51621 + break;
51622 + case GR_PTRACE:
51623 + task = va_arg(ap, struct task_struct *);
51624 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51625 + break;
51626 + case GR_RESOURCE:
51627 + task = va_arg(ap, struct task_struct *);
51628 + cred = __task_cred(task);
51629 + pcred = __task_cred(task->real_parent);
51630 + ulong1 = va_arg(ap, unsigned long);
51631 + str1 = va_arg(ap, char *);
51632 + ulong2 = va_arg(ap, unsigned long);
51633 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51634 + break;
51635 + case GR_CAP:
51636 + task = va_arg(ap, struct task_struct *);
51637 + cred = __task_cred(task);
51638 + pcred = __task_cred(task->real_parent);
51639 + str1 = va_arg(ap, char *);
51640 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51641 + break;
51642 + case GR_SIG:
51643 + str1 = va_arg(ap, char *);
51644 + voidptr = va_arg(ap, void *);
51645 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51646 + break;
51647 + case GR_SIG2:
51648 + task = va_arg(ap, struct task_struct *);
51649 + cred = __task_cred(task);
51650 + pcred = __task_cred(task->real_parent);
51651 + num1 = va_arg(ap, int);
51652 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51653 + break;
51654 + case GR_CRASH1:
51655 + task = va_arg(ap, struct task_struct *);
51656 + cred = __task_cred(task);
51657 + pcred = __task_cred(task->real_parent);
51658 + ulong1 = va_arg(ap, unsigned long);
51659 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51660 + break;
51661 + case GR_CRASH2:
51662 + task = va_arg(ap, struct task_struct *);
51663 + cred = __task_cred(task);
51664 + pcred = __task_cred(task->real_parent);
51665 + ulong1 = va_arg(ap, unsigned long);
51666 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51667 + break;
51668 + case GR_RWXMAP:
51669 + file = va_arg(ap, struct file *);
51670 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51671 + break;
51672 + case GR_PSACCT:
51673 + {
51674 + unsigned int wday, cday;
51675 + __u8 whr, chr;
51676 + __u8 wmin, cmin;
51677 + __u8 wsec, csec;
51678 + char cur_tty[64] = { 0 };
51679 + char parent_tty[64] = { 0 };
51680 +
51681 + task = va_arg(ap, struct task_struct *);
51682 + wday = va_arg(ap, unsigned int);
51683 + cday = va_arg(ap, unsigned int);
51684 + whr = va_arg(ap, int);
51685 + chr = va_arg(ap, int);
51686 + wmin = va_arg(ap, int);
51687 + cmin = va_arg(ap, int);
51688 + wsec = va_arg(ap, int);
51689 + csec = va_arg(ap, int);
51690 + ulong1 = va_arg(ap, unsigned long);
51691 + cred = __task_cred(task);
51692 + pcred = __task_cred(task->real_parent);
51693 +
51694 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51695 + }
51696 + break;
51697 + default:
51698 + gr_log_middle(audit, msg, ap);
51699 + }
51700 + va_end(ap);
51701 + gr_log_end(audit);
51702 + END_LOCKS(audit);
51703 +}
51704 diff -urNp linux-2.6.32.42/grsecurity/grsec_mem.c linux-2.6.32.42/grsecurity/grsec_mem.c
51705 --- linux-2.6.32.42/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51706 +++ linux-2.6.32.42/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51707 @@ -0,0 +1,33 @@
51708 +#include <linux/kernel.h>
51709 +#include <linux/sched.h>
51710 +#include <linux/mm.h>
51711 +#include <linux/mman.h>
51712 +#include <linux/grinternal.h>
51713 +
51714 +void
51715 +gr_handle_ioperm(void)
51716 +{
51717 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51718 + return;
51719 +}
51720 +
51721 +void
51722 +gr_handle_iopl(void)
51723 +{
51724 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51725 + return;
51726 +}
51727 +
51728 +void
51729 +gr_handle_mem_readwrite(u64 from, u64 to)
51730 +{
51731 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51732 + return;
51733 +}
51734 +
51735 +void
51736 +gr_handle_vm86(void)
51737 +{
51738 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51739 + return;
51740 +}
51741 diff -urNp linux-2.6.32.42/grsecurity/grsec_mount.c linux-2.6.32.42/grsecurity/grsec_mount.c
51742 --- linux-2.6.32.42/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51743 +++ linux-2.6.32.42/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
51744 @@ -0,0 +1,62 @@
51745 +#include <linux/kernel.h>
51746 +#include <linux/sched.h>
51747 +#include <linux/mount.h>
51748 +#include <linux/grsecurity.h>
51749 +#include <linux/grinternal.h>
51750 +
51751 +void
51752 +gr_log_remount(const char *devname, const int retval)
51753 +{
51754 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51755 + if (grsec_enable_mount && (retval >= 0))
51756 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51757 +#endif
51758 + return;
51759 +}
51760 +
51761 +void
51762 +gr_log_unmount(const char *devname, const int retval)
51763 +{
51764 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51765 + if (grsec_enable_mount && (retval >= 0))
51766 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51767 +#endif
51768 + return;
51769 +}
51770 +
51771 +void
51772 +gr_log_mount(const char *from, const char *to, const int retval)
51773 +{
51774 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51775 + if (grsec_enable_mount && (retval >= 0))
51776 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51777 +#endif
51778 + return;
51779 +}
51780 +
51781 +int
51782 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51783 +{
51784 +#ifdef CONFIG_GRKERNSEC_ROFS
51785 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51786 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51787 + return -EPERM;
51788 + } else
51789 + return 0;
51790 +#endif
51791 + return 0;
51792 +}
51793 +
51794 +int
51795 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51796 +{
51797 +#ifdef CONFIG_GRKERNSEC_ROFS
51798 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51799 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51800 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51801 + return -EPERM;
51802 + } else
51803 + return 0;
51804 +#endif
51805 + return 0;
51806 +}
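/*
 * [Editorial sketch, not part of the patch] The two ROFS helpers above follow
 * the convention used throughout these files: return -EPERM to veto an
 * operation, 0 to allow it.  A hypothetical caller in a mount path (the real
 * hook sites are added elsewhere in this patch; the local names below are
 * illustrative only) would look roughly like:
 *
 *	retval = gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags);
 *	if (retval)
 *		return retval;
 *
 * i.e. the mount is refused while romount_protect is active and the new
 * mount is not read-only.
 */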
51807 diff -urNp linux-2.6.32.42/grsecurity/grsec_pax.c linux-2.6.32.42/grsecurity/grsec_pax.c
51808 --- linux-2.6.32.42/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51809 +++ linux-2.6.32.42/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51810 @@ -0,0 +1,36 @@
51811 +#include <linux/kernel.h>
51812 +#include <linux/sched.h>
51813 +#include <linux/mm.h>
51814 +#include <linux/file.h>
51815 +#include <linux/grinternal.h>
51816 +#include <linux/grsecurity.h>
51817 +
51818 +void
51819 +gr_log_textrel(struct vm_area_struct * vma)
51820 +{
51821 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51822 + if (grsec_enable_audit_textrel)
51823 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51824 +#endif
51825 + return;
51826 +}
51827 +
51828 +void
51829 +gr_log_rwxmmap(struct file *file)
51830 +{
51831 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51832 + if (grsec_enable_log_rwxmaps)
51833 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51834 +#endif
51835 + return;
51836 +}
51837 +
51838 +void
51839 +gr_log_rwxmprotect(struct file *file)
51840 +{
51841 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51842 + if (grsec_enable_log_rwxmaps)
51843 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51844 +#endif
51845 + return;
51846 +}
51847 diff -urNp linux-2.6.32.42/grsecurity/grsec_ptrace.c linux-2.6.32.42/grsecurity/grsec_ptrace.c
51848 --- linux-2.6.32.42/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51849 +++ linux-2.6.32.42/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51850 @@ -0,0 +1,14 @@
51851 +#include <linux/kernel.h>
51852 +#include <linux/sched.h>
51853 +#include <linux/grinternal.h>
51854 +#include <linux/grsecurity.h>
51855 +
51856 +void
51857 +gr_audit_ptrace(struct task_struct *task)
51858 +{
51859 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51860 + if (grsec_enable_audit_ptrace)
51861 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51862 +#endif
51863 + return;
51864 +}
51865 diff -urNp linux-2.6.32.42/grsecurity/grsec_sig.c linux-2.6.32.42/grsecurity/grsec_sig.c
51866 --- linux-2.6.32.42/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51867 +++ linux-2.6.32.42/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
51868 @@ -0,0 +1,205 @@
51869 +#include <linux/kernel.h>
51870 +#include <linux/sched.h>
51871 +#include <linux/delay.h>
51872 +#include <linux/grsecurity.h>
51873 +#include <linux/grinternal.h>
51874 +#include <linux/hardirq.h>
51875 +
51876 +char *signames[] = {
51877 + [SIGSEGV] = "Segmentation fault",
51878 + [SIGILL] = "Illegal instruction",
51879 + [SIGABRT] = "Abort",
51880 + [SIGBUS] = "Invalid alignment/Bus error"
51881 +};
51882 +
51883 +void
51884 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51885 +{
51886 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51887 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51888 + (sig == SIGABRT) || (sig == SIGBUS))) {
51889 + if (t->pid == current->pid) {
51890 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51891 + } else {
51892 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51893 + }
51894 + }
51895 +#endif
51896 + return;
51897 +}
51898 +
51899 +int
51900 +gr_handle_signal(const struct task_struct *p, const int sig)
51901 +{
51902 +#ifdef CONFIG_GRKERNSEC
51903 + if (current->pid > 1 && gr_check_protected_task(p)) {
51904 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51905 + return -EPERM;
51906 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51907 + return -EPERM;
51908 + }
51909 +#endif
51910 + return 0;
51911 +}
51912 +
51913 +#ifdef CONFIG_GRKERNSEC
51914 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51915 +
51916 +int gr_fake_force_sig(int sig, struct task_struct *t)
51917 +{
51918 + unsigned long int flags;
51919 + int ret, blocked, ignored;
51920 + struct k_sigaction *action;
51921 +
51922 + spin_lock_irqsave(&t->sighand->siglock, flags);
51923 + action = &t->sighand->action[sig-1];
51924 + ignored = action->sa.sa_handler == SIG_IGN;
51925 + blocked = sigismember(&t->blocked, sig);
51926 + if (blocked || ignored) {
51927 + action->sa.sa_handler = SIG_DFL;
51928 + if (blocked) {
51929 + sigdelset(&t->blocked, sig);
51930 + recalc_sigpending_and_wake(t);
51931 + }
51932 + }
51933 + if (action->sa.sa_handler == SIG_DFL)
51934 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51935 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51936 +
51937 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51938 +
51939 + return ret;
51940 +}
51941 +#endif
51942 +
51943 +#ifdef CONFIG_GRKERNSEC_BRUTE
51944 +#define GR_USER_BAN_TIME (15 * 60)
51945 +
51946 +static int __get_dumpable(unsigned long mm_flags)
51947 +{
51948 + int ret;
51949 +
51950 + ret = mm_flags & MMF_DUMPABLE_MASK;
51951 + return (ret >= 2) ? 2 : ret;
51952 +}
51953 +#endif
51954 +
51955 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51956 +{
51957 +#ifdef CONFIG_GRKERNSEC_BRUTE
51958 + uid_t uid = 0;
51959 +
51960 + if (!grsec_enable_brute)
51961 + return;
51962 +
51963 + rcu_read_lock();
51964 + read_lock(&tasklist_lock);
51965 + read_lock(&grsec_exec_file_lock);
51966 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51967 + p->real_parent->brute = 1;
51968 + else {
51969 + const struct cred *cred = __task_cred(p), *cred2;
51970 + struct task_struct *tsk, *tsk2;
51971 +
51972 + if (!__get_dumpable(mm_flags) && cred->uid) {
51973 + struct user_struct *user;
51974 +
51975 + uid = cred->uid;
51976 +
51977 + /* this is put upon execution past expiration */
51978 + user = find_user(uid);
51979 + if (user == NULL)
51980 + goto unlock;
51981 + user->banned = 1;
51982 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51983 + if (user->ban_expires == ~0UL)
51984 + user->ban_expires--;
51985 +
51986 + do_each_thread(tsk2, tsk) {
51987 + cred2 = __task_cred(tsk);
51988 + if (tsk != p && cred2->uid == uid)
51989 + gr_fake_force_sig(SIGKILL, tsk);
51990 + } while_each_thread(tsk2, tsk);
51991 + }
51992 + }
51993 +unlock:
51994 + read_unlock(&grsec_exec_file_lock);
51995 + read_unlock(&tasklist_lock);
51996 + rcu_read_unlock();
51997 +
51998 + if (uid)
51999 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
52000 +#endif
52001 + return;
52002 +}
52003 +
52004 +void gr_handle_brute_check(void)
52005 +{
52006 +#ifdef CONFIG_GRKERNSEC_BRUTE
52007 + if (current->brute)
52008 + msleep(30 * 1000);
52009 +#endif
52010 + return;
52011 +}
52012 +
52013 +void gr_handle_kernel_exploit(void)
52014 +{
52015 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
52016 + const struct cred *cred;
52017 + struct task_struct *tsk, *tsk2;
52018 + struct user_struct *user;
52019 + uid_t uid;
52020 +
52021 + if (in_irq() || in_serving_softirq() || in_nmi())
52022 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
52023 +
52024 + uid = current_uid();
52025 +
52026 + if (uid == 0)
52027 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
52028 + else {
52029 + /* kill all the processes of this user, hold a reference
52030 + to their creds struct, and prevent them from creating
52031 + another process until system reset
52032 + */
52033 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
52034 + /* we intentionally leak this ref */
52035 + user = get_uid(current->cred->user);
52036 + if (user) {
52037 + user->banned = 1;
52038 + user->ban_expires = ~0UL;
52039 + }
52040 +
52041 + read_lock(&tasklist_lock);
52042 + do_each_thread(tsk2, tsk) {
52043 + cred = __task_cred(tsk);
52044 + if (cred->uid == uid)
52045 + gr_fake_force_sig(SIGKILL, tsk);
52046 + } while_each_thread(tsk2, tsk);
52047 + read_unlock(&tasklist_lock);
52048 + }
52049 +#endif
52050 +}
52051 +
52052 +int __gr_process_user_ban(struct user_struct *user)
52053 +{
52054 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52055 + if (unlikely(user->banned)) {
52056 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
52057 + user->banned = 0;
52058 + user->ban_expires = 0;
52059 + free_uid(user);
52060 + } else
52061 + return -EPERM;
52062 + }
52063 +#endif
52064 + return 0;
52065 +}
52066 +
52067 +int gr_process_user_ban(void)
52068 +{
52069 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52070 + return __gr_process_user_ban(current->cred->user);
52071 +#endif
52072 + return 0;
52073 +}
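/*
 * [Editorial note, not part of the patch] The ban bookkeeping above covers two
 * cases: gr_handle_brute_attach() sets user->ban_expires to
 * get_seconds() + GR_USER_BAN_TIME (15 minutes) and holds the reference taken
 * by find_user(), while gr_handle_kernel_exploit() sets ban_expires to ~0UL,
 * which __gr_process_user_ban() treats as a ban that never expires.  Once a
 * timed ban lapses, __gr_process_user_ban() clears the flags and drops the
 * reference with free_uid(), matching the "this is put upon execution past
 * expiration" comment.  A hypothetical caller (the actual hook sites live
 * elsewhere in this patch) would simply do:
 *
 *	if (gr_process_user_ban())
 *		return -EPERM;
 */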
52074 diff -urNp linux-2.6.32.42/grsecurity/grsec_sock.c linux-2.6.32.42/grsecurity/grsec_sock.c
52075 --- linux-2.6.32.42/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
52076 +++ linux-2.6.32.42/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
52077 @@ -0,0 +1,275 @@
52078 +#include <linux/kernel.h>
52079 +#include <linux/module.h>
52080 +#include <linux/sched.h>
52081 +#include <linux/file.h>
52082 +#include <linux/net.h>
52083 +#include <linux/in.h>
52084 +#include <linux/ip.h>
52085 +#include <net/sock.h>
52086 +#include <net/inet_sock.h>
52087 +#include <linux/grsecurity.h>
52088 +#include <linux/grinternal.h>
52089 +#include <linux/gracl.h>
52090 +
52091 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
52092 +EXPORT_SYMBOL(gr_cap_rtnetlink);
52093 +
52094 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
52095 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
52096 +
52097 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
52098 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
52099 +
52100 +#ifdef CONFIG_UNIX_MODULE
52101 +EXPORT_SYMBOL(gr_acl_handle_unix);
52102 +EXPORT_SYMBOL(gr_acl_handle_mknod);
52103 +EXPORT_SYMBOL(gr_handle_chroot_unix);
52104 +EXPORT_SYMBOL(gr_handle_create);
52105 +#endif
52106 +
52107 +#ifdef CONFIG_GRKERNSEC
52108 +#define gr_conn_table_size 32749
52109 +struct conn_table_entry {
52110 + struct conn_table_entry *next;
52111 + struct signal_struct *sig;
52112 +};
52113 +
52114 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
52115 +DEFINE_SPINLOCK(gr_conn_table_lock);
52116 +
52117 +extern const char * gr_socktype_to_name(unsigned char type);
52118 +extern const char * gr_proto_to_name(unsigned char proto);
52119 +extern const char * gr_sockfamily_to_name(unsigned char family);
52120 +
52121 +static __inline__ int
52122 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
52123 +{
52124 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
52125 +}
52126 +
52127 +static __inline__ int
52128 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
52129 + __u16 sport, __u16 dport)
52130 +{
52131 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
52132 + sig->gr_sport == sport && sig->gr_dport == dport))
52133 + return 1;
52134 + else
52135 + return 0;
52136 +}
52137 +
52138 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
52139 +{
52140 + struct conn_table_entry **match;
52141 + unsigned int index;
52142 +
52143 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52144 + sig->gr_sport, sig->gr_dport,
52145 + gr_conn_table_size);
52146 +
52147 + newent->sig = sig;
52148 +
52149 + match = &gr_conn_table[index];
52150 + newent->next = *match;
52151 + *match = newent;
52152 +
52153 + return;
52154 +}
52155 +
52156 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
52157 +{
52158 + struct conn_table_entry *match, *last = NULL;
52159 + unsigned int index;
52160 +
52161 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52162 + sig->gr_sport, sig->gr_dport,
52163 + gr_conn_table_size);
52164 +
52165 + match = gr_conn_table[index];
52166 + while (match && !conn_match(match->sig,
52167 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
52168 + sig->gr_dport)) {
52169 + last = match;
52170 + match = match->next;
52171 + }
52172 +
52173 + if (match) {
52174 + if (last)
52175 + last->next = match->next;
52176 + else
52177 + gr_conn_table[index] = NULL;
52178 + kfree(match);
52179 + }
52180 +
52181 + return;
52182 +}
52183 +
52184 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
52185 + __u16 sport, __u16 dport)
52186 +{
52187 + struct conn_table_entry *match;
52188 + unsigned int index;
52189 +
52190 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
52191 +
52192 + match = gr_conn_table[index];
52193 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
52194 + match = match->next;
52195 +
52196 + if (match)
52197 + return match->sig;
52198 + else
52199 + return NULL;
52200 +}
52201 +
52202 +#endif
52203 +
52204 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
52205 +{
52206 +#ifdef CONFIG_GRKERNSEC
52207 + struct signal_struct *sig = task->signal;
52208 + struct conn_table_entry *newent;
52209 +
52210 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
52211 + if (newent == NULL)
52212 + return;
52213 + /* no bh lock needed since we are called with bh disabled */
52214 + spin_lock(&gr_conn_table_lock);
52215 + gr_del_task_from_ip_table_nolock(sig);
52216 + sig->gr_saddr = inet->rcv_saddr;
52217 + sig->gr_daddr = inet->daddr;
52218 + sig->gr_sport = inet->sport;
52219 + sig->gr_dport = inet->dport;
52220 + gr_add_to_task_ip_table_nolock(sig, newent);
52221 + spin_unlock(&gr_conn_table_lock);
52222 +#endif
52223 + return;
52224 +}
52225 +
52226 +void gr_del_task_from_ip_table(struct task_struct *task)
52227 +{
52228 +#ifdef CONFIG_GRKERNSEC
52229 + spin_lock_bh(&gr_conn_table_lock);
52230 + gr_del_task_from_ip_table_nolock(task->signal);
52231 + spin_unlock_bh(&gr_conn_table_lock);
52232 +#endif
52233 + return;
52234 +}
52235 +
52236 +void
52237 +gr_attach_curr_ip(const struct sock *sk)
52238 +{
52239 +#ifdef CONFIG_GRKERNSEC
52240 + struct signal_struct *p, *set;
52241 + const struct inet_sock *inet = inet_sk(sk);
52242 +
52243 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
52244 + return;
52245 +
52246 + set = current->signal;
52247 +
52248 + spin_lock_bh(&gr_conn_table_lock);
52249 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
52250 + inet->dport, inet->sport);
52251 + if (unlikely(p != NULL)) {
52252 + set->curr_ip = p->curr_ip;
52253 + set->used_accept = 1;
52254 + gr_del_task_from_ip_table_nolock(p);
52255 + spin_unlock_bh(&gr_conn_table_lock);
52256 + return;
52257 + }
52258 + spin_unlock_bh(&gr_conn_table_lock);
52259 +
52260 + set->curr_ip = inet->daddr;
52261 + set->used_accept = 1;
52262 +#endif
52263 + return;
52264 +}
52265 +
52266 +int
52267 +gr_handle_sock_all(const int family, const int type, const int protocol)
52268 +{
52269 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52270 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
52271 + (family != AF_UNIX)) {
52272 + if (family == AF_INET)
52273 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
52274 + else
52275 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
52276 + return -EACCES;
52277 + }
52278 +#endif
52279 + return 0;
52280 +}
52281 +
52282 +int
52283 +gr_handle_sock_server(const struct sockaddr *sck)
52284 +{
52285 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52286 + if (grsec_enable_socket_server &&
52287 + in_group_p(grsec_socket_server_gid) &&
52288 + sck && (sck->sa_family != AF_UNIX) &&
52289 + (sck->sa_family != AF_LOCAL)) {
52290 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52291 + return -EACCES;
52292 + }
52293 +#endif
52294 + return 0;
52295 +}
52296 +
52297 +int
52298 +gr_handle_sock_server_other(const struct sock *sck)
52299 +{
52300 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52301 + if (grsec_enable_socket_server &&
52302 + in_group_p(grsec_socket_server_gid) &&
52303 + sck && (sck->sk_family != AF_UNIX) &&
52304 + (sck->sk_family != AF_LOCAL)) {
52305 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52306 + return -EACCES;
52307 + }
52308 +#endif
52309 + return 0;
52310 +}
52311 +
52312 +int
52313 +gr_handle_sock_client(const struct sockaddr *sck)
52314 +{
52315 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52316 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52317 + sck && (sck->sa_family != AF_UNIX) &&
52318 + (sck->sa_family != AF_LOCAL)) {
52319 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52320 + return -EACCES;
52321 + }
52322 +#endif
52323 + return 0;
52324 +}
52325 +
52326 +kernel_cap_t
52327 +gr_cap_rtnetlink(struct sock *sock)
52328 +{
52329 +#ifdef CONFIG_GRKERNSEC
52330 + if (!gr_acl_is_enabled())
52331 + return current_cap();
52332 + else if (sock->sk_protocol == NETLINK_ISCSI &&
52333 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
52334 + gr_is_capable(CAP_SYS_ADMIN))
52335 + return current_cap();
52336 + else if (sock->sk_protocol == NETLINK_AUDIT &&
52337 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
52338 + gr_is_capable(CAP_AUDIT_WRITE) &&
52339 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
52340 + gr_is_capable(CAP_AUDIT_CONTROL))
52341 + return current_cap();
52342 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
52343 + ((sock->sk_protocol == NETLINK_ROUTE) ?
52344 + gr_is_capable_nolog(CAP_NET_ADMIN) :
52345 + gr_is_capable(CAP_NET_ADMIN)))
52346 + return current_cap();
52347 + else
52348 + return __cap_empty_set;
52349 +#else
52350 + return current_cap();
52351 +#endif
52352 +}
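/*
 * [Editorial note, not part of the patch] The connection table above is a
 * chained hash keyed on the (saddr, daddr, sport, dport) tuple of a task's
 * most recent TCP connection.  gr_update_task_in_ip_table() records the tuple
 * for the connecting task, and gr_attach_curr_ip() (presumably run on the
 * accepting side) looks it up with source and destination swapped
 * (daddr/rcv_saddr, dport/sport): if the peer is a local task that registered
 * itself, the accepting task inherits that task's curr_ip, otherwise curr_ip
 * is set to the remote address (inet->daddr).  Entries are GFP_ATOMIC because
 * the update path runs with bottom halves disabled.  The GID-based socket
 * checks at the bottom share one calling convention; a hypothetical call site
 * (the real hooks are added elsewhere in this patch) would be:
 *
 *	error = gr_handle_sock_all(family, type, protocol);
 *	if (error)
 *		return error;
 *
 * with -EACCES returned for users in the socket_all_gid group.
 */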
52353 diff -urNp linux-2.6.32.42/grsecurity/grsec_sysctl.c linux-2.6.32.42/grsecurity/grsec_sysctl.c
52354 --- linux-2.6.32.42/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52355 +++ linux-2.6.32.42/grsecurity/grsec_sysctl.c 2011-06-29 19:37:19.000000000 -0400
52356 @@ -0,0 +1,489 @@
52357 +#include <linux/kernel.h>
52358 +#include <linux/sched.h>
52359 +#include <linux/sysctl.h>
52360 +#include <linux/grsecurity.h>
52361 +#include <linux/grinternal.h>
52362 +
52363 +int
52364 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52365 +{
52366 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52367 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52368 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52369 + return -EACCES;
52370 + }
52371 +#endif
52372 + return 0;
52373 +}
52374 +
52375 +#ifdef CONFIG_GRKERNSEC_ROFS
52376 +static int __maybe_unused one = 1;
52377 +#endif
52378 +
52379 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52380 +ctl_table grsecurity_table[] = {
52381 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52382 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52383 +#ifdef CONFIG_GRKERNSEC_IO
52384 + {
52385 + .ctl_name = CTL_UNNUMBERED,
52386 + .procname = "disable_priv_io",
52387 + .data = &grsec_disable_privio,
52388 + .maxlen = sizeof(int),
52389 + .mode = 0600,
52390 + .proc_handler = &proc_dointvec,
52391 + },
52392 +#endif
52393 +#endif
52394 +#ifdef CONFIG_GRKERNSEC_LINK
52395 + {
52396 + .ctl_name = CTL_UNNUMBERED,
52397 + .procname = "linking_restrictions",
52398 + .data = &grsec_enable_link,
52399 + .maxlen = sizeof(int),
52400 + .mode = 0600,
52401 + .proc_handler = &proc_dointvec,
52402 + },
52403 +#endif
52404 +#ifdef CONFIG_GRKERNSEC_BRUTE
52405 + {
52406 + .ctl_name = CTL_UNNUMBERED,
52407 + .procname = "deter_bruteforce",
52408 + .data = &grsec_enable_brute,
52409 + .maxlen = sizeof(int),
52410 + .mode = 0600,
52411 + .proc_handler = &proc_dointvec,
52412 + },
52413 +#endif
52414 +#ifdef CONFIG_GRKERNSEC_FIFO
52415 + {
52416 + .ctl_name = CTL_UNNUMBERED,
52417 + .procname = "fifo_restrictions",
52418 + .data = &grsec_enable_fifo,
52419 + .maxlen = sizeof(int),
52420 + .mode = 0600,
52421 + .proc_handler = &proc_dointvec,
52422 + },
52423 +#endif
52424 +#ifdef CONFIG_GRKERNSEC_EXECVE
52425 + {
52426 + .ctl_name = CTL_UNNUMBERED,
52427 + .procname = "execve_limiting",
52428 + .data = &grsec_enable_execve,
52429 + .maxlen = sizeof(int),
52430 + .mode = 0600,
52431 + .proc_handler = &proc_dointvec,
52432 + },
52433 +#endif
52434 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52435 + {
52436 + .ctl_name = CTL_UNNUMBERED,
52437 + .procname = "ip_blackhole",
52438 + .data = &grsec_enable_blackhole,
52439 + .maxlen = sizeof(int),
52440 + .mode = 0600,
52441 + .proc_handler = &proc_dointvec,
52442 + },
52443 + {
52444 + .ctl_name = CTL_UNNUMBERED,
52445 + .procname = "lastack_retries",
52446 + .data = &grsec_lastack_retries,
52447 + .maxlen = sizeof(int),
52448 + .mode = 0600,
52449 + .proc_handler = &proc_dointvec,
52450 + },
52451 +#endif
52452 +#ifdef CONFIG_GRKERNSEC_EXECLOG
52453 + {
52454 + .ctl_name = CTL_UNNUMBERED,
52455 + .procname = "exec_logging",
52456 + .data = &grsec_enable_execlog,
52457 + .maxlen = sizeof(int),
52458 + .mode = 0600,
52459 + .proc_handler = &proc_dointvec,
52460 + },
52461 +#endif
52462 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52463 + {
52464 + .ctl_name = CTL_UNNUMBERED,
52465 + .procname = "rwxmap_logging",
52466 + .data = &grsec_enable_log_rwxmaps,
52467 + .maxlen = sizeof(int),
52468 + .mode = 0600,
52469 + .proc_handler = &proc_dointvec,
52470 + },
52471 +#endif
52472 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52473 + {
52474 + .ctl_name = CTL_UNNUMBERED,
52475 + .procname = "signal_logging",
52476 + .data = &grsec_enable_signal,
52477 + .maxlen = sizeof(int),
52478 + .mode = 0600,
52479 + .proc_handler = &proc_dointvec,
52480 + },
52481 +#endif
52482 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
52483 + {
52484 + .ctl_name = CTL_UNNUMBERED,
52485 + .procname = "forkfail_logging",
52486 + .data = &grsec_enable_forkfail,
52487 + .maxlen = sizeof(int),
52488 + .mode = 0600,
52489 + .proc_handler = &proc_dointvec,
52490 + },
52491 +#endif
52492 +#ifdef CONFIG_GRKERNSEC_TIME
52493 + {
52494 + .ctl_name = CTL_UNNUMBERED,
52495 + .procname = "timechange_logging",
52496 + .data = &grsec_enable_time,
52497 + .maxlen = sizeof(int),
52498 + .mode = 0600,
52499 + .proc_handler = &proc_dointvec,
52500 + },
52501 +#endif
52502 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52503 + {
52504 + .ctl_name = CTL_UNNUMBERED,
52505 + .procname = "chroot_deny_shmat",
52506 + .data = &grsec_enable_chroot_shmat,
52507 + .maxlen = sizeof(int),
52508 + .mode = 0600,
52509 + .proc_handler = &proc_dointvec,
52510 + },
52511 +#endif
52512 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52513 + {
52514 + .ctl_name = CTL_UNNUMBERED,
52515 + .procname = "chroot_deny_unix",
52516 + .data = &grsec_enable_chroot_unix,
52517 + .maxlen = sizeof(int),
52518 + .mode = 0600,
52519 + .proc_handler = &proc_dointvec,
52520 + },
52521 +#endif
52522 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52523 + {
52524 + .ctl_name = CTL_UNNUMBERED,
52525 + .procname = "chroot_deny_mount",
52526 + .data = &grsec_enable_chroot_mount,
52527 + .maxlen = sizeof(int),
52528 + .mode = 0600,
52529 + .proc_handler = &proc_dointvec,
52530 + },
52531 +#endif
52532 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52533 + {
52534 + .ctl_name = CTL_UNNUMBERED,
52535 + .procname = "chroot_deny_fchdir",
52536 + .data = &grsec_enable_chroot_fchdir,
52537 + .maxlen = sizeof(int),
52538 + .mode = 0600,
52539 + .proc_handler = &proc_dointvec,
52540 + },
52541 +#endif
52542 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52543 + {
52544 + .ctl_name = CTL_UNNUMBERED,
52545 + .procname = "chroot_deny_chroot",
52546 + .data = &grsec_enable_chroot_double,
52547 + .maxlen = sizeof(int),
52548 + .mode = 0600,
52549 + .proc_handler = &proc_dointvec,
52550 + },
52551 +#endif
52552 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52553 + {
52554 + .ctl_name = CTL_UNNUMBERED,
52555 + .procname = "chroot_deny_pivot",
52556 + .data = &grsec_enable_chroot_pivot,
52557 + .maxlen = sizeof(int),
52558 + .mode = 0600,
52559 + .proc_handler = &proc_dointvec,
52560 + },
52561 +#endif
52562 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52563 + {
52564 + .ctl_name = CTL_UNNUMBERED,
52565 + .procname = "chroot_enforce_chdir",
52566 + .data = &grsec_enable_chroot_chdir,
52567 + .maxlen = sizeof(int),
52568 + .mode = 0600,
52569 + .proc_handler = &proc_dointvec,
52570 + },
52571 +#endif
52572 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52573 + {
52574 + .ctl_name = CTL_UNNUMBERED,
52575 + .procname = "chroot_deny_chmod",
52576 + .data = &grsec_enable_chroot_chmod,
52577 + .maxlen = sizeof(int),
52578 + .mode = 0600,
52579 + .proc_handler = &proc_dointvec,
52580 + },
52581 +#endif
52582 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52583 + {
52584 + .ctl_name = CTL_UNNUMBERED,
52585 + .procname = "chroot_deny_mknod",
52586 + .data = &grsec_enable_chroot_mknod,
52587 + .maxlen = sizeof(int),
52588 + .mode = 0600,
52589 + .proc_handler = &proc_dointvec,
52590 + },
52591 +#endif
52592 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52593 + {
52594 + .ctl_name = CTL_UNNUMBERED,
52595 + .procname = "chroot_restrict_nice",
52596 + .data = &grsec_enable_chroot_nice,
52597 + .maxlen = sizeof(int),
52598 + .mode = 0600,
52599 + .proc_handler = &proc_dointvec,
52600 + },
52601 +#endif
52602 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52603 + {
52604 + .ctl_name = CTL_UNNUMBERED,
52605 + .procname = "chroot_execlog",
52606 + .data = &grsec_enable_chroot_execlog,
52607 + .maxlen = sizeof(int),
52608 + .mode = 0600,
52609 + .proc_handler = &proc_dointvec,
52610 + },
52611 +#endif
52612 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52613 + {
52614 + .ctl_name = CTL_UNNUMBERED,
52615 + .procname = "chroot_caps",
52616 + .data = &grsec_enable_chroot_caps,
52617 + .maxlen = sizeof(int),
52618 + .mode = 0600,
52619 + .proc_handler = &proc_dointvec,
52620 + },
52621 +#endif
52622 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52623 + {
52624 + .ctl_name = CTL_UNNUMBERED,
52625 + .procname = "chroot_deny_sysctl",
52626 + .data = &grsec_enable_chroot_sysctl,
52627 + .maxlen = sizeof(int),
52628 + .mode = 0600,
52629 + .proc_handler = &proc_dointvec,
52630 + },
52631 +#endif
52632 +#ifdef CONFIG_GRKERNSEC_TPE
52633 + {
52634 + .ctl_name = CTL_UNNUMBERED,
52635 + .procname = "tpe",
52636 + .data = &grsec_enable_tpe,
52637 + .maxlen = sizeof(int),
52638 + .mode = 0600,
52639 + .proc_handler = &proc_dointvec,
52640 + },
52641 + {
52642 + .ctl_name = CTL_UNNUMBERED,
52643 + .procname = "tpe_gid",
52644 + .data = &grsec_tpe_gid,
52645 + .maxlen = sizeof(int),
52646 + .mode = 0600,
52647 + .proc_handler = &proc_dointvec,
52648 + },
52649 +#endif
52650 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52651 + {
52652 + .ctl_name = CTL_UNNUMBERED,
52653 + .procname = "tpe_invert",
52654 + .data = &grsec_enable_tpe_invert,
52655 + .maxlen = sizeof(int),
52656 + .mode = 0600,
52657 + .proc_handler = &proc_dointvec,
52658 + },
52659 +#endif
52660 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52661 + {
52662 + .ctl_name = CTL_UNNUMBERED,
52663 + .procname = "tpe_restrict_all",
52664 + .data = &grsec_enable_tpe_all,
52665 + .maxlen = sizeof(int),
52666 + .mode = 0600,
52667 + .proc_handler = &proc_dointvec,
52668 + },
52669 +#endif
52670 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52671 + {
52672 + .ctl_name = CTL_UNNUMBERED,
52673 + .procname = "socket_all",
52674 + .data = &grsec_enable_socket_all,
52675 + .maxlen = sizeof(int),
52676 + .mode = 0600,
52677 + .proc_handler = &proc_dointvec,
52678 + },
52679 + {
52680 + .ctl_name = CTL_UNNUMBERED,
52681 + .procname = "socket_all_gid",
52682 + .data = &grsec_socket_all_gid,
52683 + .maxlen = sizeof(int),
52684 + .mode = 0600,
52685 + .proc_handler = &proc_dointvec,
52686 + },
52687 +#endif
52688 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52689 + {
52690 + .ctl_name = CTL_UNNUMBERED,
52691 + .procname = "socket_client",
52692 + .data = &grsec_enable_socket_client,
52693 + .maxlen = sizeof(int),
52694 + .mode = 0600,
52695 + .proc_handler = &proc_dointvec,
52696 + },
52697 + {
52698 + .ctl_name = CTL_UNNUMBERED,
52699 + .procname = "socket_client_gid",
52700 + .data = &grsec_socket_client_gid,
52701 + .maxlen = sizeof(int),
52702 + .mode = 0600,
52703 + .proc_handler = &proc_dointvec,
52704 + },
52705 +#endif
52706 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52707 + {
52708 + .ctl_name = CTL_UNNUMBERED,
52709 + .procname = "socket_server",
52710 + .data = &grsec_enable_socket_server,
52711 + .maxlen = sizeof(int),
52712 + .mode = 0600,
52713 + .proc_handler = &proc_dointvec,
52714 + },
52715 + {
52716 + .ctl_name = CTL_UNNUMBERED,
52717 + .procname = "socket_server_gid",
52718 + .data = &grsec_socket_server_gid,
52719 + .maxlen = sizeof(int),
52720 + .mode = 0600,
52721 + .proc_handler = &proc_dointvec,
52722 + },
52723 +#endif
52724 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52725 + {
52726 + .ctl_name = CTL_UNNUMBERED,
52727 + .procname = "audit_group",
52728 + .data = &grsec_enable_group,
52729 + .maxlen = sizeof(int),
52730 + .mode = 0600,
52731 + .proc_handler = &proc_dointvec,
52732 + },
52733 + {
52734 + .ctl_name = CTL_UNNUMBERED,
52735 + .procname = "audit_gid",
52736 + .data = &grsec_audit_gid,
52737 + .maxlen = sizeof(int),
52738 + .mode = 0600,
52739 + .proc_handler = &proc_dointvec,
52740 + },
52741 +#endif
52742 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52743 + {
52744 + .ctl_name = CTL_UNNUMBERED,
52745 + .procname = "audit_chdir",
52746 + .data = &grsec_enable_chdir,
52747 + .maxlen = sizeof(int),
52748 + .mode = 0600,
52749 + .proc_handler = &proc_dointvec,
52750 + },
52751 +#endif
52752 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52753 + {
52754 + .ctl_name = CTL_UNNUMBERED,
52755 + .procname = "audit_mount",
52756 + .data = &grsec_enable_mount,
52757 + .maxlen = sizeof(int),
52758 + .mode = 0600,
52759 + .proc_handler = &proc_dointvec,
52760 + },
52761 +#endif
52762 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52763 + {
52764 + .ctl_name = CTL_UNNUMBERED,
52765 + .procname = "audit_textrel",
52766 + .data = &grsec_enable_audit_textrel,
52767 + .maxlen = sizeof(int),
52768 + .mode = 0600,
52769 + .proc_handler = &proc_dointvec,
52770 + },
52771 +#endif
52772 +#ifdef CONFIG_GRKERNSEC_DMESG
52773 + {
52774 + .ctl_name = CTL_UNNUMBERED,
52775 + .procname = "dmesg",
52776 + .data = &grsec_enable_dmesg,
52777 + .maxlen = sizeof(int),
52778 + .mode = 0600,
52779 + .proc_handler = &proc_dointvec,
52780 + },
52781 +#endif
52782 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52783 + {
52784 + .ctl_name = CTL_UNNUMBERED,
52785 + .procname = "chroot_findtask",
52786 + .data = &grsec_enable_chroot_findtask,
52787 + .maxlen = sizeof(int),
52788 + .mode = 0600,
52789 + .proc_handler = &proc_dointvec,
52790 + },
52791 +#endif
52792 +#ifdef CONFIG_GRKERNSEC_RESLOG
52793 + {
52794 + .ctl_name = CTL_UNNUMBERED,
52795 + .procname = "resource_logging",
52796 + .data = &grsec_resource_logging,
52797 + .maxlen = sizeof(int),
52798 + .mode = 0600,
52799 + .proc_handler = &proc_dointvec,
52800 + },
52801 +#endif
52802 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52803 + {
52804 + .ctl_name = CTL_UNNUMBERED,
52805 + .procname = "audit_ptrace",
52806 + .data = &grsec_enable_audit_ptrace,
52807 + .maxlen = sizeof(int),
52808 + .mode = 0600,
52809 + .proc_handler = &proc_dointvec,
52810 + },
52811 +#endif
52812 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52813 + {
52814 + .ctl_name = CTL_UNNUMBERED,
52815 + .procname = "harden_ptrace",
52816 + .data = &grsec_enable_harden_ptrace,
52817 + .maxlen = sizeof(int),
52818 + .mode = 0600,
52819 + .proc_handler = &proc_dointvec,
52820 + },
52821 +#endif
52822 + {
52823 + .ctl_name = CTL_UNNUMBERED,
52824 + .procname = "grsec_lock",
52825 + .data = &grsec_lock,
52826 + .maxlen = sizeof(int),
52827 + .mode = 0600,
52828 + .proc_handler = &proc_dointvec,
52829 + },
52830 +#endif
52831 +#ifdef CONFIG_GRKERNSEC_ROFS
52832 + {
52833 + .ctl_name = CTL_UNNUMBERED,
52834 + .procname = "romount_protect",
52835 + .data = &grsec_enable_rofs,
52836 + .maxlen = sizeof(int),
52837 + .mode = 0600,
52838 + .proc_handler = &proc_dointvec_minmax,
52839 + .extra1 = &one,
52840 + .extra2 = &one,
52841 + },
52842 +#endif
52843 + { .ctl_name = 0 }
52844 +};
52845 +#endif
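/*
 * [Editorial note, not part of the patch] Every entry in grsecurity_table uses
 * CTL_UNNUMBERED, mode 0600 and proc_dointvec, so the toggles are root-only
 * integers (typically exposed under /proc/sys/kernel/grsecurity/, with the
 * parent directory registered elsewhere in this patch).  The one exception is
 * romount_protect: it uses proc_dointvec_minmax with extra1 and extra2 both
 * pointing at the static "one", so the only accepted write is the value 1,
 * making the setting one-way until reboot.  Once grsec_lock is set, further
 * writes to the whole directory are refused by gr_handle_sysctl_mod() at the
 * top of this file.
 */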
52846 diff -urNp linux-2.6.32.42/grsecurity/grsec_time.c linux-2.6.32.42/grsecurity/grsec_time.c
52847 --- linux-2.6.32.42/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52848 +++ linux-2.6.32.42/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52849 @@ -0,0 +1,16 @@
52850 +#include <linux/kernel.h>
52851 +#include <linux/sched.h>
52852 +#include <linux/grinternal.h>
52853 +#include <linux/module.h>
52854 +
52855 +void
52856 +gr_log_timechange(void)
52857 +{
52858 +#ifdef CONFIG_GRKERNSEC_TIME
52859 + if (grsec_enable_time)
52860 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52861 +#endif
52862 + return;
52863 +}
52864 +
52865 +EXPORT_SYMBOL(gr_log_timechange);
52866 diff -urNp linux-2.6.32.42/grsecurity/grsec_tpe.c linux-2.6.32.42/grsecurity/grsec_tpe.c
52867 --- linux-2.6.32.42/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52868 +++ linux-2.6.32.42/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52869 @@ -0,0 +1,39 @@
52870 +#include <linux/kernel.h>
52871 +#include <linux/sched.h>
52872 +#include <linux/file.h>
52873 +#include <linux/fs.h>
52874 +#include <linux/grinternal.h>
52875 +
52876 +extern int gr_acl_tpe_check(void);
52877 +
52878 +int
52879 +gr_tpe_allow(const struct file *file)
52880 +{
52881 +#ifdef CONFIG_GRKERNSEC
52882 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52883 + const struct cred *cred = current_cred();
52884 +
52885 + if (cred->uid && ((grsec_enable_tpe &&
52886 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52887 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52888 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52889 +#else
52890 + in_group_p(grsec_tpe_gid)
52891 +#endif
52892 + ) || gr_acl_tpe_check()) &&
52893 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52894 + (inode->i_mode & S_IWOTH))))) {
52895 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52896 + return 0;
52897 + }
52898 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52899 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52900 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52901 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52902 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52903 + return 0;
52904 + }
52905 +#endif
52906 +#endif
52907 + return 1;
52908 +}
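/*
 * [Editorial note, not part of the patch] The nested #ifdefs in gr_tpe_allow()
 * condense to a small predicate; restated (editorial sketch, not compiled):
 *
 *	deny = cred->uid != 0
 *	       && ( (grsec_enable_tpe && <user is in the TPE set>)
 *	          || gr_acl_tpe_check() )
 *	       && ( inode->i_uid != 0
 *	          || (inode->i_mode & (S_IWGRP | S_IWOTH)) );
 *
 * where <user is in the TPE set> is in_group_p(grsec_tpe_gid), or its negation
 * when TPE_INVERT is compiled in and tpe_invert is enabled, and inode is the
 * parent directory of the file being executed.  The TPE_ALL block extends the
 * denial to directories owned by another non-root user or writable by
 * group/other, regardless of group membership.
 */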
52909 diff -urNp linux-2.6.32.42/grsecurity/grsum.c linux-2.6.32.42/grsecurity/grsum.c
52910 --- linux-2.6.32.42/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52911 +++ linux-2.6.32.42/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52912 @@ -0,0 +1,61 @@
52913 +#include <linux/err.h>
52914 +#include <linux/kernel.h>
52915 +#include <linux/sched.h>
52916 +#include <linux/mm.h>
52917 +#include <linux/scatterlist.h>
52918 +#include <linux/crypto.h>
52919 +#include <linux/gracl.h>
52920 +
52921 +
52922 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52923 +#error "crypto and sha256 must be built into the kernel"
52924 +#endif
52925 +
52926 +int
52927 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52928 +{
52929 + char *p;
52930 + struct crypto_hash *tfm;
52931 + struct hash_desc desc;
52932 + struct scatterlist sg;
52933 + unsigned char temp_sum[GR_SHA_LEN];
52934 + volatile int retval = 0;
52935 + volatile int dummy = 0;
52936 + unsigned int i;
52937 +
52938 + sg_init_table(&sg, 1);
52939 +
52940 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52941 + if (IS_ERR(tfm)) {
52942 + /* should never happen, since sha256 should be built in */
52943 + return 1;
52944 + }
52945 +
52946 + desc.tfm = tfm;
52947 + desc.flags = 0;
52948 +
52949 + crypto_hash_init(&desc);
52950 +
52951 + p = salt;
52952 + sg_set_buf(&sg, p, GR_SALT_LEN);
52953 + crypto_hash_update(&desc, &sg, sg.length);
52954 +
52955 + p = entry->pw;
52956 + sg_set_buf(&sg, p, strlen(p));
52957 +
52958 + crypto_hash_update(&desc, &sg, sg.length);
52959 +
52960 + crypto_hash_final(&desc, temp_sum);
52961 +
52962 + memset(entry->pw, 0, GR_PW_LEN);
52963 +
52964 + for (i = 0; i < GR_SHA_LEN; i++)
52965 + if (sum[i] != temp_sum[i])
52966 + retval = 1;
52967 + else
52968 + dummy = 1; // waste a cycle
52969 +
52970 + crypto_free_hash(tfm);
52971 +
52972 + return retval;
52973 +}
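/*
 * [Editorial note, not part of the patch] chkpw() wipes the supplied plaintext
 * (the memset of entry->pw) before comparing digests, and the compare loop
 * walks all GR_SHA_LEN bytes instead of bailing out at the first mismatch,
 * with the volatile "dummy" write balancing the matching branch, so the
 * comparison takes roughly constant time.  The #error at the top enforces that
 * both CONFIG_CRYPTO and CONFIG_CRYPTO_SHA256 are built in (not modular),
 * which is why the GRKERNSEC Kconfig entry below selects CRYPTO and
 * CRYPTO_SHA256.
 */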
52974 diff -urNp linux-2.6.32.42/grsecurity/Kconfig linux-2.6.32.42/grsecurity/Kconfig
52975 --- linux-2.6.32.42/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52976 +++ linux-2.6.32.42/grsecurity/Kconfig 2011-06-29 20:55:13.000000000 -0400
52977 @@ -0,0 +1,1047 @@
52978 +#
52979 +# grsecurity configuration
52980 +#
52981 +
52982 +menu "Grsecurity"
52983 +
52984 +config GRKERNSEC
52985 + bool "Grsecurity"
52986 + select CRYPTO
52987 + select CRYPTO_SHA256
52988 + help
52989 + If you say Y here, you will be able to configure many features
52990 + that will enhance the security of your system. It is highly
52991 + recommended that you say Y here and read through the help
52992 + for each option so that you fully understand the features and
52993 + can evaluate their usefulness for your machine.
52994 +
52995 +choice
52996 + prompt "Security Level"
52997 + depends on GRKERNSEC
52998 + default GRKERNSEC_CUSTOM
52999 +
53000 +config GRKERNSEC_LOW
53001 + bool "Low"
53002 + select GRKERNSEC_LINK
53003 + select GRKERNSEC_FIFO
53004 + select GRKERNSEC_EXECVE
53005 + select GRKERNSEC_RANDNET
53006 + select GRKERNSEC_DMESG
53007 + select GRKERNSEC_CHROOT
53008 + select GRKERNSEC_CHROOT_CHDIR
53009 +
53010 + help
53011 + If you choose this option, several of the grsecurity options will
53012 + be enabled that will give you greater protection against a number
53013 + of attacks, while assuring that none of your software will have any
53014 + conflicts with the additional security measures. If you run a lot
53015 + of unusual software, or you are having problems with the higher
53016 + security levels, you should say Y here. With this option, the
53017 + following features are enabled:
53018 +
53019 + - Linking restrictions
53020 + - FIFO restrictions
53021 + - Enforcing RLIMIT_NPROC on execve
53022 + - Restricted dmesg
53023 + - Enforced chdir("/") on chroot
53024 + - Runtime module disabling
53025 +
53026 +config GRKERNSEC_MEDIUM
53027 + bool "Medium"
53028 + select PAX
53029 + select PAX_EI_PAX
53030 + select PAX_PT_PAX_FLAGS
53031 + select PAX_HAVE_ACL_FLAGS
53032 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53033 + select GRKERNSEC_CHROOT
53034 + select GRKERNSEC_CHROOT_SYSCTL
53035 + select GRKERNSEC_LINK
53036 + select GRKERNSEC_FIFO
53037 + select GRKERNSEC_EXECVE
53038 + select GRKERNSEC_DMESG
53039 + select GRKERNSEC_RANDNET
53040 + select GRKERNSEC_FORKFAIL
53041 + select GRKERNSEC_TIME
53042 + select GRKERNSEC_SIGNAL
53043 + select GRKERNSEC_CHROOT
53044 + select GRKERNSEC_CHROOT_UNIX
53045 + select GRKERNSEC_CHROOT_MOUNT
53046 + select GRKERNSEC_CHROOT_PIVOT
53047 + select GRKERNSEC_CHROOT_DOUBLE
53048 + select GRKERNSEC_CHROOT_CHDIR
53049 + select GRKERNSEC_CHROOT_MKNOD
53050 + select GRKERNSEC_PROC
53051 + select GRKERNSEC_PROC_USERGROUP
53052 + select PAX_RANDUSTACK
53053 + select PAX_ASLR
53054 + select PAX_RANDMMAP
53055 + select PAX_REFCOUNT if (X86 || SPARC64)
53056 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB))
53057 +
53058 + help
53059 + If you say Y here, several features in addition to those included
53060 + in the low additional security level will be enabled. These
53061 + features provide even more security to your system, though in rare
53062 + cases they may be incompatible with very old or poorly written
53063 + software. If you enable this option, make sure that your auth
53064 + service (identd) is running as gid 1001. With this option,
53065 + the following features (in addition to those provided in the
53066 + low additional security level) will be enabled:
53067 +
53068 + - Failed fork logging
53069 + - Time change logging
53070 + - Signal logging
53071 + - Deny mounts in chroot
53072 + - Deny double chrooting
53073 + - Deny sysctl writes in chroot
53074 + - Deny mknod in chroot
53075 + - Deny access to abstract AF_UNIX sockets out of chroot
53076 + - Deny pivot_root in chroot
53077 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
53078 + - /proc restrictions with special GID set to 10 (usually wheel)
53079 + - Address Space Layout Randomization (ASLR)
53080 + - Prevent exploitation of most refcount overflows
53081 + - Bounds checking of copying between the kernel and userland
53082 +
53083 +config GRKERNSEC_HIGH
53084 + bool "High"
53085 + select GRKERNSEC_LINK
53086 + select GRKERNSEC_FIFO
53087 + select GRKERNSEC_EXECVE
53088 + select GRKERNSEC_DMESG
53089 + select GRKERNSEC_FORKFAIL
53090 + select GRKERNSEC_TIME
53091 + select GRKERNSEC_SIGNAL
53092 + select GRKERNSEC_CHROOT
53093 + select GRKERNSEC_CHROOT_SHMAT
53094 + select GRKERNSEC_CHROOT_UNIX
53095 + select GRKERNSEC_CHROOT_MOUNT
53096 + select GRKERNSEC_CHROOT_FCHDIR
53097 + select GRKERNSEC_CHROOT_PIVOT
53098 + select GRKERNSEC_CHROOT_DOUBLE
53099 + select GRKERNSEC_CHROOT_CHDIR
53100 + select GRKERNSEC_CHROOT_MKNOD
53101 + select GRKERNSEC_CHROOT_CAPS
53102 + select GRKERNSEC_CHROOT_SYSCTL
53103 + select GRKERNSEC_CHROOT_FINDTASK
53104 + select GRKERNSEC_SYSFS_RESTRICT
53105 + select GRKERNSEC_PROC
53106 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53107 + select GRKERNSEC_HIDESYM
53108 + select GRKERNSEC_BRUTE
53109 + select GRKERNSEC_PROC_USERGROUP
53110 + select GRKERNSEC_KMEM
53111 + select GRKERNSEC_RESLOG
53112 + select GRKERNSEC_RANDNET
53113 + select GRKERNSEC_PROC_ADD
53114 + select GRKERNSEC_CHROOT_CHMOD
53115 + select GRKERNSEC_CHROOT_NICE
53116 + select GRKERNSEC_AUDIT_MOUNT
53117 + select GRKERNSEC_MODHARDEN if (MODULES)
53118 + select GRKERNSEC_HARDEN_PTRACE
53119 + select GRKERNSEC_VM86 if (X86_32)
53120 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
53121 + select PAX
53122 + select PAX_RANDUSTACK
53123 + select PAX_ASLR
53124 + select PAX_RANDMMAP
53125 + select PAX_NOEXEC
53126 + select PAX_MPROTECT
53127 + select PAX_EI_PAX
53128 + select PAX_PT_PAX_FLAGS
53129 + select PAX_HAVE_ACL_FLAGS
53130 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
53131 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
53132 + select PAX_RANDKSTACK if (X86_TSC && X86)
53133 + select PAX_SEGMEXEC if (X86_32)
53134 + select PAX_PAGEEXEC
53135 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
53136 + select PAX_EMUTRAMP if (PARISC)
53137 + select PAX_EMUSIGRT if (PARISC)
53138 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
53139 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
53140 + select PAX_REFCOUNT if (X86 || SPARC64)
53141 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB))
53142 + help
53143 + If you say Y here, many of the features of grsecurity will be
53144 + enabled, which will protect you against many kinds of attacks
53145 + against your system. The heightened security comes at a cost
53146 + of an increased chance of incompatibilities with rare software
53147 + on your machine. Since this security level enables PaX, you should
53148 + view <http://pax.grsecurity.net> and read about the PaX
53149 + project. While you are there, download chpax and run it on
53150 + binaries that cause problems with PaX. Also remember that
53151 + since the /proc restrictions are enabled, you must run your
53152 + identd as gid 1001. This security level enables the following
53153 + features in addition to those listed in the low and medium
53154 + security levels:
53155 +
53156 + - Additional /proc restrictions
53157 + - Chmod restrictions in chroot
53158 + - No signals, ptrace, or viewing of processes outside of chroot
53159 + - Capability restrictions in chroot
53160 + - Deny fchdir out of chroot
53161 + - Priority restrictions in chroot
53162 + - Segmentation-based implementation of PaX
53163 + - Mprotect restrictions
53164 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
53165 + - Kernel stack randomization
53166 + - Mount/unmount/remount logging
53167 + - Kernel symbol hiding
53168 + - Prevention of memory exhaustion-based exploits
53169 + - Hardening of module auto-loading
53170 + - Ptrace restrictions
53171 + - Restricted vm86 mode
53172 + - Restricted sysfs/debugfs
53173 + - Active kernel exploit response
53174 +
53175 +config GRKERNSEC_CUSTOM
53176 + bool "Custom"
53177 + help
53178 + If you say Y here, you will be able to configure every grsecurity
53179 + option, which allows you to enable many more features that aren't
53180 + covered in the basic security levels. These additional features
53181 + include TPE, socket restrictions, and the sysctl system for
53182 + grsecurity. It is advised that you read through the help for
53183 + each option to determine its usefulness in your situation.
53184 +
53185 +endchoice
53186 +
53187 +menu "Address Space Protection"
53188 +depends on GRKERNSEC
53189 +
53190 +config GRKERNSEC_KMEM
53191 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
53192 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
53193 + help
53194 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
53195 + be written to via mmap or otherwise to modify the running kernel.
53196 + /dev/port will also not be allowed to be opened. If you have module
53197 + support disabled, enabling this will close up four ways that are
53198 + currently used to insert malicious code into the running kernel.
53199 + Even with all these features enabled, we still highly recommend that
53200 + you use the RBAC system, as it is still possible for an attacker to
53201 + modify the running kernel through privileged I/O granted by ioperm/iopl.
53202 + If you are not using XFree86, you may be able to stop this additional
53203 + case by enabling the 'Disable privileged I/O' option. Though nothing
53204 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
53205 + but only to video memory, which is the only writing we allow in this
53206 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping will
53207 + not be allowed to be mprotected with PROT_WRITE later.
53208 + It is highly recommended that you say Y here if you meet all the
53209 + conditions above.
53210 +
53211 +config GRKERNSEC_VM86
53212 + bool "Restrict VM86 mode"
53213 + depends on X86_32
53214 +
53215 + help
53216 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
53217 + make use of a special execution mode on 32bit x86 processors called
53218 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
53219 + video cards and will still work with this option enabled. The purpose
53220 + of the option is to prevent exploitation of emulation errors in
53221 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
53222 + Nearly all users should be able to enable this option.
53223 +
53224 +config GRKERNSEC_IO
53225 + bool "Disable privileged I/O"
53226 + depends on X86
53227 + select RTC_CLASS
53228 + select RTC_INTF_DEV
53229 + select RTC_DRV_CMOS
53230 +
53231 + help
53232 + If you say Y here, all ioperm and iopl calls will return an error.
53233 + Ioperm and iopl can be used to modify the running kernel.
53234 + Unfortunately, some programs need this access to operate properly,
53235 + the most notable of which are XFree86 and hwclock. hwclock can be
53236 + remedied by having RTC support in the kernel, so real-time
53237 + clock support is enabled if this option is enabled, to ensure
53238 + that hwclock operates correctly. XFree86 still will not
53239 + operate correctly with this option enabled, so DO NOT CHOOSE Y
53240 + IF YOU USE XFree86. If you use XFree86 and you still want to
53241 + protect your kernel against modification, use the RBAC system.
53242 +
53243 +config GRKERNSEC_PROC_MEMMAP
53244 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
53245 + default y if (PAX_NOEXEC || PAX_ASLR)
53246 + depends on PAX_NOEXEC || PAX_ASLR
53247 + help
53248 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
53249 + give no information about the addresses of a task's mappings if
53250 + PaX features that rely on random addresses are enabled for that task.
53251 + If you use PaX it is greatly recommended that you say Y here as it
53252 + closes up a hole that makes the full ASLR useless for suid
53253 + binaries.
53254 +
53255 +config GRKERNSEC_BRUTE
53256 + bool "Deter exploit bruteforcing"
53257 + help
53258 + If you say Y here, attempts to bruteforce exploits against forking
53259 + daemons such as apache or sshd, as well as against suid/sgid binaries
53260 + will be deterred. When a child of a forking daemon is killed by PaX
53261 + or crashes due to an illegal instruction or other suspicious signal,
53262 + the parent process will be delayed 30 seconds upon every subsequent
53263 + fork until the administrator is able to assess the situation and
53264 + restart the daemon.
53265 + In the suid/sgid case, the attempt is logged, the user has all their
53266 + processes terminated, and they are prevented from executing any further
53267 + processes for 15 minutes.
53268 + It is recommended that you also enable signal logging in the auditing
53269 + section so that logs are generated when a process triggers a suspicious
53270 + signal.
53271 + If the sysctl option is enabled, a sysctl option with name
53272 + "deter_bruteforce" is created.
53273 +
53274 +config GRKERNSEC_MODHARDEN
53275 + bool "Harden module auto-loading"
53276 + depends on MODULES
53277 + help
53278 + If you say Y here, module auto-loading in response to use of some
53279 + feature implemented by an unloaded module will be restricted to
53280 + root users. Enabling this option helps defend against attacks
53281 + by unprivileged users who abuse the auto-loading behavior to
53282 + cause a vulnerable module to load that is then exploited.
53283 +
53284 + If this option prevents a legitimate use of auto-loading for a
53285 + non-root user, the administrator can execute modprobe manually
53286 + with the exact name of the module mentioned in the alert log.
53287 + Alternatively, the administrator can add the module to the list
53288 + of modules loaded at boot by modifying init scripts.
53289 +
53290 + Modification of init scripts will most likely be needed on
53291 + Ubuntu servers with encrypted home directory support enabled,
53292 + as the first non-root user logging in will cause the ecb(aes),
53293 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
53294 +
53295 +config GRKERNSEC_HIDESYM
53296 + bool "Hide kernel symbols"
53297 + help
53298 + If you say Y here, getting information on loaded modules, and
53299 + displaying all kernel symbols through a syscall will be restricted
53300 + to users with CAP_SYS_MODULE. For software compatibility reasons,
53301 + /proc/kallsyms will be restricted to the root user. The RBAC
53302 + system can hide that entry even from root.
53303 +
53304 + This option also prevents leaking of kernel addresses through
53305 + several /proc entries.
53306 +
53307 + Note that this option is only effective provided the following
53308 + conditions are met:
53309 + 1) The kernel using grsecurity is not precompiled by some distribution
53310 + 2) You have also enabled GRKERNSEC_DMESG
53311 + 3) You are using the RBAC system and hiding other files such as your
53312 + kernel image and System.map. Alternatively, enabling this option
53313 + causes the permissions on /boot, /lib/modules, and the kernel
53314 + source directory to change at compile time to prevent
53315 + reading by non-root users.
53316 + If the above conditions are met, this option will aid in providing a
53317 + useful protection against local kernel exploitation of overflows
53318 + and arbitrary read/write vulnerabilities.
53319 +
53320 +config GRKERNSEC_KERN_LOCKOUT
53321 + bool "Active kernel exploit response"
53322 + depends on X86 || ARM || PPC || SPARC
53323 + help
53324 + If you say Y here, when a PaX alert is triggered due to suspicious
53325 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
53326 + or an OOPs occurs due to bad memory accesses, instead of just
53327 + terminating the offending process (and potentially allowing
53328 + a subsequent exploit from the same user), we will take one of two
53329 + actions:
53330 + If the user was root, we will panic the system.
53331 + If the user was non-root, we will log the attempt, terminate
53332 + all processes owned by the user, then prevent them from creating
53333 + any new processes until the system is restarted.
53334 + This deters repeated kernel exploitation/bruteforcing attempts
53335 + and is useful for later forensics.
53336 +
53337 +endmenu
53338 +menu "Role Based Access Control Options"
53339 +depends on GRKERNSEC
53340 +
53341 +config GRKERNSEC_RBAC_DEBUG
53342 + bool
53343 +
53344 +config GRKERNSEC_NO_RBAC
53345 + bool "Disable RBAC system"
53346 + help
53347 + If you say Y here, the /dev/grsec device will be removed from the kernel,
53348 + preventing the RBAC system from being enabled. You should only say Y
53349 + here if you have no intention of using the RBAC system, so as to prevent
53350 + an attacker with root access from misusing the RBAC system to hide files
53351 + and processes when loadable module support and /dev/[k]mem have been
53352 + locked down.
53353 +
53354 +config GRKERNSEC_ACL_HIDEKERN
53355 + bool "Hide kernel processes"
53356 + help
53357 + If you say Y here, all kernel threads will be hidden to all
53358 + processes but those whose subject has the "view hidden processes"
53359 + flag.
53360 +
53361 +config GRKERNSEC_ACL_MAXTRIES
53362 + int "Maximum tries before password lockout"
53363 + default 3
53364 + help
53365 + This option enforces the maximum number of times a user can attempt
53366 + to authorize themselves with the grsecurity RBAC system before being
53367 + denied the ability to attempt authorization again for a specified time.
53368 + The lower the number, the harder it will be to brute-force a password.
53369 +
53370 +config GRKERNSEC_ACL_TIMEOUT
53371 + int "Time to wait after max password tries, in seconds"
53372 + default 30
53373 + help
53374 + This option specifies the time the user must wait after attempting to
53375 + authorize to the RBAC system with the maximum number of invalid
53376 + passwords. The higher the number, the harder it will be to brute-force
53377 + a password.
53378 +
53379 +endmenu
53380 +menu "Filesystem Protections"
53381 +depends on GRKERNSEC
53382 +
53383 +config GRKERNSEC_PROC
53384 + bool "Proc restrictions"
53385 + help
53386 + If you say Y here, the permissions of the /proc filesystem
53387 + will be altered to enhance system security and privacy. You MUST
53388 + choose either a user only restriction or a user and group restriction.
53389 + Depending upon the option you choose, you can either restrict users to
53390 + see only the processes they themselves run, or choose a group that can
53391 + view all processes and files normally restricted to root if you choose
53392 + the "restrict to user only" option. NOTE: If you're running identd as
53393 + a non-root user, you will have to run it as the group you specify here.
53394 +
53395 +config GRKERNSEC_PROC_USER
53396 + bool "Restrict /proc to user only"
53397 + depends on GRKERNSEC_PROC
53398 + help
53399 + If you say Y here, non-root users will only be able to view their own
53400 + processes, and will be restricted from viewing network-related
53401 + information and kernel symbol and module information.
53402 +
53403 +config GRKERNSEC_PROC_USERGROUP
53404 + bool "Allow special group"
53405 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53406 + help
53407 + If you say Y here, you will be able to select a group that will be
53408 + able to view all processes and network-related information. If you've
53409 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53410 + remain hidden. This option is useful if you want to run identd as
53411 + a non-root user.
53412 +
53413 +config GRKERNSEC_PROC_GID
53414 + int "GID for special group"
53415 + depends on GRKERNSEC_PROC_USERGROUP
53416 + default 1001
53417 +
53418 +config GRKERNSEC_PROC_ADD
53419 + bool "Additional restrictions"
53420 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53421 + help
53422 + If you say Y here, additional restrictions will be placed on
53423 + /proc that keep normal users from viewing device information and
53424 + slabinfo information that could be useful for exploits.
53425 +
53426 +config GRKERNSEC_LINK
53427 + bool "Linking restrictions"
53428 + help
53429 + If you say Y here, /tmp race exploits will be prevented, since users
53430 + will no longer be able to follow symlinks owned by other users in
53431 + world-writable +t directories (e.g. /tmp), unless the owner of the
53432 + symlink is the owner of the directory. Users will also not be
53433 + able to hardlink to files they do not own. If the sysctl option is
53434 + enabled, a sysctl option with name "linking_restrictions" is created.
53435 +
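For illustration only, a minimal user-space sketch of the follow rule the help text above describes; this is an approximation of the policy, not the kernel code from this patch:

    /* Approximation of the symlink restriction described above: in a
     * sticky (+t), world-writable directory, a symlink may be followed
     * only if it is owned by the follower or by the directory's owner. */
    #include <stdbool.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static bool may_follow_link(const struct stat *dir, const struct stat *lnk,
                                uid_t follower)
    {
            /* the restriction only applies to +t world-writable directories */
            if (!(dir->st_mode & S_ISVTX) || !(dir->st_mode & S_IWOTH))
                    return true;
            return lnk->st_uid == follower || lnk->st_uid == dir->st_uid;
    }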
53436 +config GRKERNSEC_FIFO
53437 + bool "FIFO restrictions"
53438 + help
53439 + If you say Y here, users will not be able to write to FIFOs they don't
53440 + own in world-writable +t directories (e.g. /tmp), unless the owner of
53441 + the FIFO is also the owner of the directory it resides in. If the sysctl
53442 + option is enabled, a sysctl option with name "fifo_restrictions" is
53443 + created.
53444 +
53445 +config GRKERNSEC_SYSFS_RESTRICT
53446 + bool "Sysfs/debugfs restriction"
53447 + depends on SYSFS
53448 + help
53449 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53450 + any filesystem normally mounted under it (e.g. debugfs) will only
53451 + be accessible by root. These filesystems generally provide access
53452 + to hardware and debug information that isn't appropriate for unprivileged
53453 + users of the system. Sysfs and debugfs have also become a large source
53454 + of new vulnerabilities, ranging from infoleaks to local compromise.
53455 + There has been very little oversight with an eye toward security involved
53456 + in adding new exporters of information to these filesystems, so their
53457 + use is discouraged.
53458 + This option is equivalent to a chmod 0700 of the mount paths.
53459 +
53460 +config GRKERNSEC_ROFS
53461 + bool "Runtime read-only mount protection"
53462 + help
53463 + If you say Y here, a sysctl option with name "romount_protect" will
53464 + be created. By setting this option to 1 at runtime, filesystems
53465 + will be protected in the following ways:
53466 + * No new writable mounts will be allowed
53467 + * Existing read-only mounts won't be able to be remounted read/write
53468 + * Write operations will be denied on all block devices
53469 + This option acts independently of grsec_lock: once it is set to 1,
53470 + it cannot be turned off. Therefore, please be mindful of the resulting
53471 + behavior if this option is enabled in an init script on a read-only
53472 + filesystem. This feature is mainly intended for secure embedded systems.
53473 +
53474 +config GRKERNSEC_CHROOT
53475 + bool "Chroot jail restrictions"
53476 + help
53477 + If you say Y here, you will be able to choose several options that will
53478 + make breaking out of a chrooted jail much more difficult. If you
53479 + encounter no software incompatibilities with the following options, it
53480 + is recommended that you enable each one.
53481 +
53482 +config GRKERNSEC_CHROOT_MOUNT
53483 + bool "Deny mounts"
53484 + depends on GRKERNSEC_CHROOT
53485 + help
53486 + If you say Y here, processes inside a chroot will not be able to
53487 + mount or remount filesystems. If the sysctl option is enabled, a
53488 + sysctl option with name "chroot_deny_mount" is created.
53489 +
53490 +config GRKERNSEC_CHROOT_DOUBLE
53491 + bool "Deny double-chroots"
53492 + depends on GRKERNSEC_CHROOT
53493 + help
53494 + If you say Y here, processes inside a chroot will not be able to chroot
53495 + again outside the chroot. This is a widely used method of breaking
53496 + out of a chroot jail and should not be allowed. If the sysctl
53497 + option is enabled, a sysctl option with name
53498 + "chroot_deny_chroot" is created.
53499 +
53500 +config GRKERNSEC_CHROOT_PIVOT
53501 + bool "Deny pivot_root in chroot"
53502 + depends on GRKERNSEC_CHROOT
53503 + help
53504 + If you say Y here, processes inside a chroot will not be able to use
53505 + a function called pivot_root() that was introduced in Linux 2.3.41. It
53506 + works similarly to chroot in that it changes the root filesystem. This
53507 + function could be misused in a chrooted process to attempt to break out
53508 + of the chroot, and therefore should not be allowed. If the sysctl
53509 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
53510 + created.
53511 +
53512 +config GRKERNSEC_CHROOT_CHDIR
53513 + bool "Enforce chdir(\"/\") on all chroots"
53514 + depends on GRKERNSEC_CHROOT
53515 + help
53516 + If you say Y here, the current working directory of all newly-chrooted
53517 + applications will be set to the root directory of the chroot.
53518 + The man page on chroot(2) states:
53519 + Note that this call does not change the current working
53520 + directory, so that `.' can be outside the tree rooted at
53521 + `/'. In particular, the super-user can escape from a
53522 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53523 +
53524 + It is recommended that you say Y here, since it's not known to break
53525 + any software. If the sysctl option is enabled, a sysctl option with
53526 + name "chroot_enforce_chdir" is created.
53527 +
53528 +config GRKERNSEC_CHROOT_CHMOD
53529 + bool "Deny (f)chmod +s"
53530 + depends on GRKERNSEC_CHROOT
53531 + help
53532 + If you say Y here, processes inside a chroot will not be able to chmod
53533 + or fchmod files to make them have suid or sgid bits. This protects
53534 + against another published method of breaking a chroot. If the sysctl
53535 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
53536 + created.
53537 +
53538 +config GRKERNSEC_CHROOT_FCHDIR
53539 + bool "Deny fchdir out of chroot"
53540 + depends on GRKERNSEC_CHROOT
53541 + help
53542 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53543 + to a file descriptor of the chrooting process that points to a directory
53544 + outside the chroot will be stopped. If the sysctl option
53545 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53546 +
53547 +config GRKERNSEC_CHROOT_MKNOD
53548 + bool "Deny mknod"
53549 + depends on GRKERNSEC_CHROOT
53550 + help
53551 + If you say Y here, processes inside a chroot will not be allowed to
53552 + mknod. The problem with using mknod inside a chroot is that it
53553 + would allow an attacker to create a device entry that is the same
53554 + as one on the physical root of your system, which could be anything
53555 + from the console device to a device for your hard drive (which
53556 + they could then use to wipe the drive or steal data). It is recommended
53557 + that you say Y here, unless you run into software incompatibilities.
53558 + If the sysctl option is enabled, a sysctl option with name
53559 + "chroot_deny_mknod" is created.
53560 +
53561 +config GRKERNSEC_CHROOT_SHMAT
53562 + bool "Deny shmat() out of chroot"
53563 + depends on GRKERNSEC_CHROOT
53564 + help
53565 + If you say Y here, processes inside a chroot will not be able to attach
53566 + to shared memory segments that were created outside of the chroot jail.
53567 + It is recommended that you say Y here. If the sysctl option is enabled,
53568 + a sysctl option with name "chroot_deny_shmat" is created.
53569 +
53570 +config GRKERNSEC_CHROOT_UNIX
53571 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53572 + depends on GRKERNSEC_CHROOT
53573 + help
53574 + If you say Y here, processes inside a chroot will not be able to
53575 + connect to abstract (meaning not belonging to a filesystem) Unix
53576 + domain sockets that were bound outside of a chroot. It is recommended
53577 + that you say Y here. If the sysctl option is enabled, a sysctl option
53578 + with name "chroot_deny_unix" is created.
53579 +
53580 +config GRKERNSEC_CHROOT_FINDTASK
53581 + bool "Protect outside processes"
53582 + depends on GRKERNSEC_CHROOT
53583 + help
53584 + If you say Y here, processes inside a chroot will not be able to
53585 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53586 + getsid, or view any process outside of the chroot. If the sysctl
53587 + option is enabled, a sysctl option with name "chroot_findtask" is
53588 + created.
53589 +
53590 +config GRKERNSEC_CHROOT_NICE
53591 + bool "Restrict priority changes"
53592 + depends on GRKERNSEC_CHROOT
53593 + help
53594 + If you say Y here, processes inside a chroot will not be able to raise
53595 + the priority of processes in the chroot, or alter the priority of
53596 + processes outside the chroot. This provides more security than simply
53597 + removing CAP_SYS_NICE from the process' capability set. If the
53598 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53599 + is created.
53600 +
53601 +config GRKERNSEC_CHROOT_SYSCTL
53602 + bool "Deny sysctl writes"
53603 + depends on GRKERNSEC_CHROOT
53604 + help
53605 + If you say Y here, an attacker in a chroot will not be able to
53606 + write to sysctl entries, either by sysctl(2) or through a /proc
53607 + interface. It is strongly recommended that you say Y here. If the
53608 + sysctl option is enabled, a sysctl option with name
53609 + "chroot_deny_sysctl" is created.
53610 +
53611 +config GRKERNSEC_CHROOT_CAPS
53612 + bool "Capability restrictions"
53613 + depends on GRKERNSEC_CHROOT
53614 + help
53615 + If you say Y here, the capabilities on all root processes within a
53616 + chroot jail will be lowered to stop module insertion, raw i/o,
53617 + system and net admin tasks, rebooting the system, modifying immutable
53618 + files, modifying IPC owned by another, and changing the system time.
53619 + This is left as an option because it can break some apps. Disable this
53620 + if your chrooted apps are having problems performing those kinds of
53621 + tasks. If the sysctl option is enabled, a sysctl option with
53622 + name "chroot_caps" is created.
53623 +
53624 +endmenu
53625 +menu "Kernel Auditing"
53626 +depends on GRKERNSEC
53627 +
53628 +config GRKERNSEC_AUDIT_GROUP
53629 + bool "Single group for auditing"
53630 + help
53631 + If you say Y here, the exec, chdir, and (un)mount logging features
53632 + will only operate on a group you specify. This option is recommended
53633 + if you only want to watch certain users instead of having a large
53634 + amount of logs from the entire system. If the sysctl option is enabled,
53635 + a sysctl option with name "audit_group" is created.
53636 +
53637 +config GRKERNSEC_AUDIT_GID
53638 + int "GID for auditing"
53639 + depends on GRKERNSEC_AUDIT_GROUP
53640 + default 1007
53641 +
53642 +config GRKERNSEC_EXECLOG
53643 + bool "Exec logging"
53644 + help
53645 + If you say Y here, all execve() calls will be logged (since the
53646 + other exec*() calls are frontends to execve(), all execution
53647 + will be logged). Useful for shell-servers that like to keep track
53648 + of their users. If the sysctl option is enabled, a sysctl option with
53649 + name "exec_logging" is created.
53650 + WARNING: This option when enabled will produce a LOT of logs, especially
53651 + on an active system.
53652 +
53653 +config GRKERNSEC_RESLOG
53654 + bool "Resource logging"
53655 + help
53656 + If you say Y here, all attempts to overstep resource limits will
53657 + be logged with the resource name, the requested size, and the current
53658 + limit. It is highly recommended that you say Y here. If the sysctl
53659 + option is enabled, a sysctl option with name "resource_logging" is
53660 + created. If the RBAC system is enabled, the sysctl value is ignored.
53661 +
53662 +config GRKERNSEC_CHROOT_EXECLOG
53663 + bool "Log execs within chroot"
53664 + help
53665 + If you say Y here, all executions inside a chroot jail will be logged
53666 + to syslog. This can cause a large amount of logs if certain
53667 + applications (eg. djb's daemontools) are installed on the system, and
53668 + is therefore left as an option. If the sysctl option is enabled, a
53669 + sysctl option with name "chroot_execlog" is created.
53670 +
53671 +config GRKERNSEC_AUDIT_PTRACE
53672 + bool "Ptrace logging"
53673 + help
53674 + If you say Y here, all attempts to attach to a process via ptrace
53675 + will be logged. If the sysctl option is enabled, a sysctl option
53676 + with name "audit_ptrace" is created.
53677 +
53678 +config GRKERNSEC_AUDIT_CHDIR
53679 + bool "Chdir logging"
53680 + help
53681 + If you say Y here, all chdir() calls will be logged. If the sysctl
53682 + option is enabled, a sysctl option with name "audit_chdir" is created.
53683 +
53684 +config GRKERNSEC_AUDIT_MOUNT
53685 + bool "(Un)Mount logging"
53686 + help
53687 + If you say Y here, all mounts and unmounts will be logged. If the
53688 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53689 + created.
53690 +
53691 +config GRKERNSEC_SIGNAL
53692 + bool "Signal logging"
53693 + help
53694 + If you say Y here, certain important signals will be logged, such as
53695 + SIGSEGV, which will inform you when an error in a program has
53696 + occurred, which in some cases could indicate a possible exploit attempt.
53697 + If the sysctl option is enabled, a sysctl option with name
53698 + "signal_logging" is created.
53699 +
53700 +config GRKERNSEC_FORKFAIL
53701 + bool "Fork failure logging"
53702 + help
53703 + If you say Y here, all failed fork() attempts will be logged.
53704 + This could suggest a fork bomb, or someone attempting to overstep
53705 + their process limit. If the sysctl option is enabled, a sysctl option
53706 + with name "forkfail_logging" is created.
53707 +
53708 +config GRKERNSEC_TIME
53709 + bool "Time change logging"
53710 + help
53711 + If you say Y here, any changes of the system clock will be logged.
53712 + If the sysctl option is enabled, a sysctl option with name
53713 + "timechange_logging" is created.
53714 +
53715 +config GRKERNSEC_PROC_IPADDR
53716 + bool "/proc/<pid>/ipaddr support"
53717 + help
53718 + If you say Y here, a new entry will be added to each /proc/<pid>
53719 + directory that contains the IP address of the user associated with the task.
53720 + The IP is carried across local TCP and AF_UNIX stream sockets.
53721 + This information can be useful for IDS/IPSes to perform remote response
53722 + to a local attack. The entry is readable by only the owner of the
53723 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53724 + the RBAC system), and thus does not create privacy concerns.
53725 +
53726 +config GRKERNSEC_RWXMAP_LOG
53727 + bool 'Denied RWX mmap/mprotect logging'
53728 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53729 + help
53730 + If you say Y here, calls to mmap() and mprotect() with explicit
53731 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53732 + denied by the PAX_MPROTECT feature. If the sysctl option is
53733 + enabled, a sysctl option with name "rwxmap_logging" is created.
53734 +
53735 +config GRKERNSEC_AUDIT_TEXTREL
53736 + bool 'ELF text relocations logging (READ HELP)'
53737 + depends on PAX_MPROTECT
53738 + help
53739 + If you say Y here, text relocations will be logged with the filename
53740 + of the offending library or binary. The purpose of the feature is
53741 + to help Linux distribution developers get rid of libraries and
53742 + binaries that need text relocations which hinder the future progress
53743 + of PaX. Only Linux distribution developers should say Y here, and
53744 + never on a production machine, as this option creates an information
53745 + leak that could aid an attacker in defeating the randomization of
53746 + a single memory region. If the sysctl option is enabled, a sysctl
53747 + option with name "audit_textrel" is created.
53748 +
53749 +endmenu
53750 +
53751 +menu "Executable Protections"
53752 +depends on GRKERNSEC
53753 +
53754 +config GRKERNSEC_EXECVE
53755 + bool "Enforce RLIMIT_NPROC on execs"
53756 + help
53757 + If you say Y here, users with a resource limit on processes will
53758 + have the value checked during execve() calls. The current system
53759 + only checks the system limit during fork() calls. If the sysctl option
53760 + is enabled, a sysctl option with name "execve_limiting" is created.
53761 +
53762 +config GRKERNSEC_DMESG
53763 + bool "Dmesg(8) restriction"
53764 + help
53765 + If you say Y here, non-root users will not be able to use dmesg(8)
53766 + to view up to the last 4kb of messages in the kernel's log buffer.
53767 + The kernel's log buffer often contains kernel addresses and other
53768 + identifying information useful to an attacker in fingerprinting a
53769 + system for a targeted exploit.
53770 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53771 + created.
53772 +
53773 +config GRKERNSEC_HARDEN_PTRACE
53774 + bool "Deter ptrace-based process snooping"
53775 + help
53776 + If you say Y here, TTY sniffers and other malicious monitoring
53777 + programs implemented through ptrace will be defeated. If you
53778 + have been using the RBAC system, this option has already been
53779 + enabled for several years for all users, with the ability to make
53780 + fine-grained exceptions.
53781 +
53782 + This option only affects the ability of non-root users to ptrace
53783 + processes that are not a descendant of the ptracing process.
53784 + This means that strace ./binary and gdb ./binary will still work,
53785 + but attaching to arbitrary processes will not. If the sysctl
53786 + option is enabled, a sysctl option with name "harden_ptrace" is
53787 + created.
53788 +
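As an illustrative sketch (not part of the patch), this is what the restriction looks like from user space, assuming an attach on a process that is not a descendant of the tracer simply fails with EPERM:

    /* With the ptrace hardening described above, a non-root tracer can still
     * trace its own children, but PTRACE_ATTACH to an arbitrary pid is
     * expected to fail with EPERM. */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    static int try_attach(pid_t pid)
    {
            if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
                    perror("PTRACE_ATTACH");   /* EPERM for non-descendants */
                    return -errno;
            }
            return 0;
    }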
53789 +config GRKERNSEC_TPE
53790 + bool "Trusted Path Execution (TPE)"
53791 + help
53792 + If you say Y here, you will be able to choose a gid to add to the
53793 + supplementary groups of users you want to mark as "untrusted."
53794 + These users will not be able to execute any files that are not in
53795 + root-owned directories writable only by root. If the sysctl option
53796 + is enabled, a sysctl option with name "tpe" is created.
53797 +
53798 +config GRKERNSEC_TPE_ALL
53799 + bool "Partially restrict all non-root users"
53800 + depends on GRKERNSEC_TPE
53801 + help
53802 + If you say Y here, all non-root users will be covered under
53803 + a weaker TPE restriction. This is separate from, and in addition to,
53804 + the main TPE options that you have selected elsewhere. Thus, if a
53805 + "trusted" GID is chosen, this restriction applies to even that GID.
53806 + Under this restriction, all non-root users will only be allowed to
53807 + execute files in directories they own that are not group or
53808 + world-writable, or in directories owned by root and writable only by
53809 + root. If the sysctl option is enabled, a sysctl option with name
53810 + "tpe_restrict_all" is created.
53811 +
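A rough user-space approximation (for illustration, not the patch's implementation) of the directory trust rule stated above:

    /* A directory is "trusted" for the weaker TPE check described above if it
     * is not group- or world-writable and is owned either by root or by the
     * executing user. */
    #include <stdbool.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static bool tpe_dir_trusted(const char *dir, uid_t uid)
    {
            struct stat st;

            if (stat(dir, &st) != 0 || !S_ISDIR(st.st_mode))
                    return false;
            if (st.st_mode & (S_IWGRP | S_IWOTH))
                    return false;           /* group- or world-writable */
            return st.st_uid == 0 || st.st_uid == uid;
    }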
53812 +config GRKERNSEC_TPE_INVERT
53813 + bool "Invert GID option"
53814 + depends on GRKERNSEC_TPE
53815 + help
53816 + If you say Y here, the group you specify in the TPE configuration will
53817 + decide what group TPE restrictions will be *disabled* for. This
53818 + option is useful if you want TPE restrictions to be applied to most
53819 + users on the system. If the sysctl option is enabled, a sysctl option
53820 + with name "tpe_invert" is created. Unlike other sysctl options, this
53821 + entry will default to on for backward-compatibility.
53822 +
53823 +config GRKERNSEC_TPE_GID
53824 + int "GID for untrusted users"
53825 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53826 + default 1005
53827 + help
53828 + Setting this GID determines what group TPE restrictions will be
53829 + *enabled* for. If the sysctl option is enabled, a sysctl option
53830 + with name "tpe_gid" is created.
53831 +
53832 +config GRKERNSEC_TPE_GID
53833 + int "GID for trusted users"
53834 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53835 + default 1005
53836 + help
53837 + Setting this GID determines what group TPE restrictions will be
53838 + *disabled* for. If the sysctl option is enabled, a sysctl option
53839 + with name "tpe_gid" is created.
53840 +
53841 +endmenu
53842 +menu "Network Protections"
53843 +depends on GRKERNSEC
53844 +
53845 +config GRKERNSEC_RANDNET
53846 + bool "Larger entropy pools"
53847 + help
53848 + If you say Y here, the entropy pools used for many features of Linux
53849 + and grsecurity will be doubled in size. Since several grsecurity
53850 + features use additional randomness, it is recommended that you say Y
53851 + here. Saying Y here has a similar effect as modifying
53852 + /proc/sys/kernel/random/poolsize.
53853 +
53854 +config GRKERNSEC_BLACKHOLE
53855 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53856 + help
53857 + If you say Y here, neither TCP resets nor ICMP
53858 + destination-unreachable packets will be sent in response to packets
53859 + sent to ports for which no associated listening process exists.
53860 + This feature supports both IPv4 and IPv6 and exempts the
53861 + loopback interface from blackholing. Enabling this feature
53862 + makes a host more resilient to DoS attacks and reduces network
53863 + visibility against scanners.
53864 +
53865 + The blackhole feature as-implemented is equivalent to the FreeBSD
53866 + blackhole feature, as it prevents RST responses to all packets, not
53867 + just SYNs. Under most application behavior this causes no
53868 + problems, but applications (like haproxy) may not close certain
53869 + connections in a way that cleanly terminates them on the remote
53870 + end, leaving the remote host in LAST_ACK state. Because of this
53871 + side-effect and to prevent intentional LAST_ACK DoSes, this
53872 + feature also adds automatic mitigation against such attacks.
53873 + The mitigation drastically reduces the amount of time a socket
53874 + can spend in LAST_ACK state. If you're using haproxy and not
53875 + all servers it connects to have this option enabled, consider
53876 + disabling this feature on the haproxy host.
53877 +
53878 + If the sysctl option is enabled, two sysctl options with names
53879 + "ip_blackhole" and "lastack_retries" will be created.
53880 + While "ip_blackhole" takes the standard zero/non-zero on/off
53881 + toggle, "lastack_retries" uses the same kinds of values as
53882 + "tcp_retries1" and "tcp_retries2". The default value of 4
53883 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53884 + state.
53885 +
53886 +config GRKERNSEC_SOCKET
53887 + bool "Socket restrictions"
53888 + help
53889 + If you say Y here, you will be able to choose from several options.
53890 + If you assign a GID on your system and add it to the supplementary
53891 + groups of users you want to restrict socket access to, this patch
53892 + will perform up to three things, based on the option(s) you choose.
53893 +
53894 +config GRKERNSEC_SOCKET_ALL
53895 + bool "Deny any sockets to group"
53896 + depends on GRKERNSEC_SOCKET
53897 + help
53898 + If you say Y here, you will be able to choose a GID whose users will
53899 + be unable to connect to other hosts from your machine or run server
53900 + applications from your machine. If the sysctl option is enabled, a
53901 + sysctl option with name "socket_all" is created.
53902 +
53903 +config GRKERNSEC_SOCKET_ALL_GID
53904 + int "GID to deny all sockets for"
53905 + depends on GRKERNSEC_SOCKET_ALL
53906 + default 1004
53907 + help
53908 + Here you can choose the GID to disable socket access for. Remember to
53909 + add the users you want socket access disabled for to the GID
53910 + specified here. If the sysctl option is enabled, a sysctl option
53911 + with name "socket_all_gid" is created.
53912 +
53913 +config GRKERNSEC_SOCKET_CLIENT
53914 + bool "Deny client sockets to group"
53915 + depends on GRKERNSEC_SOCKET
53916 + help
53917 + If you say Y here, you will be able to choose a GID whose users will
53918 + be unable to connect to other hosts from your machine, but will be
53919 + able to run servers. If this option is enabled, all users in the group
53920 + you specify will have to use passive mode when initiating ftp transfers
53921 + from the shell on your machine. If the sysctl option is enabled, a
53922 + sysctl option with name "socket_client" is created.
53923 +
53924 +config GRKERNSEC_SOCKET_CLIENT_GID
53925 + int "GID to deny client sockets for"
53926 + depends on GRKERNSEC_SOCKET_CLIENT
53927 + default 1003
53928 + help
53929 + Here you can choose the GID to disable client socket access for.
53930 + Remember to add the users you want client socket access disabled for to
53931 + the GID specified here. If the sysctl option is enabled, a sysctl
53932 + option with name "socket_client_gid" is created.
53933 +
53934 +config GRKERNSEC_SOCKET_SERVER
53935 + bool "Deny server sockets to group"
53936 + depends on GRKERNSEC_SOCKET
53937 + help
53938 + If you say Y here, you will be able to choose a GID whose users will
53939 + be unable to run server applications from your machine. If the sysctl
53940 + option is enabled, a sysctl option with name "socket_server" is created.
53941 +
53942 +config GRKERNSEC_SOCKET_SERVER_GID
53943 + int "GID to deny server sockets for"
53944 + depends on GRKERNSEC_SOCKET_SERVER
53945 + default 1002
53946 + help
53947 + Here you can choose the GID to disable server socket access for.
53948 + Remember to add the users you want server socket access disabled for to
53949 + the GID specified here. If the sysctl option is enabled, a sysctl
53950 + option with name "socket_server_gid" is created.
53951 +
53952 +endmenu
53953 +menu "Sysctl support"
53954 +depends on GRKERNSEC && SYSCTL
53955 +
53956 +config GRKERNSEC_SYSCTL
53957 + bool "Sysctl support"
53958 + help
53959 + If you say Y here, you will be able to change the options that
53960 + grsecurity runs with at bootup, without having to recompile your
53961 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53962 + to enable (1) or disable (0) various features. All the sysctl entries
53963 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53964 + All features enabled in the kernel configuration are disabled at boot
53965 + if you do not say Y to the "Turn on features by default" option.
53966 + All options should be set at startup, and the grsec_lock entry should
53967 + be set to a non-zero value after all the options are set.
53968 + *THIS IS EXTREMELY IMPORTANT*
53969 +
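For illustration only, a minimal sketch of a boot-time helper that sets a few of the entries named in the help texts and then locks them, assuming they are exposed under /proc/sys/kernel/grsecurity as described:

    /* Sketch: enable selected grsecurity features via sysctl and set
     * grsec_lock last so the settings can no longer be changed. */
    #include <stdio.h>

    static int grsec_set(const char *name, const char *value)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs(value, f);
            return fclose(f);
    }

    int main(void)
    {
            grsec_set("linking_restrictions", "1");
            grsec_set("fifo_restrictions", "1");
            grsec_set("deter_bruteforce", "1");
            /* lock last: after this, no grsecurity sysctl can be modified */
            return grsec_set("grsec_lock", "1");
    }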
53970 +config GRKERNSEC_SYSCTL_DISTRO
53971 + bool "Extra sysctl support for distro makers (READ HELP)"
53972 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53973 + help
53974 + If you say Y here, additional sysctl options will be created
53975 + for features that affect processes running as root. Therefore,
53976 + it is critical when using this option that the grsec_lock entry be
53977 + enabled after boot. Only distros with prebuilt kernel packages
53978 + with this option enabled that can ensure grsec_lock is enabled
53979 + after boot should use this option.
53980 + *Failure to set grsec_lock after boot makes all grsec features
53981 + this option covers useless*
53982 +
53983 + Currently this option creates the following sysctl entries:
53984 + "Disable Privileged I/O": "disable_priv_io"
53985 +
53986 +config GRKERNSEC_SYSCTL_ON
53987 + bool "Turn on features by default"
53988 + depends on GRKERNSEC_SYSCTL
53989 + help
53990 + If you say Y here, instead of having all features enabled in the
53991 + kernel configuration disabled at boot time, the features will be
53992 + enabled at boot time. It is recommended you say Y here unless
53993 + there is some reason you would want all sysctl-tunable features to
53994 + be disabled by default. As mentioned elsewhere, it is important
53995 + to enable the grsec_lock entry once you have finished modifying
53996 + the sysctl entries.
53997 +
53998 +endmenu
53999 +menu "Logging Options"
54000 +depends on GRKERNSEC
54001 +
54002 +config GRKERNSEC_FLOODTIME
54003 + int "Seconds in between log messages (minimum)"
54004 + default 10
54005 + help
54006 + This option allows you to enforce the minimum number of seconds between
54007 + grsecurity log messages. The default should be suitable for most
54008 + people; however, if you choose to change it, choose a value small enough
54009 + to allow informative logs to be produced, but large enough to
54010 + prevent flooding.
54011 +
54012 +config GRKERNSEC_FLOODBURST
54013 + int "Number of messages in a burst (maximum)"
54014 + default 4
54015 + help
54016 + This option allows you to choose the maximum number of messages allowed
54017 + within the flood time interval you chose in a separate option. The
54018 + default should be suitable for most people; however, if you find that
54019 + many of your logs are being interpreted as flooding, you may want to
54020 + raise this value.
54021 +
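To make the interaction of the two values above concrete, here is a small illustrative rate limiter (not the kernel's implementation): at most FLOODBURST messages are allowed within any FLOODTIME-second window, and the rest are suppressed.

    /* Illustrative sketch of the FLOODTIME/FLOODBURST semantics described
     * above: allow at most 'burst' log messages per 'flood_time' seconds. */
    #include <stdbool.h>
    #include <time.h>

    struct grsec_ratelimit {
            time_t window_start;
            unsigned int count;
            unsigned int flood_time;    /* GRKERNSEC_FLOODTIME, e.g. 10 */
            unsigned int burst;         /* GRKERNSEC_FLOODBURST, e.g. 4 */
    };

    static bool grsec_log_allowed(struct grsec_ratelimit *rl)
    {
            time_t now = time(NULL);

            if (now - rl->window_start >= (time_t)rl->flood_time) {
                    rl->window_start = now;
                    rl->count = 0;
            }
            return rl->count++ < rl->burst;
    }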
54022 +endmenu
54023 +
54024 +endmenu
54025 diff -urNp linux-2.6.32.42/grsecurity/Makefile linux-2.6.32.42/grsecurity/Makefile
54026 --- linux-2.6.32.42/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
54027 +++ linux-2.6.32.42/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
54028 @@ -0,0 +1,33 @@
54029 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54030 +# during 2001-2009 it has been completely redesigned by Brad Spengler
54031 +# into an RBAC system
54032 +#
54033 +# All code in this directory and various hooks inserted throughout the kernel
54034 +# are copyright Brad Spengler - Open Source Security, Inc., and released
54035 +# under the GPL v2 or higher
54036 +
54037 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54038 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
54039 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54040 +
54041 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54042 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54043 + gracl_learn.o grsec_log.o
54044 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54045 +
54046 +ifdef CONFIG_NET
54047 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54048 +endif
54049 +
54050 +ifndef CONFIG_GRKERNSEC
54051 +obj-y += grsec_disabled.o
54052 +endif
54053 +
54054 +ifdef CONFIG_GRKERNSEC_HIDESYM
54055 +extra-y := grsec_hidesym.o
54056 +$(obj)/grsec_hidesym.o:
54057 + @-chmod -f 500 /boot
54058 + @-chmod -f 500 /lib/modules
54059 + @-chmod -f 700 .
54060 + @echo ' grsec: protected kernel image paths'
54061 +endif
54062 diff -urNp linux-2.6.32.42/include/acpi/acpi_drivers.h linux-2.6.32.42/include/acpi/acpi_drivers.h
54063 --- linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
54064 +++ linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
54065 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
54066 Dock Station
54067 -------------------------------------------------------------------------- */
54068 struct acpi_dock_ops {
54069 - acpi_notify_handler handler;
54070 - acpi_notify_handler uevent;
54071 + const acpi_notify_handler handler;
54072 + const acpi_notify_handler uevent;
54073 };
54074
54075 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
54076 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
54077 extern int register_dock_notifier(struct notifier_block *nb);
54078 extern void unregister_dock_notifier(struct notifier_block *nb);
54079 extern int register_hotplug_dock_device(acpi_handle handle,
54080 - struct acpi_dock_ops *ops,
54081 + const struct acpi_dock_ops *ops,
54082 void *context);
54083 extern void unregister_hotplug_dock_device(acpi_handle handle);
54084 #else
54085 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
54086 {
54087 }
54088 static inline int register_hotplug_dock_device(acpi_handle handle,
54089 - struct acpi_dock_ops *ops,
54090 + const struct acpi_dock_ops *ops,
54091 void *context)
54092 {
54093 return -ENODEV;
54094 diff -urNp linux-2.6.32.42/include/asm-generic/atomic-long.h linux-2.6.32.42/include/asm-generic/atomic-long.h
54095 --- linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
54096 +++ linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
54097 @@ -22,6 +22,12 @@
54098
54099 typedef atomic64_t atomic_long_t;
54100
54101 +#ifdef CONFIG_PAX_REFCOUNT
54102 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
54103 +#else
54104 +typedef atomic64_t atomic_long_unchecked_t;
54105 +#endif
54106 +
54107 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
54108
54109 static inline long atomic_long_read(atomic_long_t *l)
54110 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
54111 return (long)atomic64_read(v);
54112 }
54113
54114 +#ifdef CONFIG_PAX_REFCOUNT
54115 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54116 +{
54117 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54118 +
54119 + return (long)atomic64_read_unchecked(v);
54120 +}
54121 +#endif
54122 +
54123 static inline void atomic_long_set(atomic_long_t *l, long i)
54124 {
54125 atomic64_t *v = (atomic64_t *)l;
54126 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
54127 atomic64_set(v, i);
54128 }
54129
54130 +#ifdef CONFIG_PAX_REFCOUNT
54131 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54132 +{
54133 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54134 +
54135 + atomic64_set_unchecked(v, i);
54136 +}
54137 +#endif
54138 +
54139 static inline void atomic_long_inc(atomic_long_t *l)
54140 {
54141 atomic64_t *v = (atomic64_t *)l;
54142 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
54143 atomic64_inc(v);
54144 }
54145
54146 +#ifdef CONFIG_PAX_REFCOUNT
54147 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54148 +{
54149 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54150 +
54151 + atomic64_inc_unchecked(v);
54152 +}
54153 +#endif
54154 +
54155 static inline void atomic_long_dec(atomic_long_t *l)
54156 {
54157 atomic64_t *v = (atomic64_t *)l;
54158 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
54159 atomic64_dec(v);
54160 }
54161
54162 +#ifdef CONFIG_PAX_REFCOUNT
54163 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54164 +{
54165 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54166 +
54167 + atomic64_dec_unchecked(v);
54168 +}
54169 +#endif
54170 +
54171 static inline void atomic_long_add(long i, atomic_long_t *l)
54172 {
54173 atomic64_t *v = (atomic64_t *)l;
54174 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
54175 atomic64_add(i, v);
54176 }
54177
54178 +#ifdef CONFIG_PAX_REFCOUNT
54179 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54180 +{
54181 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54182 +
54183 + atomic64_add_unchecked(i, v);
54184 +}
54185 +#endif
54186 +
54187 static inline void atomic_long_sub(long i, atomic_long_t *l)
54188 {
54189 atomic64_t *v = (atomic64_t *)l;
54190 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
54191 return (long)atomic64_inc_return(v);
54192 }
54193
54194 +#ifdef CONFIG_PAX_REFCOUNT
54195 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54196 +{
54197 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54198 +
54199 + return (long)atomic64_inc_return_unchecked(v);
54200 +}
54201 +#endif
54202 +
54203 static inline long atomic_long_dec_return(atomic_long_t *l)
54204 {
54205 atomic64_t *v = (atomic64_t *)l;
54206 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
54207
54208 typedef atomic_t atomic_long_t;
54209
54210 +#ifdef CONFIG_PAX_REFCOUNT
54211 +typedef atomic_unchecked_t atomic_long_unchecked_t;
54212 +#else
54213 +typedef atomic_t atomic_long_unchecked_t;
54214 +#endif
54215 +
54216 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
54217 static inline long atomic_long_read(atomic_long_t *l)
54218 {
54219 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
54220 return (long)atomic_read(v);
54221 }
54222
54223 +#ifdef CONFIG_PAX_REFCOUNT
54224 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54225 +{
54226 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54227 +
54228 + return (long)atomic_read_unchecked(v);
54229 +}
54230 +#endif
54231 +
54232 static inline void atomic_long_set(atomic_long_t *l, long i)
54233 {
54234 atomic_t *v = (atomic_t *)l;
54235 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
54236 atomic_set(v, i);
54237 }
54238
54239 +#ifdef CONFIG_PAX_REFCOUNT
54240 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54241 +{
54242 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54243 +
54244 + atomic_set_unchecked(v, i);
54245 +}
54246 +#endif
54247 +
54248 static inline void atomic_long_inc(atomic_long_t *l)
54249 {
54250 atomic_t *v = (atomic_t *)l;
54251 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
54252 atomic_inc(v);
54253 }
54254
54255 +#ifdef CONFIG_PAX_REFCOUNT
54256 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54257 +{
54258 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54259 +
54260 + atomic_inc_unchecked(v);
54261 +}
54262 +#endif
54263 +
54264 static inline void atomic_long_dec(atomic_long_t *l)
54265 {
54266 atomic_t *v = (atomic_t *)l;
54267 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
54268 atomic_dec(v);
54269 }
54270
54271 +#ifdef CONFIG_PAX_REFCOUNT
54272 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54273 +{
54274 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54275 +
54276 + atomic_dec_unchecked(v);
54277 +}
54278 +#endif
54279 +
54280 static inline void atomic_long_add(long i, atomic_long_t *l)
54281 {
54282 atomic_t *v = (atomic_t *)l;
54283 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
54284 atomic_add(i, v);
54285 }
54286
54287 +#ifdef CONFIG_PAX_REFCOUNT
54288 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54289 +{
54290 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54291 +
54292 + atomic_add_unchecked(i, v);
54293 +}
54294 +#endif
54295 +
54296 static inline void atomic_long_sub(long i, atomic_long_t *l)
54297 {
54298 atomic_t *v = (atomic_t *)l;
54299 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
54300 return (long)atomic_inc_return(v);
54301 }
54302
54303 +#ifdef CONFIG_PAX_REFCOUNT
54304 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54305 +{
54306 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54307 +
54308 + return (long)atomic_inc_return_unchecked(v);
54309 +}
54310 +#endif
54311 +
54312 static inline long atomic_long_dec_return(atomic_long_t *l)
54313 {
54314 atomic_t *v = (atomic_t *)l;
54315 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
54316
54317 #endif /* BITS_PER_LONG == 64 */
54318
54319 +#ifdef CONFIG_PAX_REFCOUNT
54320 +static inline void pax_refcount_needs_these_functions(void)
54321 +{
54322 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
54323 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
54324 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
54325 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
54326 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
54327 + atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
54328 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
54329 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
54330 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
54331 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
54332 + atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
54333 +
54334 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
54335 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
54336 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
54337 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
54338 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
54339 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
54340 +}
54341 +#else
54342 +#define atomic_read_unchecked(v) atomic_read(v)
54343 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
54344 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
54345 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
54346 +#define atomic_inc_unchecked(v) atomic_inc(v)
54347 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
54348 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
54349 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
54350 +#define atomic_dec_unchecked(v) atomic_dec(v)
54351 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
54352 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
54353 +
54354 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
54355 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
54356 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
54357 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
54358 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
54359 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
54360 +#endif
54361 +
54362 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
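The point of the *_unchecked variants added above is that, with PAX_REFCOUNT enabled, the ordinary atomic operations detect and stop overflows (to block reference-count-overflow exploits), while counters whose wrap-around is harmless opt out via the unchecked type. A kernel-context sketch, assuming this header is included:

    /* Hypothetical statistics counter: overflow here is benign, so it uses the
     * unchecked type and ops and is exempt from the PAX_REFCOUNT overflow check. */
    static atomic_long_unchecked_t rx_packets;

    static void account_rx(void)
    {
            atomic_long_inc_unchecked(&rx_packets);
    }

    static long read_rx(void)
    {
            return atomic_long_read_unchecked(&rx_packets);
    }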
54363 diff -urNp linux-2.6.32.42/include/asm-generic/cache.h linux-2.6.32.42/include/asm-generic/cache.h
54364 --- linux-2.6.32.42/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
54365 +++ linux-2.6.32.42/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
54366 @@ -6,7 +6,7 @@
54367 * cache lines need to provide their own cache.h.
54368 */
54369
54370 -#define L1_CACHE_SHIFT 5
54371 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54372 +#define L1_CACHE_SHIFT 5U
54373 +#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
54374
54375 #endif /* __ASM_GENERIC_CACHE_H */
54376 diff -urNp linux-2.6.32.42/include/asm-generic/dma-mapping-common.h linux-2.6.32.42/include/asm-generic/dma-mapping-common.h
54377 --- linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
54378 +++ linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
54379 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
54380 enum dma_data_direction dir,
54381 struct dma_attrs *attrs)
54382 {
54383 - struct dma_map_ops *ops = get_dma_ops(dev);
54384 + const struct dma_map_ops *ops = get_dma_ops(dev);
54385 dma_addr_t addr;
54386
54387 kmemcheck_mark_initialized(ptr, size);
54388 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
54389 enum dma_data_direction dir,
54390 struct dma_attrs *attrs)
54391 {
54392 - struct dma_map_ops *ops = get_dma_ops(dev);
54393 + const struct dma_map_ops *ops = get_dma_ops(dev);
54394
54395 BUG_ON(!valid_dma_direction(dir));
54396 if (ops->unmap_page)
54397 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
54398 int nents, enum dma_data_direction dir,
54399 struct dma_attrs *attrs)
54400 {
54401 - struct dma_map_ops *ops = get_dma_ops(dev);
54402 + const struct dma_map_ops *ops = get_dma_ops(dev);
54403 int i, ents;
54404 struct scatterlist *s;
54405
54406 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
54407 int nents, enum dma_data_direction dir,
54408 struct dma_attrs *attrs)
54409 {
54410 - struct dma_map_ops *ops = get_dma_ops(dev);
54411 + const struct dma_map_ops *ops = get_dma_ops(dev);
54412
54413 BUG_ON(!valid_dma_direction(dir));
54414 debug_dma_unmap_sg(dev, sg, nents, dir);
54415 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
54416 size_t offset, size_t size,
54417 enum dma_data_direction dir)
54418 {
54419 - struct dma_map_ops *ops = get_dma_ops(dev);
54420 + const struct dma_map_ops *ops = get_dma_ops(dev);
54421 dma_addr_t addr;
54422
54423 kmemcheck_mark_initialized(page_address(page) + offset, size);
54424 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
54425 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
54426 size_t size, enum dma_data_direction dir)
54427 {
54428 - struct dma_map_ops *ops = get_dma_ops(dev);
54429 + const struct dma_map_ops *ops = get_dma_ops(dev);
54430
54431 BUG_ON(!valid_dma_direction(dir));
54432 if (ops->unmap_page)
54433 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
54434 size_t size,
54435 enum dma_data_direction dir)
54436 {
54437 - struct dma_map_ops *ops = get_dma_ops(dev);
54438 + const struct dma_map_ops *ops = get_dma_ops(dev);
54439
54440 BUG_ON(!valid_dma_direction(dir));
54441 if (ops->sync_single_for_cpu)
54442 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
54443 dma_addr_t addr, size_t size,
54444 enum dma_data_direction dir)
54445 {
54446 - struct dma_map_ops *ops = get_dma_ops(dev);
54447 + const struct dma_map_ops *ops = get_dma_ops(dev);
54448
54449 BUG_ON(!valid_dma_direction(dir));
54450 if (ops->sync_single_for_device)
54451 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
54452 size_t size,
54453 enum dma_data_direction dir)
54454 {
54455 - struct dma_map_ops *ops = get_dma_ops(dev);
54456 + const struct dma_map_ops *ops = get_dma_ops(dev);
54457
54458 BUG_ON(!valid_dma_direction(dir));
54459 if (ops->sync_single_range_for_cpu) {
54460 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
54461 size_t size,
54462 enum dma_data_direction dir)
54463 {
54464 - struct dma_map_ops *ops = get_dma_ops(dev);
54465 + const struct dma_map_ops *ops = get_dma_ops(dev);
54466
54467 BUG_ON(!valid_dma_direction(dir));
54468 if (ops->sync_single_range_for_device) {
54469 @@ -155,7 +155,7 @@ static inline void
54470 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
54471 int nelems, enum dma_data_direction dir)
54472 {
54473 - struct dma_map_ops *ops = get_dma_ops(dev);
54474 + const struct dma_map_ops *ops = get_dma_ops(dev);
54475
54476 BUG_ON(!valid_dma_direction(dir));
54477 if (ops->sync_sg_for_cpu)
54478 @@ -167,7 +167,7 @@ static inline void
54479 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
54480 int nelems, enum dma_data_direction dir)
54481 {
54482 - struct dma_map_ops *ops = get_dma_ops(dev);
54483 + const struct dma_map_ops *ops = get_dma_ops(dev);
54484
54485 BUG_ON(!valid_dma_direction(dir));
54486 if (ops->sync_sg_for_device)
54487 diff -urNp linux-2.6.32.42/include/asm-generic/futex.h linux-2.6.32.42/include/asm-generic/futex.h
54488 --- linux-2.6.32.42/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
54489 +++ linux-2.6.32.42/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
54490 @@ -6,7 +6,7 @@
54491 #include <asm/errno.h>
54492
54493 static inline int
54494 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
54495 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
54496 {
54497 int op = (encoded_op >> 28) & 7;
54498 int cmp = (encoded_op >> 24) & 15;
54499 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
54500 }
54501
54502 static inline int
54503 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
54504 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
54505 {
54506 return -ENOSYS;
54507 }
54508 diff -urNp linux-2.6.32.42/include/asm-generic/int-l64.h linux-2.6.32.42/include/asm-generic/int-l64.h
54509 --- linux-2.6.32.42/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
54510 +++ linux-2.6.32.42/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
54511 @@ -46,6 +46,8 @@ typedef unsigned int u32;
54512 typedef signed long s64;
54513 typedef unsigned long u64;
54514
54515 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54516 +
54517 #define S8_C(x) x
54518 #define U8_C(x) x ## U
54519 #define S16_C(x) x
54520 diff -urNp linux-2.6.32.42/include/asm-generic/int-ll64.h linux-2.6.32.42/include/asm-generic/int-ll64.h
54521 --- linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
54522 +++ linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
54523 @@ -51,6 +51,8 @@ typedef unsigned int u32;
54524 typedef signed long long s64;
54525 typedef unsigned long long u64;
54526
54527 +typedef unsigned long long intoverflow_t;
54528 +
54529 #define S8_C(x) x
54530 #define U8_C(x) x ## U
54531 #define S16_C(x) x
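The intoverflow_t type introduced above is wide enough to hold the product of two unsigned long values without wrapping. A hedged, kernel-context sketch of the kind of size-overflow check such a type enables (the helper name is hypothetical, not from the patch):

    /* Hypothetical helper: compute n * size in the wider intoverflow_t and
     * reject the request if the true product does not fit in unsigned long. */
    static inline int alloc_size_overflows(unsigned long n, unsigned long size)
    {
            return (intoverflow_t)n * size > ULONG_MAX;
    }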
54532 diff -urNp linux-2.6.32.42/include/asm-generic/kmap_types.h linux-2.6.32.42/include/asm-generic/kmap_types.h
54533 --- linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
54534 +++ linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
54535 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
54536 KMAP_D(16) KM_IRQ_PTE,
54537 KMAP_D(17) KM_NMI,
54538 KMAP_D(18) KM_NMI_PTE,
54539 -KMAP_D(19) KM_TYPE_NR
54540 +KMAP_D(19) KM_CLEARPAGE,
54541 +KMAP_D(20) KM_TYPE_NR
54542 };
54543
54544 #undef KMAP_D
54545 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable.h linux-2.6.32.42/include/asm-generic/pgtable.h
54546 --- linux-2.6.32.42/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
54547 +++ linux-2.6.32.42/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
54548 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
54549 unsigned long size);
54550 #endif
54551
54552 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54553 +static inline unsigned long pax_open_kernel(void) { return 0; }
54554 +#endif
54555 +
54556 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54557 +static inline unsigned long pax_close_kernel(void) { return 0; }
54558 +#endif
54559 +
54560 #endif /* !__ASSEMBLY__ */
54561
54562 #endif /* _ASM_GENERIC_PGTABLE_H */
54563 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h
54564 --- linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
54565 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
54566 @@ -1,14 +1,19 @@
54567 #ifndef _PGTABLE_NOPMD_H
54568 #define _PGTABLE_NOPMD_H
54569
54570 -#ifndef __ASSEMBLY__
54571 -
54572 #include <asm-generic/pgtable-nopud.h>
54573
54574 -struct mm_struct;
54575 -
54576 #define __PAGETABLE_PMD_FOLDED
54577
54578 +#define PMD_SHIFT PUD_SHIFT
54579 +#define PTRS_PER_PMD 1
54580 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54581 +#define PMD_MASK (~(PMD_SIZE-1))
54582 +
54583 +#ifndef __ASSEMBLY__
54584 +
54585 +struct mm_struct;
54586 +
54587 /*
54588 * Having the pmd type consist of a pud gets the size right, and allows
54589 * us to conceptually access the pud entry that this pmd is folded into
54590 @@ -16,11 +21,6 @@ struct mm_struct;
54591 */
54592 typedef struct { pud_t pud; } pmd_t;
54593
54594 -#define PMD_SHIFT PUD_SHIFT
54595 -#define PTRS_PER_PMD 1
54596 -#define PMD_SIZE (1UL << PMD_SHIFT)
54597 -#define PMD_MASK (~(PMD_SIZE-1))
54598 -
54599 /*
54600 * The "pud_xxx()" functions here are trivial for a folded two-level
54601 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54602 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopud.h linux-2.6.32.42/include/asm-generic/pgtable-nopud.h
54603 --- linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
54604 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
54605 @@ -1,10 +1,15 @@
54606 #ifndef _PGTABLE_NOPUD_H
54607 #define _PGTABLE_NOPUD_H
54608
54609 -#ifndef __ASSEMBLY__
54610 -
54611 #define __PAGETABLE_PUD_FOLDED
54612
54613 +#define PUD_SHIFT PGDIR_SHIFT
54614 +#define PTRS_PER_PUD 1
54615 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54616 +#define PUD_MASK (~(PUD_SIZE-1))
54617 +
54618 +#ifndef __ASSEMBLY__
54619 +
54620 /*
54621 * Having the pud type consist of a pgd gets the size right, and allows
54622 * us to conceptually access the pgd entry that this pud is folded into
54623 @@ -12,11 +17,6 @@
54624 */
54625 typedef struct { pgd_t pgd; } pud_t;
54626
54627 -#define PUD_SHIFT PGDIR_SHIFT
54628 -#define PTRS_PER_PUD 1
54629 -#define PUD_SIZE (1UL << PUD_SHIFT)
54630 -#define PUD_MASK (~(PUD_SIZE-1))
54631 -
54632 /*
54633 * The "pgd_xxx()" functions here are trivial for a folded two-level
54634 * setup: the pud is never bad, and a pud always exists (as it's folded
54635 diff -urNp linux-2.6.32.42/include/asm-generic/vmlinux.lds.h linux-2.6.32.42/include/asm-generic/vmlinux.lds.h
54636 --- linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54637 +++ linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54638 @@ -199,6 +199,7 @@
54639 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54640 VMLINUX_SYMBOL(__start_rodata) = .; \
54641 *(.rodata) *(.rodata.*) \
54642 + *(.data.read_only) \
54643 *(__vermagic) /* Kernel version magic */ \
54644 *(__markers_strings) /* Markers: strings */ \
54645 *(__tracepoints_strings)/* Tracepoints: strings */ \
54646 @@ -656,22 +657,24 @@
54647 * section in the linker script will go there too. @phdr should have
54648 * a leading colon.
54649 *
54650 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54651 + * Note that this macros defines per_cpu_load as an absolute symbol.
54652 * If there is no need to put the percpu section at a predetermined
54653 * address, use PERCPU().
54654 */
54655 #define PERCPU_VADDR(vaddr, phdr) \
54656 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54657 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54658 + per_cpu_load = .; \
54659 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54660 - LOAD_OFFSET) { \
54661 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54662 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54663 *(.data.percpu.first) \
54664 - *(.data.percpu.page_aligned) \
54665 *(.data.percpu) \
54666 + . = ALIGN(PAGE_SIZE); \
54667 + *(.data.percpu.page_aligned) \
54668 *(.data.percpu.shared_aligned) \
54669 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54670 } phdr \
54671 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54672 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54673
54674 /**
54675 * PERCPU - define output section for percpu area, simple version
54676 diff -urNp linux-2.6.32.42/include/drm/drmP.h linux-2.6.32.42/include/drm/drmP.h
54677 --- linux-2.6.32.42/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54678 +++ linux-2.6.32.42/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54679 @@ -71,6 +71,7 @@
54680 #include <linux/workqueue.h>
54681 #include <linux/poll.h>
54682 #include <asm/pgalloc.h>
54683 +#include <asm/local.h>
54684 #include "drm.h"
54685
54686 #include <linux/idr.h>
54687 @@ -814,7 +815,7 @@ struct drm_driver {
54688 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54689
54690 /* Driver private ops for this object */
54691 - struct vm_operations_struct *gem_vm_ops;
54692 + const struct vm_operations_struct *gem_vm_ops;
54693
54694 int major;
54695 int minor;
54696 @@ -917,7 +918,7 @@ struct drm_device {
54697
54698 /** \name Usage Counters */
54699 /*@{ */
54700 - int open_count; /**< Outstanding files open */
54701 + local_t open_count; /**< Outstanding files open */
54702 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54703 atomic_t vma_count; /**< Outstanding vma areas open */
54704 int buf_use; /**< Buffers in use -- cannot alloc */
54705 @@ -928,7 +929,7 @@ struct drm_device {
54706 /*@{ */
54707 unsigned long counters;
54708 enum drm_stat_type types[15];
54709 - atomic_t counts[15];
54710 + atomic_unchecked_t counts[15];
54711 /*@} */
54712
54713 struct list_head filelist;
54714 @@ -1016,7 +1017,7 @@ struct drm_device {
54715 struct pci_controller *hose;
54716 #endif
54717 struct drm_sg_mem *sg; /**< Scatter gather memory */
54718 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
54719 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
54720 void *dev_private; /**< device private data */
54721 void *mm_private;
54722 struct address_space *dev_mapping;
54723 @@ -1042,11 +1043,11 @@ struct drm_device {
54724 spinlock_t object_name_lock;
54725 struct idr object_name_idr;
54726 atomic_t object_count;
54727 - atomic_t object_memory;
54728 + atomic_unchecked_t object_memory;
54729 atomic_t pin_count;
54730 - atomic_t pin_memory;
54731 + atomic_unchecked_t pin_memory;
54732 atomic_t gtt_count;
54733 - atomic_t gtt_memory;
54734 + atomic_unchecked_t gtt_memory;
54735 uint32_t gtt_total;
54736 uint32_t invalidate_domains; /* domains pending invalidation */
54737 uint32_t flush_domains; /* domains pending flush */
54738 diff -urNp linux-2.6.32.42/include/linux/a.out.h linux-2.6.32.42/include/linux/a.out.h
54739 --- linux-2.6.32.42/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54740 +++ linux-2.6.32.42/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54741 @@ -39,6 +39,14 @@ enum machine_type {
54742 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54743 };
54744
54745 +/* Constants for the N_FLAGS field */
54746 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54747 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54748 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54749 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54750 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54751 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54752 +
54753 #if !defined (N_MAGIC)
54754 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54755 #endif
54756 diff -urNp linux-2.6.32.42/include/linux/atmdev.h linux-2.6.32.42/include/linux/atmdev.h
54757 --- linux-2.6.32.42/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54758 +++ linux-2.6.32.42/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54759 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54760 #endif
54761
54762 struct k_atm_aal_stats {
54763 -#define __HANDLE_ITEM(i) atomic_t i
54764 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54765 __AAL_STAT_ITEMS
54766 #undef __HANDLE_ITEM
54767 };
54768 diff -urNp linux-2.6.32.42/include/linux/backlight.h linux-2.6.32.42/include/linux/backlight.h
54769 --- linux-2.6.32.42/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54770 +++ linux-2.6.32.42/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54771 @@ -36,18 +36,18 @@ struct backlight_device;
54772 struct fb_info;
54773
54774 struct backlight_ops {
54775 - unsigned int options;
54776 + const unsigned int options;
54777
54778 #define BL_CORE_SUSPENDRESUME (1 << 0)
54779
54780 /* Notify the backlight driver some property has changed */
54781 - int (*update_status)(struct backlight_device *);
54782 + int (* const update_status)(struct backlight_device *);
54783 /* Return the current backlight brightness (accounting for power,
54784 fb_blank etc.) */
54785 - int (*get_brightness)(struct backlight_device *);
54786 + int (* const get_brightness)(struct backlight_device *);
54787 /* Check if given framebuffer device is the one bound to this backlight;
54788 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54789 - int (*check_fb)(struct fb_info *);
54790 + int (* const check_fb)(struct fb_info *);
54791 };
54792
54793 /* This structure defines all the properties of a backlight */
54794 @@ -86,7 +86,7 @@ struct backlight_device {
54795 registered this device has been unloaded, and if class_get_devdata()
54796 points to something in the body of that driver, it is also invalid. */
54797 struct mutex ops_lock;
54798 - struct backlight_ops *ops;
54799 + const struct backlight_ops *ops;
54800
54801 /* The framebuffer notifier block */
54802 struct notifier_block fb_notif;
54803 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54804 }
54805
54806 extern struct backlight_device *backlight_device_register(const char *name,
54807 - struct device *dev, void *devdata, struct backlight_ops *ops);
54808 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54809 extern void backlight_device_unregister(struct backlight_device *bd);
54810 extern void backlight_force_update(struct backlight_device *bd,
54811 enum backlight_update_reason reason);
54812 diff -urNp linux-2.6.32.42/include/linux/binfmts.h linux-2.6.32.42/include/linux/binfmts.h
54813 --- linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54814 +++ linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54815 @@ -83,6 +83,7 @@ struct linux_binfmt {
54816 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54817 int (*load_shlib)(struct file *);
54818 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54819 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54820 unsigned long min_coredump; /* minimal dump size */
54821 int hasvdso;
54822 };
54823 diff -urNp linux-2.6.32.42/include/linux/blkdev.h linux-2.6.32.42/include/linux/blkdev.h
54824 --- linux-2.6.32.42/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54825 +++ linux-2.6.32.42/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54826 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54827 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54828
54829 struct block_device_operations {
54830 - int (*open) (struct block_device *, fmode_t);
54831 - int (*release) (struct gendisk *, fmode_t);
54832 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54833 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54834 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54835 - int (*direct_access) (struct block_device *, sector_t,
54836 + int (* const open) (struct block_device *, fmode_t);
54837 + int (* const release) (struct gendisk *, fmode_t);
54838 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54839 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54840 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54841 + int (* const direct_access) (struct block_device *, sector_t,
54842 void **, unsigned long *);
54843 - int (*media_changed) (struct gendisk *);
54844 - unsigned long long (*set_capacity) (struct gendisk *,
54845 + int (* const media_changed) (struct gendisk *);
54846 + unsigned long long (* const set_capacity) (struct gendisk *,
54847 unsigned long long);
54848 - int (*revalidate_disk) (struct gendisk *);
54849 - int (*getgeo)(struct block_device *, struct hd_geometry *);
54850 - struct module *owner;
54851 + int (* const revalidate_disk) (struct gendisk *);
54852 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
54853 + struct module * const owner;
54854 };
54855
54856 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54857 diff -urNp linux-2.6.32.42/include/linux/blktrace_api.h linux-2.6.32.42/include/linux/blktrace_api.h
54858 --- linux-2.6.32.42/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54859 +++ linux-2.6.32.42/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54860 @@ -160,7 +160,7 @@ struct blk_trace {
54861 struct dentry *dir;
54862 struct dentry *dropped_file;
54863 struct dentry *msg_file;
54864 - atomic_t dropped;
54865 + atomic_unchecked_t dropped;
54866 };
54867
54868 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54869 diff -urNp linux-2.6.32.42/include/linux/byteorder/little_endian.h linux-2.6.32.42/include/linux/byteorder/little_endian.h
54870 --- linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54871 +++ linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54872 @@ -42,51 +42,51 @@
54873
54874 static inline __le64 __cpu_to_le64p(const __u64 *p)
54875 {
54876 - return (__force __le64)*p;
54877 + return (__force const __le64)*p;
54878 }
54879 static inline __u64 __le64_to_cpup(const __le64 *p)
54880 {
54881 - return (__force __u64)*p;
54882 + return (__force const __u64)*p;
54883 }
54884 static inline __le32 __cpu_to_le32p(const __u32 *p)
54885 {
54886 - return (__force __le32)*p;
54887 + return (__force const __le32)*p;
54888 }
54889 static inline __u32 __le32_to_cpup(const __le32 *p)
54890 {
54891 - return (__force __u32)*p;
54892 + return (__force const __u32)*p;
54893 }
54894 static inline __le16 __cpu_to_le16p(const __u16 *p)
54895 {
54896 - return (__force __le16)*p;
54897 + return (__force const __le16)*p;
54898 }
54899 static inline __u16 __le16_to_cpup(const __le16 *p)
54900 {
54901 - return (__force __u16)*p;
54902 + return (__force const __u16)*p;
54903 }
54904 static inline __be64 __cpu_to_be64p(const __u64 *p)
54905 {
54906 - return (__force __be64)__swab64p(p);
54907 + return (__force const __be64)__swab64p(p);
54908 }
54909 static inline __u64 __be64_to_cpup(const __be64 *p)
54910 {
54911 - return __swab64p((__u64 *)p);
54912 + return __swab64p((const __u64 *)p);
54913 }
54914 static inline __be32 __cpu_to_be32p(const __u32 *p)
54915 {
54916 - return (__force __be32)__swab32p(p);
54917 + return (__force const __be32)__swab32p(p);
54918 }
54919 static inline __u32 __be32_to_cpup(const __be32 *p)
54920 {
54921 - return __swab32p((__u32 *)p);
54922 + return __swab32p((const __u32 *)p);
54923 }
54924 static inline __be16 __cpu_to_be16p(const __u16 *p)
54925 {
54926 - return (__force __be16)__swab16p(p);
54927 + return (__force const __be16)__swab16p(p);
54928 }
54929 static inline __u16 __be16_to_cpup(const __be16 *p)
54930 {
54931 - return __swab16p((__u16 *)p);
54932 + return __swab16p((const __u16 *)p);
54933 }
54934 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54935 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54936 diff -urNp linux-2.6.32.42/include/linux/cache.h linux-2.6.32.42/include/linux/cache.h
54937 --- linux-2.6.32.42/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54938 +++ linux-2.6.32.42/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54939 @@ -16,6 +16,10 @@
54940 #define __read_mostly
54941 #endif
54942
54943 +#ifndef __read_only
54944 +#define __read_only __read_mostly
54945 +#endif
54946 +
54947 #ifndef ____cacheline_aligned
54948 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54949 #endif
54950 diff -urNp linux-2.6.32.42/include/linux/capability.h linux-2.6.32.42/include/linux/capability.h
54951 --- linux-2.6.32.42/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54952 +++ linux-2.6.32.42/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54953 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54954 (security_real_capable_noaudit((t), (cap)) == 0)
54955
54956 extern int capable(int cap);
54957 +int capable_nolog(int cap);
54958
54959 /* audit system wants to get cap info from files as well */
54960 struct dentry;
54961 diff -urNp linux-2.6.32.42/include/linux/compiler-gcc4.h linux-2.6.32.42/include/linux/compiler-gcc4.h
54962 --- linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54963 +++ linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54964 @@ -36,4 +36,8 @@
54965 the kernel context */
54966 #define __cold __attribute__((__cold__))
54967
54968 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54969 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54970 +#define __bos0(ptr) __bos((ptr), 0)
54971 +#define __bos1(ptr) __bos((ptr), 1)
54972 #endif
54973 diff -urNp linux-2.6.32.42/include/linux/compiler.h linux-2.6.32.42/include/linux/compiler.h
54974 --- linux-2.6.32.42/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54975 +++ linux-2.6.32.42/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54976 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54977 #define __cold
54978 #endif
54979
54980 +#ifndef __alloc_size
54981 +#define __alloc_size
54982 +#endif
54983 +
54984 +#ifndef __bos
54985 +#define __bos
54986 +#endif
54987 +
54988 +#ifndef __bos0
54989 +#define __bos0
54990 +#endif
54991 +
54992 +#ifndef __bos1
54993 +#define __bos1
54994 +#endif
54995 +
54996 /* Simple shorthand for a section definition */
54997 #ifndef __section
54998 # define __section(S) __attribute__ ((__section__(#S)))
54999 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
55000 * use is to mediate communication between process-level code and irq/NMI
55001 * handlers, all running on the same CPU.
55002 */
55003 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
55004 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
55005 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
55006
55007 #endif /* __LINUX_COMPILER_H */
55008 diff -urNp linux-2.6.32.42/include/linux/dcache.h linux-2.6.32.42/include/linux/dcache.h
55009 --- linux-2.6.32.42/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
55010 +++ linux-2.6.32.42/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
55011 @@ -119,6 +119,8 @@ struct dentry {
55012 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
55013 };
55014
55015 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
55016 +
55017 /*
55018 * dentry->d_lock spinlock nesting subclasses:
55019 *
55020 diff -urNp linux-2.6.32.42/include/linux/decompress/mm.h linux-2.6.32.42/include/linux/decompress/mm.h
55021 --- linux-2.6.32.42/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
55022 +++ linux-2.6.32.42/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
55023 @@ -78,7 +78,7 @@ static void free(void *where)
55024 * warnings when not needed (indeed large_malloc / large_free are not
55025 * needed by inflate */
55026
55027 -#define malloc(a) kmalloc(a, GFP_KERNEL)
55028 +#define malloc(a) kmalloc((a), GFP_KERNEL)
55029 #define free(a) kfree(a)
55030
55031 #define large_malloc(a) vmalloc(a)
55032 diff -urNp linux-2.6.32.42/include/linux/dma-mapping.h linux-2.6.32.42/include/linux/dma-mapping.h
55033 --- linux-2.6.32.42/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
55034 +++ linux-2.6.32.42/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
55035 @@ -16,50 +16,50 @@ enum dma_data_direction {
55036 };
55037
55038 struct dma_map_ops {
55039 - void* (*alloc_coherent)(struct device *dev, size_t size,
55040 + void* (* const alloc_coherent)(struct device *dev, size_t size,
55041 dma_addr_t *dma_handle, gfp_t gfp);
55042 - void (*free_coherent)(struct device *dev, size_t size,
55043 + void (* const free_coherent)(struct device *dev, size_t size,
55044 void *vaddr, dma_addr_t dma_handle);
55045 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
55046 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
55047 unsigned long offset, size_t size,
55048 enum dma_data_direction dir,
55049 struct dma_attrs *attrs);
55050 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
55051 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
55052 size_t size, enum dma_data_direction dir,
55053 struct dma_attrs *attrs);
55054 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
55055 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
55056 int nents, enum dma_data_direction dir,
55057 struct dma_attrs *attrs);
55058 - void (*unmap_sg)(struct device *dev,
55059 + void (* const unmap_sg)(struct device *dev,
55060 struct scatterlist *sg, int nents,
55061 enum dma_data_direction dir,
55062 struct dma_attrs *attrs);
55063 - void (*sync_single_for_cpu)(struct device *dev,
55064 + void (* const sync_single_for_cpu)(struct device *dev,
55065 dma_addr_t dma_handle, size_t size,
55066 enum dma_data_direction dir);
55067 - void (*sync_single_for_device)(struct device *dev,
55068 + void (* const sync_single_for_device)(struct device *dev,
55069 dma_addr_t dma_handle, size_t size,
55070 enum dma_data_direction dir);
55071 - void (*sync_single_range_for_cpu)(struct device *dev,
55072 + void (* const sync_single_range_for_cpu)(struct device *dev,
55073 dma_addr_t dma_handle,
55074 unsigned long offset,
55075 size_t size,
55076 enum dma_data_direction dir);
55077 - void (*sync_single_range_for_device)(struct device *dev,
55078 + void (* const sync_single_range_for_device)(struct device *dev,
55079 dma_addr_t dma_handle,
55080 unsigned long offset,
55081 size_t size,
55082 enum dma_data_direction dir);
55083 - void (*sync_sg_for_cpu)(struct device *dev,
55084 + void (* const sync_sg_for_cpu)(struct device *dev,
55085 struct scatterlist *sg, int nents,
55086 enum dma_data_direction dir);
55087 - void (*sync_sg_for_device)(struct device *dev,
55088 + void (* const sync_sg_for_device)(struct device *dev,
55089 struct scatterlist *sg, int nents,
55090 enum dma_data_direction dir);
55091 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
55092 - int (*dma_supported)(struct device *dev, u64 mask);
55093 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
55094 + int (* const dma_supported)(struct device *dev, u64 mask);
55095 int (*set_dma_mask)(struct device *dev, u64 mask);
55096 - int is_phys;
55097 + const int is_phys;
55098 };
55099
55100 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
55101 diff -urNp linux-2.6.32.42/include/linux/dst.h linux-2.6.32.42/include/linux/dst.h
55102 --- linux-2.6.32.42/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
55103 +++ linux-2.6.32.42/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
55104 @@ -380,7 +380,7 @@ struct dst_node
55105 struct thread_pool *pool;
55106
55107 /* Transaction IDs live here */
55108 - atomic_long_t gen;
55109 + atomic_long_unchecked_t gen;
55110
55111 /*
55112 * How frequently and how many times transaction
55113 diff -urNp linux-2.6.32.42/include/linux/elf.h linux-2.6.32.42/include/linux/elf.h
55114 --- linux-2.6.32.42/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
55115 +++ linux-2.6.32.42/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
55116 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
55117 #define PT_GNU_EH_FRAME 0x6474e550
55118
55119 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
55120 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
55121 +
55122 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
55123 +
55124 +/* Constants for the e_flags field */
55125 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55126 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
55127 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
55128 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
55129 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55130 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55131
55132 /* These constants define the different elf file types */
55133 #define ET_NONE 0
55134 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
55135 #define DT_DEBUG 21
55136 #define DT_TEXTREL 22
55137 #define DT_JMPREL 23
55138 +#define DT_FLAGS 30
55139 + #define DF_TEXTREL 0x00000004
55140 #define DT_ENCODING 32
55141 #define OLD_DT_LOOS 0x60000000
55142 #define DT_LOOS 0x6000000d
55143 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
55144 #define PF_W 0x2
55145 #define PF_X 0x1
55146
55147 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
55148 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
55149 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
55150 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
55151 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
55152 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
55153 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
55154 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
55155 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
55156 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
55157 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
55158 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
55159 +
55160 typedef struct elf32_phdr{
55161 Elf32_Word p_type;
55162 Elf32_Off p_offset;
55163 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
55164 #define EI_OSABI 7
55165 #define EI_PAD 8
55166
55167 +#define EI_PAX 14
55168 +
55169 #define ELFMAG0 0x7f /* EI_MAG */
55170 #define ELFMAG1 'E'
55171 #define ELFMAG2 'L'
55172 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
55173 #define elf_phdr elf32_phdr
55174 #define elf_note elf32_note
55175 #define elf_addr_t Elf32_Off
55176 +#define elf_dyn Elf32_Dyn
55177
55178 #else
55179
55180 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
55181 #define elf_phdr elf64_phdr
55182 #define elf_note elf64_note
55183 #define elf_addr_t Elf64_Off
55184 +#define elf_dyn Elf64_Dyn
55185
55186 #endif
55187
55188 diff -urNp linux-2.6.32.42/include/linux/fscache-cache.h linux-2.6.32.42/include/linux/fscache-cache.h
55189 --- linux-2.6.32.42/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
55190 +++ linux-2.6.32.42/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
55191 @@ -116,7 +116,7 @@ struct fscache_operation {
55192 #endif
55193 };
55194
55195 -extern atomic_t fscache_op_debug_id;
55196 +extern atomic_unchecked_t fscache_op_debug_id;
55197 extern const struct slow_work_ops fscache_op_slow_work_ops;
55198
55199 extern void fscache_enqueue_operation(struct fscache_operation *);
55200 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
55201 fscache_operation_release_t release)
55202 {
55203 atomic_set(&op->usage, 1);
55204 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
55205 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
55206 op->release = release;
55207 INIT_LIST_HEAD(&op->pend_link);
55208 fscache_set_op_state(op, "Init");
55209 diff -urNp linux-2.6.32.42/include/linux/fs.h linux-2.6.32.42/include/linux/fs.h
55210 --- linux-2.6.32.42/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
55211 +++ linux-2.6.32.42/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
55212 @@ -90,6 +90,11 @@ struct inodes_stat_t {
55213 /* Expect random access pattern */
55214 #define FMODE_RANDOM ((__force fmode_t)4096)
55215
55216 +/* Hack for grsec so as not to require read permission simply to execute
55217 + * a binary
55218 + */
55219 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
55220 +
55221 /*
55222 * The below are the various read and write types that we support. Some of
55223 * them include behavioral modifiers that send information down to the
55224 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
55225 unsigned long, unsigned long);
55226
55227 struct address_space_operations {
55228 - int (*writepage)(struct page *page, struct writeback_control *wbc);
55229 - int (*readpage)(struct file *, struct page *);
55230 - void (*sync_page)(struct page *);
55231 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
55232 + int (* const readpage)(struct file *, struct page *);
55233 + void (* const sync_page)(struct page *);
55234
55235 /* Write back some dirty pages from this mapping. */
55236 - int (*writepages)(struct address_space *, struct writeback_control *);
55237 + int (* const writepages)(struct address_space *, struct writeback_control *);
55238
55239 /* Set a page dirty. Return true if this dirtied it */
55240 - int (*set_page_dirty)(struct page *page);
55241 + int (* const set_page_dirty)(struct page *page);
55242
55243 - int (*readpages)(struct file *filp, struct address_space *mapping,
55244 + int (* const readpages)(struct file *filp, struct address_space *mapping,
55245 struct list_head *pages, unsigned nr_pages);
55246
55247 - int (*write_begin)(struct file *, struct address_space *mapping,
55248 + int (* const write_begin)(struct file *, struct address_space *mapping,
55249 loff_t pos, unsigned len, unsigned flags,
55250 struct page **pagep, void **fsdata);
55251 - int (*write_end)(struct file *, struct address_space *mapping,
55252 + int (* const write_end)(struct file *, struct address_space *mapping,
55253 loff_t pos, unsigned len, unsigned copied,
55254 struct page *page, void *fsdata);
55255
55256 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
55257 - sector_t (*bmap)(struct address_space *, sector_t);
55258 - void (*invalidatepage) (struct page *, unsigned long);
55259 - int (*releasepage) (struct page *, gfp_t);
55260 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
55261 + sector_t (* const bmap)(struct address_space *, sector_t);
55262 + void (* const invalidatepage) (struct page *, unsigned long);
55263 + int (* const releasepage) (struct page *, gfp_t);
55264 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
55265 loff_t offset, unsigned long nr_segs);
55266 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
55267 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
55268 void **, unsigned long *);
55269 /* migrate the contents of a page to the specified target */
55270 - int (*migratepage) (struct address_space *,
55271 + int (* const migratepage) (struct address_space *,
55272 struct page *, struct page *);
55273 - int (*launder_page) (struct page *);
55274 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
55275 + int (* const launder_page) (struct page *);
55276 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
55277 unsigned long);
55278 - int (*error_remove_page)(struct address_space *, struct page *);
55279 + int (* const error_remove_page)(struct address_space *, struct page *);
55280 };
55281
55282 /*
55283 @@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
55284 typedef struct files_struct *fl_owner_t;
55285
55286 struct file_lock_operations {
55287 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55288 - void (*fl_release_private)(struct file_lock *);
55289 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55290 + void (* const fl_release_private)(struct file_lock *);
55291 };
55292
55293 struct lock_manager_operations {
55294 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
55295 - void (*fl_notify)(struct file_lock *); /* unblock callback */
55296 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
55297 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55298 - void (*fl_release_private)(struct file_lock *);
55299 - void (*fl_break)(struct file_lock *);
55300 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
55301 - int (*fl_change)(struct file_lock **, int);
55302 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
55303 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
55304 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
55305 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55306 + void (* const fl_release_private)(struct file_lock *);
55307 + void (* const fl_break)(struct file_lock *);
55308 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
55309 + int (* const fl_change)(struct file_lock **, int);
55310 };
55311
55312 struct lock_manager {
55313 @@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
55314 unsigned int fi_flags; /* Flags as passed from user */
55315 unsigned int fi_extents_mapped; /* Number of mapped extents */
55316 unsigned int fi_extents_max; /* Size of fiemap_extent array */
55317 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
55318 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
55319 * array */
55320 };
55321 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
55322 @@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
55323 unsigned long, loff_t *);
55324
55325 struct super_operations {
55326 - struct inode *(*alloc_inode)(struct super_block *sb);
55327 - void (*destroy_inode)(struct inode *);
55328 + struct inode *(* const alloc_inode)(struct super_block *sb);
55329 + void (* const destroy_inode)(struct inode *);
55330
55331 - void (*dirty_inode) (struct inode *);
55332 - int (*write_inode) (struct inode *, int);
55333 - void (*drop_inode) (struct inode *);
55334 - void (*delete_inode) (struct inode *);
55335 - void (*put_super) (struct super_block *);
55336 - void (*write_super) (struct super_block *);
55337 - int (*sync_fs)(struct super_block *sb, int wait);
55338 - int (*freeze_fs) (struct super_block *);
55339 - int (*unfreeze_fs) (struct super_block *);
55340 - int (*statfs) (struct dentry *, struct kstatfs *);
55341 - int (*remount_fs) (struct super_block *, int *, char *);
55342 - void (*clear_inode) (struct inode *);
55343 - void (*umount_begin) (struct super_block *);
55344 + void (* const dirty_inode) (struct inode *);
55345 + int (* const write_inode) (struct inode *, int);
55346 + void (* const drop_inode) (struct inode *);
55347 + void (* const delete_inode) (struct inode *);
55348 + void (* const put_super) (struct super_block *);
55349 + void (* const write_super) (struct super_block *);
55350 + int (* const sync_fs)(struct super_block *sb, int wait);
55351 + int (* const freeze_fs) (struct super_block *);
55352 + int (* const unfreeze_fs) (struct super_block *);
55353 + int (* const statfs) (struct dentry *, struct kstatfs *);
55354 + int (* const remount_fs) (struct super_block *, int *, char *);
55355 + void (* const clear_inode) (struct inode *);
55356 + void (* const umount_begin) (struct super_block *);
55357
55358 - int (*show_options)(struct seq_file *, struct vfsmount *);
55359 - int (*show_stats)(struct seq_file *, struct vfsmount *);
55360 + int (* const show_options)(struct seq_file *, struct vfsmount *);
55361 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
55362 #ifdef CONFIG_QUOTA
55363 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
55364 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55365 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
55366 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55367 #endif
55368 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55369 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55370 };
55371
55372 /*
55373 diff -urNp linux-2.6.32.42/include/linux/fs_struct.h linux-2.6.32.42/include/linux/fs_struct.h
55374 --- linux-2.6.32.42/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
55375 +++ linux-2.6.32.42/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
55376 @@ -4,7 +4,7 @@
55377 #include <linux/path.h>
55378
55379 struct fs_struct {
55380 - int users;
55381 + atomic_t users;
55382 rwlock_t lock;
55383 int umask;
55384 int in_exec;
55385 diff -urNp linux-2.6.32.42/include/linux/ftrace_event.h linux-2.6.32.42/include/linux/ftrace_event.h
55386 --- linux-2.6.32.42/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
55387 +++ linux-2.6.32.42/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
55388 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
55389 int filter_type);
55390 extern int trace_define_common_fields(struct ftrace_event_call *call);
55391
55392 -#define is_signed_type(type) (((type)(-1)) < 0)
55393 +#define is_signed_type(type) (((type)(-1)) < (type)1)
55394
55395 int trace_set_clr_event(const char *system, const char *event, int set);
55396
55397 diff -urNp linux-2.6.32.42/include/linux/genhd.h linux-2.6.32.42/include/linux/genhd.h
55398 --- linux-2.6.32.42/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
55399 +++ linux-2.6.32.42/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
55400 @@ -161,7 +161,7 @@ struct gendisk {
55401
55402 struct timer_rand_state *random;
55403
55404 - atomic_t sync_io; /* RAID */
55405 + atomic_unchecked_t sync_io; /* RAID */
55406 struct work_struct async_notify;
55407 #ifdef CONFIG_BLK_DEV_INTEGRITY
55408 struct blk_integrity *integrity;
55409 diff -urNp linux-2.6.32.42/include/linux/gracl.h linux-2.6.32.42/include/linux/gracl.h
55410 --- linux-2.6.32.42/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
55411 +++ linux-2.6.32.42/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
55412 @@ -0,0 +1,317 @@
55413 +#ifndef GR_ACL_H
55414 +#define GR_ACL_H
55415 +
55416 +#include <linux/grdefs.h>
55417 +#include <linux/resource.h>
55418 +#include <linux/capability.h>
55419 +#include <linux/dcache.h>
55420 +#include <asm/resource.h>
55421 +
55422 +/* Major status information */
55423 +
55424 +#define GR_VERSION "grsecurity 2.2.2"
55425 +#define GRSECURITY_VERSION 0x2202
55426 +
55427 +enum {
55428 + GR_SHUTDOWN = 0,
55429 + GR_ENABLE = 1,
55430 + GR_SPROLE = 2,
55431 + GR_RELOAD = 3,
55432 + GR_SEGVMOD = 4,
55433 + GR_STATUS = 5,
55434 + GR_UNSPROLE = 6,
55435 + GR_PASSSET = 7,
55436 + GR_SPROLEPAM = 8,
55437 +};
55438 +
55439 +/* Password setup definitions
55440 + * kernel/grhash.c */
55441 +enum {
55442 + GR_PW_LEN = 128,
55443 + GR_SALT_LEN = 16,
55444 + GR_SHA_LEN = 32,
55445 +};
55446 +
55447 +enum {
55448 + GR_SPROLE_LEN = 64,
55449 +};
55450 +
55451 +enum {
55452 + GR_NO_GLOB = 0,
55453 + GR_REG_GLOB,
55454 + GR_CREATE_GLOB
55455 +};
55456 +
55457 +#define GR_NLIMITS 32
55458 +
55459 +/* Begin Data Structures */
55460 +
55461 +struct sprole_pw {
55462 + unsigned char *rolename;
55463 + unsigned char salt[GR_SALT_LEN];
55464 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
55465 +};
55466 +
55467 +struct name_entry {
55468 + __u32 key;
55469 + ino_t inode;
55470 + dev_t device;
55471 + char *name;
55472 + __u16 len;
55473 + __u8 deleted;
55474 + struct name_entry *prev;
55475 + struct name_entry *next;
55476 +};
55477 +
55478 +struct inodev_entry {
55479 + struct name_entry *nentry;
55480 + struct inodev_entry *prev;
55481 + struct inodev_entry *next;
55482 +};
55483 +
55484 +struct acl_role_db {
55485 + struct acl_role_label **r_hash;
55486 + __u32 r_size;
55487 +};
55488 +
55489 +struct inodev_db {
55490 + struct inodev_entry **i_hash;
55491 + __u32 i_size;
55492 +};
55493 +
55494 +struct name_db {
55495 + struct name_entry **n_hash;
55496 + __u32 n_size;
55497 +};
55498 +
55499 +struct crash_uid {
55500 + uid_t uid;
55501 + unsigned long expires;
55502 +};
55503 +
55504 +struct gr_hash_struct {
55505 + void **table;
55506 + void **nametable;
55507 + void *first;
55508 + __u32 table_size;
55509 + __u32 used_size;
55510 + int type;
55511 +};
55512 +
55513 +/* Userspace Grsecurity ACL data structures */
55514 +
55515 +struct acl_subject_label {
55516 + char *filename;
55517 + ino_t inode;
55518 + dev_t device;
55519 + __u32 mode;
55520 + kernel_cap_t cap_mask;
55521 + kernel_cap_t cap_lower;
55522 + kernel_cap_t cap_invert_audit;
55523 +
55524 + struct rlimit res[GR_NLIMITS];
55525 + __u32 resmask;
55526 +
55527 + __u8 user_trans_type;
55528 + __u8 group_trans_type;
55529 + uid_t *user_transitions;
55530 + gid_t *group_transitions;
55531 + __u16 user_trans_num;
55532 + __u16 group_trans_num;
55533 +
55534 + __u32 sock_families[2];
55535 + __u32 ip_proto[8];
55536 + __u32 ip_type;
55537 + struct acl_ip_label **ips;
55538 + __u32 ip_num;
55539 + __u32 inaddr_any_override;
55540 +
55541 + __u32 crashes;
55542 + unsigned long expires;
55543 +
55544 + struct acl_subject_label *parent_subject;
55545 + struct gr_hash_struct *hash;
55546 + struct acl_subject_label *prev;
55547 + struct acl_subject_label *next;
55548 +
55549 + struct acl_object_label **obj_hash;
55550 + __u32 obj_hash_size;
55551 + __u16 pax_flags;
55552 +};
55553 +
55554 +struct role_allowed_ip {
55555 + __u32 addr;
55556 + __u32 netmask;
55557 +
55558 + struct role_allowed_ip *prev;
55559 + struct role_allowed_ip *next;
55560 +};
55561 +
55562 +struct role_transition {
55563 + char *rolename;
55564 +
55565 + struct role_transition *prev;
55566 + struct role_transition *next;
55567 +};
55568 +
55569 +struct acl_role_label {
55570 + char *rolename;
55571 + uid_t uidgid;
55572 + __u16 roletype;
55573 +
55574 + __u16 auth_attempts;
55575 + unsigned long expires;
55576 +
55577 + struct acl_subject_label *root_label;
55578 + struct gr_hash_struct *hash;
55579 +
55580 + struct acl_role_label *prev;
55581 + struct acl_role_label *next;
55582 +
55583 + struct role_transition *transitions;
55584 + struct role_allowed_ip *allowed_ips;
55585 + uid_t *domain_children;
55586 + __u16 domain_child_num;
55587 +
55588 + struct acl_subject_label **subj_hash;
55589 + __u32 subj_hash_size;
55590 +};
55591 +
55592 +struct user_acl_role_db {
55593 + struct acl_role_label **r_table;
55594 + __u32 num_pointers; /* Number of allocations to track */
55595 + __u32 num_roles; /* Number of roles */
55596 + __u32 num_domain_children; /* Number of domain children */
55597 + __u32 num_subjects; /* Number of subjects */
55598 + __u32 num_objects; /* Number of objects */
55599 +};
55600 +
55601 +struct acl_object_label {
55602 + char *filename;
55603 + ino_t inode;
55604 + dev_t device;
55605 + __u32 mode;
55606 +
55607 + struct acl_subject_label *nested;
55608 + struct acl_object_label *globbed;
55609 +
55610 + /* next two structures not used */
55611 +
55612 + struct acl_object_label *prev;
55613 + struct acl_object_label *next;
55614 +};
55615 +
55616 +struct acl_ip_label {
55617 + char *iface;
55618 + __u32 addr;
55619 + __u32 netmask;
55620 + __u16 low, high;
55621 + __u8 mode;
55622 + __u32 type;
55623 + __u32 proto[8];
55624 +
55625 + /* next two structures not used */
55626 +
55627 + struct acl_ip_label *prev;
55628 + struct acl_ip_label *next;
55629 +};
55630 +
55631 +struct gr_arg {
55632 + struct user_acl_role_db role_db;
55633 + unsigned char pw[GR_PW_LEN];
55634 + unsigned char salt[GR_SALT_LEN];
55635 + unsigned char sum[GR_SHA_LEN];
55636 + unsigned char sp_role[GR_SPROLE_LEN];
55637 + struct sprole_pw *sprole_pws;
55638 + dev_t segv_device;
55639 + ino_t segv_inode;
55640 + uid_t segv_uid;
55641 + __u16 num_sprole_pws;
55642 + __u16 mode;
55643 +};
55644 +
55645 +struct gr_arg_wrapper {
55646 + struct gr_arg *arg;
55647 + __u32 version;
55648 + __u32 size;
55649 +};
55650 +
55651 +struct subject_map {
55652 + struct acl_subject_label *user;
55653 + struct acl_subject_label *kernel;
55654 + struct subject_map *prev;
55655 + struct subject_map *next;
55656 +};
55657 +
55658 +struct acl_subj_map_db {
55659 + struct subject_map **s_hash;
55660 + __u32 s_size;
55661 +};
55662 +
55663 +/* End Data Structures Section */
55664 +
55665 +/* Hash functions generated by empirical testing by Brad Spengler
55666 + Makes good use of the low bits of the inode. Generally 0-1 times
55667 + in loop for successful match. 0-3 for unsuccessful match.
55668 + Shift/add algorithm with modulus of table size and an XOR*/
55669 +
55670 +static __inline__ unsigned int
55671 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55672 +{
55673 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55674 +}
55675 +
55676 + static __inline__ unsigned int
55677 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55678 +{
55679 + return ((const unsigned long)userp % sz);
55680 +}
55681 +
55682 +static __inline__ unsigned int
55683 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55684 +{
55685 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55686 +}
55687 +
55688 +static __inline__ unsigned int
55689 +nhash(const char *name, const __u16 len, const unsigned int sz)
55690 +{
55691 + return full_name_hash((const unsigned char *)name, len) % sz;
55692 +}
55693 +
55694 +#define FOR_EACH_ROLE_START(role) \
55695 + role = role_list; \
55696 + while (role) {
55697 +
55698 +#define FOR_EACH_ROLE_END(role) \
55699 + role = role->prev; \
55700 + }
55701 +
55702 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55703 + subj = NULL; \
55704 + iter = 0; \
55705 + while (iter < role->subj_hash_size) { \
55706 + if (subj == NULL) \
55707 + subj = role->subj_hash[iter]; \
55708 + if (subj == NULL) { \
55709 + iter++; \
55710 + continue; \
55711 + }
55712 +
55713 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55714 + subj = subj->next; \
55715 + if (subj == NULL) \
55716 + iter++; \
55717 + }
55718 +
55719 +
55720 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55721 + subj = role->hash->first; \
55722 + while (subj != NULL) {
55723 +
55724 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55725 + subj = subj->next; \
55726 + }
55727 +
55728 +#endif
55729 +
55730 diff -urNp linux-2.6.32.42/include/linux/gralloc.h linux-2.6.32.42/include/linux/gralloc.h
55731 --- linux-2.6.32.42/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55732 +++ linux-2.6.32.42/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55733 @@ -0,0 +1,9 @@
55734 +#ifndef __GRALLOC_H
55735 +#define __GRALLOC_H
55736 +
55737 +void acl_free_all(void);
55738 +int acl_alloc_stack_init(unsigned long size);
55739 +void *acl_alloc(unsigned long len);
55740 +void *acl_alloc_num(unsigned long num, unsigned long len);
55741 +
55742 +#endif
55743 diff -urNp linux-2.6.32.42/include/linux/grdefs.h linux-2.6.32.42/include/linux/grdefs.h
55744 --- linux-2.6.32.42/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55745 +++ linux-2.6.32.42/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55746 @@ -0,0 +1,140 @@
55747 +#ifndef GRDEFS_H
55748 +#define GRDEFS_H
55749 +
55750 +/* Begin grsecurity status declarations */
55751 +
55752 +enum {
55753 + GR_READY = 0x01,
55754 + GR_STATUS_INIT = 0x00 // disabled state
55755 +};
55756 +
55757 +/* Begin ACL declarations */
55758 +
55759 +/* Role flags */
55760 +
55761 +enum {
55762 + GR_ROLE_USER = 0x0001,
55763 + GR_ROLE_GROUP = 0x0002,
55764 + GR_ROLE_DEFAULT = 0x0004,
55765 + GR_ROLE_SPECIAL = 0x0008,
55766 + GR_ROLE_AUTH = 0x0010,
55767 + GR_ROLE_NOPW = 0x0020,
55768 + GR_ROLE_GOD = 0x0040,
55769 + GR_ROLE_LEARN = 0x0080,
55770 + GR_ROLE_TPE = 0x0100,
55771 + GR_ROLE_DOMAIN = 0x0200,
55772 + GR_ROLE_PAM = 0x0400,
55773 + GR_ROLE_PERSIST = 0x800
55774 +};
55775 +
55776 +/* ACL Subject and Object mode flags */
55777 +enum {
55778 + GR_DELETED = 0x80000000
55779 +};
55780 +
55781 +/* ACL Object-only mode flags */
55782 +enum {
55783 + GR_READ = 0x00000001,
55784 + GR_APPEND = 0x00000002,
55785 + GR_WRITE = 0x00000004,
55786 + GR_EXEC = 0x00000008,
55787 + GR_FIND = 0x00000010,
55788 + GR_INHERIT = 0x00000020,
55789 + GR_SETID = 0x00000040,
55790 + GR_CREATE = 0x00000080,
55791 + GR_DELETE = 0x00000100,
55792 + GR_LINK = 0x00000200,
55793 + GR_AUDIT_READ = 0x00000400,
55794 + GR_AUDIT_APPEND = 0x00000800,
55795 + GR_AUDIT_WRITE = 0x00001000,
55796 + GR_AUDIT_EXEC = 0x00002000,
55797 + GR_AUDIT_FIND = 0x00004000,
55798 + GR_AUDIT_INHERIT= 0x00008000,
55799 + GR_AUDIT_SETID = 0x00010000,
55800 + GR_AUDIT_CREATE = 0x00020000,
55801 + GR_AUDIT_DELETE = 0x00040000,
55802 + GR_AUDIT_LINK = 0x00080000,
55803 + GR_PTRACERD = 0x00100000,
55804 + GR_NOPTRACE = 0x00200000,
55805 + GR_SUPPRESS = 0x00400000,
55806 + GR_NOLEARN = 0x00800000,
55807 + GR_INIT_TRANSFER= 0x01000000
55808 +};
55809 +
55810 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55811 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55812 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55813 +
55814 +/* ACL subject-only mode flags */
55815 +enum {
55816 + GR_KILL = 0x00000001,
55817 + GR_VIEW = 0x00000002,
55818 + GR_PROTECTED = 0x00000004,
55819 + GR_LEARN = 0x00000008,
55820 + GR_OVERRIDE = 0x00000010,
55821 + /* just a placeholder, this mode is only used in userspace */
55822 + GR_DUMMY = 0x00000020,
55823 + GR_PROTSHM = 0x00000040,
55824 + GR_KILLPROC = 0x00000080,
55825 + GR_KILLIPPROC = 0x00000100,
55826 + /* just a placeholder, this mode is only used in userspace */
55827 + GR_NOTROJAN = 0x00000200,
55828 + GR_PROTPROCFD = 0x00000400,
55829 + GR_PROCACCT = 0x00000800,
55830 + GR_RELAXPTRACE = 0x00001000,
55831 + GR_NESTED = 0x00002000,
55832 + GR_INHERITLEARN = 0x00004000,
55833 + GR_PROCFIND = 0x00008000,
55834 + GR_POVERRIDE = 0x00010000,
55835 + GR_KERNELAUTH = 0x00020000,
55836 + GR_ATSECURE = 0x00040000,
55837 + GR_SHMEXEC = 0x00080000
55838 +};
55839 +
55840 +enum {
55841 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55842 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55843 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55844 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55845 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55846 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55847 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55848 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55849 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55850 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55851 +};
55852 +
55853 +enum {
55854 + GR_ID_USER = 0x01,
55855 + GR_ID_GROUP = 0x02,
55856 +};
55857 +
55858 +enum {
55859 + GR_ID_ALLOW = 0x01,
55860 + GR_ID_DENY = 0x02,
55861 +};
55862 +
55863 +#define GR_CRASH_RES 31
55864 +#define GR_UIDTABLE_MAX 500
55865 +
55866 +/* begin resource learning section */
55867 +enum {
55868 + GR_RLIM_CPU_BUMP = 60,
55869 + GR_RLIM_FSIZE_BUMP = 50000,
55870 + GR_RLIM_DATA_BUMP = 10000,
55871 + GR_RLIM_STACK_BUMP = 1000,
55872 + GR_RLIM_CORE_BUMP = 10000,
55873 + GR_RLIM_RSS_BUMP = 500000,
55874 + GR_RLIM_NPROC_BUMP = 1,
55875 + GR_RLIM_NOFILE_BUMP = 5,
55876 + GR_RLIM_MEMLOCK_BUMP = 50000,
55877 + GR_RLIM_AS_BUMP = 500000,
55878 + GR_RLIM_LOCKS_BUMP = 2,
55879 + GR_RLIM_SIGPENDING_BUMP = 5,
55880 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55881 + GR_RLIM_NICE_BUMP = 1,
55882 + GR_RLIM_RTPRIO_BUMP = 1,
55883 + GR_RLIM_RTTIME_BUMP = 1000000
55884 +};
55885 +
55886 +#endif
55887 diff -urNp linux-2.6.32.42/include/linux/grinternal.h linux-2.6.32.42/include/linux/grinternal.h
55888 --- linux-2.6.32.42/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55889 +++ linux-2.6.32.42/include/linux/grinternal.h 2011-06-29 19:41:14.000000000 -0400
55890 @@ -0,0 +1,219 @@
55891 +#ifndef __GRINTERNAL_H
55892 +#define __GRINTERNAL_H
55893 +
55894 +#ifdef CONFIG_GRKERNSEC
55895 +
55896 +#include <linux/fs.h>
55897 +#include <linux/mnt_namespace.h>
55898 +#include <linux/nsproxy.h>
55899 +#include <linux/gracl.h>
55900 +#include <linux/grdefs.h>
55901 +#include <linux/grmsg.h>
55902 +
55903 +void gr_add_learn_entry(const char *fmt, ...)
55904 + __attribute__ ((format (printf, 1, 2)));
55905 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55906 + const struct vfsmount *mnt);
55907 +__u32 gr_check_create(const struct dentry *new_dentry,
55908 + const struct dentry *parent,
55909 + const struct vfsmount *mnt, const __u32 mode);
55910 +int gr_check_protected_task(const struct task_struct *task);
55911 +__u32 to_gr_audit(const __u32 reqmode);
55912 +int gr_set_acls(const int type);
55913 +int gr_apply_subject_to_task(struct task_struct *task);
55914 +int gr_acl_is_enabled(void);
55915 +char gr_roletype_to_char(void);
55916 +
55917 +void gr_handle_alertkill(struct task_struct *task);
55918 +char *gr_to_filename(const struct dentry *dentry,
55919 + const struct vfsmount *mnt);
55920 +char *gr_to_filename1(const struct dentry *dentry,
55921 + const struct vfsmount *mnt);
55922 +char *gr_to_filename2(const struct dentry *dentry,
55923 + const struct vfsmount *mnt);
55924 +char *gr_to_filename3(const struct dentry *dentry,
55925 + const struct vfsmount *mnt);
55926 +
55927 +extern int grsec_enable_harden_ptrace;
55928 +extern int grsec_enable_link;
55929 +extern int grsec_enable_fifo;
55930 +extern int grsec_enable_execve;
55931 +extern int grsec_enable_shm;
55932 +extern int grsec_enable_execlog;
55933 +extern int grsec_enable_signal;
55934 +extern int grsec_enable_audit_ptrace;
55935 +extern int grsec_enable_forkfail;
55936 +extern int grsec_enable_time;
55937 +extern int grsec_enable_rofs;
55938 +extern int grsec_enable_chroot_shmat;
55939 +extern int grsec_enable_chroot_findtask;
55940 +extern int grsec_enable_chroot_mount;
55941 +extern int grsec_enable_chroot_double;
55942 +extern int grsec_enable_chroot_pivot;
55943 +extern int grsec_enable_chroot_chdir;
55944 +extern int grsec_enable_chroot_chmod;
55945 +extern int grsec_enable_chroot_mknod;
55946 +extern int grsec_enable_chroot_fchdir;
55947 +extern int grsec_enable_chroot_nice;
55948 +extern int grsec_enable_chroot_execlog;
55949 +extern int grsec_enable_chroot_caps;
55950 +extern int grsec_enable_chroot_sysctl;
55951 +extern int grsec_enable_chroot_unix;
55952 +extern int grsec_enable_tpe;
55953 +extern int grsec_tpe_gid;
55954 +extern int grsec_enable_tpe_all;
55955 +extern int grsec_enable_tpe_invert;
55956 +extern int grsec_enable_socket_all;
55957 +extern int grsec_socket_all_gid;
55958 +extern int grsec_enable_socket_client;
55959 +extern int grsec_socket_client_gid;
55960 +extern int grsec_enable_socket_server;
55961 +extern int grsec_socket_server_gid;
55962 +extern int grsec_audit_gid;
55963 +extern int grsec_enable_group;
55964 +extern int grsec_enable_audit_textrel;
55965 +extern int grsec_enable_log_rwxmaps;
55966 +extern int grsec_enable_mount;
55967 +extern int grsec_enable_chdir;
55968 +extern int grsec_resource_logging;
55969 +extern int grsec_enable_blackhole;
55970 +extern int grsec_lastack_retries;
55971 +extern int grsec_enable_brute;
55972 +extern int grsec_lock;
55973 +
55974 +extern spinlock_t grsec_alert_lock;
55975 +extern unsigned long grsec_alert_wtime;
55976 +extern unsigned long grsec_alert_fyet;
55977 +
55978 +extern spinlock_t grsec_audit_lock;
55979 +
55980 +extern rwlock_t grsec_exec_file_lock;
55981 +
55982 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55983 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55984 + (tsk)->exec_file->f_vfsmnt) : "/")
55985 +
55986 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55987 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55988 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55989 +
55990 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55991 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55992 + (tsk)->exec_file->f_vfsmnt) : "/")
55993 +
55994 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55995 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55996 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55997 +
55998 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55999 +
56000 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
56001 +
56002 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
56003 + (task)->pid, (cred)->uid, \
56004 + (cred)->euid, (cred)->gid, (cred)->egid, \
56005 + gr_parent_task_fullpath(task), \
56006 + (task)->real_parent->comm, (task)->real_parent->pid, \
56007 + (pcred)->uid, (pcred)->euid, \
56008 + (pcred)->gid, (pcred)->egid
56009 +
56010 +#define GR_CHROOT_CAPS {{ \
56011 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
56012 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
56013 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
56014 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
56015 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
56016 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
56017 +
56018 +#define security_learn(normal_msg,args...) \
56019 +({ \
56020 + read_lock(&grsec_exec_file_lock); \
56021 + gr_add_learn_entry(normal_msg "\n", ## args); \
56022 + read_unlock(&grsec_exec_file_lock); \
56023 +})
56024 +
56025 +enum {
56026 + GR_DO_AUDIT,
56027 + GR_DONT_AUDIT,
56028 + GR_DONT_AUDIT_GOOD
56029 +};
56030 +
56031 +enum {
56032 + GR_TTYSNIFF,
56033 + GR_RBAC,
56034 + GR_RBAC_STR,
56035 + GR_STR_RBAC,
56036 + GR_RBAC_MODE2,
56037 + GR_RBAC_MODE3,
56038 + GR_FILENAME,
56039 + GR_SYSCTL_HIDDEN,
56040 + GR_NOARGS,
56041 + GR_ONE_INT,
56042 + GR_ONE_INT_TWO_STR,
56043 + GR_ONE_STR,
56044 + GR_STR_INT,
56045 + GR_TWO_STR_INT,
56046 + GR_TWO_INT,
56047 + GR_TWO_U64,
56048 + GR_THREE_INT,
56049 + GR_FIVE_INT_TWO_STR,
56050 + GR_TWO_STR,
56051 + GR_THREE_STR,
56052 + GR_FOUR_STR,
56053 + GR_STR_FILENAME,
56054 + GR_FILENAME_STR,
56055 + GR_FILENAME_TWO_INT,
56056 + GR_FILENAME_TWO_INT_STR,
56057 + GR_TEXTREL,
56058 + GR_PTRACE,
56059 + GR_RESOURCE,
56060 + GR_CAP,
56061 + GR_SIG,
56062 + GR_SIG2,
56063 + GR_CRASH1,
56064 + GR_CRASH2,
56065 + GR_PSACCT,
56066 + GR_RWXMAP
56067 +};
56068 +
56069 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
56070 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
56071 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
56072 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
56073 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
56074 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
56075 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
56076 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
56077 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
56078 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
56079 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
56080 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
56081 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
56082 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
56083 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
56084 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
56085 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
56086 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
56087 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
56088 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
56089 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
56090 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
56091 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
56092 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
56093 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
56094 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
56095 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
56096 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
56097 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
56098 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
56099 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
56100 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
56101 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
56102 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
56103 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
56104 +
56105 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
56106 +
56107 +#endif
56108 +
56109 +#endif
56110 diff -urNp linux-2.6.32.42/include/linux/grmsg.h linux-2.6.32.42/include/linux/grmsg.h
56111 --- linux-2.6.32.42/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
56112 +++ linux-2.6.32.42/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
56113 @@ -0,0 +1,108 @@
56114 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
56115 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
56116 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
56117 +#define GR_STOPMOD_MSG "denied modification of module state by "
56118 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
56119 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
56120 +#define GR_IOPERM_MSG "denied use of ioperm() by "
56121 +#define GR_IOPL_MSG "denied use of iopl() by "
56122 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
56123 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
56124 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
56125 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
56126 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
56127 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
56128 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
56129 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
56130 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
56131 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
56132 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
56133 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
56134 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
56135 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
56136 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
56137 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
56138 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
56139 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
56140 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
56141 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
56142 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
56143 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
56144 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
56145 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
56146 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
56147 +#define GR_NPROC_MSG "denied overstep of process limit by "
56148 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
56149 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
56150 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
56151 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
56152 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
56153 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
56154 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
56155 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
56156 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
56157 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
56158 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
56159 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
56160 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
56161 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
56162 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
56163 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
56164 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
56165 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
56166 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
56167 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
56168 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
56169 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
56170 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
56171 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
56172 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
56173 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
56174 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
56175 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
56176 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
56177 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
56178 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
56179 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
56180 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
56181 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
56182 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
56183 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
56184 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
56185 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
56186 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
56187 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
56188 +#define GR_NICE_CHROOT_MSG "denied priority change by "
56189 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
56190 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
56191 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
56192 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
56193 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
56194 +#define GR_TIME_MSG "time set by "
56195 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
56196 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
56197 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
56198 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
56199 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
56200 +#define GR_BIND_MSG "denied bind() by "
56201 +#define GR_CONNECT_MSG "denied connect() by "
56202 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
56203 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
56204 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
56205 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
56206 +#define GR_CAP_ACL_MSG "use of %s denied for "
56207 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
56208 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
56209 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
56210 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
56211 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
56212 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
56213 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
56214 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
56215 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
56216 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
56217 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
56218 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
56219 +#define GR_VM86_MSG "denied use of vm86 by "
56220 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
56221 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
56222 diff -urNp linux-2.6.32.42/include/linux/grsecurity.h linux-2.6.32.42/include/linux/grsecurity.h
56223 --- linux-2.6.32.42/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
56224 +++ linux-2.6.32.42/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
56225 @@ -0,0 +1,212 @@
56226 +#ifndef GR_SECURITY_H
56227 +#define GR_SECURITY_H
56228 +#include <linux/fs.h>
56229 +#include <linux/fs_struct.h>
56230 +#include <linux/binfmts.h>
56231 +#include <linux/gracl.h>
56232 +#include <linux/compat.h>
56233 +
56234 +/* notify of brain-dead configs */
56235 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
56236 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
56237 +#endif
56238 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56239 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56240 +#endif
56241 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56242 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56243 +#endif
56244 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
56245 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
56246 +#endif
56247 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
56248 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
56249 +#endif
56250 +
56251 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
56252 +void gr_handle_brute_check(void);
56253 +void gr_handle_kernel_exploit(void);
56254 +int gr_process_user_ban(void);
56255 +
56256 +char gr_roletype_to_char(void);
56257 +
56258 +int gr_acl_enable_at_secure(void);
56259 +
56260 +int gr_check_user_change(int real, int effective, int fs);
56261 +int gr_check_group_change(int real, int effective, int fs);
56262 +
56263 +void gr_del_task_from_ip_table(struct task_struct *p);
56264 +
56265 +int gr_pid_is_chrooted(struct task_struct *p);
56266 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
56267 +int gr_handle_chroot_nice(void);
56268 +int gr_handle_chroot_sysctl(const int op);
56269 +int gr_handle_chroot_setpriority(struct task_struct *p,
56270 + const int niceval);
56271 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
56272 +int gr_handle_chroot_chroot(const struct dentry *dentry,
56273 + const struct vfsmount *mnt);
56274 +int gr_handle_chroot_caps(struct path *path);
56275 +void gr_handle_chroot_chdir(struct path *path);
56276 +int gr_handle_chroot_chmod(const struct dentry *dentry,
56277 + const struct vfsmount *mnt, const int mode);
56278 +int gr_handle_chroot_mknod(const struct dentry *dentry,
56279 + const struct vfsmount *mnt, const int mode);
56280 +int gr_handle_chroot_mount(const struct dentry *dentry,
56281 + const struct vfsmount *mnt,
56282 + const char *dev_name);
56283 +int gr_handle_chroot_pivot(void);
56284 +int gr_handle_chroot_unix(const pid_t pid);
56285 +
56286 +int gr_handle_rawio(const struct inode *inode);
56287 +int gr_handle_nproc(void);
56288 +
56289 +void gr_handle_ioperm(void);
56290 +void gr_handle_iopl(void);
56291 +
56292 +int gr_tpe_allow(const struct file *file);
56293 +
56294 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
56295 +void gr_clear_chroot_entries(struct task_struct *task);
56296 +
56297 +void gr_log_forkfail(const int retval);
56298 +void gr_log_timechange(void);
56299 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
56300 +void gr_log_chdir(const struct dentry *dentry,
56301 + const struct vfsmount *mnt);
56302 +void gr_log_chroot_exec(const struct dentry *dentry,
56303 + const struct vfsmount *mnt);
56304 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
56305 +#ifdef CONFIG_COMPAT
56306 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
56307 +#endif
56308 +void gr_log_remount(const char *devname, const int retval);
56309 +void gr_log_unmount(const char *devname, const int retval);
56310 +void gr_log_mount(const char *from, const char *to, const int retval);
56311 +void gr_log_textrel(struct vm_area_struct *vma);
56312 +void gr_log_rwxmmap(struct file *file);
56313 +void gr_log_rwxmprotect(struct file *file);
56314 +
56315 +int gr_handle_follow_link(const struct inode *parent,
56316 + const struct inode *inode,
56317 + const struct dentry *dentry,
56318 + const struct vfsmount *mnt);
56319 +int gr_handle_fifo(const struct dentry *dentry,
56320 + const struct vfsmount *mnt,
56321 + const struct dentry *dir, const int flag,
56322 + const int acc_mode);
56323 +int gr_handle_hardlink(const struct dentry *dentry,
56324 + const struct vfsmount *mnt,
56325 + struct inode *inode,
56326 + const int mode, const char *to);
56327 +
56328 +int gr_is_capable(const int cap);
56329 +int gr_is_capable_nolog(const int cap);
56330 +void gr_learn_resource(const struct task_struct *task, const int limit,
56331 + const unsigned long wanted, const int gt);
56332 +void gr_copy_label(struct task_struct *tsk);
56333 +void gr_handle_crash(struct task_struct *task, const int sig);
56334 +int gr_handle_signal(const struct task_struct *p, const int sig);
56335 +int gr_check_crash_uid(const uid_t uid);
56336 +int gr_check_protected_task(const struct task_struct *task);
56337 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
56338 +int gr_acl_handle_mmap(const struct file *file,
56339 + const unsigned long prot);
56340 +int gr_acl_handle_mprotect(const struct file *file,
56341 + const unsigned long prot);
56342 +int gr_check_hidden_task(const struct task_struct *tsk);
56343 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
56344 + const struct vfsmount *mnt);
56345 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
56346 + const struct vfsmount *mnt);
56347 +__u32 gr_acl_handle_access(const struct dentry *dentry,
56348 + const struct vfsmount *mnt, const int fmode);
56349 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
56350 + const struct vfsmount *mnt, mode_t mode);
56351 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
56352 + const struct vfsmount *mnt, mode_t mode);
56353 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
56354 + const struct vfsmount *mnt);
56355 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
56356 + const struct vfsmount *mnt);
56357 +int gr_handle_ptrace(struct task_struct *task, const long request);
56358 +int gr_handle_proc_ptrace(struct task_struct *task);
56359 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
56360 + const struct vfsmount *mnt);
56361 +int gr_check_crash_exec(const struct file *filp);
56362 +int gr_acl_is_enabled(void);
56363 +void gr_set_kernel_label(struct task_struct *task);
56364 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
56365 + const gid_t gid);
56366 +int gr_set_proc_label(const struct dentry *dentry,
56367 + const struct vfsmount *mnt,
56368 + const int unsafe_share);
56369 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
56370 + const struct vfsmount *mnt);
56371 +__u32 gr_acl_handle_open(const struct dentry *dentry,
56372 + const struct vfsmount *mnt, const int fmode);
56373 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
56374 + const struct dentry *p_dentry,
56375 + const struct vfsmount *p_mnt, const int fmode,
56376 + const int imode);
56377 +void gr_handle_create(const struct dentry *dentry,
56378 + const struct vfsmount *mnt);
56379 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
56380 + const struct dentry *parent_dentry,
56381 + const struct vfsmount *parent_mnt,
56382 + const int mode);
56383 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
56384 + const struct dentry *parent_dentry,
56385 + const struct vfsmount *parent_mnt);
56386 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
56387 + const struct vfsmount *mnt);
56388 +void gr_handle_delete(const ino_t ino, const dev_t dev);
56389 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
56390 + const struct vfsmount *mnt);
56391 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
56392 + const struct dentry *parent_dentry,
56393 + const struct vfsmount *parent_mnt,
56394 + const char *from);
56395 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
56396 + const struct dentry *parent_dentry,
56397 + const struct vfsmount *parent_mnt,
56398 + const struct dentry *old_dentry,
56399 + const struct vfsmount *old_mnt, const char *to);
56400 +int gr_acl_handle_rename(struct dentry *new_dentry,
56401 + struct dentry *parent_dentry,
56402 + const struct vfsmount *parent_mnt,
56403 + struct dentry *old_dentry,
56404 + struct inode *old_parent_inode,
56405 + struct vfsmount *old_mnt, const char *newname);
56406 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56407 + struct dentry *old_dentry,
56408 + struct dentry *new_dentry,
56409 + struct vfsmount *mnt, const __u8 replace);
56410 +__u32 gr_check_link(const struct dentry *new_dentry,
56411 + const struct dentry *parent_dentry,
56412 + const struct vfsmount *parent_mnt,
56413 + const struct dentry *old_dentry,
56414 + const struct vfsmount *old_mnt);
56415 +int gr_acl_handle_filldir(const struct file *file, const char *name,
56416 + const unsigned int namelen, const ino_t ino);
56417 +
56418 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
56419 + const struct vfsmount *mnt);
56420 +void gr_acl_handle_exit(void);
56421 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
56422 +int gr_acl_handle_procpidmem(const struct task_struct *task);
56423 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
56424 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
56425 +void gr_audit_ptrace(struct task_struct *task);
56426 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
56427 +
56428 +#ifdef CONFIG_GRKERNSEC
56429 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
56430 +void gr_handle_vm86(void);
56431 +void gr_handle_mem_readwrite(u64 from, u64 to);
56432 +
56433 +extern int grsec_enable_dmesg;
56434 +extern int grsec_disable_privio;
56435 +#endif
56436 +
56437 +#endif
56438 diff -urNp linux-2.6.32.42/include/linux/hdpu_features.h linux-2.6.32.42/include/linux/hdpu_features.h
56439 --- linux-2.6.32.42/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
56440 +++ linux-2.6.32.42/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
56441 @@ -3,7 +3,7 @@
56442 struct cpustate_t {
56443 spinlock_t lock;
56444 int excl;
56445 - int open_count;
56446 + atomic_t open_count;
56447 unsigned char cached_val;
56448 int inited;
56449 unsigned long *set_addr;
56450 diff -urNp linux-2.6.32.42/include/linux/highmem.h linux-2.6.32.42/include/linux/highmem.h
56451 --- linux-2.6.32.42/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
56452 +++ linux-2.6.32.42/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
56453 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
56454 kunmap_atomic(kaddr, KM_USER0);
56455 }
56456
56457 +static inline void sanitize_highpage(struct page *page)
56458 +{
56459 + void *kaddr;
56460 + unsigned long flags;
56461 +
56462 + local_irq_save(flags);
56463 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
56464 + clear_page(kaddr);
56465 + kunmap_atomic(kaddr, KM_CLEARPAGE);
56466 + local_irq_restore(flags);
56467 +}
56468 +
56469 static inline void zero_user_segments(struct page *page,
56470 unsigned start1, unsigned end1,
56471 unsigned start2, unsigned end2)
56472 diff -urNp linux-2.6.32.42/include/linux/i2o.h linux-2.6.32.42/include/linux/i2o.h
56473 --- linux-2.6.32.42/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
56474 +++ linux-2.6.32.42/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
56475 @@ -564,7 +564,7 @@ struct i2o_controller {
56476 struct i2o_device *exec; /* Executive */
56477 #if BITS_PER_LONG == 64
56478 spinlock_t context_list_lock; /* lock for context_list */
56479 - atomic_t context_list_counter; /* needed for unique contexts */
56480 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
56481 struct list_head context_list; /* list of context id's
56482 and pointers */
56483 #endif
56484 diff -urNp linux-2.6.32.42/include/linux/init_task.h linux-2.6.32.42/include/linux/init_task.h
56485 --- linux-2.6.32.42/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
56486 +++ linux-2.6.32.42/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
56487 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
56488 #define INIT_IDS
56489 #endif
56490
56491 +#ifdef CONFIG_X86
56492 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
56493 +#else
56494 +#define INIT_TASK_THREAD_INFO
56495 +#endif
56496 +
56497 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
56498 /*
56499 * Because of the reduced scope of CAP_SETPCAP when filesystem
56500 @@ -156,6 +162,7 @@ extern struct cred init_cred;
56501 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
56502 .comm = "swapper", \
56503 .thread = INIT_THREAD, \
56504 + INIT_TASK_THREAD_INFO \
56505 .fs = &init_fs, \
56506 .files = &init_files, \
56507 .signal = &init_signals, \
56508 diff -urNp linux-2.6.32.42/include/linux/interrupt.h linux-2.6.32.42/include/linux/interrupt.h
56509 --- linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
56510 +++ linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
56511 @@ -363,7 +363,7 @@ enum
56512 /* map softirq index to softirq name. update 'softirq_to_name' in
56513 * kernel/softirq.c when adding a new softirq.
56514 */
56515 -extern char *softirq_to_name[NR_SOFTIRQS];
56516 +extern const char * const softirq_to_name[NR_SOFTIRQS];
56517
56518 /* softirq mask and active fields moved to irq_cpustat_t in
56519 * asm/hardirq.h to get better cache usage. KAO
56520 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56521
56522 struct softirq_action
56523 {
56524 - void (*action)(struct softirq_action *);
56525 + void (*action)(void);
56526 };
56527
56528 asmlinkage void do_softirq(void);
56529 asmlinkage void __do_softirq(void);
56530 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56531 +extern void open_softirq(int nr, void (*action)(void));
56532 extern void softirq_init(void);
56533 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
56534 extern void raise_softirq_irqoff(unsigned int nr);
56535 diff -urNp linux-2.6.32.42/include/linux/irq.h linux-2.6.32.42/include/linux/irq.h
56536 --- linux-2.6.32.42/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
56537 +++ linux-2.6.32.42/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
56538 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
56539 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
56540 bool boot)
56541 {
56542 +#ifdef CONFIG_CPUMASK_OFFSTACK
56543 gfp_t gfp = GFP_ATOMIC;
56544
56545 if (boot)
56546 gfp = GFP_NOWAIT;
56547
56548 -#ifdef CONFIG_CPUMASK_OFFSTACK
56549 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
56550 return false;
56551
56552 diff -urNp linux-2.6.32.42/include/linux/kallsyms.h linux-2.6.32.42/include/linux/kallsyms.h
56553 --- linux-2.6.32.42/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
56554 +++ linux-2.6.32.42/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
56555 @@ -15,7 +15,8 @@
56556
56557 struct module;
56558
56559 -#ifdef CONFIG_KALLSYMS
56560 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56561 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56562 /* Lookup the address for a symbol. Returns 0 if not found. */
56563 unsigned long kallsyms_lookup_name(const char *name);
56564
56565 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
56566 /* Stupid that this does nothing, but I didn't create this mess. */
56567 #define __print_symbol(fmt, addr)
56568 #endif /*CONFIG_KALLSYMS*/
56569 +#else /* when included by kallsyms.c, vsnprintf.c, or
56570 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56571 +extern void __print_symbol(const char *fmt, unsigned long address);
56572 +extern int sprint_symbol(char *buffer, unsigned long address);
56573 +const char *kallsyms_lookup(unsigned long addr,
56574 + unsigned long *symbolsize,
56575 + unsigned long *offset,
56576 + char **modname, char *namebuf);
56577 +#endif
56578
56579 /* This macro allows us to keep printk typechecking */
56580 static void __check_printsym_format(const char *fmt, ...)
56581 diff -urNp linux-2.6.32.42/include/linux/kgdb.h linux-2.6.32.42/include/linux/kgdb.h
56582 --- linux-2.6.32.42/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
56583 +++ linux-2.6.32.42/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
56584 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
56585
56586 extern int kgdb_connected;
56587
56588 -extern atomic_t kgdb_setting_breakpoint;
56589 -extern atomic_t kgdb_cpu_doing_single_step;
56590 +extern atomic_unchecked_t kgdb_setting_breakpoint;
56591 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56592
56593 extern struct task_struct *kgdb_usethread;
56594 extern struct task_struct *kgdb_contthread;
56595 @@ -251,20 +251,20 @@ struct kgdb_arch {
56596 */
56597 struct kgdb_io {
56598 const char *name;
56599 - int (*read_char) (void);
56600 - void (*write_char) (u8);
56601 - void (*flush) (void);
56602 - int (*init) (void);
56603 - void (*pre_exception) (void);
56604 - void (*post_exception) (void);
56605 + int (* const read_char) (void);
56606 + void (* const write_char) (u8);
56607 + void (* const flush) (void);
56608 + int (* const init) (void);
56609 + void (* const pre_exception) (void);
56610 + void (* const post_exception) (void);
56611 };
56612
56613 -extern struct kgdb_arch arch_kgdb_ops;
56614 +extern const struct kgdb_arch arch_kgdb_ops;
56615
56616 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
56617
56618 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
56619 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
56620 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
56621 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
56622
56623 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
56624 extern int kgdb_mem2hex(char *mem, char *buf, int count);
56625 diff -urNp linux-2.6.32.42/include/linux/kmod.h linux-2.6.32.42/include/linux/kmod.h
56626 --- linux-2.6.32.42/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56627 +++ linux-2.6.32.42/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56628 @@ -31,6 +31,8 @@
56629 * usually useless though. */
56630 extern int __request_module(bool wait, const char *name, ...) \
56631 __attribute__((format(printf, 2, 3)));
56632 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56633 + __attribute__((format(printf, 3, 4)));
56634 #define request_module(mod...) __request_module(true, mod)
56635 #define request_module_nowait(mod...) __request_module(false, mod)
56636 #define try_then_request_module(x, mod...) \
56637 diff -urNp linux-2.6.32.42/include/linux/kobject.h linux-2.6.32.42/include/linux/kobject.h
56638 --- linux-2.6.32.42/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56639 +++ linux-2.6.32.42/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56640 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56641
56642 struct kobj_type {
56643 void (*release)(struct kobject *kobj);
56644 - struct sysfs_ops *sysfs_ops;
56645 + const struct sysfs_ops *sysfs_ops;
56646 struct attribute **default_attrs;
56647 };
56648
56649 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
56650 };
56651
56652 struct kset_uevent_ops {
56653 - int (*filter)(struct kset *kset, struct kobject *kobj);
56654 - const char *(*name)(struct kset *kset, struct kobject *kobj);
56655 - int (*uevent)(struct kset *kset, struct kobject *kobj,
56656 + int (* const filter)(struct kset *kset, struct kobject *kobj);
56657 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
56658 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
56659 struct kobj_uevent_env *env);
56660 };
56661
56662 @@ -132,7 +132,7 @@ struct kobj_attribute {
56663 const char *buf, size_t count);
56664 };
56665
56666 -extern struct sysfs_ops kobj_sysfs_ops;
56667 +extern const struct sysfs_ops kobj_sysfs_ops;
56668
56669 /**
56670 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56671 @@ -155,14 +155,14 @@ struct kset {
56672 struct list_head list;
56673 spinlock_t list_lock;
56674 struct kobject kobj;
56675 - struct kset_uevent_ops *uevent_ops;
56676 + const struct kset_uevent_ops *uevent_ops;
56677 };
56678
56679 extern void kset_init(struct kset *kset);
56680 extern int __must_check kset_register(struct kset *kset);
56681 extern void kset_unregister(struct kset *kset);
56682 extern struct kset * __must_check kset_create_and_add(const char *name,
56683 - struct kset_uevent_ops *u,
56684 + const struct kset_uevent_ops *u,
56685 struct kobject *parent_kobj);
56686
56687 static inline struct kset *to_kset(struct kobject *kobj)
56688 diff -urNp linux-2.6.32.42/include/linux/kvm_host.h linux-2.6.32.42/include/linux/kvm_host.h
56689 --- linux-2.6.32.42/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56690 +++ linux-2.6.32.42/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56691 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56692 void vcpu_load(struct kvm_vcpu *vcpu);
56693 void vcpu_put(struct kvm_vcpu *vcpu);
56694
56695 -int kvm_init(void *opaque, unsigned int vcpu_size,
56696 +int kvm_init(const void *opaque, unsigned int vcpu_size,
56697 struct module *module);
56698 void kvm_exit(void);
56699
56700 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56701 struct kvm_guest_debug *dbg);
56702 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56703
56704 -int kvm_arch_init(void *opaque);
56705 +int kvm_arch_init(const void *opaque);
56706 void kvm_arch_exit(void);
56707
56708 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56709 diff -urNp linux-2.6.32.42/include/linux/libata.h linux-2.6.32.42/include/linux/libata.h
56710 --- linux-2.6.32.42/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56711 +++ linux-2.6.32.42/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56712 @@ -525,11 +525,11 @@ struct ata_ioports {
56713
56714 struct ata_host {
56715 spinlock_t lock;
56716 - struct device *dev;
56717 + struct device *dev;
56718 void __iomem * const *iomap;
56719 unsigned int n_ports;
56720 void *private_data;
56721 - struct ata_port_operations *ops;
56722 + const struct ata_port_operations *ops;
56723 unsigned long flags;
56724 #ifdef CONFIG_ATA_ACPI
56725 acpi_handle acpi_handle;
56726 @@ -710,7 +710,7 @@ struct ata_link {
56727
56728 struct ata_port {
56729 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56730 - struct ata_port_operations *ops;
56731 + const struct ata_port_operations *ops;
56732 spinlock_t *lock;
56733 /* Flags owned by the EH context. Only EH should touch these once the
56734 port is active */
56735 @@ -892,7 +892,7 @@ struct ata_port_info {
56736 unsigned long pio_mask;
56737 unsigned long mwdma_mask;
56738 unsigned long udma_mask;
56739 - struct ata_port_operations *port_ops;
56740 + const struct ata_port_operations *port_ops;
56741 void *private_data;
56742 };
56743
56744 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56745 extern const unsigned long sata_deb_timing_hotplug[];
56746 extern const unsigned long sata_deb_timing_long[];
56747
56748 -extern struct ata_port_operations ata_dummy_port_ops;
56749 +extern const struct ata_port_operations ata_dummy_port_ops;
56750 extern const struct ata_port_info ata_dummy_port_info;
56751
56752 static inline const unsigned long *
56753 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56754 struct scsi_host_template *sht);
56755 extern void ata_host_detach(struct ata_host *host);
56756 extern void ata_host_init(struct ata_host *, struct device *,
56757 - unsigned long, struct ata_port_operations *);
56758 + unsigned long, const struct ata_port_operations *);
56759 extern int ata_scsi_detect(struct scsi_host_template *sht);
56760 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56761 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56762 diff -urNp linux-2.6.32.42/include/linux/lockd/bind.h linux-2.6.32.42/include/linux/lockd/bind.h
56763 --- linux-2.6.32.42/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56764 +++ linux-2.6.32.42/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56765 @@ -23,13 +23,13 @@ struct svc_rqst;
56766 * This is the set of functions for lockd->nfsd communication
56767 */
56768 struct nlmsvc_binding {
56769 - __be32 (*fopen)(struct svc_rqst *,
56770 + __be32 (* const fopen)(struct svc_rqst *,
56771 struct nfs_fh *,
56772 struct file **);
56773 - void (*fclose)(struct file *);
56774 + void (* const fclose)(struct file *);
56775 };
56776
56777 -extern struct nlmsvc_binding * nlmsvc_ops;
56778 +extern const struct nlmsvc_binding * nlmsvc_ops;
56779
56780 /*
56781 * Similar to nfs_client_initdata, but without the NFS-specific
56782 diff -urNp linux-2.6.32.42/include/linux/mm.h linux-2.6.32.42/include/linux/mm.h
56783 --- linux-2.6.32.42/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56784 +++ linux-2.6.32.42/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56785 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56786
56787 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56788 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56789 +
56790 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56791 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56792 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56793 +#else
56794 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56795 +#endif
56796 +
56797 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56798 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56799
56800 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56801 int set_page_dirty_lock(struct page *page);
56802 int clear_page_dirty_for_io(struct page *page);
56803
56804 -/* Is the vma a continuation of the stack vma above it? */
56805 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56806 -{
56807 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56808 -}
56809 -
56810 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56811 unsigned long old_addr, struct vm_area_struct *new_vma,
56812 unsigned long new_addr, unsigned long len);
56813 @@ -890,6 +891,8 @@ struct shrinker {
56814 extern void register_shrinker(struct shrinker *);
56815 extern void unregister_shrinker(struct shrinker *);
56816
56817 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56818 +
56819 int vma_wants_writenotify(struct vm_area_struct *vma);
56820
56821 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56822 @@ -1162,6 +1165,7 @@ out:
56823 }
56824
56825 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56826 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56827
56828 extern unsigned long do_brk(unsigned long, unsigned long);
56829
56830 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56831 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56832 struct vm_area_struct **pprev);
56833
56834 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56835 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56836 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56837 +
56838 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56839 NULL if none. Assume start_addr < end_addr. */
56840 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56841 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56842 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56843 }
56844
56845 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56846 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56847 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56848 unsigned long pfn, unsigned long size, pgprot_t);
56849 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56850 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56851 extern int sysctl_memory_failure_early_kill;
56852 extern int sysctl_memory_failure_recovery;
56853 -extern atomic_long_t mce_bad_pages;
56854 +extern atomic_long_unchecked_t mce_bad_pages;
56855 +
56856 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56857 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56858 +#else
56859 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56860 +#endif
56861
56862 #endif /* __KERNEL__ */
56863 #endif /* _LINUX_MM_H */
56864 diff -urNp linux-2.6.32.42/include/linux/mm_types.h linux-2.6.32.42/include/linux/mm_types.h
56865 --- linux-2.6.32.42/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56866 +++ linux-2.6.32.42/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56867 @@ -186,6 +186,8 @@ struct vm_area_struct {
56868 #ifdef CONFIG_NUMA
56869 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56870 #endif
56871 +
56872 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56873 };
56874
56875 struct core_thread {
56876 @@ -287,6 +289,24 @@ struct mm_struct {
56877 #ifdef CONFIG_MMU_NOTIFIER
56878 struct mmu_notifier_mm *mmu_notifier_mm;
56879 #endif
56880 +
56881 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56882 + unsigned long pax_flags;
56883 +#endif
56884 +
56885 +#ifdef CONFIG_PAX_DLRESOLVE
56886 + unsigned long call_dl_resolve;
56887 +#endif
56888 +
56889 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56890 + unsigned long call_syscall;
56891 +#endif
56892 +
56893 +#ifdef CONFIG_PAX_ASLR
56894 + unsigned long delta_mmap; /* randomized offset */
56895 + unsigned long delta_stack; /* randomized offset */
56896 +#endif
56897 +
56898 };
56899
56900 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56901 diff -urNp linux-2.6.32.42/include/linux/mmu_notifier.h linux-2.6.32.42/include/linux/mmu_notifier.h
56902 --- linux-2.6.32.42/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56903 +++ linux-2.6.32.42/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56904 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56905 */
56906 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56907 ({ \
56908 - pte_t __pte; \
56909 + pte_t ___pte; \
56910 struct vm_area_struct *___vma = __vma; \
56911 unsigned long ___address = __address; \
56912 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56913 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56914 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56915 - __pte; \
56916 + ___pte; \
56917 })
56918
56919 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56920 diff -urNp linux-2.6.32.42/include/linux/mmzone.h linux-2.6.32.42/include/linux/mmzone.h
56921 --- linux-2.6.32.42/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56922 +++ linux-2.6.32.42/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56923 @@ -350,7 +350,7 @@ struct zone {
56924 unsigned long flags; /* zone flags, see below */
56925
56926 /* Zone statistics */
56927 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56928 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56929
56930 /*
56931 * prev_priority holds the scanning priority for this zone. It is
56932 diff -urNp linux-2.6.32.42/include/linux/mod_devicetable.h linux-2.6.32.42/include/linux/mod_devicetable.h
56933 --- linux-2.6.32.42/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56934 +++ linux-2.6.32.42/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56935 @@ -12,7 +12,7 @@
56936 typedef unsigned long kernel_ulong_t;
56937 #endif
56938
56939 -#define PCI_ANY_ID (~0)
56940 +#define PCI_ANY_ID ((__u16)~0)
56941
56942 struct pci_device_id {
56943 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56944 @@ -131,7 +131,7 @@ struct usb_device_id {
56945 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56946 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56947
56948 -#define HID_ANY_ID (~0)
56949 +#define HID_ANY_ID (~0U)
56950
56951 struct hid_device_id {
56952 __u16 bus;
56953 diff -urNp linux-2.6.32.42/include/linux/module.h linux-2.6.32.42/include/linux/module.h
56954 --- linux-2.6.32.42/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56955 +++ linux-2.6.32.42/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56956 @@ -287,16 +287,16 @@ struct module
56957 int (*init)(void);
56958
56959 /* If this is non-NULL, vfree after init() returns */
56960 - void *module_init;
56961 + void *module_init_rx, *module_init_rw;
56962
56963 /* Here is the actual code + data, vfree'd on unload. */
56964 - void *module_core;
56965 + void *module_core_rx, *module_core_rw;
56966
56967 /* Here are the sizes of the init and core sections */
56968 - unsigned int init_size, core_size;
56969 + unsigned int init_size_rw, core_size_rw;
56970
56971 /* The size of the executable code in each section. */
56972 - unsigned int init_text_size, core_text_size;
56973 + unsigned int init_size_rx, core_size_rx;
56974
56975 /* Arch-specific module values */
56976 struct mod_arch_specific arch;
56977 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56978 bool is_module_address(unsigned long addr);
56979 bool is_module_text_address(unsigned long addr);
56980
56981 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56982 +{
56983 +
56984 +#ifdef CONFIG_PAX_KERNEXEC
56985 + if (ktla_ktva(addr) >= (unsigned long)start &&
56986 + ktla_ktva(addr) < (unsigned long)start + size)
56987 + return 1;
56988 +#endif
56989 +
56990 + return ((void *)addr >= start && (void *)addr < start + size);
56991 +}
56992 +
56993 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56994 +{
56995 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56996 +}
56997 +
56998 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56999 +{
57000 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
57001 +}
57002 +
57003 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
57004 +{
57005 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
57006 +}
57007 +
57008 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
57009 +{
57010 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
57011 +}
57012 +
57013 static inline int within_module_core(unsigned long addr, struct module *mod)
57014 {
57015 - return (unsigned long)mod->module_core <= addr &&
57016 - addr < (unsigned long)mod->module_core + mod->core_size;
57017 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
57018 }
57019
57020 static inline int within_module_init(unsigned long addr, struct module *mod)
57021 {
57022 - return (unsigned long)mod->module_init <= addr &&
57023 - addr < (unsigned long)mod->module_init + mod->init_size;
57024 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
57025 }
57026
57027 /* Search for module by name: must hold module_mutex. */
57028 diff -urNp linux-2.6.32.42/include/linux/moduleloader.h linux-2.6.32.42/include/linux/moduleloader.h
57029 --- linux-2.6.32.42/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
57030 +++ linux-2.6.32.42/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
57031 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
57032 sections. Returns NULL on failure. */
57033 void *module_alloc(unsigned long size);
57034
57035 +#ifdef CONFIG_PAX_KERNEXEC
57036 +void *module_alloc_exec(unsigned long size);
57037 +#else
57038 +#define module_alloc_exec(x) module_alloc(x)
57039 +#endif
57040 +
57041 /* Free memory returned from module_alloc. */
57042 void module_free(struct module *mod, void *module_region);
57043
57044 +#ifdef CONFIG_PAX_KERNEXEC
57045 +void module_free_exec(struct module *mod, void *module_region);
57046 +#else
57047 +#define module_free_exec(x, y) module_free((x), (y))
57048 +#endif
57049 +
57050 /* Apply the given relocation to the (simplified) ELF. Return -error
57051 or 0. */
57052 int apply_relocate(Elf_Shdr *sechdrs,
57053 diff -urNp linux-2.6.32.42/include/linux/moduleparam.h linux-2.6.32.42/include/linux/moduleparam.h
57054 --- linux-2.6.32.42/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
57055 +++ linux-2.6.32.42/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
57056 @@ -132,7 +132,7 @@ struct kparam_array
57057
57058 /* Actually copy string: maxlen param is usually sizeof(string). */
57059 #define module_param_string(name, string, len, perm) \
57060 - static const struct kparam_string __param_string_##name \
57061 + static const struct kparam_string __param_string_##name __used \
57062 = { len, string }; \
57063 __module_param_call(MODULE_PARAM_PREFIX, name, \
57064 param_set_copystring, param_get_string, \
57065 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
57066
57067 /* Comma-separated array: *nump is set to number they actually specified. */
57068 #define module_param_array_named(name, array, type, nump, perm) \
57069 - static const struct kparam_array __param_arr_##name \
57070 + static const struct kparam_array __param_arr_##name __used \
57071 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
57072 sizeof(array[0]), array }; \
57073 __module_param_call(MODULE_PARAM_PREFIX, name, \
57074 diff -urNp linux-2.6.32.42/include/linux/mutex.h linux-2.6.32.42/include/linux/mutex.h
57075 --- linux-2.6.32.42/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
57076 +++ linux-2.6.32.42/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
57077 @@ -51,7 +51,7 @@ struct mutex {
57078 spinlock_t wait_lock;
57079 struct list_head wait_list;
57080 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
57081 - struct thread_info *owner;
57082 + struct task_struct *owner;
57083 #endif
57084 #ifdef CONFIG_DEBUG_MUTEXES
57085 const char *name;
57086 diff -urNp linux-2.6.32.42/include/linux/namei.h linux-2.6.32.42/include/linux/namei.h
57087 --- linux-2.6.32.42/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
57088 +++ linux-2.6.32.42/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
57089 @@ -22,7 +22,7 @@ struct nameidata {
57090 unsigned int flags;
57091 int last_type;
57092 unsigned depth;
57093 - char *saved_names[MAX_NESTED_LINKS + 1];
57094 + const char *saved_names[MAX_NESTED_LINKS + 1];
57095
57096 /* Intent data */
57097 union {
57098 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
57099 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
57100 extern void unlock_rename(struct dentry *, struct dentry *);
57101
57102 -static inline void nd_set_link(struct nameidata *nd, char *path)
57103 +static inline void nd_set_link(struct nameidata *nd, const char *path)
57104 {
57105 nd->saved_names[nd->depth] = path;
57106 }
57107
57108 -static inline char *nd_get_link(struct nameidata *nd)
57109 +static inline const char *nd_get_link(const struct nameidata *nd)
57110 {
57111 return nd->saved_names[nd->depth];
57112 }
57113 diff -urNp linux-2.6.32.42/include/linux/netfilter/xt_gradm.h linux-2.6.32.42/include/linux/netfilter/xt_gradm.h
57114 --- linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
57115 +++ linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
57116 @@ -0,0 +1,9 @@
57117 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
57118 +#define _LINUX_NETFILTER_XT_GRADM_H 1
57119 +
57120 +struct xt_gradm_mtinfo {
57121 + __u16 flags;
57122 + __u16 invflags;
57123 +};
57124 +
57125 +#endif
57126 diff -urNp linux-2.6.32.42/include/linux/nodemask.h linux-2.6.32.42/include/linux/nodemask.h
57127 --- linux-2.6.32.42/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
57128 +++ linux-2.6.32.42/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
57129 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
57130
57131 #define any_online_node(mask) \
57132 ({ \
57133 - int node; \
57134 - for_each_node_mask(node, (mask)) \
57135 - if (node_online(node)) \
57136 + int __node; \
57137 + for_each_node_mask(__node, (mask)) \
57138 + if (node_online(__node)) \
57139 break; \
57140 - node; \
57141 + __node; \
57142 })
57143
57144 #define num_online_nodes() num_node_state(N_ONLINE)
57145 diff -urNp linux-2.6.32.42/include/linux/oprofile.h linux-2.6.32.42/include/linux/oprofile.h
57146 --- linux-2.6.32.42/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
57147 +++ linux-2.6.32.42/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
57148 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
57149 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
57150 char const * name, ulong * val);
57151
57152 -/** Create a file for read-only access to an atomic_t. */
57153 +/** Create a file for read-only access to an atomic_unchecked_t. */
57154 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
57155 - char const * name, atomic_t * val);
57156 + char const * name, atomic_unchecked_t * val);
57157
57158 /** create a directory */
57159 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
57160 diff -urNp linux-2.6.32.42/include/linux/perf_event.h linux-2.6.32.42/include/linux/perf_event.h
57161 --- linux-2.6.32.42/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
57162 +++ linux-2.6.32.42/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
57163 @@ -476,7 +476,7 @@ struct hw_perf_event {
57164 struct hrtimer hrtimer;
57165 };
57166 };
57167 - atomic64_t prev_count;
57168 + atomic64_unchecked_t prev_count;
57169 u64 sample_period;
57170 u64 last_period;
57171 atomic64_t period_left;
57172 @@ -557,7 +557,7 @@ struct perf_event {
57173 const struct pmu *pmu;
57174
57175 enum perf_event_active_state state;
57176 - atomic64_t count;
57177 + atomic64_unchecked_t count;
57178
57179 /*
57180 * These are the total time in nanoseconds that the event
57181 @@ -595,8 +595,8 @@ struct perf_event {
57182 * These accumulate total time (in nanoseconds) that children
57183 * events have been enabled and running, respectively.
57184 */
57185 - atomic64_t child_total_time_enabled;
57186 - atomic64_t child_total_time_running;
57187 + atomic64_unchecked_t child_total_time_enabled;
57188 + atomic64_unchecked_t child_total_time_running;
57189
57190 /*
57191 * Protect attach/detach and child_list:
57192 diff -urNp linux-2.6.32.42/include/linux/pipe_fs_i.h linux-2.6.32.42/include/linux/pipe_fs_i.h
57193 --- linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
57194 +++ linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
57195 @@ -46,9 +46,9 @@ struct pipe_inode_info {
57196 wait_queue_head_t wait;
57197 unsigned int nrbufs, curbuf;
57198 struct page *tmp_page;
57199 - unsigned int readers;
57200 - unsigned int writers;
57201 - unsigned int waiting_writers;
57202 + atomic_t readers;
57203 + atomic_t writers;
57204 + atomic_t waiting_writers;
57205 unsigned int r_counter;
57206 unsigned int w_counter;
57207 struct fasync_struct *fasync_readers;
57208 diff -urNp linux-2.6.32.42/include/linux/poison.h linux-2.6.32.42/include/linux/poison.h
57209 --- linux-2.6.32.42/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
57210 +++ linux-2.6.32.42/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
57211 @@ -19,8 +19,8 @@
57212 * under normal circumstances, used to verify that nobody uses
57213 * non-initialized list entries.
57214 */
57215 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
57216 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
57217 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
57218 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
57219
57220 /********** include/linux/timer.h **********/
57221 /*
57222 diff -urNp linux-2.6.32.42/include/linux/proc_fs.h linux-2.6.32.42/include/linux/proc_fs.h
57223 --- linux-2.6.32.42/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
57224 +++ linux-2.6.32.42/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
57225 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
57226 return proc_create_data(name, mode, parent, proc_fops, NULL);
57227 }
57228
57229 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
57230 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
57231 +{
57232 +#ifdef CONFIG_GRKERNSEC_PROC_USER
57233 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
57234 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57235 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
57236 +#else
57237 + return proc_create_data(name, mode, parent, proc_fops, NULL);
57238 +#endif
57239 +}
57240 +
57241 +
57242 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
57243 mode_t mode, struct proc_dir_entry *base,
57244 read_proc_t *read_proc, void * data)
57245 diff -urNp linux-2.6.32.42/include/linux/ptrace.h linux-2.6.32.42/include/linux/ptrace.h
57246 --- linux-2.6.32.42/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
57247 +++ linux-2.6.32.42/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
57248 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
57249 extern void exit_ptrace(struct task_struct *tracer);
57250 #define PTRACE_MODE_READ 1
57251 #define PTRACE_MODE_ATTACH 2
57252 -/* Returns 0 on success, -errno on denial. */
57253 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
57254 /* Returns true on success, false on denial. */
57255 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
57256 +/* Returns true on success, false on denial. */
57257 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
57258
57259 static inline int ptrace_reparented(struct task_struct *child)
57260 {
57261 diff -urNp linux-2.6.32.42/include/linux/random.h linux-2.6.32.42/include/linux/random.h
57262 --- linux-2.6.32.42/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
57263 +++ linux-2.6.32.42/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
57264 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
57265 u32 random32(void);
57266 void srandom32(u32 seed);
57267
57268 +static inline unsigned long pax_get_random_long(void)
57269 +{
57270 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
57271 +}
57272 +
57273 #endif /* __KERNEL___ */
57274
57275 #endif /* _LINUX_RANDOM_H */
57276 diff -urNp linux-2.6.32.42/include/linux/reboot.h linux-2.6.32.42/include/linux/reboot.h
57277 --- linux-2.6.32.42/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
57278 +++ linux-2.6.32.42/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
57279 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
57280 * Architecture-specific implementations of sys_reboot commands.
57281 */
57282
57283 -extern void machine_restart(char *cmd);
57284 -extern void machine_halt(void);
57285 -extern void machine_power_off(void);
57286 +extern void machine_restart(char *cmd) __noreturn;
57287 +extern void machine_halt(void) __noreturn;
57288 +extern void machine_power_off(void) __noreturn;
57289
57290 extern void machine_shutdown(void);
57291 struct pt_regs;
57292 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
57293 */
57294
57295 extern void kernel_restart_prepare(char *cmd);
57296 -extern void kernel_restart(char *cmd);
57297 -extern void kernel_halt(void);
57298 -extern void kernel_power_off(void);
57299 +extern void kernel_restart(char *cmd) __noreturn;
57300 +extern void kernel_halt(void) __noreturn;
57301 +extern void kernel_power_off(void) __noreturn;
57302
57303 void ctrl_alt_del(void);
57304
57305 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
57306 * Emergency restart, callable from an interrupt handler.
57307 */
57308
57309 -extern void emergency_restart(void);
57310 +extern void emergency_restart(void) __noreturn;
57311 #include <asm/emergency-restart.h>
57312
57313 #endif
57314 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs.h linux-2.6.32.42/include/linux/reiserfs_fs.h
57315 --- linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
57316 +++ linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
57317 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
57318 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
57319
57320 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
57321 -#define get_generation(s) atomic_read (&fs_generation(s))
57322 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
57323 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
57324 #define __fs_changed(gen,s) (gen != get_generation (s))
57325 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
57326 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
57327 */
57328
57329 struct item_operations {
57330 - int (*bytes_number) (struct item_head * ih, int block_size);
57331 - void (*decrement_key) (struct cpu_key *);
57332 - int (*is_left_mergeable) (struct reiserfs_key * ih,
57333 + int (* const bytes_number) (struct item_head * ih, int block_size);
57334 + void (* const decrement_key) (struct cpu_key *);
57335 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
57336 unsigned long bsize);
57337 - void (*print_item) (struct item_head *, char *item);
57338 - void (*check_item) (struct item_head *, char *item);
57339 + void (* const print_item) (struct item_head *, char *item);
57340 + void (* const check_item) (struct item_head *, char *item);
57341
57342 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57343 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57344 int is_affected, int insert_size);
57345 - int (*check_left) (struct virtual_item * vi, int free,
57346 + int (* const check_left) (struct virtual_item * vi, int free,
57347 int start_skip, int end_skip);
57348 - int (*check_right) (struct virtual_item * vi, int free);
57349 - int (*part_size) (struct virtual_item * vi, int from, int to);
57350 - int (*unit_num) (struct virtual_item * vi);
57351 - void (*print_vi) (struct virtual_item * vi);
57352 + int (* const check_right) (struct virtual_item * vi, int free);
57353 + int (* const part_size) (struct virtual_item * vi, int from, int to);
57354 + int (* const unit_num) (struct virtual_item * vi);
57355 + void (* const print_vi) (struct virtual_item * vi);
57356 };
57357
57358 -extern struct item_operations *item_ops[TYPE_ANY + 1];
57359 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
57360
57361 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
57362 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
57363 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs_sb.h linux-2.6.32.42/include/linux/reiserfs_fs_sb.h
57364 --- linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
57365 +++ linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
57366 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
57367 /* Comment? -Hans */
57368 wait_queue_head_t s_wait;
57369 /* To be obsoleted soon by per buffer seals.. -Hans */
57370 - atomic_t s_generation_counter; // increased by one every time the
57371 + atomic_unchecked_t s_generation_counter; // increased by one every time the
57372 // tree gets re-balanced
57373 unsigned long s_properties; /* File system properties. Currently holds
57374 on-disk FS format */
57375 diff -urNp linux-2.6.32.42/include/linux/sched.h linux-2.6.32.42/include/linux/sched.h
57376 --- linux-2.6.32.42/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
57377 +++ linux-2.6.32.42/include/linux/sched.h 2011-06-04 20:42:54.000000000 -0400
57378 @@ -101,6 +101,7 @@ struct bio;
57379 struct fs_struct;
57380 struct bts_context;
57381 struct perf_event_context;
57382 +struct linux_binprm;
57383
57384 /*
57385 * List of flags we want to share for kernel threads,
57386 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
57387 extern signed long schedule_timeout_uninterruptible(signed long timeout);
57388 asmlinkage void __schedule(void);
57389 asmlinkage void schedule(void);
57390 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
57391 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
57392
57393 struct nsproxy;
57394 struct user_namespace;
57395 @@ -371,9 +372,12 @@ struct user_namespace;
57396 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
57397
57398 extern int sysctl_max_map_count;
57399 +extern unsigned long sysctl_heap_stack_gap;
57400
57401 #include <linux/aio.h>
57402
57403 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
57404 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57405 extern unsigned long
57406 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
57407 unsigned long, unsigned long);
57408 @@ -666,6 +670,16 @@ struct signal_struct {
57409 struct tty_audit_buf *tty_audit_buf;
57410 #endif
57411
57412 +#ifdef CONFIG_GRKERNSEC
57413 + u32 curr_ip;
57414 + u32 saved_ip;
57415 + u32 gr_saddr;
57416 + u32 gr_daddr;
57417 + u16 gr_sport;
57418 + u16 gr_dport;
57419 + u8 used_accept:1;
57420 +#endif
57421 +
57422 int oom_adj; /* OOM kill score adjustment (bit shift) */
57423 };
57424
57425 @@ -723,6 +737,11 @@ struct user_struct {
57426 struct key *session_keyring; /* UID's default session keyring */
57427 #endif
57428
57429 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57430 + unsigned int banned;
57431 + unsigned long ban_expires;
57432 +#endif
57433 +
57434 /* Hash table maintenance information */
57435 struct hlist_node uidhash_node;
57436 uid_t uid;
57437 @@ -1328,8 +1347,8 @@ struct task_struct {
57438 struct list_head thread_group;
57439
57440 struct completion *vfork_done; /* for vfork() */
57441 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
57442 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57443 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
57444 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57445
57446 cputime_t utime, stime, utimescaled, stimescaled;
57447 cputime_t gtime;
57448 @@ -1343,16 +1362,6 @@ struct task_struct {
57449 struct task_cputime cputime_expires;
57450 struct list_head cpu_timers[3];
57451
57452 -/* process credentials */
57453 - const struct cred *real_cred; /* objective and real subjective task
57454 - * credentials (COW) */
57455 - const struct cred *cred; /* effective (overridable) subjective task
57456 - * credentials (COW) */
57457 - struct mutex cred_guard_mutex; /* guard against foreign influences on
57458 - * credential calculations
57459 - * (notably. ptrace) */
57460 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57461 -
57462 char comm[TASK_COMM_LEN]; /* executable name excluding path
57463 - access with [gs]et_task_comm (which lock
57464 it with task_lock())
57465 @@ -1369,6 +1378,10 @@ struct task_struct {
57466 #endif
57467 /* CPU-specific state of this task */
57468 struct thread_struct thread;
57469 +/* thread_info moved to task_struct */
57470 +#ifdef CONFIG_X86
57471 + struct thread_info tinfo;
57472 +#endif
57473 /* filesystem information */
57474 struct fs_struct *fs;
57475 /* open file information */
57476 @@ -1436,6 +1449,15 @@ struct task_struct {
57477 int hardirq_context;
57478 int softirq_context;
57479 #endif
57480 +
57481 +/* process credentials */
57482 + const struct cred *real_cred; /* objective and real subjective task
57483 + * credentials (COW) */
57484 + struct mutex cred_guard_mutex; /* guard against foreign influences on
57485 + * credential calculations
57486 + * (notably. ptrace) */
57487 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57488 +
57489 #ifdef CONFIG_LOCKDEP
57490 # define MAX_LOCK_DEPTH 48UL
57491 u64 curr_chain_key;
57492 @@ -1456,6 +1478,9 @@ struct task_struct {
57493
57494 struct backing_dev_info *backing_dev_info;
57495
57496 + const struct cred *cred; /* effective (overridable) subjective task
57497 + * credentials (COW) */
57498 +
57499 struct io_context *io_context;
57500
57501 unsigned long ptrace_message;
57502 @@ -1519,6 +1544,21 @@ struct task_struct {
57503 unsigned long default_timer_slack_ns;
57504
57505 struct list_head *scm_work_list;
57506 +
57507 +#ifdef CONFIG_GRKERNSEC
57508 + /* grsecurity */
57509 + struct dentry *gr_chroot_dentry;
57510 + struct acl_subject_label *acl;
57511 + struct acl_role_label *role;
57512 + struct file *exec_file;
57513 + u16 acl_role_id;
57514 + /* is this the task that authenticated to the special role */
57515 + u8 acl_sp_role;
57516 + u8 is_writable;
57517 + u8 brute;
57518 + u8 gr_is_chrooted;
57519 +#endif
57520 +
57521 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57522 /* Index of current stored adress in ret_stack */
57523 int curr_ret_stack;
57524 @@ -1542,6 +1582,57 @@ struct task_struct {
57525 #endif /* CONFIG_TRACING */
57526 };
57527
57528 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57529 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57530 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57531 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57532 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57533 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57534 +
57535 +#ifdef CONFIG_PAX_SOFTMODE
57536 +extern unsigned int pax_softmode;
57537 +#endif
57538 +
57539 +extern int pax_check_flags(unsigned long *);
57540 +
57541 +/* if tsk != current then task_lock must be held on it */
57542 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57543 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
57544 +{
57545 + if (likely(tsk->mm))
57546 + return tsk->mm->pax_flags;
57547 + else
57548 + return 0UL;
57549 +}
57550 +
57551 +/* if tsk != current then task_lock must be held on it */
57552 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57553 +{
57554 + if (likely(tsk->mm)) {
57555 + tsk->mm->pax_flags = flags;
57556 + return 0;
57557 + }
57558 + return -EINVAL;
57559 +}
57560 +#endif
57561 +
57562 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57563 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
57564 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57565 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57566 +#endif
57567 +
57568 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57569 +void pax_report_insns(void *pc, void *sp);
57570 +void pax_report_refcount_overflow(struct pt_regs *regs);
57571 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
57572 +
57573 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57574 +extern void pax_track_stack(void);
57575 +#else
57576 +static inline void pax_track_stack(void) {}
57577 +#endif
57578 +
57579 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57580 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
57581
57582 @@ -1978,7 +2069,9 @@ void yield(void);
57583 extern struct exec_domain default_exec_domain;
57584
57585 union thread_union {
57586 +#ifndef CONFIG_X86
57587 struct thread_info thread_info;
57588 +#endif
57589 unsigned long stack[THREAD_SIZE/sizeof(long)];
57590 };
57591
57592 @@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
57593 extern void exit_itimers(struct signal_struct *);
57594 extern void flush_itimer_signals(void);
57595
57596 -extern NORET_TYPE void do_group_exit(int);
57597 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57598
57599 extern void daemonize(const char *, ...);
57600 extern int allow_signal(int);
57601 @@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
57602
57603 #endif
57604
57605 -static inline int object_is_on_stack(void *obj)
57606 +static inline int object_starts_on_stack(void *obj)
57607 {
57608 - void *stack = task_stack_page(current);
57609 + const void *stack = task_stack_page(current);
57610
57611 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57612 }
57613
57614 +#ifdef CONFIG_PAX_USERCOPY
57615 +extern int object_is_on_stack(const void *obj, unsigned long len);
57616 +#endif
57617 +
57618 extern void thread_info_cache_init(void);
57619
57620 #ifdef CONFIG_DEBUG_STACK_USAGE
57621 diff -urNp linux-2.6.32.42/include/linux/screen_info.h linux-2.6.32.42/include/linux/screen_info.h
57622 --- linux-2.6.32.42/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
57623 +++ linux-2.6.32.42/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
57624 @@ -42,7 +42,8 @@ struct screen_info {
57625 __u16 pages; /* 0x32 */
57626 __u16 vesa_attributes; /* 0x34 */
57627 __u32 capabilities; /* 0x36 */
57628 - __u8 _reserved[6]; /* 0x3a */
57629 + __u16 vesapm_size; /* 0x3a */
57630 + __u8 _reserved[4]; /* 0x3c */
57631 } __attribute__((packed));
57632
57633 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57634 diff -urNp linux-2.6.32.42/include/linux/security.h linux-2.6.32.42/include/linux/security.h
57635 --- linux-2.6.32.42/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57636 +++ linux-2.6.32.42/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57637 @@ -34,6 +34,7 @@
57638 #include <linux/key.h>
57639 #include <linux/xfrm.h>
57640 #include <linux/gfp.h>
57641 +#include <linux/grsecurity.h>
57642 #include <net/flow.h>
57643
57644 /* Maximum number of letters for an LSM name string */
57645 diff -urNp linux-2.6.32.42/include/linux/shm.h linux-2.6.32.42/include/linux/shm.h
57646 --- linux-2.6.32.42/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57647 +++ linux-2.6.32.42/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57648 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57649 pid_t shm_cprid;
57650 pid_t shm_lprid;
57651 struct user_struct *mlock_user;
57652 +#ifdef CONFIG_GRKERNSEC
57653 + time_t shm_createtime;
57654 + pid_t shm_lapid;
57655 +#endif
57656 };
57657
57658 /* shm_mode upper byte flags */
57659 diff -urNp linux-2.6.32.42/include/linux/skbuff.h linux-2.6.32.42/include/linux/skbuff.h
57660 --- linux-2.6.32.42/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57661 +++ linux-2.6.32.42/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
57662 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57663 */
57664 static inline int skb_queue_empty(const struct sk_buff_head *list)
57665 {
57666 - return list->next == (struct sk_buff *)list;
57667 + return list->next == (const struct sk_buff *)list;
57668 }
57669
57670 /**
57671 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57672 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57673 const struct sk_buff *skb)
57674 {
57675 - return (skb->next == (struct sk_buff *) list);
57676 + return (skb->next == (const struct sk_buff *) list);
57677 }
57678
57679 /**
57680 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57681 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57682 const struct sk_buff *skb)
57683 {
57684 - return (skb->prev == (struct sk_buff *) list);
57685 + return (skb->prev == (const struct sk_buff *) list);
57686 }
57687
57688 /**
57689 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57690 * headroom, you should not reduce this.
57691 */
57692 #ifndef NET_SKB_PAD
57693 -#define NET_SKB_PAD 32
57694 +#define NET_SKB_PAD (_AC(32,U))
57695 #endif
57696
57697 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57698 diff -urNp linux-2.6.32.42/include/linux/slab_def.h linux-2.6.32.42/include/linux/slab_def.h
57699 --- linux-2.6.32.42/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57700 +++ linux-2.6.32.42/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57701 @@ -69,10 +69,10 @@ struct kmem_cache {
57702 unsigned long node_allocs;
57703 unsigned long node_frees;
57704 unsigned long node_overflow;
57705 - atomic_t allochit;
57706 - atomic_t allocmiss;
57707 - atomic_t freehit;
57708 - atomic_t freemiss;
57709 + atomic_unchecked_t allochit;
57710 + atomic_unchecked_t allocmiss;
57711 + atomic_unchecked_t freehit;
57712 + atomic_unchecked_t freemiss;
57713
57714 /*
57715 * If debugging is enabled, then the allocator can add additional
57716 diff -urNp linux-2.6.32.42/include/linux/slab.h linux-2.6.32.42/include/linux/slab.h
57717 --- linux-2.6.32.42/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57718 +++ linux-2.6.32.42/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57719 @@ -11,12 +11,20 @@
57720
57721 #include <linux/gfp.h>
57722 #include <linux/types.h>
57723 +#include <linux/err.h>
57724
57725 /*
57726 * Flags to pass to kmem_cache_create().
57727 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57728 */
57729 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57730 +
57731 +#ifdef CONFIG_PAX_USERCOPY
57732 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57733 +#else
57734 +#define SLAB_USERCOPY 0x00000000UL
57735 +#endif
57736 +
57737 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57738 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57739 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57740 @@ -82,10 +90,13 @@
57741 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57742 * Both make kfree a no-op.
57743 */
57744 -#define ZERO_SIZE_PTR ((void *)16)
57745 +#define ZERO_SIZE_PTR \
57746 +({ \
57747 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57748 + (void *)(-MAX_ERRNO-1L); \
57749 +})
57750
57751 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57752 - (unsigned long)ZERO_SIZE_PTR)
57753 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57754
57755 /*
57756 * struct kmem_cache related prototypes
57757 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57758 void kfree(const void *);
57759 void kzfree(const void *);
57760 size_t ksize(const void *);
57761 +void check_object_size(const void *ptr, unsigned long n, bool to);
57762
57763 /*
57764 * Allocator specific definitions. These are mainly used to establish optimized
57765 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57766
57767 void __init kmem_cache_init_late(void);
57768
57769 +#define kmalloc(x, y) \
57770 +({ \
57771 + void *___retval; \
57772 + intoverflow_t ___x = (intoverflow_t)x; \
57773 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57774 + ___retval = NULL; \
57775 + else \
57776 + ___retval = kmalloc((size_t)___x, (y)); \
57777 + ___retval; \
57778 +})
57779 +
57780 +#define kmalloc_node(x, y, z) \
57781 +({ \
57782 + void *___retval; \
57783 + intoverflow_t ___x = (intoverflow_t)x; \
57784 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57785 + ___retval = NULL; \
57786 + else \
57787 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57788 + ___retval; \
57789 +})
57790 +
57791 +#define kzalloc(x, y) \
57792 +({ \
57793 + void *___retval; \
57794 + intoverflow_t ___x = (intoverflow_t)x; \
57795 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57796 + ___retval = NULL; \
57797 + else \
57798 + ___retval = kzalloc((size_t)___x, (y)); \
57799 + ___retval; \
57800 +})
57801 +
57802 #endif /* _LINUX_SLAB_H */
57803 diff -urNp linux-2.6.32.42/include/linux/slub_def.h linux-2.6.32.42/include/linux/slub_def.h
57804 --- linux-2.6.32.42/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57805 +++ linux-2.6.32.42/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57806 @@ -86,7 +86,7 @@ struct kmem_cache {
57807 struct kmem_cache_order_objects max;
57808 struct kmem_cache_order_objects min;
57809 gfp_t allocflags; /* gfp flags to use on each alloc */
57810 - int refcount; /* Refcount for slab cache destroy */
57811 + atomic_t refcount; /* Refcount for slab cache destroy */
57812 void (*ctor)(void *);
57813 int inuse; /* Offset to metadata */
57814 int align; /* Alignment */
57815 diff -urNp linux-2.6.32.42/include/linux/sonet.h linux-2.6.32.42/include/linux/sonet.h
57816 --- linux-2.6.32.42/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57817 +++ linux-2.6.32.42/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57818 @@ -61,7 +61,7 @@ struct sonet_stats {
57819 #include <asm/atomic.h>
57820
57821 struct k_sonet_stats {
57822 -#define __HANDLE_ITEM(i) atomic_t i
57823 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57824 __SONET_ITEMS
57825 #undef __HANDLE_ITEM
57826 };
57827 diff -urNp linux-2.6.32.42/include/linux/sunrpc/clnt.h linux-2.6.32.42/include/linux/sunrpc/clnt.h
57828 --- linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57829 +++ linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57830 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57831 {
57832 switch (sap->sa_family) {
57833 case AF_INET:
57834 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57835 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57836 case AF_INET6:
57837 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57838 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57839 }
57840 return 0;
57841 }
57842 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57843 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57844 const struct sockaddr *src)
57845 {
57846 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57847 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57848 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57849
57850 dsin->sin_family = ssin->sin_family;
57851 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57852 if (sa->sa_family != AF_INET6)
57853 return 0;
57854
57855 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57856 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57857 }
57858
57859 #endif /* __KERNEL__ */
57860 diff -urNp linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h
57861 --- linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57862 +++ linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57863 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57864 extern unsigned int svcrdma_max_requests;
57865 extern unsigned int svcrdma_max_req_size;
57866
57867 -extern atomic_t rdma_stat_recv;
57868 -extern atomic_t rdma_stat_read;
57869 -extern atomic_t rdma_stat_write;
57870 -extern atomic_t rdma_stat_sq_starve;
57871 -extern atomic_t rdma_stat_rq_starve;
57872 -extern atomic_t rdma_stat_rq_poll;
57873 -extern atomic_t rdma_stat_rq_prod;
57874 -extern atomic_t rdma_stat_sq_poll;
57875 -extern atomic_t rdma_stat_sq_prod;
57876 +extern atomic_unchecked_t rdma_stat_recv;
57877 +extern atomic_unchecked_t rdma_stat_read;
57878 +extern atomic_unchecked_t rdma_stat_write;
57879 +extern atomic_unchecked_t rdma_stat_sq_starve;
57880 +extern atomic_unchecked_t rdma_stat_rq_starve;
57881 +extern atomic_unchecked_t rdma_stat_rq_poll;
57882 +extern atomic_unchecked_t rdma_stat_rq_prod;
57883 +extern atomic_unchecked_t rdma_stat_sq_poll;
57884 +extern atomic_unchecked_t rdma_stat_sq_prod;
57885
57886 #define RPCRDMA_VERSION 1
57887
57888 diff -urNp linux-2.6.32.42/include/linux/suspend.h linux-2.6.32.42/include/linux/suspend.h
57889 --- linux-2.6.32.42/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57890 +++ linux-2.6.32.42/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57891 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57892 * which require special recovery actions in that situation.
57893 */
57894 struct platform_suspend_ops {
57895 - int (*valid)(suspend_state_t state);
57896 - int (*begin)(suspend_state_t state);
57897 - int (*prepare)(void);
57898 - int (*prepare_late)(void);
57899 - int (*enter)(suspend_state_t state);
57900 - void (*wake)(void);
57901 - void (*finish)(void);
57902 - void (*end)(void);
57903 - void (*recover)(void);
57904 + int (* const valid)(suspend_state_t state);
57905 + int (* const begin)(suspend_state_t state);
57906 + int (* const prepare)(void);
57907 + int (* const prepare_late)(void);
57908 + int (* const enter)(suspend_state_t state);
57909 + void (* const wake)(void);
57910 + void (* const finish)(void);
57911 + void (* const end)(void);
57912 + void (* const recover)(void);
57913 };
57914
57915 #ifdef CONFIG_SUSPEND
57916 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
57917 * suspend_set_ops - set platform dependent suspend operations
57918 * @ops: The new suspend operations to set.
57919 */
57920 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
57921 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57922 extern int suspend_valid_only_mem(suspend_state_t state);
57923
57924 /**
57925 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57926 #else /* !CONFIG_SUSPEND */
57927 #define suspend_valid_only_mem NULL
57928
57929 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57930 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57931 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57932 #endif /* !CONFIG_SUSPEND */
57933
57934 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57935 * platforms which require special recovery actions in that situation.
57936 */
57937 struct platform_hibernation_ops {
57938 - int (*begin)(void);
57939 - void (*end)(void);
57940 - int (*pre_snapshot)(void);
57941 - void (*finish)(void);
57942 - int (*prepare)(void);
57943 - int (*enter)(void);
57944 - void (*leave)(void);
57945 - int (*pre_restore)(void);
57946 - void (*restore_cleanup)(void);
57947 - void (*recover)(void);
57948 + int (* const begin)(void);
57949 + void (* const end)(void);
57950 + int (* const pre_snapshot)(void);
57951 + void (* const finish)(void);
57952 + int (* const prepare)(void);
57953 + int (* const enter)(void);
57954 + void (* const leave)(void);
57955 + int (* const pre_restore)(void);
57956 + void (* const restore_cleanup)(void);
57957 + void (* const recover)(void);
57958 };
57959
57960 #ifdef CONFIG_HIBERNATION
57961 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57962 extern void swsusp_unset_page_free(struct page *);
57963 extern unsigned long get_safe_page(gfp_t gfp_mask);
57964
57965 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57966 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57967 extern int hibernate(void);
57968 extern bool system_entering_hibernation(void);
57969 #else /* CONFIG_HIBERNATION */
57970 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57971 static inline void swsusp_set_page_free(struct page *p) {}
57972 static inline void swsusp_unset_page_free(struct page *p) {}
57973
57974 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57975 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57976 static inline int hibernate(void) { return -ENOSYS; }
57977 static inline bool system_entering_hibernation(void) { return false; }
57978 #endif /* CONFIG_HIBERNATION */
57979 diff -urNp linux-2.6.32.42/include/linux/sysctl.h linux-2.6.32.42/include/linux/sysctl.h
57980 --- linux-2.6.32.42/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57981 +++ linux-2.6.32.42/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57982 @@ -164,7 +164,11 @@ enum
57983 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57984 };
57985
57986 -
57987 +#ifdef CONFIG_PAX_SOFTMODE
57988 +enum {
57989 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57990 +};
57991 +#endif
57992
57993 /* CTL_VM names: */
57994 enum
57995 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57996
57997 extern int proc_dostring(struct ctl_table *, int,
57998 void __user *, size_t *, loff_t *);
57999 +extern int proc_dostring_modpriv(struct ctl_table *, int,
58000 + void __user *, size_t *, loff_t *);
58001 extern int proc_dointvec(struct ctl_table *, int,
58002 void __user *, size_t *, loff_t *);
58003 extern int proc_dointvec_minmax(struct ctl_table *, int,
58004 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
58005
58006 extern ctl_handler sysctl_data;
58007 extern ctl_handler sysctl_string;
58008 +extern ctl_handler sysctl_string_modpriv;
58009 extern ctl_handler sysctl_intvec;
58010 extern ctl_handler sysctl_jiffies;
58011 extern ctl_handler sysctl_ms_jiffies;
58012 diff -urNp linux-2.6.32.42/include/linux/sysfs.h linux-2.6.32.42/include/linux/sysfs.h
58013 --- linux-2.6.32.42/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
58014 +++ linux-2.6.32.42/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
58015 @@ -75,8 +75,8 @@ struct bin_attribute {
58016 };
58017
58018 struct sysfs_ops {
58019 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
58020 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
58021 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
58022 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
58023 };
58024
58025 struct sysfs_dirent;
58026 diff -urNp linux-2.6.32.42/include/linux/thread_info.h linux-2.6.32.42/include/linux/thread_info.h
58027 --- linux-2.6.32.42/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
58028 +++ linux-2.6.32.42/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
58029 @@ -23,7 +23,7 @@ struct restart_block {
58030 };
58031 /* For futex_wait and futex_wait_requeue_pi */
58032 struct {
58033 - u32 *uaddr;
58034 + u32 __user *uaddr;
58035 u32 val;
58036 u32 flags;
58037 u32 bitset;
58038 diff -urNp linux-2.6.32.42/include/linux/tty.h linux-2.6.32.42/include/linux/tty.h
58039 --- linux-2.6.32.42/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
58040 +++ linux-2.6.32.42/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
58041 @@ -13,6 +13,7 @@
58042 #include <linux/tty_driver.h>
58043 #include <linux/tty_ldisc.h>
58044 #include <linux/mutex.h>
58045 +#include <linux/poll.h>
58046
58047 #include <asm/system.h>
58048
58049 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
58050 extern dev_t tty_devnum(struct tty_struct *tty);
58051 extern void proc_clear_tty(struct task_struct *p);
58052 extern struct tty_struct *get_current_tty(void);
58053 -extern void tty_default_fops(struct file_operations *fops);
58054 extern struct tty_struct *alloc_tty_struct(void);
58055 extern void free_tty_struct(struct tty_struct *tty);
58056 extern void initialize_tty_struct(struct tty_struct *tty,
58057 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
58058 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
58059 extern void tty_ldisc_enable(struct tty_struct *tty);
58060
58061 +/* tty_io.c */
58062 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
58063 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
58064 +extern unsigned int tty_poll(struct file *, poll_table *);
58065 +#ifdef CONFIG_COMPAT
58066 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
58067 + unsigned long arg);
58068 +#else
58069 +#define tty_compat_ioctl NULL
58070 +#endif
58071 +extern int tty_release(struct inode *, struct file *);
58072 +extern int tty_fasync(int fd, struct file *filp, int on);
58073
58074 /* n_tty.c */
58075 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
58076 diff -urNp linux-2.6.32.42/include/linux/tty_ldisc.h linux-2.6.32.42/include/linux/tty_ldisc.h
58077 --- linux-2.6.32.42/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
58078 +++ linux-2.6.32.42/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
58079 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
58080
58081 struct module *owner;
58082
58083 - int refcount;
58084 + atomic_t refcount;
58085 };
58086
58087 struct tty_ldisc {
58088 diff -urNp linux-2.6.32.42/include/linux/types.h linux-2.6.32.42/include/linux/types.h
58089 --- linux-2.6.32.42/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
58090 +++ linux-2.6.32.42/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
58091 @@ -191,10 +191,26 @@ typedef struct {
58092 volatile int counter;
58093 } atomic_t;
58094
58095 +#ifdef CONFIG_PAX_REFCOUNT
58096 +typedef struct {
58097 + volatile int counter;
58098 +} atomic_unchecked_t;
58099 +#else
58100 +typedef atomic_t atomic_unchecked_t;
58101 +#endif
58102 +
58103 #ifdef CONFIG_64BIT
58104 typedef struct {
58105 volatile long counter;
58106 } atomic64_t;
58107 +
58108 +#ifdef CONFIG_PAX_REFCOUNT
58109 +typedef struct {
58110 + volatile long counter;
58111 +} atomic64_unchecked_t;
58112 +#else
58113 +typedef atomic64_t atomic64_unchecked_t;
58114 +#endif
58115 #endif
58116
58117 struct ustat {
58118 diff -urNp linux-2.6.32.42/include/linux/uaccess.h linux-2.6.32.42/include/linux/uaccess.h
58119 --- linux-2.6.32.42/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
58120 +++ linux-2.6.32.42/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
58121 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
58122 long ret; \
58123 mm_segment_t old_fs = get_fs(); \
58124 \
58125 - set_fs(KERNEL_DS); \
58126 pagefault_disable(); \
58127 + set_fs(KERNEL_DS); \
58128 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58129 - pagefault_enable(); \
58130 set_fs(old_fs); \
58131 + pagefault_enable(); \
58132 ret; \
58133 })
58134
58135 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
58136 * Safely read from address @src to the buffer at @dst. If a kernel fault
58137 * happens, handle that and return -EFAULT.
58138 */
58139 -extern long probe_kernel_read(void *dst, void *src, size_t size);
58140 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
58141
58142 /*
58143 * probe_kernel_write(): safely attempt to write to a location
58144 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
58145 * Safely write to address @dst from the buffer at @src. If a kernel fault
58146 * happens, handle that and return -EFAULT.
58147 */
58148 -extern long probe_kernel_write(void *dst, void *src, size_t size);
58149 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
58150
58151 #endif /* __LINUX_UACCESS_H__ */
58152 diff -urNp linux-2.6.32.42/include/linux/unaligned/access_ok.h linux-2.6.32.42/include/linux/unaligned/access_ok.h
58153 --- linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
58154 +++ linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
58155 @@ -6,32 +6,32 @@
58156
58157 static inline u16 get_unaligned_le16(const void *p)
58158 {
58159 - return le16_to_cpup((__le16 *)p);
58160 + return le16_to_cpup((const __le16 *)p);
58161 }
58162
58163 static inline u32 get_unaligned_le32(const void *p)
58164 {
58165 - return le32_to_cpup((__le32 *)p);
58166 + return le32_to_cpup((const __le32 *)p);
58167 }
58168
58169 static inline u64 get_unaligned_le64(const void *p)
58170 {
58171 - return le64_to_cpup((__le64 *)p);
58172 + return le64_to_cpup((const __le64 *)p);
58173 }
58174
58175 static inline u16 get_unaligned_be16(const void *p)
58176 {
58177 - return be16_to_cpup((__be16 *)p);
58178 + return be16_to_cpup((const __be16 *)p);
58179 }
58180
58181 static inline u32 get_unaligned_be32(const void *p)
58182 {
58183 - return be32_to_cpup((__be32 *)p);
58184 + return be32_to_cpup((const __be32 *)p);
58185 }
58186
58187 static inline u64 get_unaligned_be64(const void *p)
58188 {
58189 - return be64_to_cpup((__be64 *)p);
58190 + return be64_to_cpup((const __be64 *)p);
58191 }
58192
58193 static inline void put_unaligned_le16(u16 val, void *p)
58194 diff -urNp linux-2.6.32.42/include/linux/vmalloc.h linux-2.6.32.42/include/linux/vmalloc.h
58195 --- linux-2.6.32.42/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
58196 +++ linux-2.6.32.42/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
58197 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
58198 #define VM_MAP 0x00000004 /* vmap()ed pages */
58199 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
58200 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
58201 +
58202 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58203 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
58204 +#endif
58205 +
58206 /* bits [20..32] reserved for arch specific ioremap internals */
58207
58208 /*
58209 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
58210
58211 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
58212
58213 +#define vmalloc(x) \
58214 +({ \
58215 + void *___retval; \
58216 + intoverflow_t ___x = (intoverflow_t)x; \
58217 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
58218 + ___retval = NULL; \
58219 + else \
58220 + ___retval = vmalloc((unsigned long)___x); \
58221 + ___retval; \
58222 +})
58223 +
58224 +#define __vmalloc(x, y, z) \
58225 +({ \
58226 + void *___retval; \
58227 + intoverflow_t ___x = (intoverflow_t)x; \
58228 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
58229 + ___retval = NULL; \
58230 + else \
58231 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
58232 + ___retval; \
58233 +})
58234 +
58235 +#define vmalloc_user(x) \
58236 +({ \
58237 + void *___retval; \
58238 + intoverflow_t ___x = (intoverflow_t)x; \
58239 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
58240 + ___retval = NULL; \
58241 + else \
58242 + ___retval = vmalloc_user((unsigned long)___x); \
58243 + ___retval; \
58244 +})
58245 +
58246 +#define vmalloc_exec(x) \
58247 +({ \
58248 + void *___retval; \
58249 + intoverflow_t ___x = (intoverflow_t)x; \
58250 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
58251 + ___retval = NULL; \
58252 + else \
58253 + ___retval = vmalloc_exec((unsigned long)___x); \
58254 + ___retval; \
58255 +})
58256 +
58257 +#define vmalloc_node(x, y) \
58258 +({ \
58259 + void *___retval; \
58260 + intoverflow_t ___x = (intoverflow_t)x; \
58261 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
58262 + ___retval = NULL; \
58263 + else \
58264 + ___retval = vmalloc_node((unsigned long)___x, (y));\
58265 + ___retval; \
58266 +})
58267 +
58268 +#define vmalloc_32(x) \
58269 +({ \
58270 + void *___retval; \
58271 + intoverflow_t ___x = (intoverflow_t)x; \
58272 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
58273 + ___retval = NULL; \
58274 + else \
58275 + ___retval = vmalloc_32((unsigned long)___x); \
58276 + ___retval; \
58277 +})
58278 +
58279 +#define vmalloc_32_user(x) \
58280 +({ \
58281 + void *___retval; \
58282 + intoverflow_t ___x = (intoverflow_t)x; \
58283 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
58284 + ___retval = NULL; \
58285 + else \
58286 + ___retval = vmalloc_32_user((unsigned long)___x);\
58287 + ___retval; \
58288 +})
58289 +
58290 #endif /* _LINUX_VMALLOC_H */
58291 diff -urNp linux-2.6.32.42/include/linux/vmstat.h linux-2.6.32.42/include/linux/vmstat.h
58292 --- linux-2.6.32.42/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
58293 +++ linux-2.6.32.42/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
58294 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
58295 /*
58296 * Zone based page accounting with per cpu differentials.
58297 */
58298 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58299 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58300
58301 static inline void zone_page_state_add(long x, struct zone *zone,
58302 enum zone_stat_item item)
58303 {
58304 - atomic_long_add(x, &zone->vm_stat[item]);
58305 - atomic_long_add(x, &vm_stat[item]);
58306 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
58307 + atomic_long_add_unchecked(x, &vm_stat[item]);
58308 }
58309
58310 static inline unsigned long global_page_state(enum zone_stat_item item)
58311 {
58312 - long x = atomic_long_read(&vm_stat[item]);
58313 + long x = atomic_long_read_unchecked(&vm_stat[item]);
58314 #ifdef CONFIG_SMP
58315 if (x < 0)
58316 x = 0;
58317 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
58318 static inline unsigned long zone_page_state(struct zone *zone,
58319 enum zone_stat_item item)
58320 {
58321 - long x = atomic_long_read(&zone->vm_stat[item]);
58322 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58323 #ifdef CONFIG_SMP
58324 if (x < 0)
58325 x = 0;
58326 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
58327 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
58328 enum zone_stat_item item)
58329 {
58330 - long x = atomic_long_read(&zone->vm_stat[item]);
58331 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58332
58333 #ifdef CONFIG_SMP
58334 int cpu;
58335 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
58336
58337 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
58338 {
58339 - atomic_long_inc(&zone->vm_stat[item]);
58340 - atomic_long_inc(&vm_stat[item]);
58341 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
58342 + atomic_long_inc_unchecked(&vm_stat[item]);
58343 }
58344
58345 static inline void __inc_zone_page_state(struct page *page,
58346 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
58347
58348 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
58349 {
58350 - atomic_long_dec(&zone->vm_stat[item]);
58351 - atomic_long_dec(&vm_stat[item]);
58352 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
58353 + atomic_long_dec_unchecked(&vm_stat[item]);
58354 }
58355
58356 static inline void __dec_zone_page_state(struct page *page,
58357 diff -urNp linux-2.6.32.42/include/media/v4l2-device.h linux-2.6.32.42/include/media/v4l2-device.h
58358 --- linux-2.6.32.42/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
58359 +++ linux-2.6.32.42/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
58360 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
58361 this function returns 0. If the name ends with a digit (e.g. cx18),
58362 then the name will be set to cx18-0 since cx180 looks really odd. */
58363 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
58364 - atomic_t *instance);
58365 + atomic_unchecked_t *instance);
58366
58367 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
58368 Since the parent disappears this ensures that v4l2_dev doesn't have an
58369 diff -urNp linux-2.6.32.42/include/net/flow.h linux-2.6.32.42/include/net/flow.h
58370 --- linux-2.6.32.42/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
58371 +++ linux-2.6.32.42/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
58372 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
58373 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
58374 u8 dir, flow_resolve_t resolver);
58375 extern void flow_cache_flush(void);
58376 -extern atomic_t flow_cache_genid;
58377 +extern atomic_unchecked_t flow_cache_genid;
58378
58379 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
58380 {
58381 diff -urNp linux-2.6.32.42/include/net/inetpeer.h linux-2.6.32.42/include/net/inetpeer.h
58382 --- linux-2.6.32.42/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
58383 +++ linux-2.6.32.42/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
58384 @@ -24,7 +24,7 @@ struct inet_peer
58385 __u32 dtime; /* the time of last use of not
58386 * referenced entries */
58387 atomic_t refcnt;
58388 - atomic_t rid; /* Frag reception counter */
58389 + atomic_unchecked_t rid; /* Frag reception counter */
58390 __u32 tcp_ts;
58391 unsigned long tcp_ts_stamp;
58392 };
58393 diff -urNp linux-2.6.32.42/include/net/ip_vs.h linux-2.6.32.42/include/net/ip_vs.h
58394 --- linux-2.6.32.42/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
58395 +++ linux-2.6.32.42/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
58396 @@ -365,7 +365,7 @@ struct ip_vs_conn {
58397 struct ip_vs_conn *control; /* Master control connection */
58398 atomic_t n_control; /* Number of controlled ones */
58399 struct ip_vs_dest *dest; /* real server */
58400 - atomic_t in_pkts; /* incoming packet counter */
58401 + atomic_unchecked_t in_pkts; /* incoming packet counter */
58402
58403 /* packet transmitter for different forwarding methods. If it
58404 mangles the packet, it must return NF_DROP or better NF_STOLEN,
58405 @@ -466,7 +466,7 @@ struct ip_vs_dest {
58406 union nf_inet_addr addr; /* IP address of the server */
58407 __be16 port; /* port number of the server */
58408 volatile unsigned flags; /* dest status flags */
58409 - atomic_t conn_flags; /* flags to copy to conn */
58410 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
58411 atomic_t weight; /* server weight */
58412
58413 atomic_t refcnt; /* reference counter */
58414 diff -urNp linux-2.6.32.42/include/net/irda/ircomm_tty.h linux-2.6.32.42/include/net/irda/ircomm_tty.h
58415 --- linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
58416 +++ linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
58417 @@ -35,6 +35,7 @@
58418 #include <linux/termios.h>
58419 #include <linux/timer.h>
58420 #include <linux/tty.h> /* struct tty_struct */
58421 +#include <asm/local.h>
58422
58423 #include <net/irda/irias_object.h>
58424 #include <net/irda/ircomm_core.h>
58425 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58426 unsigned short close_delay;
58427 unsigned short closing_wait; /* time to wait before closing */
58428
58429 - int open_count;
58430 - int blocked_open; /* # of blocked opens */
58431 + local_t open_count;
58432 + local_t blocked_open; /* # of blocked opens */
58433
58434 /* Protect concurent access to :
58435 * o self->open_count
58436 diff -urNp linux-2.6.32.42/include/net/iucv/af_iucv.h linux-2.6.32.42/include/net/iucv/af_iucv.h
58437 --- linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
58438 +++ linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
58439 @@ -87,7 +87,7 @@ struct iucv_sock {
58440 struct iucv_sock_list {
58441 struct hlist_head head;
58442 rwlock_t lock;
58443 - atomic_t autobind_name;
58444 + atomic_unchecked_t autobind_name;
58445 };
58446
58447 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
58448 diff -urNp linux-2.6.32.42/include/net/neighbour.h linux-2.6.32.42/include/net/neighbour.h
58449 --- linux-2.6.32.42/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
58450 +++ linux-2.6.32.42/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
58451 @@ -125,12 +125,12 @@ struct neighbour
58452 struct neigh_ops
58453 {
58454 int family;
58455 - void (*solicit)(struct neighbour *, struct sk_buff*);
58456 - void (*error_report)(struct neighbour *, struct sk_buff*);
58457 - int (*output)(struct sk_buff*);
58458 - int (*connected_output)(struct sk_buff*);
58459 - int (*hh_output)(struct sk_buff*);
58460 - int (*queue_xmit)(struct sk_buff*);
58461 + void (* const solicit)(struct neighbour *, struct sk_buff*);
58462 + void (* const error_report)(struct neighbour *, struct sk_buff*);
58463 + int (* const output)(struct sk_buff*);
58464 + int (* const connected_output)(struct sk_buff*);
58465 + int (* const hh_output)(struct sk_buff*);
58466 + int (* const queue_xmit)(struct sk_buff*);
58467 };
58468
58469 struct pneigh_entry
58470 diff -urNp linux-2.6.32.42/include/net/netlink.h linux-2.6.32.42/include/net/netlink.h
58471 --- linux-2.6.32.42/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
58472 +++ linux-2.6.32.42/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
58473 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
58474 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
58475 {
58476 if (mark)
58477 - skb_trim(skb, (unsigned char *) mark - skb->data);
58478 + skb_trim(skb, (const unsigned char *) mark - skb->data);
58479 }
58480
58481 /**
58482 diff -urNp linux-2.6.32.42/include/net/netns/ipv4.h linux-2.6.32.42/include/net/netns/ipv4.h
58483 --- linux-2.6.32.42/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
58484 +++ linux-2.6.32.42/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
58485 @@ -54,7 +54,7 @@ struct netns_ipv4 {
58486 int current_rt_cache_rebuild_count;
58487
58488 struct timer_list rt_secret_timer;
58489 - atomic_t rt_genid;
58490 + atomic_unchecked_t rt_genid;
58491
58492 #ifdef CONFIG_IP_MROUTE
58493 struct sock *mroute_sk;
58494 diff -urNp linux-2.6.32.42/include/net/sctp/sctp.h linux-2.6.32.42/include/net/sctp/sctp.h
58495 --- linux-2.6.32.42/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
58496 +++ linux-2.6.32.42/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
58497 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
58498
58499 #else /* SCTP_DEBUG */
58500
58501 -#define SCTP_DEBUG_PRINTK(whatever...)
58502 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
58503 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
58504 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
58505 #define SCTP_ENABLE_DEBUG
58506 #define SCTP_DISABLE_DEBUG
58507 #define SCTP_ASSERT(expr, str, func)
58508 diff -urNp linux-2.6.32.42/include/net/sock.h linux-2.6.32.42/include/net/sock.h
58509 --- linux-2.6.32.42/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
58510 +++ linux-2.6.32.42/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
58511 @@ -272,7 +272,7 @@ struct sock {
58512 rwlock_t sk_callback_lock;
58513 int sk_err,
58514 sk_err_soft;
58515 - atomic_t sk_drops;
58516 + atomic_unchecked_t sk_drops;
58517 unsigned short sk_ack_backlog;
58518 unsigned short sk_max_ack_backlog;
58519 __u32 sk_priority;
58520 diff -urNp linux-2.6.32.42/include/net/tcp.h linux-2.6.32.42/include/net/tcp.h
58521 --- linux-2.6.32.42/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
58522 +++ linux-2.6.32.42/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
58523 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
58524 struct tcp_seq_afinfo {
58525 char *name;
58526 sa_family_t family;
58527 + /* cannot be const */
58528 struct file_operations seq_fops;
58529 struct seq_operations seq_ops;
58530 };
58531 diff -urNp linux-2.6.32.42/include/net/udp.h linux-2.6.32.42/include/net/udp.h
58532 --- linux-2.6.32.42/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
58533 +++ linux-2.6.32.42/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
58534 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
58535 char *name;
58536 sa_family_t family;
58537 struct udp_table *udp_table;
58538 + /* cannot be const */
58539 struct file_operations seq_fops;
58540 struct seq_operations seq_ops;
58541 };
58542 diff -urNp linux-2.6.32.42/include/scsi/scsi_device.h linux-2.6.32.42/include/scsi/scsi_device.h
58543 --- linux-2.6.32.42/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
58544 +++ linux-2.6.32.42/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
58545 @@ -156,9 +156,9 @@ struct scsi_device {
58546 unsigned int max_device_blocked; /* what device_blocked counts down from */
58547 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58548
58549 - atomic_t iorequest_cnt;
58550 - atomic_t iodone_cnt;
58551 - atomic_t ioerr_cnt;
58552 + atomic_unchecked_t iorequest_cnt;
58553 + atomic_unchecked_t iodone_cnt;
58554 + atomic_unchecked_t ioerr_cnt;
58555
58556 struct device sdev_gendev,
58557 sdev_dev;
58558 diff -urNp linux-2.6.32.42/include/sound/ac97_codec.h linux-2.6.32.42/include/sound/ac97_codec.h
58559 --- linux-2.6.32.42/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
58560 +++ linux-2.6.32.42/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
58561 @@ -419,15 +419,15 @@
58562 struct snd_ac97;
58563
58564 struct snd_ac97_build_ops {
58565 - int (*build_3d) (struct snd_ac97 *ac97);
58566 - int (*build_specific) (struct snd_ac97 *ac97);
58567 - int (*build_spdif) (struct snd_ac97 *ac97);
58568 - int (*build_post_spdif) (struct snd_ac97 *ac97);
58569 + int (* const build_3d) (struct snd_ac97 *ac97);
58570 + int (* const build_specific) (struct snd_ac97 *ac97);
58571 + int (* const build_spdif) (struct snd_ac97 *ac97);
58572 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
58573 #ifdef CONFIG_PM
58574 - void (*suspend) (struct snd_ac97 *ac97);
58575 - void (*resume) (struct snd_ac97 *ac97);
58576 + void (* const suspend) (struct snd_ac97 *ac97);
58577 + void (* const resume) (struct snd_ac97 *ac97);
58578 #endif
58579 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58580 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58581 };
58582
58583 struct snd_ac97_bus_ops {
58584 @@ -477,7 +477,7 @@ struct snd_ac97_template {
58585
58586 struct snd_ac97 {
58587 /* -- lowlevel (hardware) driver specific -- */
58588 - struct snd_ac97_build_ops * build_ops;
58589 + const struct snd_ac97_build_ops * build_ops;
58590 void *private_data;
58591 void (*private_free) (struct snd_ac97 *ac97);
58592 /* --- */
58593 diff -urNp linux-2.6.32.42/include/sound/ymfpci.h linux-2.6.32.42/include/sound/ymfpci.h
58594 --- linux-2.6.32.42/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
58595 +++ linux-2.6.32.42/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
58596 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58597 spinlock_t reg_lock;
58598 spinlock_t voice_lock;
58599 wait_queue_head_t interrupt_sleep;
58600 - atomic_t interrupt_sleep_count;
58601 + atomic_unchecked_t interrupt_sleep_count;
58602 struct snd_info_entry *proc_entry;
58603 const struct firmware *dsp_microcode;
58604 const struct firmware *controller_microcode;
58605 diff -urNp linux-2.6.32.42/include/trace/events/irq.h linux-2.6.32.42/include/trace/events/irq.h
58606 --- linux-2.6.32.42/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
58607 +++ linux-2.6.32.42/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
58608 @@ -34,7 +34,7 @@
58609 */
58610 TRACE_EVENT(irq_handler_entry,
58611
58612 - TP_PROTO(int irq, struct irqaction *action),
58613 + TP_PROTO(int irq, const struct irqaction *action),
58614
58615 TP_ARGS(irq, action),
58616
58617 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
58618 */
58619 TRACE_EVENT(irq_handler_exit,
58620
58621 - TP_PROTO(int irq, struct irqaction *action, int ret),
58622 + TP_PROTO(int irq, const struct irqaction *action, int ret),
58623
58624 TP_ARGS(irq, action, ret),
58625
58626 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58627 */
58628 TRACE_EVENT(softirq_entry,
58629
58630 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58631 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58632
58633 TP_ARGS(h, vec),
58634
58635 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58636 */
58637 TRACE_EVENT(softirq_exit,
58638
58639 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58640 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58641
58642 TP_ARGS(h, vec),
58643
58644 diff -urNp linux-2.6.32.42/include/video/uvesafb.h linux-2.6.32.42/include/video/uvesafb.h
58645 --- linux-2.6.32.42/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58646 +++ linux-2.6.32.42/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58647 @@ -177,6 +177,7 @@ struct uvesafb_par {
58648 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58649 u8 pmi_setpal; /* PMI for palette changes */
58650 u16 *pmi_base; /* protected mode interface location */
58651 + u8 *pmi_code; /* protected mode code location */
58652 void *pmi_start;
58653 void *pmi_pal;
58654 u8 *vbe_state_orig; /*
58655 diff -urNp linux-2.6.32.42/init/do_mounts.c linux-2.6.32.42/init/do_mounts.c
58656 --- linux-2.6.32.42/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58657 +++ linux-2.6.32.42/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58658 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58659
58660 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58661 {
58662 - int err = sys_mount(name, "/root", fs, flags, data);
58663 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58664 if (err)
58665 return err;
58666
58667 - sys_chdir("/root");
58668 + sys_chdir((__force const char __user *)"/root");
58669 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58670 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58671 current->fs->pwd.mnt->mnt_sb->s_type->name,
58672 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58673 va_start(args, fmt);
58674 vsprintf(buf, fmt, args);
58675 va_end(args);
58676 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58677 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58678 if (fd >= 0) {
58679 sys_ioctl(fd, FDEJECT, 0);
58680 sys_close(fd);
58681 }
58682 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58683 - fd = sys_open("/dev/console", O_RDWR, 0);
58684 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58685 if (fd >= 0) {
58686 sys_ioctl(fd, TCGETS, (long)&termios);
58687 termios.c_lflag &= ~ICANON;
58688 sys_ioctl(fd, TCSETSF, (long)&termios);
58689 - sys_read(fd, &c, 1);
58690 + sys_read(fd, (char __user *)&c, 1);
58691 termios.c_lflag |= ICANON;
58692 sys_ioctl(fd, TCSETSF, (long)&termios);
58693 sys_close(fd);
58694 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58695 mount_root();
58696 out:
58697 devtmpfs_mount("dev");
58698 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58699 - sys_chroot(".");
58700 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58701 + sys_chroot((__force char __user *)".");
58702 }
58703 diff -urNp linux-2.6.32.42/init/do_mounts.h linux-2.6.32.42/init/do_mounts.h
58704 --- linux-2.6.32.42/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58705 +++ linux-2.6.32.42/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58706 @@ -15,15 +15,15 @@ extern int root_mountflags;
58707
58708 static inline int create_dev(char *name, dev_t dev)
58709 {
58710 - sys_unlink(name);
58711 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58712 + sys_unlink((__force char __user *)name);
58713 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58714 }
58715
58716 #if BITS_PER_LONG == 32
58717 static inline u32 bstat(char *name)
58718 {
58719 struct stat64 stat;
58720 - if (sys_stat64(name, &stat) != 0)
58721 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58722 return 0;
58723 if (!S_ISBLK(stat.st_mode))
58724 return 0;
58725 diff -urNp linux-2.6.32.42/init/do_mounts_initrd.c linux-2.6.32.42/init/do_mounts_initrd.c
58726 --- linux-2.6.32.42/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58727 +++ linux-2.6.32.42/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58728 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58729 sys_close(old_fd);sys_close(root_fd);
58730 sys_close(0);sys_close(1);sys_close(2);
58731 sys_setsid();
58732 - (void) sys_open("/dev/console",O_RDWR,0);
58733 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58734 (void) sys_dup(0);
58735 (void) sys_dup(0);
58736 return kernel_execve(shell, argv, envp_init);
58737 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58738 create_dev("/dev/root.old", Root_RAM0);
58739 /* mount initrd on rootfs' /root */
58740 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58741 - sys_mkdir("/old", 0700);
58742 - root_fd = sys_open("/", 0, 0);
58743 - old_fd = sys_open("/old", 0, 0);
58744 + sys_mkdir((__force const char __user *)"/old", 0700);
58745 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58746 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58747 /* move initrd over / and chdir/chroot in initrd root */
58748 - sys_chdir("/root");
58749 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58750 - sys_chroot(".");
58751 + sys_chdir((__force const char __user *)"/root");
58752 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58753 + sys_chroot((__force const char __user *)".");
58754
58755 /*
58756 * In case that a resume from disk is carried out by linuxrc or one of
58757 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58758
58759 /* move initrd to rootfs' /old */
58760 sys_fchdir(old_fd);
58761 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58762 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58763 /* switch root and cwd back to / of rootfs */
58764 sys_fchdir(root_fd);
58765 - sys_chroot(".");
58766 + sys_chroot((__force const char __user *)".");
58767 sys_close(old_fd);
58768 sys_close(root_fd);
58769
58770 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58771 - sys_chdir("/old");
58772 + sys_chdir((__force const char __user *)"/old");
58773 return;
58774 }
58775
58776 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58777 mount_root();
58778
58779 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58780 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58781 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58782 if (!error)
58783 printk("okay\n");
58784 else {
58785 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58786 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58787 if (error == -ENOENT)
58788 printk("/initrd does not exist. Ignored.\n");
58789 else
58790 printk("failed\n");
58791 printk(KERN_NOTICE "Unmounting old root\n");
58792 - sys_umount("/old", MNT_DETACH);
58793 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58794 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58795 if (fd < 0) {
58796 error = fd;
58797 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58798 * mounted in the normal path.
58799 */
58800 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58801 - sys_unlink("/initrd.image");
58802 + sys_unlink((__force const char __user *)"/initrd.image");
58803 handle_initrd();
58804 return 1;
58805 }
58806 }
58807 - sys_unlink("/initrd.image");
58808 + sys_unlink((__force const char __user *)"/initrd.image");
58809 return 0;
58810 }
58811 diff -urNp linux-2.6.32.42/init/do_mounts_md.c linux-2.6.32.42/init/do_mounts_md.c
58812 --- linux-2.6.32.42/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58813 +++ linux-2.6.32.42/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58814 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58815 partitioned ? "_d" : "", minor,
58816 md_setup_args[ent].device_names);
58817
58818 - fd = sys_open(name, 0, 0);
58819 + fd = sys_open((__force char __user *)name, 0, 0);
58820 if (fd < 0) {
58821 printk(KERN_ERR "md: open failed - cannot start "
58822 "array %s\n", name);
58823 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58824 * array without it
58825 */
58826 sys_close(fd);
58827 - fd = sys_open(name, 0, 0);
58828 + fd = sys_open((__force char __user *)name, 0, 0);
58829 sys_ioctl(fd, BLKRRPART, 0);
58830 }
58831 sys_close(fd);
58832 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58833
58834 wait_for_device_probe();
58835
58836 - fd = sys_open("/dev/md0", 0, 0);
58837 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58838 if (fd >= 0) {
58839 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58840 sys_close(fd);
58841 diff -urNp linux-2.6.32.42/init/initramfs.c linux-2.6.32.42/init/initramfs.c
58842 --- linux-2.6.32.42/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58843 +++ linux-2.6.32.42/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58844 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58845 }
58846 }
58847
58848 -static long __init do_utime(char __user *filename, time_t mtime)
58849 +static long __init do_utime(__force char __user *filename, time_t mtime)
58850 {
58851 struct timespec t[2];
58852
58853 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58854 struct dir_entry *de, *tmp;
58855 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58856 list_del(&de->list);
58857 - do_utime(de->name, de->mtime);
58858 + do_utime((__force char __user *)de->name, de->mtime);
58859 kfree(de->name);
58860 kfree(de);
58861 }
58862 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58863 if (nlink >= 2) {
58864 char *old = find_link(major, minor, ino, mode, collected);
58865 if (old)
58866 - return (sys_link(old, collected) < 0) ? -1 : 1;
58867 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58868 }
58869 return 0;
58870 }
58871 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58872 {
58873 struct stat st;
58874
58875 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58876 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58877 if (S_ISDIR(st.st_mode))
58878 - sys_rmdir(path);
58879 + sys_rmdir((__force char __user *)path);
58880 else
58881 - sys_unlink(path);
58882 + sys_unlink((__force char __user *)path);
58883 }
58884 }
58885
58886 @@ -305,7 +305,7 @@ static int __init do_name(void)
58887 int openflags = O_WRONLY|O_CREAT;
58888 if (ml != 1)
58889 openflags |= O_TRUNC;
58890 - wfd = sys_open(collected, openflags, mode);
58891 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58892
58893 if (wfd >= 0) {
58894 sys_fchown(wfd, uid, gid);
58895 @@ -317,17 +317,17 @@ static int __init do_name(void)
58896 }
58897 }
58898 } else if (S_ISDIR(mode)) {
58899 - sys_mkdir(collected, mode);
58900 - sys_chown(collected, uid, gid);
58901 - sys_chmod(collected, mode);
58902 + sys_mkdir((__force char __user *)collected, mode);
58903 + sys_chown((__force char __user *)collected, uid, gid);
58904 + sys_chmod((__force char __user *)collected, mode);
58905 dir_add(collected, mtime);
58906 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58907 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58908 if (maybe_link() == 0) {
58909 - sys_mknod(collected, mode, rdev);
58910 - sys_chown(collected, uid, gid);
58911 - sys_chmod(collected, mode);
58912 - do_utime(collected, mtime);
58913 + sys_mknod((__force char __user *)collected, mode, rdev);
58914 + sys_chown((__force char __user *)collected, uid, gid);
58915 + sys_chmod((__force char __user *)collected, mode);
58916 + do_utime((__force char __user *)collected, mtime);
58917 }
58918 }
58919 return 0;
58920 @@ -336,15 +336,15 @@ static int __init do_name(void)
58921 static int __init do_copy(void)
58922 {
58923 if (count >= body_len) {
58924 - sys_write(wfd, victim, body_len);
58925 + sys_write(wfd, (__force char __user *)victim, body_len);
58926 sys_close(wfd);
58927 - do_utime(vcollected, mtime);
58928 + do_utime((__force char __user *)vcollected, mtime);
58929 kfree(vcollected);
58930 eat(body_len);
58931 state = SkipIt;
58932 return 0;
58933 } else {
58934 - sys_write(wfd, victim, count);
58935 + sys_write(wfd, (__force char __user *)victim, count);
58936 body_len -= count;
58937 eat(count);
58938 return 1;
58939 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58940 {
58941 collected[N_ALIGN(name_len) + body_len] = '\0';
58942 clean_path(collected, 0);
58943 - sys_symlink(collected + N_ALIGN(name_len), collected);
58944 - sys_lchown(collected, uid, gid);
58945 - do_utime(collected, mtime);
58946 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58947 + sys_lchown((__force char __user *)collected, uid, gid);
58948 + do_utime((__force char __user *)collected, mtime);
58949 state = SkipIt;
58950 next_state = Reset;
58951 return 0;
58952 diff -urNp linux-2.6.32.42/init/Kconfig linux-2.6.32.42/init/Kconfig
58953 --- linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58954 +++ linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58955 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58956
58957 config COMPAT_BRK
58958 bool "Disable heap randomization"
58959 - default y
58960 + default n
58961 help
58962 Randomizing heap placement makes heap exploits harder, but it
58963 also breaks ancient binaries (including anything libc5 based).
58964 diff -urNp linux-2.6.32.42/init/main.c linux-2.6.32.42/init/main.c
58965 --- linux-2.6.32.42/init/main.c 2011-05-10 22:12:01.000000000 -0400
58966 +++ linux-2.6.32.42/init/main.c 2011-05-22 23:02:06.000000000 -0400
58967 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58968 #ifdef CONFIG_TC
58969 extern void tc_init(void);
58970 #endif
58971 +extern void grsecurity_init(void);
58972
58973 enum system_states system_state __read_mostly;
58974 EXPORT_SYMBOL(system_state);
58975 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58976
58977 __setup("reset_devices", set_reset_devices);
58978
58979 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58980 +extern char pax_enter_kernel_user[];
58981 +extern char pax_exit_kernel_user[];
58982 +extern pgdval_t clone_pgd_mask;
58983 +#endif
58984 +
58985 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58986 +static int __init setup_pax_nouderef(char *str)
58987 +{
58988 +#ifdef CONFIG_X86_32
58989 + unsigned int cpu;
58990 + struct desc_struct *gdt;
58991 +
58992 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58993 + gdt = get_cpu_gdt_table(cpu);
58994 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58995 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58996 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58997 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58998 + }
58999 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
59000 +#else
59001 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
59002 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
59003 + clone_pgd_mask = ~(pgdval_t)0UL;
59004 +#endif
59005 +
59006 + return 0;
59007 +}
59008 +early_param("pax_nouderef", setup_pax_nouderef);
59009 +#endif
59010 +
59011 +#ifdef CONFIG_PAX_SOFTMODE
59012 +unsigned int pax_softmode;
59013 +
59014 +static int __init setup_pax_softmode(char *str)
59015 +{
59016 + get_option(&str, &pax_softmode);
59017 + return 1;
59018 +}
59019 +__setup("pax_softmode=", setup_pax_softmode);
59020 +#endif
59021 +
59022 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
59023 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
59024 static const char *panic_later, *panic_param;
59025 @@ -705,52 +749,53 @@ int initcall_debug;
59026 core_param(initcall_debug, initcall_debug, bool, 0644);
59027
59028 static char msgbuf[64];
59029 -static struct boot_trace_call call;
59030 -static struct boot_trace_ret ret;
59031 +static struct boot_trace_call trace_call;
59032 +static struct boot_trace_ret trace_ret;
59033
59034 int do_one_initcall(initcall_t fn)
59035 {
59036 int count = preempt_count();
59037 ktime_t calltime, delta, rettime;
59038 + const char *msg1 = "", *msg2 = "";
59039
59040 if (initcall_debug) {
59041 - call.caller = task_pid_nr(current);
59042 - printk("calling %pF @ %i\n", fn, call.caller);
59043 + trace_call.caller = task_pid_nr(current);
59044 + printk("calling %pF @ %i\n", fn, trace_call.caller);
59045 calltime = ktime_get();
59046 - trace_boot_call(&call, fn);
59047 + trace_boot_call(&trace_call, fn);
59048 enable_boot_trace();
59049 }
59050
59051 - ret.result = fn();
59052 + trace_ret.result = fn();
59053
59054 if (initcall_debug) {
59055 disable_boot_trace();
59056 rettime = ktime_get();
59057 delta = ktime_sub(rettime, calltime);
59058 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59059 - trace_boot_ret(&ret, fn);
59060 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59061 + trace_boot_ret(&trace_ret, fn);
59062 printk("initcall %pF returned %d after %Ld usecs\n", fn,
59063 - ret.result, ret.duration);
59064 + trace_ret.result, trace_ret.duration);
59065 }
59066
59067 msgbuf[0] = 0;
59068
59069 - if (ret.result && ret.result != -ENODEV && initcall_debug)
59070 - sprintf(msgbuf, "error code %d ", ret.result);
59071 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
59072 + sprintf(msgbuf, "error code %d ", trace_ret.result);
59073
59074 if (preempt_count() != count) {
59075 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
59076 + msg1 = " preemption imbalance";
59077 preempt_count() = count;
59078 }
59079 if (irqs_disabled()) {
59080 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
59081 + msg2 = " disabled interrupts";
59082 local_irq_enable();
59083 }
59084 - if (msgbuf[0]) {
59085 - printk("initcall %pF returned with %s\n", fn, msgbuf);
59086 + if (msgbuf[0] || *msg1 || *msg2) {
59087 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
59088 }
59089
59090 - return ret.result;
59091 + return trace_ret.result;
59092 }
59093
59094
59095 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
59096 if (!ramdisk_execute_command)
59097 ramdisk_execute_command = "/init";
59098
59099 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
59100 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
59101 ramdisk_execute_command = NULL;
59102 prepare_namespace();
59103 }
59104
59105 + grsecurity_init();
59106 +
59107 /*
59108 * Ok, we have completed the initial bootup, and
59109 * we're essentially up and running. Get rid of the
59110 diff -urNp linux-2.6.32.42/init/noinitramfs.c linux-2.6.32.42/init/noinitramfs.c
59111 --- linux-2.6.32.42/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
59112 +++ linux-2.6.32.42/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
59113 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
59114 {
59115 int err;
59116
59117 - err = sys_mkdir("/dev", 0755);
59118 + err = sys_mkdir((const char __user *)"/dev", 0755);
59119 if (err < 0)
59120 goto out;
59121
59122 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
59123 if (err < 0)
59124 goto out;
59125
59126 - err = sys_mkdir("/root", 0700);
59127 + err = sys_mkdir((const char __user *)"/root", 0700);
59128 if (err < 0)
59129 goto out;
59130
59131 diff -urNp linux-2.6.32.42/ipc/mqueue.c linux-2.6.32.42/ipc/mqueue.c
59132 --- linux-2.6.32.42/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
59133 +++ linux-2.6.32.42/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
59134 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
59135 mq_bytes = (mq_msg_tblsz +
59136 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
59137
59138 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
59139 spin_lock(&mq_lock);
59140 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
59141 u->mq_bytes + mq_bytes >
59142 diff -urNp linux-2.6.32.42/ipc/sem.c linux-2.6.32.42/ipc/sem.c
59143 --- linux-2.6.32.42/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
59144 +++ linux-2.6.32.42/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
59145 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
59146 ushort* sem_io = fast_sem_io;
59147 int nsems;
59148
59149 + pax_track_stack();
59150 +
59151 sma = sem_lock_check(ns, semid);
59152 if (IS_ERR(sma))
59153 return PTR_ERR(sma);
59154 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
59155 unsigned long jiffies_left = 0;
59156 struct ipc_namespace *ns;
59157
59158 + pax_track_stack();
59159 +
59160 ns = current->nsproxy->ipc_ns;
59161
59162 if (nsops < 1 || semid < 0)
59163 diff -urNp linux-2.6.32.42/ipc/shm.c linux-2.6.32.42/ipc/shm.c
59164 --- linux-2.6.32.42/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
59165 +++ linux-2.6.32.42/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
59166 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
59167 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
59168 #endif
59169
59170 +#ifdef CONFIG_GRKERNSEC
59171 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59172 + const time_t shm_createtime, const uid_t cuid,
59173 + const int shmid);
59174 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59175 + const time_t shm_createtime);
59176 +#endif
59177 +
59178 void shm_init_ns(struct ipc_namespace *ns)
59179 {
59180 ns->shm_ctlmax = SHMMAX;
59181 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
59182 shp->shm_lprid = 0;
59183 shp->shm_atim = shp->shm_dtim = 0;
59184 shp->shm_ctim = get_seconds();
59185 +#ifdef CONFIG_GRKERNSEC
59186 + {
59187 + struct timespec timeval;
59188 + do_posix_clock_monotonic_gettime(&timeval);
59189 +
59190 + shp->shm_createtime = timeval.tv_sec;
59191 + }
59192 +#endif
59193 shp->shm_segsz = size;
59194 shp->shm_nattch = 0;
59195 shp->shm_file = file;
59196 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
59197 if (err)
59198 goto out_unlock;
59199
59200 +#ifdef CONFIG_GRKERNSEC
59201 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
59202 + shp->shm_perm.cuid, shmid) ||
59203 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
59204 + err = -EACCES;
59205 + goto out_unlock;
59206 + }
59207 +#endif
59208 +
59209 path.dentry = dget(shp->shm_file->f_path.dentry);
59210 path.mnt = shp->shm_file->f_path.mnt;
59211 shp->shm_nattch++;
59212 +#ifdef CONFIG_GRKERNSEC
59213 + shp->shm_lapid = current->pid;
59214 +#endif
59215 size = i_size_read(path.dentry->d_inode);
59216 shm_unlock(shp);
59217
59218 diff -urNp linux-2.6.32.42/kernel/acct.c linux-2.6.32.42/kernel/acct.c
59219 --- linux-2.6.32.42/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
59220 +++ linux-2.6.32.42/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
59221 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
59222 */
59223 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
59224 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
59225 - file->f_op->write(file, (char *)&ac,
59226 + file->f_op->write(file, (__force char __user *)&ac,
59227 sizeof(acct_t), &file->f_pos);
59228 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
59229 set_fs(fs);
59230 diff -urNp linux-2.6.32.42/kernel/audit.c linux-2.6.32.42/kernel/audit.c
59231 --- linux-2.6.32.42/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
59232 +++ linux-2.6.32.42/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
59233 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
59234 3) suppressed due to audit_rate_limit
59235 4) suppressed due to audit_backlog_limit
59236 */
59237 -static atomic_t audit_lost = ATOMIC_INIT(0);
59238 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
59239
59240 /* The netlink socket. */
59241 static struct sock *audit_sock;
59242 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
59243 unsigned long now;
59244 int print;
59245
59246 - atomic_inc(&audit_lost);
59247 + atomic_inc_unchecked(&audit_lost);
59248
59249 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
59250
59251 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
59252 printk(KERN_WARNING
59253 "audit: audit_lost=%d audit_rate_limit=%d "
59254 "audit_backlog_limit=%d\n",
59255 - atomic_read(&audit_lost),
59256 + atomic_read_unchecked(&audit_lost),
59257 audit_rate_limit,
59258 audit_backlog_limit);
59259 audit_panic(message);
59260 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
59261 status_set.pid = audit_pid;
59262 status_set.rate_limit = audit_rate_limit;
59263 status_set.backlog_limit = audit_backlog_limit;
59264 - status_set.lost = atomic_read(&audit_lost);
59265 + status_set.lost = atomic_read_unchecked(&audit_lost);
59266 status_set.backlog = skb_queue_len(&audit_skb_queue);
59267 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
59268 &status_set, sizeof(status_set));
59269 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
59270 spin_unlock_irq(&tsk->sighand->siglock);
59271 }
59272 read_unlock(&tasklist_lock);
59273 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
59274 - &s, sizeof(s));
59275 +
59276 + if (!err)
59277 + audit_send_reply(NETLINK_CB(skb).pid, seq,
59278 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
59279 break;
59280 }
59281 case AUDIT_TTY_SET: {
59282 diff -urNp linux-2.6.32.42/kernel/auditsc.c linux-2.6.32.42/kernel/auditsc.c
59283 --- linux-2.6.32.42/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
59284 +++ linux-2.6.32.42/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
59285 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
59286 }
59287
59288 /* global counter which is incremented every time something logs in */
59289 -static atomic_t session_id = ATOMIC_INIT(0);
59290 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
59291
59292 /**
59293 * audit_set_loginuid - set a task's audit_context loginuid
59294 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
59295 */
59296 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
59297 {
59298 - unsigned int sessionid = atomic_inc_return(&session_id);
59299 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
59300 struct audit_context *context = task->audit_context;
59301
59302 if (context && context->in_syscall) {
59303 diff -urNp linux-2.6.32.42/kernel/capability.c linux-2.6.32.42/kernel/capability.c
59304 --- linux-2.6.32.42/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
59305 +++ linux-2.6.32.42/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
59306 @@ -305,10 +305,26 @@ int capable(int cap)
59307 BUG();
59308 }
59309
59310 - if (security_capable(cap) == 0) {
59311 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
59312 current->flags |= PF_SUPERPRIV;
59313 return 1;
59314 }
59315 return 0;
59316 }
59317 +
59318 +int capable_nolog(int cap)
59319 +{
59320 + if (unlikely(!cap_valid(cap))) {
59321 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
59322 + BUG();
59323 + }
59324 +
59325 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
59326 + current->flags |= PF_SUPERPRIV;
59327 + return 1;
59328 + }
59329 + return 0;
59330 +}
59331 +
59332 EXPORT_SYMBOL(capable);
59333 +EXPORT_SYMBOL(capable_nolog);
59334 diff -urNp linux-2.6.32.42/kernel/cgroup.c linux-2.6.32.42/kernel/cgroup.c
59335 --- linux-2.6.32.42/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
59336 +++ linux-2.6.32.42/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
59337 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
59338 struct hlist_head *hhead;
59339 struct cg_cgroup_link *link;
59340
59341 + pax_track_stack();
59342 +
59343 /* First see if we already have a cgroup group that matches
59344 * the desired set */
59345 read_lock(&css_set_lock);
59346 diff -urNp linux-2.6.32.42/kernel/configs.c linux-2.6.32.42/kernel/configs.c
59347 --- linux-2.6.32.42/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
59348 +++ linux-2.6.32.42/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
59349 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
59350 struct proc_dir_entry *entry;
59351
59352 /* create the current config file */
59353 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59354 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
59355 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
59356 + &ikconfig_file_ops);
59357 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59358 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
59359 + &ikconfig_file_ops);
59360 +#endif
59361 +#else
59362 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
59363 &ikconfig_file_ops);
59364 +#endif
59365 +
59366 if (!entry)
59367 return -ENOMEM;
59368
59369 diff -urNp linux-2.6.32.42/kernel/cpu.c linux-2.6.32.42/kernel/cpu.c
59370 --- linux-2.6.32.42/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
59371 +++ linux-2.6.32.42/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
59372 @@ -19,7 +19,7 @@
59373 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
59374 static DEFINE_MUTEX(cpu_add_remove_lock);
59375
59376 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
59377 +static RAW_NOTIFIER_HEAD(cpu_chain);
59378
59379 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
59380 * Should always be manipulated under cpu_add_remove_lock
59381 diff -urNp linux-2.6.32.42/kernel/cred.c linux-2.6.32.42/kernel/cred.c
59382 --- linux-2.6.32.42/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
59383 +++ linux-2.6.32.42/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
59384 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
59385 */
59386 void __put_cred(struct cred *cred)
59387 {
59388 + pax_track_stack();
59389 +
59390 kdebug("__put_cred(%p{%d,%d})", cred,
59391 atomic_read(&cred->usage),
59392 read_cred_subscribers(cred));
59393 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
59394 {
59395 struct cred *cred;
59396
59397 + pax_track_stack();
59398 +
59399 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
59400 atomic_read(&tsk->cred->usage),
59401 read_cred_subscribers(tsk->cred));
59402 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
59403 {
59404 const struct cred *cred;
59405
59406 + pax_track_stack();
59407 +
59408 rcu_read_lock();
59409
59410 do {
59411 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
59412 {
59413 struct cred *new;
59414
59415 + pax_track_stack();
59416 +
59417 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
59418 if (!new)
59419 return NULL;
59420 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
59421 const struct cred *old;
59422 struct cred *new;
59423
59424 + pax_track_stack();
59425 +
59426 validate_process_creds();
59427
59428 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59429 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
59430 struct thread_group_cred *tgcred = NULL;
59431 struct cred *new;
59432
59433 + pax_track_stack();
59434 +
59435 #ifdef CONFIG_KEYS
59436 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59437 if (!tgcred)
59438 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
59439 struct cred *new;
59440 int ret;
59441
59442 + pax_track_stack();
59443 +
59444 mutex_init(&p->cred_guard_mutex);
59445
59446 if (
59447 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
59448 struct task_struct *task = current;
59449 const struct cred *old = task->real_cred;
59450
59451 + pax_track_stack();
59452 +
59453 kdebug("commit_creds(%p{%d,%d})", new,
59454 atomic_read(&new->usage),
59455 read_cred_subscribers(new));
59456 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
59457
59458 get_cred(new); /* we will require a ref for the subj creds too */
59459
59460 + gr_set_role_label(task, new->uid, new->gid);
59461 +
59462 /* dumpability changes */
59463 if (old->euid != new->euid ||
59464 old->egid != new->egid ||
59465 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
59466 */
59467 void abort_creds(struct cred *new)
59468 {
59469 + pax_track_stack();
59470 +
59471 kdebug("abort_creds(%p{%d,%d})", new,
59472 atomic_read(&new->usage),
59473 read_cred_subscribers(new));
59474 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
59475 {
59476 const struct cred *old = current->cred;
59477
59478 + pax_track_stack();
59479 +
59480 kdebug("override_creds(%p{%d,%d})", new,
59481 atomic_read(&new->usage),
59482 read_cred_subscribers(new));
59483 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
59484 {
59485 const struct cred *override = current->cred;
59486
59487 + pax_track_stack();
59488 +
59489 kdebug("revert_creds(%p{%d,%d})", old,
59490 atomic_read(&old->usage),
59491 read_cred_subscribers(old));
59492 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
59493 const struct cred *old;
59494 struct cred *new;
59495
59496 + pax_track_stack();
59497 +
59498 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59499 if (!new)
59500 return NULL;
59501 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59502 */
59503 int set_security_override(struct cred *new, u32 secid)
59504 {
59505 + pax_track_stack();
59506 +
59507 return security_kernel_act_as(new, secid);
59508 }
59509 EXPORT_SYMBOL(set_security_override);
59510 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
59511 u32 secid;
59512 int ret;
59513
59514 + pax_track_stack();
59515 +
59516 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59517 if (ret < 0)
59518 return ret;
59519 diff -urNp linux-2.6.32.42/kernel/exit.c linux-2.6.32.42/kernel/exit.c
59520 --- linux-2.6.32.42/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
59521 +++ linux-2.6.32.42/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
59522 @@ -55,6 +55,10 @@
59523 #include <asm/pgtable.h>
59524 #include <asm/mmu_context.h>
59525
59526 +#ifdef CONFIG_GRKERNSEC
59527 +extern rwlock_t grsec_exec_file_lock;
59528 +#endif
59529 +
59530 static void exit_mm(struct task_struct * tsk);
59531
59532 static void __unhash_process(struct task_struct *p)
59533 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
59534 struct task_struct *leader;
59535 int zap_leader;
59536 repeat:
59537 + gr_del_task_from_ip_table(p);
59538 +
59539 tracehook_prepare_release_task(p);
59540 /* don't need to get the RCU readlock here - the process is dead and
59541 * can't be modifying its own credentials */
59542 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
59543 {
59544 write_lock_irq(&tasklist_lock);
59545
59546 +#ifdef CONFIG_GRKERNSEC
59547 + write_lock(&grsec_exec_file_lock);
59548 + if (current->exec_file) {
59549 + fput(current->exec_file);
59550 + current->exec_file = NULL;
59551 + }
59552 + write_unlock(&grsec_exec_file_lock);
59553 +#endif
59554 +
59555 ptrace_unlink(current);
59556 /* Reparent to init */
59557 current->real_parent = current->parent = kthreadd_task;
59558 list_move_tail(&current->sibling, &current->real_parent->children);
59559
59560 + gr_set_kernel_label(current);
59561 +
59562 /* Set the exit signal to SIGCHLD so we signal init on exit */
59563 current->exit_signal = SIGCHLD;
59564
59565 @@ -397,7 +414,7 @@ int allow_signal(int sig)
59566 * know it'll be handled, so that they don't get converted to
59567 * SIGKILL or just silently dropped.
59568 */
59569 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59570 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59571 recalc_sigpending();
59572 spin_unlock_irq(&current->sighand->siglock);
59573 return 0;
59574 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
59575 vsnprintf(current->comm, sizeof(current->comm), name, args);
59576 va_end(args);
59577
59578 +#ifdef CONFIG_GRKERNSEC
59579 + write_lock(&grsec_exec_file_lock);
59580 + if (current->exec_file) {
59581 + fput(current->exec_file);
59582 + current->exec_file = NULL;
59583 + }
59584 + write_unlock(&grsec_exec_file_lock);
59585 +#endif
59586 +
59587 + gr_set_kernel_label(current);
59588 +
59589 /*
59590 * If we were started as result of loading a module, close all of the
59591 * user space pages. We don't need them, and if we didn't close them
59592 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
59593 struct task_struct *tsk = current;
59594 int group_dead;
59595
59596 - profile_task_exit(tsk);
59597 -
59598 - WARN_ON(atomic_read(&tsk->fs_excl));
59599 -
59600 + /*
59601 + * Check this first since set_fs() below depends on
59602 + * current_thread_info(), which we better not access when we're in
59603 + * interrupt context. Other than that, we want to do the set_fs()
59604 + * as early as possible.
59605 + */
59606 if (unlikely(in_interrupt()))
59607 panic("Aiee, killing interrupt handler!");
59608 - if (unlikely(!tsk->pid))
59609 - panic("Attempted to kill the idle task!");
59610
59611 /*
59612 - * If do_exit is called because this processes oopsed, it's possible
59613 + * If do_exit is called because this processes Oops'ed, it's possible
59614 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
59615 * continuing. Amongst other possible reasons, this is to prevent
59616 * mm_release()->clear_child_tid() from writing to a user-controlled
59617 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
59618 */
59619 set_fs(USER_DS);
59620
59621 + profile_task_exit(tsk);
59622 +
59623 + WARN_ON(atomic_read(&tsk->fs_excl));
59624 +
59625 + if (unlikely(!tsk->pid))
59626 + panic("Attempted to kill the idle task!");
59627 +
59628 tracehook_report_exit(&code);
59629
59630 validate_creds_for_do_exit(tsk);
59631 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59632 tsk->exit_code = code;
59633 taskstats_exit(tsk, group_dead);
59634
59635 + gr_acl_handle_psacct(tsk, code);
59636 + gr_acl_handle_exit();
59637 +
59638 exit_mm(tsk);
59639
59640 if (group_dead)
59641 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59642
59643 if (unlikely(wo->wo_flags & WNOWAIT)) {
59644 int exit_code = p->exit_code;
59645 - int why, status;
59646 + int why;
59647
59648 get_task_struct(p);
59649 read_unlock(&tasklist_lock);
59650 diff -urNp linux-2.6.32.42/kernel/fork.c linux-2.6.32.42/kernel/fork.c
59651 --- linux-2.6.32.42/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59652 +++ linux-2.6.32.42/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59653 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59654 *stackend = STACK_END_MAGIC; /* for overflow detection */
59655
59656 #ifdef CONFIG_CC_STACKPROTECTOR
59657 - tsk->stack_canary = get_random_int();
59658 + tsk->stack_canary = pax_get_random_long();
59659 #endif
59660
59661 /* One for us, one for whoever does the "release_task()" (usually parent) */
59662 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59663 mm->locked_vm = 0;
59664 mm->mmap = NULL;
59665 mm->mmap_cache = NULL;
59666 - mm->free_area_cache = oldmm->mmap_base;
59667 - mm->cached_hole_size = ~0UL;
59668 + mm->free_area_cache = oldmm->free_area_cache;
59669 + mm->cached_hole_size = oldmm->cached_hole_size;
59670 mm->map_count = 0;
59671 cpumask_clear(mm_cpumask(mm));
59672 mm->mm_rb = RB_ROOT;
59673 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59674 tmp->vm_flags &= ~VM_LOCKED;
59675 tmp->vm_mm = mm;
59676 tmp->vm_next = tmp->vm_prev = NULL;
59677 + tmp->vm_mirror = NULL;
59678 anon_vma_link(tmp);
59679 file = tmp->vm_file;
59680 if (file) {
59681 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59682 if (retval)
59683 goto out;
59684 }
59685 +
59686 +#ifdef CONFIG_PAX_SEGMEXEC
59687 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59688 + struct vm_area_struct *mpnt_m;
59689 +
59690 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59691 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59692 +
59693 + if (!mpnt->vm_mirror)
59694 + continue;
59695 +
59696 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59697 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59698 + mpnt->vm_mirror = mpnt_m;
59699 + } else {
59700 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59701 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59702 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59703 + mpnt->vm_mirror->vm_mirror = mpnt;
59704 + }
59705 + }
59706 + BUG_ON(mpnt_m);
59707 + }
59708 +#endif
59709 +
59710 /* a new mm has just been created */
59711 arch_dup_mmap(oldmm, mm);
59712 retval = 0;
59713 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59714 write_unlock(&fs->lock);
59715 return -EAGAIN;
59716 }
59717 - fs->users++;
59718 + atomic_inc(&fs->users);
59719 write_unlock(&fs->lock);
59720 return 0;
59721 }
59722 tsk->fs = copy_fs_struct(fs);
59723 if (!tsk->fs)
59724 return -ENOMEM;
59725 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59726 return 0;
59727 }
59728
59729 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59730 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59731 #endif
59732 retval = -EAGAIN;
59733 +
59734 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59735 +
59736 if (atomic_read(&p->real_cred->user->processes) >=
59737 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59738 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59739 - p->real_cred->user != INIT_USER)
59740 + if (p->real_cred->user != INIT_USER &&
59741 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59742 goto bad_fork_free;
59743 }
59744
59745 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59746 goto bad_fork_free_pid;
59747 }
59748
59749 + gr_copy_label(p);
59750 +
59751 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59752 /*
59753 * Clear TID on mm_release()?
59754 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59755 bad_fork_free:
59756 free_task(p);
59757 fork_out:
59758 + gr_log_forkfail(retval);
59759 +
59760 return ERR_PTR(retval);
59761 }
59762
59763 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59764 if (clone_flags & CLONE_PARENT_SETTID)
59765 put_user(nr, parent_tidptr);
59766
59767 + gr_handle_brute_check();
59768 +
59769 if (clone_flags & CLONE_VFORK) {
59770 p->vfork_done = &vfork;
59771 init_completion(&vfork);
59772 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59773 return 0;
59774
59775 /* don't need lock here; in the worst case we'll do useless copy */
59776 - if (fs->users == 1)
59777 + if (atomic_read(&fs->users) == 1)
59778 return 0;
59779
59780 *new_fsp = copy_fs_struct(fs);
59781 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59782 fs = current->fs;
59783 write_lock(&fs->lock);
59784 current->fs = new_fs;
59785 - if (--fs->users)
59786 + gr_set_chroot_entries(current, &current->fs->root);
59787 + if (atomic_dec_return(&fs->users))
59788 new_fs = NULL;
59789 else
59790 new_fs = fs;
59791 diff -urNp linux-2.6.32.42/kernel/futex.c linux-2.6.32.42/kernel/futex.c
59792 --- linux-2.6.32.42/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59793 +++ linux-2.6.32.42/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59794 @@ -54,6 +54,7 @@
59795 #include <linux/mount.h>
59796 #include <linux/pagemap.h>
59797 #include <linux/syscalls.h>
59798 +#include <linux/ptrace.h>
59799 #include <linux/signal.h>
59800 #include <linux/module.h>
59801 #include <linux/magic.h>
59802 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59803 struct page *page;
59804 int err;
59805
59806 +#ifdef CONFIG_PAX_SEGMEXEC
59807 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59808 + return -EFAULT;
59809 +#endif
59810 +
59811 /*
59812 * The futex address must be "naturally" aligned.
59813 */
59814 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59815 struct futex_q q;
59816 int ret;
59817
59818 + pax_track_stack();
59819 +
59820 if (!bitset)
59821 return -EINVAL;
59822
59823 @@ -1841,7 +1849,7 @@ retry:
59824
59825 restart = &current_thread_info()->restart_block;
59826 restart->fn = futex_wait_restart;
59827 - restart->futex.uaddr = (u32 *)uaddr;
59828 + restart->futex.uaddr = uaddr;
59829 restart->futex.val = val;
59830 restart->futex.time = abs_time->tv64;
59831 restart->futex.bitset = bitset;
59832 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59833 struct futex_q q;
59834 int res, ret;
59835
59836 + pax_track_stack();
59837 +
59838 if (!bitset)
59839 return -EINVAL;
59840
59841 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59842 {
59843 struct robust_list_head __user *head;
59844 unsigned long ret;
59845 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59846 const struct cred *cred = current_cred(), *pcred;
59847 +#endif
59848
59849 if (!futex_cmpxchg_enabled)
59850 return -ENOSYS;
59851 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59852 if (!p)
59853 goto err_unlock;
59854 ret = -EPERM;
59855 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59856 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59857 + goto err_unlock;
59858 +#else
59859 pcred = __task_cred(p);
59860 if (cred->euid != pcred->euid &&
59861 cred->euid != pcred->uid &&
59862 !capable(CAP_SYS_PTRACE))
59863 goto err_unlock;
59864 +#endif
59865 head = p->robust_list;
59866 rcu_read_unlock();
59867 }
59868 @@ -2459,7 +2476,7 @@ retry:
59869 */
59870 static inline int fetch_robust_entry(struct robust_list __user **entry,
59871 struct robust_list __user * __user *head,
59872 - int *pi)
59873 + unsigned int *pi)
59874 {
59875 unsigned long uentry;
59876
59877 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59878 {
59879 u32 curval;
59880 int i;
59881 + mm_segment_t oldfs;
59882
59883 /*
59884 * This will fail and we want it. Some arch implementations do
59885 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59886 * implementation, the non functional ones will return
59887 * -ENOSYS.
59888 */
59889 + oldfs = get_fs();
59890 + set_fs(USER_DS);
59891 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59892 + set_fs(oldfs);
59893 if (curval == -EFAULT)
59894 futex_cmpxchg_enabled = 1;
59895
59896 diff -urNp linux-2.6.32.42/kernel/futex_compat.c linux-2.6.32.42/kernel/futex_compat.c
59897 --- linux-2.6.32.42/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59898 +++ linux-2.6.32.42/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59899 @@ -10,6 +10,7 @@
59900 #include <linux/compat.h>
59901 #include <linux/nsproxy.h>
59902 #include <linux/futex.h>
59903 +#include <linux/ptrace.h>
59904
59905 #include <asm/uaccess.h>
59906
59907 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59908 {
59909 struct compat_robust_list_head __user *head;
59910 unsigned long ret;
59911 - const struct cred *cred = current_cred(), *pcred;
59912 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59913 + const struct cred *cred = current_cred();
59914 + const struct cred *pcred;
59915 +#endif
59916
59917 if (!futex_cmpxchg_enabled)
59918 return -ENOSYS;
59919 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59920 if (!p)
59921 goto err_unlock;
59922 ret = -EPERM;
59923 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59924 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59925 + goto err_unlock;
59926 +#else
59927 pcred = __task_cred(p);
59928 if (cred->euid != pcred->euid &&
59929 cred->euid != pcred->uid &&
59930 !capable(CAP_SYS_PTRACE))
59931 goto err_unlock;
59932 +#endif
59933 head = p->compat_robust_list;
59934 read_unlock(&tasklist_lock);
59935 }
59936 diff -urNp linux-2.6.32.42/kernel/gcov/base.c linux-2.6.32.42/kernel/gcov/base.c
59937 --- linux-2.6.32.42/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59938 +++ linux-2.6.32.42/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59939 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59940 }
59941
59942 #ifdef CONFIG_MODULES
59943 -static inline int within(void *addr, void *start, unsigned long size)
59944 -{
59945 - return ((addr >= start) && (addr < start + size));
59946 -}
59947 -
59948 /* Update list and generate events when modules are unloaded. */
59949 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59950 void *data)
59951 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59952 prev = NULL;
59953 /* Remove entries located in module from linked list. */
59954 for (info = gcov_info_head; info; info = info->next) {
59955 - if (within(info, mod->module_core, mod->core_size)) {
59956 + if (within_module_core_rw((unsigned long)info, mod)) {
59957 if (prev)
59958 prev->next = info->next;
59959 else
59960 diff -urNp linux-2.6.32.42/kernel/hrtimer.c linux-2.6.32.42/kernel/hrtimer.c
59961 --- linux-2.6.32.42/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59962 +++ linux-2.6.32.42/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59963 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59964 local_irq_restore(flags);
59965 }
59966
59967 -static void run_hrtimer_softirq(struct softirq_action *h)
59968 +static void run_hrtimer_softirq(void)
59969 {
59970 hrtimer_peek_ahead_timers();
59971 }
59972 diff -urNp linux-2.6.32.42/kernel/kallsyms.c linux-2.6.32.42/kernel/kallsyms.c
59973 --- linux-2.6.32.42/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59974 +++ linux-2.6.32.42/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59975 @@ -11,6 +11,9 @@
59976 * Changed the compression method from stem compression to "table lookup"
59977 * compression (see scripts/kallsyms.c for a more complete description)
59978 */
59979 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59980 +#define __INCLUDED_BY_HIDESYM 1
59981 +#endif
59982 #include <linux/kallsyms.h>
59983 #include <linux/module.h>
59984 #include <linux/init.h>
59985 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59986
59987 static inline int is_kernel_inittext(unsigned long addr)
59988 {
59989 + if (system_state != SYSTEM_BOOTING)
59990 + return 0;
59991 +
59992 if (addr >= (unsigned long)_sinittext
59993 && addr <= (unsigned long)_einittext)
59994 return 1;
59995 return 0;
59996 }
59997
59998 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59999 +#ifdef CONFIG_MODULES
60000 +static inline int is_module_text(unsigned long addr)
60001 +{
60002 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
60003 + return 1;
60004 +
60005 + addr = ktla_ktva(addr);
60006 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
60007 +}
60008 +#else
60009 +static inline int is_module_text(unsigned long addr)
60010 +{
60011 + return 0;
60012 +}
60013 +#endif
60014 +#endif
60015 +
60016 static inline int is_kernel_text(unsigned long addr)
60017 {
60018 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
60019 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
60020
60021 static inline int is_kernel(unsigned long addr)
60022 {
60023 +
60024 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60025 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
60026 + return 1;
60027 +
60028 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
60029 +#else
60030 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
60031 +#endif
60032 +
60033 return 1;
60034 return in_gate_area_no_task(addr);
60035 }
60036
60037 static int is_ksym_addr(unsigned long addr)
60038 {
60039 +
60040 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60041 + if (is_module_text(addr))
60042 + return 0;
60043 +#endif
60044 +
60045 if (all_var)
60046 return is_kernel(addr);
60047
60048 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
60049
60050 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
60051 {
60052 - iter->name[0] = '\0';
60053 iter->nameoff = get_symbol_offset(new_pos);
60054 iter->pos = new_pos;
60055 }
60056 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
60057 {
60058 struct kallsym_iter *iter = m->private;
60059
60060 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60061 + if (current_uid())
60062 + return 0;
60063 +#endif
60064 +
60065 /* Some debugging symbols have no name. Ignore them. */
60066 if (!iter->name[0])
60067 return 0;
60068 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
60069 struct kallsym_iter *iter;
60070 int ret;
60071
60072 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
60073 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
60074 if (!iter)
60075 return -ENOMEM;
60076 reset_iter(iter, 0);
60077 diff -urNp linux-2.6.32.42/kernel/kgdb.c linux-2.6.32.42/kernel/kgdb.c
60078 --- linux-2.6.32.42/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
60079 +++ linux-2.6.32.42/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
60080 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
60081 /* Guard for recursive entry */
60082 static int exception_level;
60083
60084 -static struct kgdb_io *kgdb_io_ops;
60085 +static const struct kgdb_io *kgdb_io_ops;
60086 static DEFINE_SPINLOCK(kgdb_registration_lock);
60087
60088 /* kgdb console driver is loaded */
60089 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
60090 */
60091 static atomic_t passive_cpu_wait[NR_CPUS];
60092 static atomic_t cpu_in_kgdb[NR_CPUS];
60093 -atomic_t kgdb_setting_breakpoint;
60094 +atomic_unchecked_t kgdb_setting_breakpoint;
60095
60096 struct task_struct *kgdb_usethread;
60097 struct task_struct *kgdb_contthread;
60098 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
60099 sizeof(unsigned long)];
60100
60101 /* to keep track of the CPU which is doing the single stepping*/
60102 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60103 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60104
60105 /*
60106 * If you are debugging a problem where roundup (the collection of
60107 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
60108 return 0;
60109 if (kgdb_connected)
60110 return 1;
60111 - if (atomic_read(&kgdb_setting_breakpoint))
60112 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
60113 return 1;
60114 if (print_wait)
60115 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
60116 @@ -1426,8 +1426,8 @@ acquirelock:
60117 * instance of the exception handler wanted to come into the
60118 * debugger on a different CPU via a single step
60119 */
60120 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
60121 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
60122 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
60123 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
60124
60125 atomic_set(&kgdb_active, -1);
60126 touch_softlockup_watchdog();
60127 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
60128 *
60129 * Register it with the KGDB core.
60130 */
60131 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
60132 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
60133 {
60134 int err;
60135
60136 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
60137 *
60138 * Unregister it with the KGDB core.
60139 */
60140 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
60141 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
60142 {
60143 BUG_ON(kgdb_connected);
60144
60145 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
60146 */
60147 void kgdb_breakpoint(void)
60148 {
60149 - atomic_set(&kgdb_setting_breakpoint, 1);
60150 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
60151 wmb(); /* Sync point before breakpoint */
60152 arch_kgdb_breakpoint();
60153 wmb(); /* Sync point after breakpoint */
60154 - atomic_set(&kgdb_setting_breakpoint, 0);
60155 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
60156 }
60157 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
60158
60159 diff -urNp linux-2.6.32.42/kernel/kmod.c linux-2.6.32.42/kernel/kmod.c
60160 --- linux-2.6.32.42/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
60161 +++ linux-2.6.32.42/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
60162 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
60163 * If module auto-loading support is disabled then this function
60164 * becomes a no-operation.
60165 */
60166 -int __request_module(bool wait, const char *fmt, ...)
60167 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
60168 {
60169 - va_list args;
60170 char module_name[MODULE_NAME_LEN];
60171 unsigned int max_modprobes;
60172 int ret;
60173 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
60174 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
60175 static char *envp[] = { "HOME=/",
60176 "TERM=linux",
60177 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
60178 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
60179 if (ret)
60180 return ret;
60181
60182 - va_start(args, fmt);
60183 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
60184 - va_end(args);
60185 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
60186 if (ret >= MODULE_NAME_LEN)
60187 return -ENAMETOOLONG;
60188
60189 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60190 + if (!current_uid()) {
60191 + /* hack to workaround consolekit/udisks stupidity */
60192 + read_lock(&tasklist_lock);
60193 + if (!strcmp(current->comm, "mount") &&
60194 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
60195 + read_unlock(&tasklist_lock);
60196 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
60197 + return -EPERM;
60198 + }
60199 + read_unlock(&tasklist_lock);
60200 + }
60201 +#endif
60202 +
60203 /* If modprobe needs a service that is in a module, we get a recursive
60204 * loop. Limit the number of running kmod threads to max_threads/2 or
60205 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60206 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
60207 atomic_dec(&kmod_concurrent);
60208 return ret;
60209 }
60210 +
60211 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60212 +{
60213 + va_list args;
60214 + int ret;
60215 +
60216 + va_start(args, fmt);
60217 + ret = ____request_module(wait, module_param, fmt, args);
60218 + va_end(args);
60219 +
60220 + return ret;
60221 +}
60222 +
60223 +int __request_module(bool wait, const char *fmt, ...)
60224 +{
60225 + va_list args;
60226 + int ret;
60227 +
60228 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60229 + if (current_uid()) {
60230 + char module_param[MODULE_NAME_LEN];
60231 +
60232 + memset(module_param, 0, sizeof(module_param));
60233 +
60234 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60235 +
60236 + va_start(args, fmt);
60237 + ret = ____request_module(wait, module_param, fmt, args);
60238 + va_end(args);
60239 +
60240 + return ret;
60241 + }
60242 +#endif
60243 +
60244 + va_start(args, fmt);
60245 + ret = ____request_module(wait, NULL, fmt, args);
60246 + va_end(args);
60247 +
60248 + return ret;
60249 +}
60250 +
60251 +
60252 EXPORT_SYMBOL(__request_module);
60253 #endif /* CONFIG_MODULES */
60254
60255 diff -urNp linux-2.6.32.42/kernel/kprobes.c linux-2.6.32.42/kernel/kprobes.c
60256 --- linux-2.6.32.42/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
60257 +++ linux-2.6.32.42/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
60258 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
60259 * kernel image and loaded module images reside. This is required
60260 * so x86_64 can correctly handle the %rip-relative fixups.
60261 */
60262 - kip->insns = module_alloc(PAGE_SIZE);
60263 + kip->insns = module_alloc_exec(PAGE_SIZE);
60264 if (!kip->insns) {
60265 kfree(kip);
60266 return NULL;
60267 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
60268 */
60269 if (!list_is_singular(&kprobe_insn_pages)) {
60270 list_del(&kip->list);
60271 - module_free(NULL, kip->insns);
60272 + module_free_exec(NULL, kip->insns);
60273 kfree(kip);
60274 }
60275 return 1;
60276 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
60277 {
60278 int i, err = 0;
60279 unsigned long offset = 0, size = 0;
60280 - char *modname, namebuf[128];
60281 + char *modname, namebuf[KSYM_NAME_LEN];
60282 const char *symbol_name;
60283 void *addr;
60284 struct kprobe_blackpoint *kb;
60285 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
60286 const char *sym = NULL;
60287 unsigned int i = *(loff_t *) v;
60288 unsigned long offset = 0;
60289 - char *modname, namebuf[128];
60290 + char *modname, namebuf[KSYM_NAME_LEN];
60291
60292 head = &kprobe_table[i];
60293 preempt_disable();
60294 diff -urNp linux-2.6.32.42/kernel/lockdep.c linux-2.6.32.42/kernel/lockdep.c
60295 --- linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
60296 +++ linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
60297 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
60298 /*
60299 * Various lockdep statistics:
60300 */
60301 -atomic_t chain_lookup_hits;
60302 -atomic_t chain_lookup_misses;
60303 -atomic_t hardirqs_on_events;
60304 -atomic_t hardirqs_off_events;
60305 -atomic_t redundant_hardirqs_on;
60306 -atomic_t redundant_hardirqs_off;
60307 -atomic_t softirqs_on_events;
60308 -atomic_t softirqs_off_events;
60309 -atomic_t redundant_softirqs_on;
60310 -atomic_t redundant_softirqs_off;
60311 -atomic_t nr_unused_locks;
60312 -atomic_t nr_cyclic_checks;
60313 -atomic_t nr_find_usage_forwards_checks;
60314 -atomic_t nr_find_usage_backwards_checks;
60315 +atomic_unchecked_t chain_lookup_hits;
60316 +atomic_unchecked_t chain_lookup_misses;
60317 +atomic_unchecked_t hardirqs_on_events;
60318 +atomic_unchecked_t hardirqs_off_events;
60319 +atomic_unchecked_t redundant_hardirqs_on;
60320 +atomic_unchecked_t redundant_hardirqs_off;
60321 +atomic_unchecked_t softirqs_on_events;
60322 +atomic_unchecked_t softirqs_off_events;
60323 +atomic_unchecked_t redundant_softirqs_on;
60324 +atomic_unchecked_t redundant_softirqs_off;
60325 +atomic_unchecked_t nr_unused_locks;
60326 +atomic_unchecked_t nr_cyclic_checks;
60327 +atomic_unchecked_t nr_find_usage_forwards_checks;
60328 +atomic_unchecked_t nr_find_usage_backwards_checks;
60329 #endif
60330
60331 /*
60332 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
60333 int i;
60334 #endif
60335
60336 +#ifdef CONFIG_PAX_KERNEXEC
60337 + start = ktla_ktva(start);
60338 +#endif
60339 +
60340 /*
60341 * static variable?
60342 */
60343 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
60344 */
60345 for_each_possible_cpu(i) {
60346 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
60347 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
60348 - + per_cpu_offset(i);
60349 + end = start + PERCPU_ENOUGH_ROOM;
60350
60351 if ((addr >= start) && (addr < end))
60352 return 1;
60353 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
60354 if (!static_obj(lock->key)) {
60355 debug_locks_off();
60356 printk("INFO: trying to register non-static key.\n");
60357 + printk("lock:%pS key:%pS.\n", lock, lock->key);
60358 printk("the code is fine but needs lockdep annotation.\n");
60359 printk("turning off the locking correctness validator.\n");
60360 dump_stack();
60361 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
60362 if (!class)
60363 return 0;
60364 }
60365 - debug_atomic_inc((atomic_t *)&class->ops);
60366 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
60367 if (very_verbose(class)) {
60368 printk("\nacquire class [%p] %s", class->key, class->name);
60369 if (class->name_version > 1)
60370 diff -urNp linux-2.6.32.42/kernel/lockdep_internals.h linux-2.6.32.42/kernel/lockdep_internals.h
60371 --- linux-2.6.32.42/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
60372 +++ linux-2.6.32.42/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
60373 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
60374 /*
60375 * Various lockdep statistics:
60376 */
60377 -extern atomic_t chain_lookup_hits;
60378 -extern atomic_t chain_lookup_misses;
60379 -extern atomic_t hardirqs_on_events;
60380 -extern atomic_t hardirqs_off_events;
60381 -extern atomic_t redundant_hardirqs_on;
60382 -extern atomic_t redundant_hardirqs_off;
60383 -extern atomic_t softirqs_on_events;
60384 -extern atomic_t softirqs_off_events;
60385 -extern atomic_t redundant_softirqs_on;
60386 -extern atomic_t redundant_softirqs_off;
60387 -extern atomic_t nr_unused_locks;
60388 -extern atomic_t nr_cyclic_checks;
60389 -extern atomic_t nr_cyclic_check_recursions;
60390 -extern atomic_t nr_find_usage_forwards_checks;
60391 -extern atomic_t nr_find_usage_forwards_recursions;
60392 -extern atomic_t nr_find_usage_backwards_checks;
60393 -extern atomic_t nr_find_usage_backwards_recursions;
60394 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
60395 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
60396 -# define debug_atomic_read(ptr) atomic_read(ptr)
60397 +extern atomic_unchecked_t chain_lookup_hits;
60398 +extern atomic_unchecked_t chain_lookup_misses;
60399 +extern atomic_unchecked_t hardirqs_on_events;
60400 +extern atomic_unchecked_t hardirqs_off_events;
60401 +extern atomic_unchecked_t redundant_hardirqs_on;
60402 +extern atomic_unchecked_t redundant_hardirqs_off;
60403 +extern atomic_unchecked_t softirqs_on_events;
60404 +extern atomic_unchecked_t softirqs_off_events;
60405 +extern atomic_unchecked_t redundant_softirqs_on;
60406 +extern atomic_unchecked_t redundant_softirqs_off;
60407 +extern atomic_unchecked_t nr_unused_locks;
60408 +extern atomic_unchecked_t nr_cyclic_checks;
60409 +extern atomic_unchecked_t nr_cyclic_check_recursions;
60410 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
60411 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
60412 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
60413 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
60414 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
60415 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
60416 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
60417 #else
60418 # define debug_atomic_inc(ptr) do { } while (0)
60419 # define debug_atomic_dec(ptr) do { } while (0)
60420 diff -urNp linux-2.6.32.42/kernel/lockdep_proc.c linux-2.6.32.42/kernel/lockdep_proc.c
60421 --- linux-2.6.32.42/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
60422 +++ linux-2.6.32.42/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
60423 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60424
60425 static void print_name(struct seq_file *m, struct lock_class *class)
60426 {
60427 - char str[128];
60428 + char str[KSYM_NAME_LEN];
60429 const char *name = class->name;
60430
60431 if (!name) {
60432 diff -urNp linux-2.6.32.42/kernel/module.c linux-2.6.32.42/kernel/module.c
60433 --- linux-2.6.32.42/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
60434 +++ linux-2.6.32.42/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
60435 @@ -55,6 +55,7 @@
60436 #include <linux/async.h>
60437 #include <linux/percpu.h>
60438 #include <linux/kmemleak.h>
60439 +#include <linux/grsecurity.h>
60440
60441 #define CREATE_TRACE_POINTS
60442 #include <trace/events/module.h>
60443 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
60444 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
60445
60446 /* Bounds of module allocation, for speeding __module_address */
60447 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60448 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60449 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60450
60451 int register_module_notifier(struct notifier_block * nb)
60452 {
60453 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
60454 return true;
60455
60456 list_for_each_entry_rcu(mod, &modules, list) {
60457 - struct symsearch arr[] = {
60458 + struct symsearch modarr[] = {
60459 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60460 NOT_GPL_ONLY, false },
60461 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60462 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
60463 #endif
60464 };
60465
60466 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60467 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60468 return true;
60469 }
60470 return false;
60471 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
60472 void *ptr;
60473 int cpu;
60474
60475 - if (align > PAGE_SIZE) {
60476 + if (align-1 >= PAGE_SIZE) {
60477 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60478 name, align, PAGE_SIZE);
60479 align = PAGE_SIZE;
60480 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
60481 * /sys/module/foo/sections stuff
60482 * J. Corbet <corbet@lwn.net>
60483 */
60484 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
60485 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60486
60487 static inline bool sect_empty(const Elf_Shdr *sect)
60488 {
60489 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
60490 destroy_params(mod->kp, mod->num_kp);
60491
60492 /* This may be NULL, but that's OK */
60493 - module_free(mod, mod->module_init);
60494 + module_free(mod, mod->module_init_rw);
60495 + module_free_exec(mod, mod->module_init_rx);
60496 kfree(mod->args);
60497 if (mod->percpu)
60498 percpu_modfree(mod->percpu);
60499 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
60500 percpu_modfree(mod->refptr);
60501 #endif
60502 /* Free lock-classes: */
60503 - lockdep_free_key_range(mod->module_core, mod->core_size);
60504 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60505 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60506
60507 /* Finally, free the core (containing the module structure) */
60508 - module_free(mod, mod->module_core);
60509 + module_free_exec(mod, mod->module_core_rx);
60510 + module_free(mod, mod->module_core_rw);
60511
60512 #ifdef CONFIG_MPU
60513 update_protections(current->mm);
60514 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
60515 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60516 int ret = 0;
60517 const struct kernel_symbol *ksym;
60518 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60519 + int is_fs_load = 0;
60520 + int register_filesystem_found = 0;
60521 + char *p;
60522 +
60523 + p = strstr(mod->args, "grsec_modharden_fs");
60524 +
60525 + if (p) {
60526 + char *endptr = p + strlen("grsec_modharden_fs");
60527 + /* copy \0 as well */
60528 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60529 + is_fs_load = 1;
60530 + }
60531 +#endif
60532 +
60533
60534 for (i = 1; i < n; i++) {
60535 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60536 + const char *name = strtab + sym[i].st_name;
60537 +
60538 + /* it's a real shame this will never get ripped and copied
60539 + upstream! ;(
60540 + */
60541 + if (is_fs_load && !strcmp(name, "register_filesystem"))
60542 + register_filesystem_found = 1;
60543 +#endif
60544 switch (sym[i].st_shndx) {
60545 case SHN_COMMON:
60546 /* We compiled with -fno-common. These are not
60547 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
60548 strtab + sym[i].st_name, mod);
60549 /* Ok if resolved. */
60550 if (ksym) {
60551 + pax_open_kernel();
60552 sym[i].st_value = ksym->value;
60553 + pax_close_kernel();
60554 break;
60555 }
60556
60557 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
60558 secbase = (unsigned long)mod->percpu;
60559 else
60560 secbase = sechdrs[sym[i].st_shndx].sh_addr;
60561 + pax_open_kernel();
60562 sym[i].st_value += secbase;
60563 + pax_close_kernel();
60564 break;
60565 }
60566 }
60567
60568 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60569 + if (is_fs_load && !register_filesystem_found) {
60570 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60571 + ret = -EPERM;
60572 + }
60573 +#endif
60574 +
60575 return ret;
60576 }
60577
60578 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
60579 || s->sh_entsize != ~0UL
60580 || strstarts(secstrings + s->sh_name, ".init"))
60581 continue;
60582 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60583 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60584 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60585 + else
60586 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60587 DEBUGP("\t%s\n", secstrings + s->sh_name);
60588 }
60589 - if (m == 0)
60590 - mod->core_text_size = mod->core_size;
60591 }
60592
60593 DEBUGP("Init section allocation order:\n");
60594 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
60595 || s->sh_entsize != ~0UL
60596 || !strstarts(secstrings + s->sh_name, ".init"))
60597 continue;
60598 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60599 - | INIT_OFFSET_MASK);
60600 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60601 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60602 + else
60603 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60604 + s->sh_entsize |= INIT_OFFSET_MASK;
60605 DEBUGP("\t%s\n", secstrings + s->sh_name);
60606 }
60607 - if (m == 0)
60608 - mod->init_text_size = mod->init_size;
60609 }
60610 }
60611
60612 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
60613
60614 /* As per nm */
60615 static char elf_type(const Elf_Sym *sym,
60616 - Elf_Shdr *sechdrs,
60617 - const char *secstrings,
60618 - struct module *mod)
60619 + const Elf_Shdr *sechdrs,
60620 + const char *secstrings)
60621 {
60622 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
60623 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
60624 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
60625
60626 /* Put symbol section at end of init part of module. */
60627 symsect->sh_flags |= SHF_ALLOC;
60628 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60629 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60630 symindex) | INIT_OFFSET_MASK;
60631 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60632
60633 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60634 }
60635
60636 /* Append room for core symbols at end of core part. */
60637 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60638 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60639 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60640 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60641
60642 /* Put string table section at end of init part of module. */
60643 strsect->sh_flags |= SHF_ALLOC;
60644 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60645 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60646 strindex) | INIT_OFFSET_MASK;
60647 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60648
60649 /* Append room for core symbols' strings at end of core part. */
60650 - *pstroffs = mod->core_size;
60651 + *pstroffs = mod->core_size_rx;
60652 __set_bit(0, strmap);
60653 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60654 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60655
60656 return symoffs;
60657 }
60658 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60659 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60660 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60661
60662 + pax_open_kernel();
60663 +
60664 /* Set types up while we still have access to sections. */
60665 for (i = 0; i < mod->num_symtab; i++)
60666 mod->symtab[i].st_info
60667 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60668 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
60669
60670 - mod->core_symtab = dst = mod->module_core + symoffs;
60671 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
60672 src = mod->symtab;
60673 *dst = *src;
60674 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60675 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60676 }
60677 mod->core_num_syms = ndst;
60678
60679 - mod->core_strtab = s = mod->module_core + stroffs;
60680 + mod->core_strtab = s = mod->module_core_rx + stroffs;
60681 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60682 if (test_bit(i, strmap))
60683 *++s = mod->strtab[i];
60684 +
60685 + pax_close_kernel();
60686 }
60687 #else
60688 static inline unsigned long layout_symtab(struct module *mod,
60689 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60690 #endif
60691 }
60692
60693 -static void *module_alloc_update_bounds(unsigned long size)
60694 +static void *module_alloc_update_bounds_rw(unsigned long size)
60695 {
60696 void *ret = module_alloc(size);
60697
60698 if (ret) {
60699 /* Update module bounds. */
60700 - if ((unsigned long)ret < module_addr_min)
60701 - module_addr_min = (unsigned long)ret;
60702 - if ((unsigned long)ret + size > module_addr_max)
60703 - module_addr_max = (unsigned long)ret + size;
60704 + if ((unsigned long)ret < module_addr_min_rw)
60705 + module_addr_min_rw = (unsigned long)ret;
60706 + if ((unsigned long)ret + size > module_addr_max_rw)
60707 + module_addr_max_rw = (unsigned long)ret + size;
60708 + }
60709 + return ret;
60710 +}
60711 +
60712 +static void *module_alloc_update_bounds_rx(unsigned long size)
60713 +{
60714 + void *ret = module_alloc_exec(size);
60715 +
60716 + if (ret) {
60717 + /* Update module bounds. */
60718 + if ((unsigned long)ret < module_addr_min_rx)
60719 + module_addr_min_rx = (unsigned long)ret;
60720 + if ((unsigned long)ret + size > module_addr_max_rx)
60721 + module_addr_max_rx = (unsigned long)ret + size;
60722 }
60723 return ret;
60724 }
60725 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60726 unsigned int i;
60727
60728 /* only scan the sections containing data */
60729 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60730 - (unsigned long)mod->module_core,
60731 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60732 + (unsigned long)mod->module_core_rw,
60733 sizeof(struct module), GFP_KERNEL);
60734
60735 for (i = 1; i < hdr->e_shnum; i++) {
60736 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60737 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60738 continue;
60739
60740 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60741 - (unsigned long)mod->module_core,
60742 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60743 + (unsigned long)mod->module_core_rw,
60744 sechdrs[i].sh_size, GFP_KERNEL);
60745 }
60746 }
60747 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60748 secstrings, &stroffs, strmap);
60749
60750 /* Do the allocs. */
60751 - ptr = module_alloc_update_bounds(mod->core_size);
60752 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60753 /*
60754 * The pointer to this block is stored in the module structure
60755 * which is inside the block. Just mark it as not being a
60756 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60757 err = -ENOMEM;
60758 goto free_percpu;
60759 }
60760 - memset(ptr, 0, mod->core_size);
60761 - mod->module_core = ptr;
60762 + memset(ptr, 0, mod->core_size_rw);
60763 + mod->module_core_rw = ptr;
60764
60765 - ptr = module_alloc_update_bounds(mod->init_size);
60766 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60767 /*
60768 * The pointer to this block is stored in the module structure
60769 * which is inside the block. This block doesn't need to be
60770 * scanned as it contains data and code that will be freed
60771 * after the module is initialized.
60772 */
60773 - kmemleak_ignore(ptr);
60774 - if (!ptr && mod->init_size) {
60775 + kmemleak_not_leak(ptr);
60776 + if (!ptr && mod->init_size_rw) {
60777 + err = -ENOMEM;
60778 + goto free_core_rw;
60779 + }
60780 + memset(ptr, 0, mod->init_size_rw);
60781 + mod->module_init_rw = ptr;
60782 +
60783 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60784 + kmemleak_not_leak(ptr);
60785 + if (!ptr) {
60786 err = -ENOMEM;
60787 - goto free_core;
60788 + goto free_init_rw;
60789 }
60790 - memset(ptr, 0, mod->init_size);
60791 - mod->module_init = ptr;
60792 +
60793 + pax_open_kernel();
60794 + memset(ptr, 0, mod->core_size_rx);
60795 + pax_close_kernel();
60796 + mod->module_core_rx = ptr;
60797 +
60798 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60799 + kmemleak_not_leak(ptr);
60800 + if (!ptr && mod->init_size_rx) {
60801 + err = -ENOMEM;
60802 + goto free_core_rx;
60803 + }
60804 +
60805 + pax_open_kernel();
60806 + memset(ptr, 0, mod->init_size_rx);
60807 + pax_close_kernel();
60808 + mod->module_init_rx = ptr;
60809
60810 /* Transfer each section which specifies SHF_ALLOC */
60811 DEBUGP("final section addresses:\n");
60812 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60813 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60814 continue;
60815
60816 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60817 - dest = mod->module_init
60818 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60819 - else
60820 - dest = mod->module_core + sechdrs[i].sh_entsize;
60821 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60822 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60823 + dest = mod->module_init_rw
60824 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60825 + else
60826 + dest = mod->module_init_rx
60827 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60828 + } else {
60829 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60830 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60831 + else
60832 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60833 + }
60834 +
60835 + if (sechdrs[i].sh_type != SHT_NOBITS) {
60836
60837 - if (sechdrs[i].sh_type != SHT_NOBITS)
60838 - memcpy(dest, (void *)sechdrs[i].sh_addr,
60839 - sechdrs[i].sh_size);
60840 +#ifdef CONFIG_PAX_KERNEXEC
60841 +#ifdef CONFIG_X86_64
60842 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60843 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60844 +#endif
60845 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60846 + pax_open_kernel();
60847 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60848 + pax_close_kernel();
60849 + } else
60850 +#endif
60851 +
60852 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60853 + }
60854 /* Update sh_addr to point to copy in image. */
60855 - sechdrs[i].sh_addr = (unsigned long)dest;
60856 +
60857 +#ifdef CONFIG_PAX_KERNEXEC
60858 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60859 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60860 + else
60861 +#endif
60862 +
60863 + sechdrs[i].sh_addr = (unsigned long)dest;
60864 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60865 }
60866 /* Module has been moved. */
60867 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60868 mod->name);
60869 if (!mod->refptr) {
60870 err = -ENOMEM;
60871 - goto free_init;
60872 + goto free_init_rx;
60873 }
60874 #endif
60875 /* Now we've moved module, initialize linked lists, etc. */
60876 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60877 /* Set up MODINFO_ATTR fields */
60878 setup_modinfo(mod, sechdrs, infoindex);
60879
60880 + mod->args = args;
60881 +
60882 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60883 + {
60884 + char *p, *p2;
60885 +
60886 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60887 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60888 + err = -EPERM;
60889 + goto cleanup;
60890 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60891 + p += strlen("grsec_modharden_normal");
60892 + p2 = strstr(p, "_");
60893 + if (p2) {
60894 + *p2 = '\0';
60895 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60896 + *p2 = '_';
60897 + }
60898 + err = -EPERM;
60899 + goto cleanup;
60900 + }
60901 + }
60902 +#endif
60903 +
60904 +
60905 /* Fix up syms, so that st_value is a pointer to location. */
60906 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60907 mod);
60908 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60909
60910 /* Now do relocations. */
60911 for (i = 1; i < hdr->e_shnum; i++) {
60912 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
60913 unsigned int info = sechdrs[i].sh_info;
60914 + strtab = (char *)sechdrs[strindex].sh_addr;
60915
60916 /* Not a valid relocation section? */
60917 if (info >= hdr->e_shnum)
60918 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60919 * Do it before processing of module parameters, so the module
60920 * can provide parameter accessor functions of its own.
60921 */
60922 - if (mod->module_init)
60923 - flush_icache_range((unsigned long)mod->module_init,
60924 - (unsigned long)mod->module_init
60925 - + mod->init_size);
60926 - flush_icache_range((unsigned long)mod->module_core,
60927 - (unsigned long)mod->module_core + mod->core_size);
60928 + if (mod->module_init_rx)
60929 + flush_icache_range((unsigned long)mod->module_init_rx,
60930 + (unsigned long)mod->module_init_rx
60931 + + mod->init_size_rx);
60932 + flush_icache_range((unsigned long)mod->module_core_rx,
60933 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60934
60935 set_fs(old_fs);
60936
60937 - mod->args = args;
60938 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60939 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60940 mod->name);
60941 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60942 free_unload:
60943 module_unload_free(mod);
60944 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60945 + free_init_rx:
60946 percpu_modfree(mod->refptr);
60947 - free_init:
60948 #endif
60949 - module_free(mod, mod->module_init);
60950 - free_core:
60951 - module_free(mod, mod->module_core);
60952 + module_free_exec(mod, mod->module_init_rx);
60953 + free_core_rx:
60954 + module_free_exec(mod, mod->module_core_rx);
60955 + free_init_rw:
60956 + module_free(mod, mod->module_init_rw);
60957 + free_core_rw:
60958 + module_free(mod, mod->module_core_rw);
60959 /* mod will be freed with core. Don't access it beyond this line! */
60960 free_percpu:
60961 if (percpu)
60962 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60963 mod->symtab = mod->core_symtab;
60964 mod->strtab = mod->core_strtab;
60965 #endif
60966 - module_free(mod, mod->module_init);
60967 - mod->module_init = NULL;
60968 - mod->init_size = 0;
60969 - mod->init_text_size = 0;
60970 + module_free(mod, mod->module_init_rw);
60971 + module_free_exec(mod, mod->module_init_rx);
60972 + mod->module_init_rw = NULL;
60973 + mod->module_init_rx = NULL;
60974 + mod->init_size_rw = 0;
60975 + mod->init_size_rx = 0;
60976 mutex_unlock(&module_mutex);
60977
60978 return 0;
60979 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60980 unsigned long nextval;
60981
60982 /* At worse, next value is at end of module */
60983 - if (within_module_init(addr, mod))
60984 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60985 + if (within_module_init_rx(addr, mod))
60986 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60987 + else if (within_module_init_rw(addr, mod))
60988 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60989 + else if (within_module_core_rx(addr, mod))
60990 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60991 + else if (within_module_core_rw(addr, mod))
60992 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60993 else
60994 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60995 + return NULL;
60996
60997 /* Scan for closest preceeding symbol, and next symbol. (ELF
60998 starts real symbols at 1). */
60999 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
61000 char buf[8];
61001
61002 seq_printf(m, "%s %u",
61003 - mod->name, mod->init_size + mod->core_size);
61004 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
61005 print_unload_info(m, mod);
61006
61007 /* Informative for users. */
61008 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
61009 mod->state == MODULE_STATE_COMING ? "Loading":
61010 "Live");
61011 /* Used by oprofile and other similar tools. */
61012 - seq_printf(m, " 0x%p", mod->module_core);
61013 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
61014
61015 /* Taints info */
61016 if (mod->taints)
61017 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
61018
61019 static int __init proc_modules_init(void)
61020 {
61021 +#ifndef CONFIG_GRKERNSEC_HIDESYM
61022 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61023 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61024 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61025 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
61026 +#else
61027 proc_create("modules", 0, NULL, &proc_modules_operations);
61028 +#endif
61029 +#else
61030 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61031 +#endif
61032 return 0;
61033 }
61034 module_init(proc_modules_init);
61035 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
61036 {
61037 struct module *mod;
61038
61039 - if (addr < module_addr_min || addr > module_addr_max)
61040 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
61041 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
61042 return NULL;
61043
61044 list_for_each_entry_rcu(mod, &modules, list)
61045 - if (within_module_core(addr, mod)
61046 - || within_module_init(addr, mod))
61047 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
61048 return mod;
61049 return NULL;
61050 }
61051 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
61052 */
61053 struct module *__module_text_address(unsigned long addr)
61054 {
61055 - struct module *mod = __module_address(addr);
61056 + struct module *mod;
61057 +
61058 +#ifdef CONFIG_X86_32
61059 + addr = ktla_ktva(addr);
61060 +#endif
61061 +
61062 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
61063 + return NULL;
61064 +
61065 + mod = __module_address(addr);
61066 +
61067 if (mod) {
61068 /* Make sure it's within the text section. */
61069 - if (!within(addr, mod->module_init, mod->init_text_size)
61070 - && !within(addr, mod->module_core, mod->core_text_size))
61071 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
61072 mod = NULL;
61073 }
61074 return mod;
61075 diff -urNp linux-2.6.32.42/kernel/mutex.c linux-2.6.32.42/kernel/mutex.c
61076 --- linux-2.6.32.42/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
61077 +++ linux-2.6.32.42/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
61078 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
61079 */
61080
61081 for (;;) {
61082 - struct thread_info *owner;
61083 + struct task_struct *owner;
61084
61085 /*
61086 * If we own the BKL, then don't spin. The owner of
61087 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
61088 spin_lock_mutex(&lock->wait_lock, flags);
61089
61090 debug_mutex_lock_common(lock, &waiter);
61091 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
61092 + debug_mutex_add_waiter(lock, &waiter, task);
61093
61094 /* add waiting tasks to the end of the waitqueue (FIFO): */
61095 list_add_tail(&waiter.list, &lock->wait_list);
61096 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
61097 * TASK_UNINTERRUPTIBLE case.)
61098 */
61099 if (unlikely(signal_pending_state(state, task))) {
61100 - mutex_remove_waiter(lock, &waiter,
61101 - task_thread_info(task));
61102 + mutex_remove_waiter(lock, &waiter, task);
61103 mutex_release(&lock->dep_map, 1, ip);
61104 spin_unlock_mutex(&lock->wait_lock, flags);
61105
61106 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
61107 done:
61108 lock_acquired(&lock->dep_map, ip);
61109 /* got the lock - rejoice! */
61110 - mutex_remove_waiter(lock, &waiter, current_thread_info());
61111 + mutex_remove_waiter(lock, &waiter, task);
61112 mutex_set_owner(lock);
61113
61114 /* set it to 0 if there are no waiters left: */
61115 diff -urNp linux-2.6.32.42/kernel/mutex-debug.c linux-2.6.32.42/kernel/mutex-debug.c
61116 --- linux-2.6.32.42/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
61117 +++ linux-2.6.32.42/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
61118 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
61119 }
61120
61121 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61122 - struct thread_info *ti)
61123 + struct task_struct *task)
61124 {
61125 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
61126
61127 /* Mark the current thread as blocked on the lock: */
61128 - ti->task->blocked_on = waiter;
61129 + task->blocked_on = waiter;
61130 }
61131
61132 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61133 - struct thread_info *ti)
61134 + struct task_struct *task)
61135 {
61136 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
61137 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
61138 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
61139 - ti->task->blocked_on = NULL;
61140 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
61141 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
61142 + task->blocked_on = NULL;
61143
61144 list_del_init(&waiter->list);
61145 waiter->task = NULL;
61146 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
61147 return;
61148
61149 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
61150 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
61151 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
61152 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
61153 mutex_clear_owner(lock);
61154 }
61155 diff -urNp linux-2.6.32.42/kernel/mutex-debug.h linux-2.6.32.42/kernel/mutex-debug.h
61156 --- linux-2.6.32.42/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
61157 +++ linux-2.6.32.42/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
61158 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
61159 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
61160 extern void debug_mutex_add_waiter(struct mutex *lock,
61161 struct mutex_waiter *waiter,
61162 - struct thread_info *ti);
61163 + struct task_struct *task);
61164 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61165 - struct thread_info *ti);
61166 + struct task_struct *task);
61167 extern void debug_mutex_unlock(struct mutex *lock);
61168 extern void debug_mutex_init(struct mutex *lock, const char *name,
61169 struct lock_class_key *key);
61170
61171 static inline void mutex_set_owner(struct mutex *lock)
61172 {
61173 - lock->owner = current_thread_info();
61174 + lock->owner = current;
61175 }
61176
61177 static inline void mutex_clear_owner(struct mutex *lock)
61178 diff -urNp linux-2.6.32.42/kernel/mutex.h linux-2.6.32.42/kernel/mutex.h
61179 --- linux-2.6.32.42/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
61180 +++ linux-2.6.32.42/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
61181 @@ -19,7 +19,7 @@
61182 #ifdef CONFIG_SMP
61183 static inline void mutex_set_owner(struct mutex *lock)
61184 {
61185 - lock->owner = current_thread_info();
61186 + lock->owner = current;
61187 }
61188
61189 static inline void mutex_clear_owner(struct mutex *lock)
61190 diff -urNp linux-2.6.32.42/kernel/panic.c linux-2.6.32.42/kernel/panic.c
61191 --- linux-2.6.32.42/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
61192 +++ linux-2.6.32.42/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
61193 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
61194 const char *board;
61195
61196 printk(KERN_WARNING "------------[ cut here ]------------\n");
61197 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
61198 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
61199 board = dmi_get_system_info(DMI_PRODUCT_NAME);
61200 if (board)
61201 printk(KERN_WARNING "Hardware name: %s\n", board);
61202 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
61203 */
61204 void __stack_chk_fail(void)
61205 {
61206 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
61207 + dump_stack();
61208 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
61209 __builtin_return_address(0));
61210 }
61211 EXPORT_SYMBOL(__stack_chk_fail);
61212 diff -urNp linux-2.6.32.42/kernel/params.c linux-2.6.32.42/kernel/params.c
61213 --- linux-2.6.32.42/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
61214 +++ linux-2.6.32.42/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
61215 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
61216 return ret;
61217 }
61218
61219 -static struct sysfs_ops module_sysfs_ops = {
61220 +static const struct sysfs_ops module_sysfs_ops = {
61221 .show = module_attr_show,
61222 .store = module_attr_store,
61223 };
61224 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
61225 return 0;
61226 }
61227
61228 -static struct kset_uevent_ops module_uevent_ops = {
61229 +static const struct kset_uevent_ops module_uevent_ops = {
61230 .filter = uevent_filter,
61231 };
61232
61233 diff -urNp linux-2.6.32.42/kernel/perf_event.c linux-2.6.32.42/kernel/perf_event.c
61234 --- linux-2.6.32.42/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
61235 +++ linux-2.6.32.42/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
61236 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
61237 */
61238 int sysctl_perf_event_sample_rate __read_mostly = 100000;
61239
61240 -static atomic64_t perf_event_id;
61241 +static atomic64_unchecked_t perf_event_id;
61242
61243 /*
61244 * Lock for (sysadmin-configurable) event reservations:
61245 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
61246 * In order to keep per-task stats reliable we need to flip the event
61247 * values when we flip the contexts.
61248 */
61249 - value = atomic64_read(&next_event->count);
61250 - value = atomic64_xchg(&event->count, value);
61251 - atomic64_set(&next_event->count, value);
61252 + value = atomic64_read_unchecked(&next_event->count);
61253 + value = atomic64_xchg_unchecked(&event->count, value);
61254 + atomic64_set_unchecked(&next_event->count, value);
61255
61256 swap(event->total_time_enabled, next_event->total_time_enabled);
61257 swap(event->total_time_running, next_event->total_time_running);
61258 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
61259 update_event_times(event);
61260 }
61261
61262 - return atomic64_read(&event->count);
61263 + return atomic64_read_unchecked(&event->count);
61264 }
61265
61266 /*
61267 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
61268 values[n++] = 1 + leader->nr_siblings;
61269 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61270 values[n++] = leader->total_time_enabled +
61271 - atomic64_read(&leader->child_total_time_enabled);
61272 + atomic64_read_unchecked(&leader->child_total_time_enabled);
61273 }
61274 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61275 values[n++] = leader->total_time_running +
61276 - atomic64_read(&leader->child_total_time_running);
61277 + atomic64_read_unchecked(&leader->child_total_time_running);
61278 }
61279
61280 size = n * sizeof(u64);
61281 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
61282 values[n++] = perf_event_read_value(event);
61283 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61284 values[n++] = event->total_time_enabled +
61285 - atomic64_read(&event->child_total_time_enabled);
61286 + atomic64_read_unchecked(&event->child_total_time_enabled);
61287 }
61288 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61289 values[n++] = event->total_time_running +
61290 - atomic64_read(&event->child_total_time_running);
61291 + atomic64_read_unchecked(&event->child_total_time_running);
61292 }
61293 if (read_format & PERF_FORMAT_ID)
61294 values[n++] = primary_event_id(event);
61295 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
61296 static void perf_event_reset(struct perf_event *event)
61297 {
61298 (void)perf_event_read(event);
61299 - atomic64_set(&event->count, 0);
61300 + atomic64_set_unchecked(&event->count, 0);
61301 perf_event_update_userpage(event);
61302 }
61303
61304 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
61305 ++userpg->lock;
61306 barrier();
61307 userpg->index = perf_event_index(event);
61308 - userpg->offset = atomic64_read(&event->count);
61309 + userpg->offset = atomic64_read_unchecked(&event->count);
61310 if (event->state == PERF_EVENT_STATE_ACTIVE)
61311 - userpg->offset -= atomic64_read(&event->hw.prev_count);
61312 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
61313
61314 userpg->time_enabled = event->total_time_enabled +
61315 - atomic64_read(&event->child_total_time_enabled);
61316 + atomic64_read_unchecked(&event->child_total_time_enabled);
61317
61318 userpg->time_running = event->total_time_running +
61319 - atomic64_read(&event->child_total_time_running);
61320 + atomic64_read_unchecked(&event->child_total_time_running);
61321
61322 barrier();
61323 ++userpg->lock;
61324 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
61325 u64 values[4];
61326 int n = 0;
61327
61328 - values[n++] = atomic64_read(&event->count);
61329 + values[n++] = atomic64_read_unchecked(&event->count);
61330 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61331 values[n++] = event->total_time_enabled +
61332 - atomic64_read(&event->child_total_time_enabled);
61333 + atomic64_read_unchecked(&event->child_total_time_enabled);
61334 }
61335 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61336 values[n++] = event->total_time_running +
61337 - atomic64_read(&event->child_total_time_running);
61338 + atomic64_read_unchecked(&event->child_total_time_running);
61339 }
61340 if (read_format & PERF_FORMAT_ID)
61341 values[n++] = primary_event_id(event);
61342 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
61343 if (leader != event)
61344 leader->pmu->read(leader);
61345
61346 - values[n++] = atomic64_read(&leader->count);
61347 + values[n++] = atomic64_read_unchecked(&leader->count);
61348 if (read_format & PERF_FORMAT_ID)
61349 values[n++] = primary_event_id(leader);
61350
61351 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
61352 if (sub != event)
61353 sub->pmu->read(sub);
61354
61355 - values[n++] = atomic64_read(&sub->count);
61356 + values[n++] = atomic64_read_unchecked(&sub->count);
61357 if (read_format & PERF_FORMAT_ID)
61358 values[n++] = primary_event_id(sub);
61359
61360 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
61361 {
61362 struct hw_perf_event *hwc = &event->hw;
61363
61364 - atomic64_add(nr, &event->count);
61365 + atomic64_add_unchecked(nr, &event->count);
61366
61367 if (!hwc->sample_period)
61368 return;
61369 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
61370 u64 now;
61371
61372 now = cpu_clock(cpu);
61373 - prev = atomic64_read(&event->hw.prev_count);
61374 - atomic64_set(&event->hw.prev_count, now);
61375 - atomic64_add(now - prev, &event->count);
61376 + prev = atomic64_read_unchecked(&event->hw.prev_count);
61377 + atomic64_set_unchecked(&event->hw.prev_count, now);
61378 + atomic64_add_unchecked(now - prev, &event->count);
61379 }
61380
61381 static int cpu_clock_perf_event_enable(struct perf_event *event)
61382 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
61383 struct hw_perf_event *hwc = &event->hw;
61384 int cpu = raw_smp_processor_id();
61385
61386 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
61387 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
61388 perf_swevent_start_hrtimer(event);
61389
61390 return 0;
61391 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
61392 u64 prev;
61393 s64 delta;
61394
61395 - prev = atomic64_xchg(&event->hw.prev_count, now);
61396 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
61397 delta = now - prev;
61398 - atomic64_add(delta, &event->count);
61399 + atomic64_add_unchecked(delta, &event->count);
61400 }
61401
61402 static int task_clock_perf_event_enable(struct perf_event *event)
61403 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
61404
61405 now = event->ctx->time;
61406
61407 - atomic64_set(&hwc->prev_count, now);
61408 + atomic64_set_unchecked(&hwc->prev_count, now);
61409
61410 perf_swevent_start_hrtimer(event);
61411
61412 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
61413 event->parent = parent_event;
61414
61415 event->ns = get_pid_ns(current->nsproxy->pid_ns);
61416 - event->id = atomic64_inc_return(&perf_event_id);
61417 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
61418
61419 event->state = PERF_EVENT_STATE_INACTIVE;
61420
61421 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
61422 if (child_event->attr.inherit_stat)
61423 perf_event_read_event(child_event, child);
61424
61425 - child_val = atomic64_read(&child_event->count);
61426 + child_val = atomic64_read_unchecked(&child_event->count);
61427
61428 /*
61429 * Add back the child's count to the parent's count:
61430 */
61431 - atomic64_add(child_val, &parent_event->count);
61432 - atomic64_add(child_event->total_time_enabled,
61433 + atomic64_add_unchecked(child_val, &parent_event->count);
61434 + atomic64_add_unchecked(child_event->total_time_enabled,
61435 &parent_event->child_total_time_enabled);
61436 - atomic64_add(child_event->total_time_running,
61437 + atomic64_add_unchecked(child_event->total_time_running,
61438 &parent_event->child_total_time_running);
61439
61440 /*
61441 diff -urNp linux-2.6.32.42/kernel/pid.c linux-2.6.32.42/kernel/pid.c
61442 --- linux-2.6.32.42/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
61443 +++ linux-2.6.32.42/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
61444 @@ -33,6 +33,7 @@
61445 #include <linux/rculist.h>
61446 #include <linux/bootmem.h>
61447 #include <linux/hash.h>
61448 +#include <linux/security.h>
61449 #include <linux/pid_namespace.h>
61450 #include <linux/init_task.h>
61451 #include <linux/syscalls.h>
61452 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61453
61454 int pid_max = PID_MAX_DEFAULT;
61455
61456 -#define RESERVED_PIDS 300
61457 +#define RESERVED_PIDS 500
61458
61459 int pid_max_min = RESERVED_PIDS + 1;
61460 int pid_max_max = PID_MAX_LIMIT;
61461 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
61462 */
61463 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61464 {
61465 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61466 + struct task_struct *task;
61467 +
61468 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61469 +
61470 + if (gr_pid_is_chrooted(task))
61471 + return NULL;
61472 +
61473 + return task;
61474 }
61475
61476 struct task_struct *find_task_by_vpid(pid_t vnr)
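Annotation (not part of the patch): after the hunk above, find_task_by_pid_ns() (and therefore find_task_by_vpid()) returns NULL for any task hidden by gr_pid_is_chrooted(), so a PID-only lookup performed from inside a chroot fails for outside processes exactly as if the PID were unused. A hypothetical caller, shown only to illustrate the visible effect (example_signal_lookup is not a function from the patch or the kernel):

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/sched.h>
	#include <linux/rcupdate.h>

	static int example_signal_lookup(pid_t pid)
	{
		struct task_struct *p;
		int ret = -ESRCH;

		rcu_read_lock();
		p = find_task_by_vpid(pid);	/* now NULL for tasks outside the caller's chroot */
		if (p)
			ret = 0;		/* visible: a real caller would act on p here */
		rcu_read_unlock();

		return ret;
	}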
61477 diff -urNp linux-2.6.32.42/kernel/posix-cpu-timers.c linux-2.6.32.42/kernel/posix-cpu-timers.c
61478 --- linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
61479 +++ linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
61480 @@ -6,6 +6,7 @@
61481 #include <linux/posix-timers.h>
61482 #include <linux/errno.h>
61483 #include <linux/math64.h>
61484 +#include <linux/security.h>
61485 #include <asm/uaccess.h>
61486 #include <linux/kernel_stat.h>
61487 #include <trace/events/timer.h>
61488 diff -urNp linux-2.6.32.42/kernel/posix-timers.c linux-2.6.32.42/kernel/posix-timers.c
61489 --- linux-2.6.32.42/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
61490 +++ linux-2.6.32.42/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
61491 @@ -42,6 +42,7 @@
61492 #include <linux/compiler.h>
61493 #include <linux/idr.h>
61494 #include <linux/posix-timers.h>
61495 +#include <linux/grsecurity.h>
61496 #include <linux/syscalls.h>
61497 #include <linux/wait.h>
61498 #include <linux/workqueue.h>
61499 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
61500 .nsleep = no_nsleep,
61501 };
61502
61503 + pax_track_stack();
61504 +
61505 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
61506 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
61507 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61508 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61509 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61510 return -EFAULT;
61511
61512 +	/* only the CLOCK_REALTIME clock can be set; all other clocks
61513 +	   have their clock_set fptr set to a nosettime dummy function.
61514 +	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
61515 +	   call common_clock_set, which calls do_sys_settimeofday, which
61516 +	   we hook.
61517 +	 */
61518 +
61519 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
61520 }
61521
61522 diff -urNp linux-2.6.32.42/kernel/power/hibernate.c linux-2.6.32.42/kernel/power/hibernate.c
61523 --- linux-2.6.32.42/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
61524 +++ linux-2.6.32.42/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
61525 @@ -48,14 +48,14 @@ enum {
61526
61527 static int hibernation_mode = HIBERNATION_SHUTDOWN;
61528
61529 -static struct platform_hibernation_ops *hibernation_ops;
61530 +static const struct platform_hibernation_ops *hibernation_ops;
61531
61532 /**
61533 * hibernation_set_ops - set the global hibernate operations
61534 * @ops: the hibernation operations to use in subsequent hibernation transitions
61535 */
61536
61537 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
61538 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
61539 {
61540 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
61541 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
61542 diff -urNp linux-2.6.32.42/kernel/power/poweroff.c linux-2.6.32.42/kernel/power/poweroff.c
61543 --- linux-2.6.32.42/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
61544 +++ linux-2.6.32.42/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
61545 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61546 .enable_mask = SYSRQ_ENABLE_BOOT,
61547 };
61548
61549 -static int pm_sysrq_init(void)
61550 +static int __init pm_sysrq_init(void)
61551 {
61552 register_sysrq_key('o', &sysrq_poweroff_op);
61553 return 0;
61554 diff -urNp linux-2.6.32.42/kernel/power/process.c linux-2.6.32.42/kernel/power/process.c
61555 --- linux-2.6.32.42/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
61556 +++ linux-2.6.32.42/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
61557 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
61558 struct timeval start, end;
61559 u64 elapsed_csecs64;
61560 unsigned int elapsed_csecs;
61561 + bool timedout = false;
61562
61563 do_gettimeofday(&start);
61564
61565 end_time = jiffies + TIMEOUT;
61566 do {
61567 todo = 0;
61568 + if (time_after(jiffies, end_time))
61569 + timedout = true;
61570 read_lock(&tasklist_lock);
61571 do_each_thread(g, p) {
61572 if (frozen(p) || !freezeable(p))
61573 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
61574 * It is "frozen enough". If the task does wake
61575 * up, it will immediately call try_to_freeze.
61576 */
61577 - if (!task_is_stopped_or_traced(p) &&
61578 - !freezer_should_skip(p))
61579 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61580 todo++;
61581 + if (timedout) {
61582 + printk(KERN_ERR "Task refusing to freeze:\n");
61583 + sched_show_task(p);
61584 + }
61585 + }
61586 } while_each_thread(g, p);
61587 read_unlock(&tasklist_lock);
61588 yield(); /* Yield is okay here */
61589 - if (time_after(jiffies, end_time))
61590 - break;
61591 - } while (todo);
61592 + } while (todo && !timedout);
61593
61594 do_gettimeofday(&end);
61595 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
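Annotation (not part of the patch): the try_to_freeze_tasks() hunks above change the failure path only. Instead of breaking out of the scan the moment the timeout expires, the loop latches a timedout flag, runs one more full pass so that every task still refusing to freeze is printed via sched_show_task(), and only then gives up. The same latch-then-report shape, reduced to a sketch with hypothetical helpers (count_busy() and report_busy() stand in for the freezer-specific logic):

	#include <linux/types.h>
	#include <linux/jiffies.h>
	#include <linux/sched.h>

	extern unsigned int count_busy(void);	/* hypothetical: how many items still block us */
	extern void report_busy(void);		/* hypothetical: log each offender once */

	static void wait_then_report(unsigned long end_time)
	{
		bool timedout = false;
		unsigned int todo;

		do {
			if (time_after(jiffies, end_time))
				timedout = true;	/* latch it; do not break yet */
			todo = count_busy();
			if (todo && timedout)
				report_busy();		/* e.g. sched_show_task() per task */
			yield();
		} while (todo && !timedout);		/* exit only after the reporting pass */
	}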
61596 diff -urNp linux-2.6.32.42/kernel/power/suspend.c linux-2.6.32.42/kernel/power/suspend.c
61597 --- linux-2.6.32.42/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
61598 +++ linux-2.6.32.42/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
61599 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
61600 [PM_SUSPEND_MEM] = "mem",
61601 };
61602
61603 -static struct platform_suspend_ops *suspend_ops;
61604 +static const struct platform_suspend_ops *suspend_ops;
61605
61606 /**
61607 * suspend_set_ops - Set the global suspend method table.
61608 * @ops: Pointer to ops structure.
61609 */
61610 -void suspend_set_ops(struct platform_suspend_ops *ops)
61611 +void suspend_set_ops(const struct platform_suspend_ops *ops)
61612 {
61613 mutex_lock(&pm_mutex);
61614 suspend_ops = ops;
61615 diff -urNp linux-2.6.32.42/kernel/printk.c linux-2.6.32.42/kernel/printk.c
61616 --- linux-2.6.32.42/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
61617 +++ linux-2.6.32.42/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
61618 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
61619 char c;
61620 int error = 0;
61621
61622 +#ifdef CONFIG_GRKERNSEC_DMESG
61623 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
61624 + return -EPERM;
61625 +#endif
61626 +
61627 error = security_syslog(type);
61628 if (error)
61629 return error;
61630 diff -urNp linux-2.6.32.42/kernel/profile.c linux-2.6.32.42/kernel/profile.c
61631 --- linux-2.6.32.42/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61632 +++ linux-2.6.32.42/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61633 @@ -39,7 +39,7 @@ struct profile_hit {
61634 /* Oprofile timer tick hook */
61635 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61636
61637 -static atomic_t *prof_buffer;
61638 +static atomic_unchecked_t *prof_buffer;
61639 static unsigned long prof_len, prof_shift;
61640
61641 int prof_on __read_mostly;
61642 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61643 hits[i].pc = 0;
61644 continue;
61645 }
61646 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61647 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61648 hits[i].hits = hits[i].pc = 0;
61649 }
61650 }
61651 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61652 * Add the current hit(s) and flush the write-queue out
61653 * to the global buffer:
61654 */
61655 - atomic_add(nr_hits, &prof_buffer[pc]);
61656 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61657 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61658 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61659 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61660 hits[i].pc = hits[i].hits = 0;
61661 }
61662 out:
61663 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61664 if (prof_on != type || !prof_buffer)
61665 return;
61666 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61667 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61668 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61669 }
61670 #endif /* !CONFIG_SMP */
61671 EXPORT_SYMBOL_GPL(profile_hits);
61672 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61673 return -EFAULT;
61674 buf++; p++; count--; read++;
61675 }
61676 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61677 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61678 if (copy_to_user(buf, (void *)pnt, count))
61679 return -EFAULT;
61680 read += count;
61681 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61682 }
61683 #endif
61684 profile_discard_flip_buffers();
61685 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61686 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61687 return count;
61688 }
61689
61690 diff -urNp linux-2.6.32.42/kernel/ptrace.c linux-2.6.32.42/kernel/ptrace.c
61691 --- linux-2.6.32.42/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61692 +++ linux-2.6.32.42/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61693 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61694 return ret;
61695 }
61696
61697 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61698 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61699 + unsigned int log)
61700 {
61701 const struct cred *cred = current_cred(), *tcred;
61702
61703 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61704 cred->gid != tcred->egid ||
61705 cred->gid != tcred->sgid ||
61706 cred->gid != tcred->gid) &&
61707 - !capable(CAP_SYS_PTRACE)) {
61708 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61709 + (log && !capable(CAP_SYS_PTRACE)))
61710 + ) {
61711 rcu_read_unlock();
61712 return -EPERM;
61713 }
61714 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61715 smp_rmb();
61716 if (task->mm)
61717 dumpable = get_dumpable(task->mm);
61718 - if (!dumpable && !capable(CAP_SYS_PTRACE))
61719 + if (!dumpable &&
61720 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61721 + (log && !capable(CAP_SYS_PTRACE))))
61722 return -EPERM;
61723
61724 return security_ptrace_access_check(task, mode);
61725 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61726 {
61727 int err;
61728 task_lock(task);
61729 - err = __ptrace_may_access(task, mode);
61730 + err = __ptrace_may_access(task, mode, 0);
61731 + task_unlock(task);
61732 + return !err;
61733 +}
61734 +
61735 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61736 +{
61737 + int err;
61738 + task_lock(task);
61739 + err = __ptrace_may_access(task, mode, 1);
61740 task_unlock(task);
61741 return !err;
61742 }
61743 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61744 goto out;
61745
61746 task_lock(task);
61747 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61748 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61749 task_unlock(task);
61750 if (retval)
61751 goto unlock_creds;
61752 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61753 goto unlock_tasklist;
61754
61755 task->ptrace = PT_PTRACED;
61756 - if (capable(CAP_SYS_PTRACE))
61757 + if (capable_nolog(CAP_SYS_PTRACE))
61758 task->ptrace |= PT_PTRACE_CAP;
61759
61760 __ptrace_link(task, current);
61761 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61762 {
61763 int copied = 0;
61764
61765 + pax_track_stack();
61766 +
61767 while (len > 0) {
61768 char buf[128];
61769 int this_len, retval;
61770 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61771 {
61772 int copied = 0;
61773
61774 + pax_track_stack();
61775 +
61776 while (len > 0) {
61777 char buf[128];
61778 int this_len, retval;
61779 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61780 int ret = -EIO;
61781 siginfo_t siginfo;
61782
61783 + pax_track_stack();
61784 +
61785 switch (request) {
61786 case PTRACE_PEEKTEXT:
61787 case PTRACE_PEEKDATA:
61788 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61789 ret = ptrace_setoptions(child, data);
61790 break;
61791 case PTRACE_GETEVENTMSG:
61792 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61793 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61794 break;
61795
61796 case PTRACE_GETSIGINFO:
61797 ret = ptrace_getsiginfo(child, &siginfo);
61798 if (!ret)
61799 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
61800 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61801 &siginfo);
61802 break;
61803
61804 case PTRACE_SETSIGINFO:
61805 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61806 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61807 sizeof siginfo))
61808 ret = -EFAULT;
61809 else
61810 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61811 goto out;
61812 }
61813
61814 + if (gr_handle_ptrace(child, request)) {
61815 + ret = -EPERM;
61816 + goto out_put_task_struct;
61817 + }
61818 +
61819 if (request == PTRACE_ATTACH) {
61820 ret = ptrace_attach(child);
61821 /*
61822 * Some architectures need to do book-keeping after
61823 * a ptrace attach.
61824 */
61825 - if (!ret)
61826 + if (!ret) {
61827 arch_ptrace_attach(child);
61828 + gr_audit_ptrace(child);
61829 + }
61830 goto out_put_task_struct;
61831 }
61832
61833 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61834 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61835 if (copied != sizeof(tmp))
61836 return -EIO;
61837 - return put_user(tmp, (unsigned long __user *)data);
61838 + return put_user(tmp, (__force unsigned long __user *)data);
61839 }
61840
61841 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61842 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61843 siginfo_t siginfo;
61844 int ret;
61845
61846 + pax_track_stack();
61847 +
61848 switch (request) {
61849 case PTRACE_PEEKTEXT:
61850 case PTRACE_PEEKDATA:
61851 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61852 goto out;
61853 }
61854
61855 + if (gr_handle_ptrace(child, request)) {
61856 + ret = -EPERM;
61857 + goto out_put_task_struct;
61858 + }
61859 +
61860 if (request == PTRACE_ATTACH) {
61861 ret = ptrace_attach(child);
61862 /*
61863 * Some architectures need to do book-keeping after
61864 * a ptrace attach.
61865 */
61866 - if (!ret)
61867 + if (!ret) {
61868 arch_ptrace_attach(child);
61869 + gr_audit_ptrace(child);
61870 + }
61871 goto out_put_task_struct;
61872 }
61873
61874 diff -urNp linux-2.6.32.42/kernel/rcutorture.c linux-2.6.32.42/kernel/rcutorture.c
61875 --- linux-2.6.32.42/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61876 +++ linux-2.6.32.42/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61877 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61878 { 0 };
61879 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61880 { 0 };
61881 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61882 -static atomic_t n_rcu_torture_alloc;
61883 -static atomic_t n_rcu_torture_alloc_fail;
61884 -static atomic_t n_rcu_torture_free;
61885 -static atomic_t n_rcu_torture_mberror;
61886 -static atomic_t n_rcu_torture_error;
61887 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61888 +static atomic_unchecked_t n_rcu_torture_alloc;
61889 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61890 +static atomic_unchecked_t n_rcu_torture_free;
61891 +static atomic_unchecked_t n_rcu_torture_mberror;
61892 +static atomic_unchecked_t n_rcu_torture_error;
61893 static long n_rcu_torture_timers;
61894 static struct list_head rcu_torture_removed;
61895 static cpumask_var_t shuffle_tmp_mask;
61896 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61897
61898 spin_lock_bh(&rcu_torture_lock);
61899 if (list_empty(&rcu_torture_freelist)) {
61900 - atomic_inc(&n_rcu_torture_alloc_fail);
61901 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61902 spin_unlock_bh(&rcu_torture_lock);
61903 return NULL;
61904 }
61905 - atomic_inc(&n_rcu_torture_alloc);
61906 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61907 p = rcu_torture_freelist.next;
61908 list_del_init(p);
61909 spin_unlock_bh(&rcu_torture_lock);
61910 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61911 static void
61912 rcu_torture_free(struct rcu_torture *p)
61913 {
61914 - atomic_inc(&n_rcu_torture_free);
61915 + atomic_inc_unchecked(&n_rcu_torture_free);
61916 spin_lock_bh(&rcu_torture_lock);
61917 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61918 spin_unlock_bh(&rcu_torture_lock);
61919 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61920 i = rp->rtort_pipe_count;
61921 if (i > RCU_TORTURE_PIPE_LEN)
61922 i = RCU_TORTURE_PIPE_LEN;
61923 - atomic_inc(&rcu_torture_wcount[i]);
61924 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61925 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61926 rp->rtort_mbtest = 0;
61927 rcu_torture_free(rp);
61928 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61929 i = rp->rtort_pipe_count;
61930 if (i > RCU_TORTURE_PIPE_LEN)
61931 i = RCU_TORTURE_PIPE_LEN;
61932 - atomic_inc(&rcu_torture_wcount[i]);
61933 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61934 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61935 rp->rtort_mbtest = 0;
61936 list_del(&rp->rtort_free);
61937 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61938 i = old_rp->rtort_pipe_count;
61939 if (i > RCU_TORTURE_PIPE_LEN)
61940 i = RCU_TORTURE_PIPE_LEN;
61941 - atomic_inc(&rcu_torture_wcount[i]);
61942 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61943 old_rp->rtort_pipe_count++;
61944 cur_ops->deferred_free(old_rp);
61945 }
61946 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61947 return;
61948 }
61949 if (p->rtort_mbtest == 0)
61950 - atomic_inc(&n_rcu_torture_mberror);
61951 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61952 spin_lock(&rand_lock);
61953 cur_ops->read_delay(&rand);
61954 n_rcu_torture_timers++;
61955 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61956 continue;
61957 }
61958 if (p->rtort_mbtest == 0)
61959 - atomic_inc(&n_rcu_torture_mberror);
61960 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61961 cur_ops->read_delay(&rand);
61962 preempt_disable();
61963 pipe_count = p->rtort_pipe_count;
61964 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61965 rcu_torture_current,
61966 rcu_torture_current_version,
61967 list_empty(&rcu_torture_freelist),
61968 - atomic_read(&n_rcu_torture_alloc),
61969 - atomic_read(&n_rcu_torture_alloc_fail),
61970 - atomic_read(&n_rcu_torture_free),
61971 - atomic_read(&n_rcu_torture_mberror),
61972 + atomic_read_unchecked(&n_rcu_torture_alloc),
61973 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61974 + atomic_read_unchecked(&n_rcu_torture_free),
61975 + atomic_read_unchecked(&n_rcu_torture_mberror),
61976 n_rcu_torture_timers);
61977 - if (atomic_read(&n_rcu_torture_mberror) != 0)
61978 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61979 cnt += sprintf(&page[cnt], " !!!");
61980 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61981 if (i > 1) {
61982 cnt += sprintf(&page[cnt], "!!! ");
61983 - atomic_inc(&n_rcu_torture_error);
61984 + atomic_inc_unchecked(&n_rcu_torture_error);
61985 WARN_ON_ONCE(1);
61986 }
61987 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61988 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61989 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61990 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61991 cnt += sprintf(&page[cnt], " %d",
61992 - atomic_read(&rcu_torture_wcount[i]));
61993 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61994 }
61995 cnt += sprintf(&page[cnt], "\n");
61996 if (cur_ops->stats)
61997 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61998
61999 if (cur_ops->cleanup)
62000 cur_ops->cleanup();
62001 - if (atomic_read(&n_rcu_torture_error))
62002 + if (atomic_read_unchecked(&n_rcu_torture_error))
62003 rcu_torture_print_module_parms("End of test: FAILURE");
62004 else
62005 rcu_torture_print_module_parms("End of test: SUCCESS");
62006 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
62007
62008 rcu_torture_current = NULL;
62009 rcu_torture_current_version = 0;
62010 - atomic_set(&n_rcu_torture_alloc, 0);
62011 - atomic_set(&n_rcu_torture_alloc_fail, 0);
62012 - atomic_set(&n_rcu_torture_free, 0);
62013 - atomic_set(&n_rcu_torture_mberror, 0);
62014 - atomic_set(&n_rcu_torture_error, 0);
62015 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
62016 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
62017 + atomic_set_unchecked(&n_rcu_torture_free, 0);
62018 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
62019 + atomic_set_unchecked(&n_rcu_torture_error, 0);
62020 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
62021 - atomic_set(&rcu_torture_wcount[i], 0);
62022 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
62023 for_each_possible_cpu(cpu) {
62024 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62025 per_cpu(rcu_torture_count, cpu)[i] = 0;
62026 diff -urNp linux-2.6.32.42/kernel/rcutree.c linux-2.6.32.42/kernel/rcutree.c
62027 --- linux-2.6.32.42/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
62028 +++ linux-2.6.32.42/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
62029 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
62030 /*
62031 * Do softirq processing for the current CPU.
62032 */
62033 -static void rcu_process_callbacks(struct softirq_action *unused)
62034 +static void rcu_process_callbacks(void)
62035 {
62036 /*
62037 * Memory references from any prior RCU read-side critical sections
62038 diff -urNp linux-2.6.32.42/kernel/rcutree_plugin.h linux-2.6.32.42/kernel/rcutree_plugin.h
62039 --- linux-2.6.32.42/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
62040 +++ linux-2.6.32.42/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
62041 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
62042 */
62043 void __rcu_read_lock(void)
62044 {
62045 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
62046 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
62047 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
62048 }
62049 EXPORT_SYMBOL_GPL(__rcu_read_lock);
62050 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
62051 struct task_struct *t = current;
62052
62053 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
62054 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
62055 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
62056 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
62057 rcu_read_unlock_special(t);
62058 }
62059 diff -urNp linux-2.6.32.42/kernel/relay.c linux-2.6.32.42/kernel/relay.c
62060 --- linux-2.6.32.42/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
62061 +++ linux-2.6.32.42/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
62062 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
62063 unsigned int flags,
62064 int *nonpad_ret)
62065 {
62066 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
62067 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
62068 struct rchan_buf *rbuf = in->private_data;
62069 unsigned int subbuf_size = rbuf->chan->subbuf_size;
62070 uint64_t pos = (uint64_t) *ppos;
62071 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
62072 .ops = &relay_pipe_buf_ops,
62073 .spd_release = relay_page_release,
62074 };
62075 + ssize_t ret;
62076 +
62077 + pax_track_stack();
62078
62079 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
62080 return 0;
62081 diff -urNp linux-2.6.32.42/kernel/resource.c linux-2.6.32.42/kernel/resource.c
62082 --- linux-2.6.32.42/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
62083 +++ linux-2.6.32.42/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
62084 @@ -132,8 +132,18 @@ static const struct file_operations proc
62085
62086 static int __init ioresources_init(void)
62087 {
62088 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62089 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62090 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
62091 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
62092 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62093 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
62094 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
62095 +#endif
62096 +#else
62097 proc_create("ioports", 0, NULL, &proc_ioports_operations);
62098 proc_create("iomem", 0, NULL, &proc_iomem_operations);
62099 +#endif
62100 return 0;
62101 }
62102 __initcall(ioresources_init);
62103 diff -urNp linux-2.6.32.42/kernel/rtmutex.c linux-2.6.32.42/kernel/rtmutex.c
62104 --- linux-2.6.32.42/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
62105 +++ linux-2.6.32.42/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
62106 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
62107 */
62108 spin_lock_irqsave(&pendowner->pi_lock, flags);
62109
62110 - WARN_ON(!pendowner->pi_blocked_on);
62111 + BUG_ON(!pendowner->pi_blocked_on);
62112 WARN_ON(pendowner->pi_blocked_on != waiter);
62113 WARN_ON(pendowner->pi_blocked_on->lock != lock);
62114
62115 diff -urNp linux-2.6.32.42/kernel/rtmutex-tester.c linux-2.6.32.42/kernel/rtmutex-tester.c
62116 --- linux-2.6.32.42/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
62117 +++ linux-2.6.32.42/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
62118 @@ -21,7 +21,7 @@
62119 #define MAX_RT_TEST_MUTEXES 8
62120
62121 static spinlock_t rttest_lock;
62122 -static atomic_t rttest_event;
62123 +static atomic_unchecked_t rttest_event;
62124
62125 struct test_thread_data {
62126 int opcode;
62127 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
62128
62129 case RTTEST_LOCKCONT:
62130 td->mutexes[td->opdata] = 1;
62131 - td->event = atomic_add_return(1, &rttest_event);
62132 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62133 return 0;
62134
62135 case RTTEST_RESET:
62136 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
62137 return 0;
62138
62139 case RTTEST_RESETEVENT:
62140 - atomic_set(&rttest_event, 0);
62141 + atomic_set_unchecked(&rttest_event, 0);
62142 return 0;
62143
62144 default:
62145 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
62146 return ret;
62147
62148 td->mutexes[id] = 1;
62149 - td->event = atomic_add_return(1, &rttest_event);
62150 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62151 rt_mutex_lock(&mutexes[id]);
62152 - td->event = atomic_add_return(1, &rttest_event);
62153 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62154 td->mutexes[id] = 4;
62155 return 0;
62156
62157 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
62158 return ret;
62159
62160 td->mutexes[id] = 1;
62161 - td->event = atomic_add_return(1, &rttest_event);
62162 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62163 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
62164 - td->event = atomic_add_return(1, &rttest_event);
62165 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62166 td->mutexes[id] = ret ? 0 : 4;
62167 return ret ? -EINTR : 0;
62168
62169 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
62170 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
62171 return ret;
62172
62173 - td->event = atomic_add_return(1, &rttest_event);
62174 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62175 rt_mutex_unlock(&mutexes[id]);
62176 - td->event = atomic_add_return(1, &rttest_event);
62177 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62178 td->mutexes[id] = 0;
62179 return 0;
62180
62181 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
62182 break;
62183
62184 td->mutexes[dat] = 2;
62185 - td->event = atomic_add_return(1, &rttest_event);
62186 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62187 break;
62188
62189 case RTTEST_LOCKBKL:
62190 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
62191 return;
62192
62193 td->mutexes[dat] = 3;
62194 - td->event = atomic_add_return(1, &rttest_event);
62195 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62196 break;
62197
62198 case RTTEST_LOCKNOWAIT:
62199 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
62200 return;
62201
62202 td->mutexes[dat] = 1;
62203 - td->event = atomic_add_return(1, &rttest_event);
62204 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62205 return;
62206
62207 case RTTEST_LOCKBKL:
62208 diff -urNp linux-2.6.32.42/kernel/sched.c linux-2.6.32.42/kernel/sched.c
62209 --- linux-2.6.32.42/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
62210 +++ linux-2.6.32.42/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
62211 @@ -5043,7 +5043,7 @@ out:
62212 * In CONFIG_NO_HZ case, the idle load balance owner will do the
62213 * rebalancing for all the cpus for whom scheduler ticks are stopped.
62214 */
62215 -static void run_rebalance_domains(struct softirq_action *h)
62216 +static void run_rebalance_domains(void)
62217 {
62218 int this_cpu = smp_processor_id();
62219 struct rq *this_rq = cpu_rq(this_cpu);
62220 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
62221 struct rq *rq;
62222 int cpu;
62223
62224 + pax_track_stack();
62225 +
62226 need_resched:
62227 preempt_disable();
62228 cpu = smp_processor_id();
62229 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
62230 * Look out! "owner" is an entirely speculative pointer
62231 * access and not reliable.
62232 */
62233 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
62234 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
62235 {
62236 unsigned int cpu;
62237 struct rq *rq;
62238 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
62239 * DEBUG_PAGEALLOC could have unmapped it if
62240 * the mutex owner just released it and exited.
62241 */
62242 - if (probe_kernel_address(&owner->cpu, cpu))
62243 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
62244 return 0;
62245 #else
62246 - cpu = owner->cpu;
62247 + cpu = task_thread_info(owner)->cpu;
62248 #endif
62249
62250 /*
62251 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
62252 /*
62253 * Is that owner really running on that cpu?
62254 */
62255 - if (task_thread_info(rq->curr) != owner || need_resched())
62256 + if (rq->curr != owner || need_resched())
62257 return 0;
62258
62259 cpu_relax();
62260 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
62261 /* convert nice value [19,-20] to rlimit style value [1,40] */
62262 int nice_rlim = 20 - nice;
62263
62264 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
62265 +
62266 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
62267 capable(CAP_SYS_NICE));
62268 }
62269 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
62270 if (nice > 19)
62271 nice = 19;
62272
62273 - if (increment < 0 && !can_nice(current, nice))
62274 + if (increment < 0 && (!can_nice(current, nice) ||
62275 + gr_handle_chroot_nice()))
62276 return -EPERM;
62277
62278 retval = security_task_setnice(current, nice);
62279 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
62280 long power;
62281 int weight;
62282
62283 - WARN_ON(!sd || !sd->groups);
62284 + BUG_ON(!sd || !sd->groups);
62285
62286 if (cpu != group_first_cpu(sd->groups))
62287 return;
62288 diff -urNp linux-2.6.32.42/kernel/signal.c linux-2.6.32.42/kernel/signal.c
62289 --- linux-2.6.32.42/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
62290 +++ linux-2.6.32.42/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
62291 @@ -41,12 +41,12 @@
62292
62293 static struct kmem_cache *sigqueue_cachep;
62294
62295 -static void __user *sig_handler(struct task_struct *t, int sig)
62296 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
62297 {
62298 return t->sighand->action[sig - 1].sa.sa_handler;
62299 }
62300
62301 -static int sig_handler_ignored(void __user *handler, int sig)
62302 +static int sig_handler_ignored(__sighandler_t handler, int sig)
62303 {
62304 /* Is it explicitly or implicitly ignored? */
62305 return handler == SIG_IGN ||
62306 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
62307 static int sig_task_ignored(struct task_struct *t, int sig,
62308 int from_ancestor_ns)
62309 {
62310 - void __user *handler;
62311 + __sighandler_t handler;
62312
62313 handler = sig_handler(t, sig);
62314
62315 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
62316 */
62317 user = get_uid(__task_cred(t)->user);
62318 atomic_inc(&user->sigpending);
62319 +
62320 + if (!override_rlimit)
62321 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
62322 if (override_rlimit ||
62323 atomic_read(&user->sigpending) <=
62324 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
62325 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
62326
62327 int unhandled_signal(struct task_struct *tsk, int sig)
62328 {
62329 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
62330 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
62331 if (is_global_init(tsk))
62332 return 1;
62333 if (handler != SIG_IGN && handler != SIG_DFL)
62334 @@ -627,6 +630,9 @@ static int check_kill_permission(int sig
62335 }
62336 }
62337
62338 + if (gr_handle_signal(t, sig))
62339 + return -EPERM;
62340 +
62341 return security_task_kill(t, info, sig, 0);
62342 }
62343
62344 @@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
62345 return send_signal(sig, info, p, 1);
62346 }
62347
62348 -static int
62349 +int
62350 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
62351 {
62352 return send_signal(sig, info, t, 0);
62353 @@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
62354 unsigned long int flags;
62355 int ret, blocked, ignored;
62356 struct k_sigaction *action;
62357 + int is_unhandled = 0;
62358
62359 spin_lock_irqsave(&t->sighand->siglock, flags);
62360 action = &t->sighand->action[sig-1];
62361 @@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
62362 }
62363 if (action->sa.sa_handler == SIG_DFL)
62364 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62365 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62366 + is_unhandled = 1;
62367 ret = specific_send_sig_info(sig, info, t);
62368 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62369
62370 +	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
62371 +	   normal operation */
62372 + if (is_unhandled) {
62373 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62374 + gr_handle_crash(t, sig);
62375 + }
62376 +
62377 return ret;
62378 }
62379
62380 @@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
62381 {
62382 int ret = check_kill_permission(sig, info, p);
62383
62384 - if (!ret && sig)
62385 + if (!ret && sig) {
62386 ret = do_send_sig_info(sig, info, p, true);
62387 + if (!ret)
62388 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62389 + }
62390
62391 return ret;
62392 }
62393 @@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
62394 {
62395 siginfo_t info;
62396
62397 + pax_track_stack();
62398 +
62399 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62400
62401 memset(&info, 0, sizeof info);
62402 diff -urNp linux-2.6.32.42/kernel/smp.c linux-2.6.32.42/kernel/smp.c
62403 --- linux-2.6.32.42/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
62404 +++ linux-2.6.32.42/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
62405 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
62406 }
62407 EXPORT_SYMBOL(smp_call_function);
62408
62409 -void ipi_call_lock(void)
62410 +void ipi_call_lock(void) __acquires(call_function.lock)
62411 {
62412 spin_lock(&call_function.lock);
62413 }
62414
62415 -void ipi_call_unlock(void)
62416 +void ipi_call_unlock(void) __releases(call_function.lock)
62417 {
62418 spin_unlock(&call_function.lock);
62419 }
62420
62421 -void ipi_call_lock_irq(void)
62422 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
62423 {
62424 spin_lock_irq(&call_function.lock);
62425 }
62426
62427 -void ipi_call_unlock_irq(void)
62428 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
62429 {
62430 spin_unlock_irq(&call_function.lock);
62431 }
62432 diff -urNp linux-2.6.32.42/kernel/softirq.c linux-2.6.32.42/kernel/softirq.c
62433 --- linux-2.6.32.42/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
62434 +++ linux-2.6.32.42/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
62435 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62436
62437 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62438
62439 -char *softirq_to_name[NR_SOFTIRQS] = {
62440 +const char * const softirq_to_name[NR_SOFTIRQS] = {
62441 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62442 "TASKLET", "SCHED", "HRTIMER", "RCU"
62443 };
62444 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
62445
62446 asmlinkage void __do_softirq(void)
62447 {
62448 - struct softirq_action *h;
62449 + const struct softirq_action *h;
62450 __u32 pending;
62451 int max_restart = MAX_SOFTIRQ_RESTART;
62452 int cpu;
62453 @@ -233,7 +233,7 @@ restart:
62454 kstat_incr_softirqs_this_cpu(h - softirq_vec);
62455
62456 trace_softirq_entry(h, softirq_vec);
62457 - h->action(h);
62458 + h->action();
62459 trace_softirq_exit(h, softirq_vec);
62460 if (unlikely(prev_count != preempt_count())) {
62461 printk(KERN_ERR "huh, entered softirq %td %s %p"
62462 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
62463 local_irq_restore(flags);
62464 }
62465
62466 -void open_softirq(int nr, void (*action)(struct softirq_action *))
62467 +void open_softirq(int nr, void (*action)(void))
62468 {
62469 softirq_vec[nr].action = action;
62470 }
62471 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
62472
62473 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62474
62475 -static void tasklet_action(struct softirq_action *a)
62476 +static void tasklet_action(void)
62477 {
62478 struct tasklet_struct *list;
62479
62480 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
62481 }
62482 }
62483
62484 -static void tasklet_hi_action(struct softirq_action *a)
62485 +static void tasklet_hi_action(void)
62486 {
62487 struct tasklet_struct *list;
62488
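Annotation (not part of the patch): the kernel/softirq.c hunks above change the softirq handler prototype from void (*)(struct softirq_action *) to void (*)(void), and the matching handler conversions appear throughout this patch (run_timer_softirq, tasklet_action, tasklet_hi_action, rcu_process_callbacks, run_rebalance_domains). The converted handlers do not use the argument, and dropping it pairs with __do_softirq now walking the vector through a const pointer. Under the changed open_softirq() a registration would look like this sketch (EXAMPLE_SOFTIRQ and the handler are hypothetical; real softirq numbers are the fixed enum values in <linux/interrupt.h>):

	#include <linux/init.h>
	#include <linux/interrupt.h>

	static void example_softirq_action(void)	/* no struct softirq_action * any more */
	{
		/* drain this CPU's pending work here */
	}

	static int __init example_softirq_setup(void)
	{
		open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);	/* hypothetical slot */
		return 0;
	}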
62489 diff -urNp linux-2.6.32.42/kernel/sys.c linux-2.6.32.42/kernel/sys.c
62490 --- linux-2.6.32.42/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
62491 +++ linux-2.6.32.42/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
62492 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
62493 error = -EACCES;
62494 goto out;
62495 }
62496 +
62497 + if (gr_handle_chroot_setpriority(p, niceval)) {
62498 + error = -EACCES;
62499 + goto out;
62500 + }
62501 +
62502 no_nice = security_task_setnice(p, niceval);
62503 if (no_nice) {
62504 error = no_nice;
62505 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
62506 !(user = find_user(who)))
62507 goto out_unlock; /* No processes for this user */
62508
62509 - do_each_thread(g, p)
62510 + do_each_thread(g, p) {
62511 if (__task_cred(p)->uid == who)
62512 error = set_one_prio(p, niceval, error);
62513 - while_each_thread(g, p);
62514 + } while_each_thread(g, p);
62515 if (who != cred->uid)
62516 free_uid(user); /* For find_user() */
62517 break;
62518 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
62519 !(user = find_user(who)))
62520 goto out_unlock; /* No processes for this user */
62521
62522 - do_each_thread(g, p)
62523 + do_each_thread(g, p) {
62524 if (__task_cred(p)->uid == who) {
62525 niceval = 20 - task_nice(p);
62526 if (niceval > retval)
62527 retval = niceval;
62528 }
62529 - while_each_thread(g, p);
62530 + } while_each_thread(g, p);
62531 if (who != cred->uid)
62532 free_uid(user); /* for find_user() */
62533 break;
62534 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62535 goto error;
62536 }
62537
62538 + if (gr_check_group_change(new->gid, new->egid, -1))
62539 + goto error;
62540 +
62541 if (rgid != (gid_t) -1 ||
62542 (egid != (gid_t) -1 && egid != old->gid))
62543 new->sgid = new->egid;
62544 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62545 goto error;
62546
62547 retval = -EPERM;
62548 +
62549 + if (gr_check_group_change(gid, gid, gid))
62550 + goto error;
62551 +
62552 if (capable(CAP_SETGID))
62553 new->gid = new->egid = new->sgid = new->fsgid = gid;
62554 else if (gid == old->gid || gid == old->sgid)
62555 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62556 goto error;
62557 }
62558
62559 + if (gr_check_user_change(new->uid, new->euid, -1))
62560 + goto error;
62561 +
62562 if (new->uid != old->uid) {
62563 retval = set_user(new);
62564 if (retval < 0)
62565 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62566 goto error;
62567
62568 retval = -EPERM;
62569 +
62570 + if (gr_check_crash_uid(uid))
62571 + goto error;
62572 + if (gr_check_user_change(uid, uid, uid))
62573 + goto error;
62574 +
62575 if (capable(CAP_SETUID)) {
62576 new->suid = new->uid = uid;
62577 if (uid != old->uid) {
62578 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62579 goto error;
62580 }
62581
62582 + if (gr_check_user_change(ruid, euid, -1))
62583 + goto error;
62584 +
62585 if (ruid != (uid_t) -1) {
62586 new->uid = ruid;
62587 if (ruid != old->uid) {
62588 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62589 goto error;
62590 }
62591
62592 + if (gr_check_group_change(rgid, egid, -1))
62593 + goto error;
62594 +
62595 if (rgid != (gid_t) -1)
62596 new->gid = rgid;
62597 if (egid != (gid_t) -1)
62598 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62599 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
62600 goto error;
62601
62602 + if (gr_check_user_change(-1, -1, uid))
62603 + goto error;
62604 +
62605 if (uid == old->uid || uid == old->euid ||
62606 uid == old->suid || uid == old->fsuid ||
62607 capable(CAP_SETUID)) {
62608 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62609 if (gid == old->gid || gid == old->egid ||
62610 gid == old->sgid || gid == old->fsgid ||
62611 capable(CAP_SETGID)) {
62612 + if (gr_check_group_change(-1, -1, gid))
62613 + goto error;
62614 +
62615 if (gid != old_fsgid) {
62616 new->fsgid = gid;
62617 goto change_okay;
62618 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62619 error = get_dumpable(me->mm);
62620 break;
62621 case PR_SET_DUMPABLE:
62622 - if (arg2 < 0 || arg2 > 1) {
62623 + if (arg2 > 1) {
62624 error = -EINVAL;
62625 break;
62626 }
62627 diff -urNp linux-2.6.32.42/kernel/sysctl.c linux-2.6.32.42/kernel/sysctl.c
62628 --- linux-2.6.32.42/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62629 +++ linux-2.6.32.42/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62630 @@ -63,6 +63,13 @@
62631 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62632
62633 #if defined(CONFIG_SYSCTL)
62634 +#include <linux/grsecurity.h>
62635 +#include <linux/grinternal.h>
62636 +
62637 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62638 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62639 + const int op);
62640 +extern int gr_handle_chroot_sysctl(const int op);
62641
62642 /* External variables not in a header file. */
62643 extern int C_A_D;
62644 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62645 static int proc_taint(struct ctl_table *table, int write,
62646 void __user *buffer, size_t *lenp, loff_t *ppos);
62647 #endif
62648 +extern ctl_table grsecurity_table[];
62649
62650 static struct ctl_table root_table[];
62651 static struct ctl_table_root sysctl_table_root;
62652 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62653 int sysctl_legacy_va_layout;
62654 #endif
62655
62656 +#ifdef CONFIG_PAX_SOFTMODE
62657 +static ctl_table pax_table[] = {
62658 + {
62659 + .ctl_name = CTL_UNNUMBERED,
62660 + .procname = "softmode",
62661 + .data = &pax_softmode,
62662 + .maxlen = sizeof(unsigned int),
62663 + .mode = 0600,
62664 + .proc_handler = &proc_dointvec,
62665 + },
62666 +
62667 + { .ctl_name = 0 }
62668 +};
62669 +#endif
62670 +
62671 extern int prove_locking;
62672 extern int lock_stat;
62673
62674 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62675 #endif
62676
62677 static struct ctl_table kern_table[] = {
62678 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62679 + {
62680 + .ctl_name = CTL_UNNUMBERED,
62681 + .procname = "grsecurity",
62682 + .mode = 0500,
62683 + .child = grsecurity_table,
62684 + },
62685 +#endif
62686 +
62687 +#ifdef CONFIG_PAX_SOFTMODE
62688 + {
62689 + .ctl_name = CTL_UNNUMBERED,
62690 + .procname = "pax",
62691 + .mode = 0500,
62692 + .child = pax_table,
62693 + },
62694 +#endif
62695 +
62696 {
62697 .ctl_name = CTL_UNNUMBERED,
62698 .procname = "sched_child_runs_first",
62699 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62700 .data = &modprobe_path,
62701 .maxlen = KMOD_PATH_LEN,
62702 .mode = 0644,
62703 - .proc_handler = &proc_dostring,
62704 - .strategy = &sysctl_string,
62705 + .proc_handler = &proc_dostring_modpriv,
62706 + .strategy = &sysctl_string_modpriv,
62707 },
62708 {
62709 .ctl_name = CTL_UNNUMBERED,
62710 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62711 .mode = 0644,
62712 .proc_handler = &proc_dointvec
62713 },
62714 + {
62715 + .procname = "heap_stack_gap",
62716 + .data = &sysctl_heap_stack_gap,
62717 + .maxlen = sizeof(sysctl_heap_stack_gap),
62718 + .mode = 0644,
62719 + .proc_handler = proc_doulongvec_minmax,
62720 + },
62721 #else
62722 {
62723 .ctl_name = CTL_UNNUMBERED,
62724 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62725 return 0;
62726 }
62727
62728 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62729 +
62730 static int parse_table(int __user *name, int nlen,
62731 void __user *oldval, size_t __user *oldlenp,
62732 void __user *newval, size_t newlen,
62733 @@ -1821,7 +1871,7 @@ repeat:
62734 if (n == table->ctl_name) {
62735 int error;
62736 if (table->child) {
62737 - if (sysctl_perm(root, table, MAY_EXEC))
62738 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62739 return -EPERM;
62740 name++;
62741 nlen--;
62742 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62743 int error;
62744 int mode;
62745
62746 + if (table->parent != NULL && table->parent->procname != NULL &&
62747 + table->procname != NULL &&
62748 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62749 + return -EACCES;
62750 + if (gr_handle_chroot_sysctl(op))
62751 + return -EACCES;
62752 + error = gr_handle_sysctl(table, op);
62753 + if (error)
62754 + return error;
62755 +
62756 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62757 + if (error)
62758 + return error;
62759 +
62760 + if (root->permissions)
62761 + mode = root->permissions(root, current->nsproxy, table);
62762 + else
62763 + mode = table->mode;
62764 +
62765 + return test_perm(mode, op);
62766 +}
62767 +
62768 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62769 +{
62770 + int error;
62771 + int mode;
62772 +
62773 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62774 if (error)
62775 return error;
62776 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62777 buffer, lenp, ppos);
62778 }
62779
62780 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62781 + void __user *buffer, size_t *lenp, loff_t *ppos)
62782 +{
62783 + if (write && !capable(CAP_SYS_MODULE))
62784 + return -EPERM;
62785 +
62786 + return _proc_do_string(table->data, table->maxlen, write,
62787 + buffer, lenp, ppos);
62788 +}
62789 +
62790
62791 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62792 int *valp,
62793 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62794 vleft = table->maxlen / sizeof(unsigned long);
62795 left = *lenp;
62796
62797 - for (; left && vleft--; i++, min++, max++, first=0) {
62798 + for (; left && vleft--; i++, first=0) {
62799 if (write) {
62800 while (left) {
62801 char c;
62802 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62803 return -ENOSYS;
62804 }
62805
62806 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62807 + void __user *buffer, size_t *lenp, loff_t *ppos)
62808 +{
62809 + return -ENOSYS;
62810 +}
62811 +
62812 int proc_dointvec(struct ctl_table *table, int write,
62813 void __user *buffer, size_t *lenp, loff_t *ppos)
62814 {
62815 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62816 return 1;
62817 }
62818
62819 +int sysctl_string_modpriv(struct ctl_table *table,
62820 + void __user *oldval, size_t __user *oldlenp,
62821 + void __user *newval, size_t newlen)
62822 +{
62823 + if (newval && newlen && !capable(CAP_SYS_MODULE))
62824 + return -EPERM;
62825 +
62826 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
62827 +}
62828 +
62829 /*
62830 * This function makes sure that all of the integers in the vector
62831 * are between the minimum and maximum values given in the arrays
62832 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62833 return -ENOSYS;
62834 }
62835
62836 +int sysctl_string_modpriv(struct ctl_table *table,
62837 + void __user *oldval, size_t __user *oldlenp,
62838 + void __user *newval, size_t newlen)
62839 +{
62840 + return -ENOSYS;
62841 +}
62842 +
62843 int sysctl_intvec(struct ctl_table *table,
62844 void __user *oldval, size_t __user *oldlenp,
62845 void __user *newval, size_t newlen)
62846 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62847 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62848 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62849 EXPORT_SYMBOL(proc_dostring);
62850 +EXPORT_SYMBOL(proc_dostring_modpriv);
62851 EXPORT_SYMBOL(proc_doulongvec_minmax);
62852 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62853 EXPORT_SYMBOL(register_sysctl_table);
62854 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62855 EXPORT_SYMBOL(sysctl_jiffies);
62856 EXPORT_SYMBOL(sysctl_ms_jiffies);
62857 EXPORT_SYMBOL(sysctl_string);
62858 +EXPORT_SYMBOL(sysctl_string_modpriv);
62859 EXPORT_SYMBOL(sysctl_data);
62860 EXPORT_SYMBOL(unregister_sysctl_table);
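Annotation (not part of the patch): in the kernel/sysctl.c hunks above, writes to modprobe_path are rerouted through proc_dostring_modpriv / sysctl_string_modpriv, which refuse the write unless the caller has CAP_SYS_MODULE while reads keep working as before; parse_table() meanwhile uses sysctl_perm_nochk() for intermediate directory nodes so that the grsecurity checks added to sysctl_perm() run once, on the final entry. The write-side gate, restated as a generic pattern (proc_dostring_capgated is a hypothetical name):

	#include <linux/errno.h>
	#include <linux/sysctl.h>
	#include <linux/capability.h>

	/* generic form of the gate: writing a sensitive string sysctl needs an
	 * extra capability, reading it does not */
	static int proc_dostring_capgated(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp, loff_t *ppos)
	{
		if (write && !capable(CAP_SYS_MODULE))
			return -EPERM;

		return proc_dostring(table, write, buffer, lenp, ppos);
	}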
62861 diff -urNp linux-2.6.32.42/kernel/sysctl_check.c linux-2.6.32.42/kernel/sysctl_check.c
62862 --- linux-2.6.32.42/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62863 +++ linux-2.6.32.42/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62864 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62865 } else {
62866 if ((table->strategy == sysctl_data) ||
62867 (table->strategy == sysctl_string) ||
62868 + (table->strategy == sysctl_string_modpriv) ||
62869 (table->strategy == sysctl_intvec) ||
62870 (table->strategy == sysctl_jiffies) ||
62871 (table->strategy == sysctl_ms_jiffies) ||
62872 (table->proc_handler == proc_dostring) ||
62873 + (table->proc_handler == proc_dostring_modpriv) ||
62874 (table->proc_handler == proc_dointvec) ||
62875 (table->proc_handler == proc_dointvec_minmax) ||
62876 (table->proc_handler == proc_dointvec_jiffies) ||
62877 diff -urNp linux-2.6.32.42/kernel/taskstats.c linux-2.6.32.42/kernel/taskstats.c
62878 --- linux-2.6.32.42/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62879 +++ linux-2.6.32.42/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62880 @@ -26,9 +26,12 @@
62881 #include <linux/cgroup.h>
62882 #include <linux/fs.h>
62883 #include <linux/file.h>
62884 +#include <linux/grsecurity.h>
62885 #include <net/genetlink.h>
62886 #include <asm/atomic.h>
62887
62888 +extern int gr_is_taskstats_denied(int pid);
62889 +
62890 /*
62891 * Maximum length of a cpumask that can be specified in
62892 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62893 @@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62894 size_t size;
62895 cpumask_var_t mask;
62896
62897 + if (gr_is_taskstats_denied(current->pid))
62898 + return -EACCES;
62899 +
62900 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62901 return -ENOMEM;
62902
62903 diff -urNp linux-2.6.32.42/kernel/time/tick-broadcast.c linux-2.6.32.42/kernel/time/tick-broadcast.c
62904 --- linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62905 +++ linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62906 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62907 * then clear the broadcast bit.
62908 */
62909 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62910 - int cpu = smp_processor_id();
62911 + cpu = smp_processor_id();
62912
62913 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62914 tick_broadcast_clear_oneshot(cpu);
62915 diff -urNp linux-2.6.32.42/kernel/time/timekeeping.c linux-2.6.32.42/kernel/time/timekeeping.c
62916 --- linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
62917 +++ linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
62918 @@ -14,6 +14,7 @@
62919 #include <linux/init.h>
62920 #include <linux/mm.h>
62921 #include <linux/sched.h>
62922 +#include <linux/grsecurity.h>
62923 #include <linux/sysdev.h>
62924 #include <linux/clocksource.h>
62925 #include <linux/jiffies.h>
62926 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
62927 */
62928 struct timespec ts = xtime;
62929 timespec_add_ns(&ts, nsec);
62930 - ACCESS_ONCE(xtime_cache) = ts;
62931 + ACCESS_ONCE_RW(xtime_cache) = ts;
62932 }
62933
62934 /* must hold xtime_lock */
62935 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
62936 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62937 return -EINVAL;
62938
62939 + gr_log_timechange();
62940 +
62941 write_seqlock_irqsave(&xtime_lock, flags);
62942
62943 timekeeping_forward_now();
62944 diff -urNp linux-2.6.32.42/kernel/time/timer_list.c linux-2.6.32.42/kernel/time/timer_list.c
62945 --- linux-2.6.32.42/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62946 +++ linux-2.6.32.42/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62947 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62948
62949 static void print_name_offset(struct seq_file *m, void *sym)
62950 {
62951 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62952 + SEQ_printf(m, "<%p>", NULL);
62953 +#else
62954 char symname[KSYM_NAME_LEN];
62955
62956 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62957 SEQ_printf(m, "<%p>", sym);
62958 else
62959 SEQ_printf(m, "%s", symname);
62960 +#endif
62961 }
62962
62963 static void
62964 @@ -112,7 +116,11 @@ next_one:
62965 static void
62966 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62967 {
62968 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62969 + SEQ_printf(m, " .base: %p\n", NULL);
62970 +#else
62971 SEQ_printf(m, " .base: %p\n", base);
62972 +#endif
62973 SEQ_printf(m, " .index: %d\n",
62974 base->index);
62975 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62976 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62977 {
62978 struct proc_dir_entry *pe;
62979
62980 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62981 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62982 +#else
62983 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62984 +#endif
62985 if (!pe)
62986 return -ENOMEM;
62987 return 0;
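Annotation (not part of the patch): print_name_offset() in kernel/time/timer_list.c above and in kernel/time/timer_stats.c below get the same CONFIG_GRKERNSEC_HIDESYM treatment: with the option enabled, the /proc output prints a constant placeholder instead of real kernel addresses or resolved symbol names, and the companion GRKERNSEC_PROC_ADD hunks tighten the modes of the /proc entries themselves. Condensed to one sketch (print_kernel_sym is a hypothetical name):

	#include <linux/seq_file.h>
	#include <linux/kallsyms.h>

	static void print_kernel_sym(struct seq_file *m, void *sym)
	{
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		seq_printf(m, "<%p>", NULL);		/* never reveal the address */
	#else
		char symname[KSYM_NAME_LEN];

		if (lookup_symbol_name((unsigned long)sym, symname) < 0)
			seq_printf(m, "<%p>", sym);
		else
			seq_printf(m, "%s", symname);
	#endif
	}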
62988 diff -urNp linux-2.6.32.42/kernel/time/timer_stats.c linux-2.6.32.42/kernel/time/timer_stats.c
62989 --- linux-2.6.32.42/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62990 +++ linux-2.6.32.42/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62991 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62992 static unsigned long nr_entries;
62993 static struct entry entries[MAX_ENTRIES];
62994
62995 -static atomic_t overflow_count;
62996 +static atomic_unchecked_t overflow_count;
62997
62998 /*
62999 * The entries are in a hash-table, for fast lookup:
63000 @@ -140,7 +140,7 @@ static void reset_entries(void)
63001 nr_entries = 0;
63002 memset(entries, 0, sizeof(entries));
63003 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
63004 - atomic_set(&overflow_count, 0);
63005 + atomic_set_unchecked(&overflow_count, 0);
63006 }
63007
63008 static struct entry *alloc_entry(void)
63009 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
63010 if (likely(entry))
63011 entry->count++;
63012 else
63013 - atomic_inc(&overflow_count);
63014 + atomic_inc_unchecked(&overflow_count);
63015
63016 out_unlock:
63017 spin_unlock_irqrestore(lock, flags);
63018 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
63019
63020 static void print_name_offset(struct seq_file *m, unsigned long addr)
63021 {
63022 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63023 + seq_printf(m, "<%p>", NULL);
63024 +#else
63025 char symname[KSYM_NAME_LEN];
63026
63027 if (lookup_symbol_name(addr, symname) < 0)
63028 seq_printf(m, "<%p>", (void *)addr);
63029 else
63030 seq_printf(m, "%s", symname);
63031 +#endif
63032 }
63033
63034 static int tstats_show(struct seq_file *m, void *v)
63035 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
63036
63037 seq_puts(m, "Timer Stats Version: v0.2\n");
63038 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
63039 - if (atomic_read(&overflow_count))
63040 + if (atomic_read_unchecked(&overflow_count))
63041 seq_printf(m, "Overflow: %d entries\n",
63042 - atomic_read(&overflow_count));
63043 + atomic_read_unchecked(&overflow_count));
63044
63045 for (i = 0; i < nr_entries; i++) {
63046 entry = entries + i;
63047 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
63048 {
63049 struct proc_dir_entry *pe;
63050
63051 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63052 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
63053 +#else
63054 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
63055 +#endif
63056 if (!pe)
63057 return -ENOMEM;
63058 return 0;
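The atomic_t -> atomic_unchecked_t conversions above (overflow_count here, and bt->dropped and dropped_count in the tracing files further down) opt pure statistics counters out of PaX's reference-counter overflow detection, since wraparound of such counters is harmless. A minimal userspace sketch of the distinction, using C11 atomics; checked_inc/unchecked_inc are illustrative names, not kernel APIs:

/*
 * Sketch only: an instrumented increment that refuses to wrap versus a
 * plain increment whose wraparound is accepted, as for statistics.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* instrumented increment: abort instead of silently wrapping */
static void checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == INT_MAX) {
			fprintf(stderr, "refcount overflow detected\n");
			abort();
		}
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
}

/* plain increment: wraparound is harmless for a statistics counter */
static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);
}

int main(void)
{
	atomic_int stats = INT_MAX;

	unchecked_inc(&stats);		/* wraps to INT_MIN, acceptable here */
	printf("stats counter after wrap: %d\n", atomic_load(&stats));

	checked_inc(&stats);		/* fine: no longer at INT_MAX */
	printf("checked counter: %d\n", atomic_load(&stats));
	return 0;
}
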
63059 diff -urNp linux-2.6.32.42/kernel/time.c linux-2.6.32.42/kernel/time.c
63060 --- linux-2.6.32.42/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
63061 +++ linux-2.6.32.42/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
63062 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
63063 return error;
63064
63065 if (tz) {
63066 + /* we log in do_settimeofday called below, so don't log twice
63067 + */
63068 + if (!tv)
63069 + gr_log_timechange();
63070 +
63071 /* SMP safe, global irq locking makes it work. */
63072 sys_tz = *tz;
63073 update_vsyscall_tz();
63074 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
63075 * Avoid unnecessary multiplications/divisions in the
63076 * two most common HZ cases:
63077 */
63078 -unsigned int inline jiffies_to_msecs(const unsigned long j)
63079 +inline unsigned int jiffies_to_msecs(const unsigned long j)
63080 {
63081 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
63082 return (MSEC_PER_SEC / HZ) * j;
63083 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
63084 }
63085 EXPORT_SYMBOL(jiffies_to_msecs);
63086
63087 -unsigned int inline jiffies_to_usecs(const unsigned long j)
63088 +inline unsigned int jiffies_to_usecs(const unsigned long j)
63089 {
63090 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
63091 return (USEC_PER_SEC / HZ) * j;
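The jiffies_to_msecs()/jiffies_to_usecs() hunks above only move the `inline` keyword ahead of the return type (the conventional declaration-specifier order); the conversion logic is unchanged. As a reminder of what that fast path computes, here is a small standalone sketch, with HZ and MSEC_PER_SEC as illustrative stand-ins for the kernel's values:

#include <stdio.h>

#define MSEC_PER_SEC	1000UL
#define HZ		250UL	/* illustrative tick rate */

static unsigned long jiffies_to_msecs(unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;		/* 4 ms per jiffy at HZ=250 */
#else
	return (j * MSEC_PER_SEC) / HZ;		/* slow path needs a divide */
#endif
}

int main(void)
{
	printf("%lu jiffies = %lu ms\n", 250UL, jiffies_to_msecs(250));
	return 0;
}
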
63092 diff -urNp linux-2.6.32.42/kernel/timer.c linux-2.6.32.42/kernel/timer.c
63093 --- linux-2.6.32.42/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
63094 +++ linux-2.6.32.42/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
63095 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
63096 /*
63097 * This function runs timers and the timer-tq in bottom half context.
63098 */
63099 -static void run_timer_softirq(struct softirq_action *h)
63100 +static void run_timer_softirq(void)
63101 {
63102 struct tvec_base *base = __get_cpu_var(tvec_bases);
63103
63104 diff -urNp linux-2.6.32.42/kernel/trace/blktrace.c linux-2.6.32.42/kernel/trace/blktrace.c
63105 --- linux-2.6.32.42/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
63106 +++ linux-2.6.32.42/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
63107 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
63108 struct blk_trace *bt = filp->private_data;
63109 char buf[16];
63110
63111 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
63112 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
63113
63114 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
63115 }
63116 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
63117 return 1;
63118
63119 bt = buf->chan->private_data;
63120 - atomic_inc(&bt->dropped);
63121 + atomic_inc_unchecked(&bt->dropped);
63122 return 0;
63123 }
63124
63125 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
63126
63127 bt->dir = dir;
63128 bt->dev = dev;
63129 - atomic_set(&bt->dropped, 0);
63130 + atomic_set_unchecked(&bt->dropped, 0);
63131
63132 ret = -EIO;
63133 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
63134 diff -urNp linux-2.6.32.42/kernel/trace/ftrace.c linux-2.6.32.42/kernel/trace/ftrace.c
63135 --- linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
63136 +++ linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
63137 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
63138
63139 ip = rec->ip;
63140
63141 + ret = ftrace_arch_code_modify_prepare();
63142 + FTRACE_WARN_ON(ret);
63143 + if (ret)
63144 + return 0;
63145 +
63146 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
63147 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
63148 if (ret) {
63149 ftrace_bug(ret, ip);
63150 rec->flags |= FTRACE_FL_FAILED;
63151 - return 0;
63152 }
63153 - return 1;
63154 + return ret ? 0 : 1;
63155 }
63156
63157 /*
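The ftrace_code_disable() hunk above brackets the mcount-site patching with ftrace_arch_code_modify_prepare()/post_process(), which on PaX kernels temporarily lifts the write protection on kernel text for the duration of the modification. A rough userspace analogue of that open-patch-close pattern, using mprotect() on an ordinary page (buffer contents and byte values are illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	unsigned char *text;

	text = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (text == MAP_FAILED)
		return 1;
	memset(text, 0x90, pagesz);		/* pretend this is instruction text */
	mprotect(text, pagesz, PROT_READ);	/* normal state: read-only */

	/* "prepare": open the window for modification */
	if (mprotect(text, pagesz, PROT_READ | PROT_WRITE))
		return 1;
	text[0] = 0xcc;				/* patch one byte */
	/* "post_process": close the window again */
	if (mprotect(text, pagesz, PROT_READ))
		return 1;

	printf("patched byte: %#x\n", text[0]);
	munmap(text, pagesz);
	return 0;
}
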
63158 diff -urNp linux-2.6.32.42/kernel/trace/ring_buffer.c linux-2.6.32.42/kernel/trace/ring_buffer.c
63159 --- linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
63160 +++ linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
63161 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
63162 * the reader page). But if the next page is a header page,
63163 * its flags will be non zero.
63164 */
63165 -static int inline
63166 +static inline int
63167 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
63168 struct buffer_page *page, struct list_head *list)
63169 {
63170 diff -urNp linux-2.6.32.42/kernel/trace/trace.c linux-2.6.32.42/kernel/trace/trace.c
63171 --- linux-2.6.32.42/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
63172 +++ linux-2.6.32.42/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
63173 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
63174 size_t rem;
63175 unsigned int i;
63176
63177 + pax_track_stack();
63178 +
63179 /* copy the tracer to avoid using a global lock all around */
63180 mutex_lock(&trace_types_lock);
63181 if (unlikely(old_tracer != current_trace && current_trace)) {
63182 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
63183 int entries, size, i;
63184 size_t ret;
63185
63186 + pax_track_stack();
63187 +
63188 if (*ppos & (PAGE_SIZE - 1)) {
63189 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
63190 return -EINVAL;
63191 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
63192 };
63193 #endif
63194
63195 -static struct dentry *d_tracer;
63196 -
63197 struct dentry *tracing_init_dentry(void)
63198 {
63199 + static struct dentry *d_tracer;
63200 static int once;
63201
63202 if (d_tracer)
63203 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
63204 return d_tracer;
63205 }
63206
63207 -static struct dentry *d_percpu;
63208 -
63209 struct dentry *tracing_dentry_percpu(void)
63210 {
63211 + static struct dentry *d_percpu;
63212 static int once;
63213 struct dentry *d_tracer;
63214
63215 diff -urNp linux-2.6.32.42/kernel/trace/trace_events.c linux-2.6.32.42/kernel/trace/trace_events.c
63216 --- linux-2.6.32.42/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
63217 +++ linux-2.6.32.42/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
63218 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
63219 * Modules must own their file_operations to keep up with
63220 * reference counting.
63221 */
63222 +
63223 +/* cannot be const */
63224 struct ftrace_module_file_ops {
63225 struct list_head list;
63226 struct module *mod;
63227 diff -urNp linux-2.6.32.42/kernel/trace/trace_mmiotrace.c linux-2.6.32.42/kernel/trace/trace_mmiotrace.c
63228 --- linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
63229 +++ linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
63230 @@ -23,7 +23,7 @@ struct header_iter {
63231 static struct trace_array *mmio_trace_array;
63232 static bool overrun_detected;
63233 static unsigned long prev_overruns;
63234 -static atomic_t dropped_count;
63235 +static atomic_unchecked_t dropped_count;
63236
63237 static void mmio_reset_data(struct trace_array *tr)
63238 {
63239 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
63240
63241 static unsigned long count_overruns(struct trace_iterator *iter)
63242 {
63243 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
63244 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
63245 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
63246
63247 if (over > prev_overruns)
63248 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
63249 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
63250 sizeof(*entry), 0, pc);
63251 if (!event) {
63252 - atomic_inc(&dropped_count);
63253 + atomic_inc_unchecked(&dropped_count);
63254 return;
63255 }
63256 entry = ring_buffer_event_data(event);
63257 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
63258 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
63259 sizeof(*entry), 0, pc);
63260 if (!event) {
63261 - atomic_inc(&dropped_count);
63262 + atomic_inc_unchecked(&dropped_count);
63263 return;
63264 }
63265 entry = ring_buffer_event_data(event);
63266 diff -urNp linux-2.6.32.42/kernel/trace/trace_output.c linux-2.6.32.42/kernel/trace/trace_output.c
63267 --- linux-2.6.32.42/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
63268 +++ linux-2.6.32.42/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
63269 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
63270 return 0;
63271 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
63272 if (!IS_ERR(p)) {
63273 - p = mangle_path(s->buffer + s->len, p, "\n");
63274 + p = mangle_path(s->buffer + s->len, p, "\n\\");
63275 if (p) {
63276 s->len = p - s->buffer;
63277 return 1;
63278 diff -urNp linux-2.6.32.42/kernel/trace/trace_stack.c linux-2.6.32.42/kernel/trace/trace_stack.c
63279 --- linux-2.6.32.42/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
63280 +++ linux-2.6.32.42/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
63281 @@ -50,7 +50,7 @@ static inline void check_stack(void)
63282 return;
63283
63284 /* we do not handle interrupt stacks yet */
63285 - if (!object_is_on_stack(&this_size))
63286 + if (!object_starts_on_stack(&this_size))
63287 return;
63288
63289 local_irq_save(flags);
63290 diff -urNp linux-2.6.32.42/kernel/trace/trace_workqueue.c linux-2.6.32.42/kernel/trace/trace_workqueue.c
63291 --- linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
63292 +++ linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
63293 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
63294 int cpu;
63295 pid_t pid;
63296 /* Can be inserted from interrupt or user context, need to be atomic */
63297 - atomic_t inserted;
63298 + atomic_unchecked_t inserted;
63299 /*
63300 * Don't need to be atomic, works are serialized in a single workqueue thread
63301 * on a single CPU.
63302 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
63303 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
63304 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
63305 if (node->pid == wq_thread->pid) {
63306 - atomic_inc(&node->inserted);
63307 + atomic_inc_unchecked(&node->inserted);
63308 goto found;
63309 }
63310 }
63311 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
63312 tsk = get_pid_task(pid, PIDTYPE_PID);
63313 if (tsk) {
63314 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
63315 - atomic_read(&cws->inserted), cws->executed,
63316 + atomic_read_unchecked(&cws->inserted), cws->executed,
63317 tsk->comm);
63318 put_task_struct(tsk);
63319 }
63320 diff -urNp linux-2.6.32.42/kernel/user.c linux-2.6.32.42/kernel/user.c
63321 --- linux-2.6.32.42/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
63322 +++ linux-2.6.32.42/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
63323 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
63324 spin_lock_irq(&uidhash_lock);
63325 up = uid_hash_find(uid, hashent);
63326 if (up) {
63327 + put_user_ns(ns);
63328 key_put(new->uid_keyring);
63329 key_put(new->session_keyring);
63330 kmem_cache_free(uid_cachep, new);
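The one-line alloc_uid() change above adds the missing put_user_ns() on the path where the uid was already in the hash, so the namespace reference taken for the new entry is not leaked when that entry is thrown away. A minimal sketch of the pattern; struct ns and the get/put helpers are illustrative stand-ins, not the kernel's user-namespace API:

#include <stdio.h>

struct ns {
	int refcount;
};

static void get_ns(struct ns *ns)	{ ns->refcount++; }
static void put_ns(struct ns *ns)	{ ns->refcount--; }

static int lookup_existing(void)	{ return 1; }	/* pretend the hash hit */

static void alloc_uid_like(struct ns *ns)
{
	get_ns(ns);				/* reference meant for the new entry */

	if (lookup_existing()) {
		put_ns(ns);			/* the fix: drop it on the hit path */
		return;				/* without put_ns() this leaks */
	}
	/* ... otherwise the new entry keeps the reference ... */
}

int main(void)
{
	struct ns ns = { .refcount = 1 };

	alloc_uid_like(&ns);
	printf("refcount after call: %d (expected 1)\n", ns.refcount);
	return 0;
}
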
63331 diff -urNp linux-2.6.32.42/lib/bug.c linux-2.6.32.42/lib/bug.c
63332 --- linux-2.6.32.42/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
63333 +++ linux-2.6.32.42/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
63334 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
63335 return BUG_TRAP_TYPE_NONE;
63336
63337 bug = find_bug(bugaddr);
63338 + if (!bug)
63339 + return BUG_TRAP_TYPE_NONE;
63340
63341 printk(KERN_EMERG "------------[ cut here ]------------\n");
63342
63343 diff -urNp linux-2.6.32.42/lib/debugobjects.c linux-2.6.32.42/lib/debugobjects.c
63344 --- linux-2.6.32.42/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
63345 +++ linux-2.6.32.42/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
63346 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
63347 if (limit > 4)
63348 return;
63349
63350 - is_on_stack = object_is_on_stack(addr);
63351 + is_on_stack = object_starts_on_stack(addr);
63352 if (is_on_stack == onstack)
63353 return;
63354
63355 diff -urNp linux-2.6.32.42/lib/dma-debug.c linux-2.6.32.42/lib/dma-debug.c
63356 --- linux-2.6.32.42/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
63357 +++ linux-2.6.32.42/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
63358 @@ -861,7 +861,7 @@ out:
63359
63360 static void check_for_stack(struct device *dev, void *addr)
63361 {
63362 - if (object_is_on_stack(addr))
63363 + if (object_starts_on_stack(addr))
63364 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
63365 "stack [addr=%p]\n", addr);
63366 }
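The object_is_on_stack() -> object_starts_on_stack() renames above (trace_stack.c, debugobjects.c, dma-debug.c) change the name to match what the helper actually checks: the object's starting address, not the whole object. The difference, sketched in userspace with a local array standing in for the task stack:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool starts_on_stack(const void *lo, const void *hi, const void *obj)
{
	return obj >= lo && obj < hi;
}

static bool fully_on_stack(const void *lo, const void *hi,
			   const void *obj, size_t len)
{
	return starts_on_stack(lo, hi, obj) &&
	       len <= (size_t)((const char *)hi - (const char *)obj);
}

int main(void)
{
	char stack[256];			/* stand-in for the task stack */
	const void *lo = stack, *hi = stack + sizeof(stack);
	const void *obj = stack + 250;		/* begins near the stack's end */

	printf("starts on stack: %d\n", starts_on_stack(lo, hi, obj));
	printf("fully on stack (16 bytes): %d\n",
	       fully_on_stack(lo, hi, obj, 16));
	return 0;
}
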
63367 diff -urNp linux-2.6.32.42/lib/idr.c linux-2.6.32.42/lib/idr.c
63368 --- linux-2.6.32.42/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
63369 +++ linux-2.6.32.42/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
63370 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
63371 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
63372
63373 /* if already at the top layer, we need to grow */
63374 - if (id >= 1 << (idp->layers * IDR_BITS)) {
63375 + if (id >= (1 << (idp->layers * IDR_BITS))) {
63376 *starting_id = id;
63377 return IDR_NEED_TO_GROW;
63378 }
63379 diff -urNp linux-2.6.32.42/lib/inflate.c linux-2.6.32.42/lib/inflate.c
63380 --- linux-2.6.32.42/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
63381 +++ linux-2.6.32.42/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
63382 @@ -266,7 +266,7 @@ static void free(void *where)
63383 malloc_ptr = free_mem_ptr;
63384 }
63385 #else
63386 -#define malloc(a) kmalloc(a, GFP_KERNEL)
63387 +#define malloc(a) kmalloc((a), GFP_KERNEL)
63388 #define free(a) kfree(a)
63389 #endif
63390
63391 diff -urNp linux-2.6.32.42/lib/Kconfig.debug linux-2.6.32.42/lib/Kconfig.debug
63392 --- linux-2.6.32.42/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
63393 +++ linux-2.6.32.42/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
63394 @@ -905,7 +905,7 @@ config LATENCYTOP
63395 select STACKTRACE
63396 select SCHEDSTATS
63397 select SCHED_DEBUG
63398 - depends on HAVE_LATENCYTOP_SUPPORT
63399 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
63400 help
63401 Enable this option if you want to use the LatencyTOP tool
63402 to find out which userspace is blocking on what kernel operations.
63403 diff -urNp linux-2.6.32.42/lib/kobject.c linux-2.6.32.42/lib/kobject.c
63404 --- linux-2.6.32.42/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
63405 +++ linux-2.6.32.42/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
63406 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
63407 return ret;
63408 }
63409
63410 -struct sysfs_ops kobj_sysfs_ops = {
63411 +const struct sysfs_ops kobj_sysfs_ops = {
63412 .show = kobj_attr_show,
63413 .store = kobj_attr_store,
63414 };
63415 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
63416 * If the kset was not able to be created, NULL will be returned.
63417 */
63418 static struct kset *kset_create(const char *name,
63419 - struct kset_uevent_ops *uevent_ops,
63420 + const struct kset_uevent_ops *uevent_ops,
63421 struct kobject *parent_kobj)
63422 {
63423 struct kset *kset;
63424 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
63425 * If the kset was not able to be created, NULL will be returned.
63426 */
63427 struct kset *kset_create_and_add(const char *name,
63428 - struct kset_uevent_ops *uevent_ops,
63429 + const struct kset_uevent_ops *uevent_ops,
63430 struct kobject *parent_kobj)
63431 {
63432 struct kset *kset;
63433 diff -urNp linux-2.6.32.42/lib/kobject_uevent.c linux-2.6.32.42/lib/kobject_uevent.c
63434 --- linux-2.6.32.42/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
63435 +++ linux-2.6.32.42/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
63436 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
63437 const char *subsystem;
63438 struct kobject *top_kobj;
63439 struct kset *kset;
63440 - struct kset_uevent_ops *uevent_ops;
63441 + const struct kset_uevent_ops *uevent_ops;
63442 u64 seq;
63443 int i = 0;
63444 int retval = 0;
63445 diff -urNp linux-2.6.32.42/lib/kref.c linux-2.6.32.42/lib/kref.c
63446 --- linux-2.6.32.42/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
63447 +++ linux-2.6.32.42/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
63448 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
63449 */
63450 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63451 {
63452 - WARN_ON(release == NULL);
63453 + BUG_ON(release == NULL);
63454 WARN_ON(release == (void (*)(struct kref *))kfree);
63455
63456 if (atomic_dec_and_test(&kref->refcount)) {
63457 diff -urNp linux-2.6.32.42/lib/parser.c linux-2.6.32.42/lib/parser.c
63458 --- linux-2.6.32.42/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
63459 +++ linux-2.6.32.42/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
63460 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
63461 char *buf;
63462 int ret;
63463
63464 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
63465 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
63466 if (!buf)
63467 return -ENOMEM;
63468 memcpy(buf, s->from, s->to - s->from);
63469 diff -urNp linux-2.6.32.42/lib/radix-tree.c linux-2.6.32.42/lib/radix-tree.c
63470 --- linux-2.6.32.42/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
63471 +++ linux-2.6.32.42/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
63472 @@ -81,7 +81,7 @@ struct radix_tree_preload {
63473 int nr;
63474 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63475 };
63476 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63477 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63478
63479 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
63480 {
63481 diff -urNp linux-2.6.32.42/lib/random32.c linux-2.6.32.42/lib/random32.c
63482 --- linux-2.6.32.42/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
63483 +++ linux-2.6.32.42/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
63484 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
63485 */
63486 static inline u32 __seed(u32 x, u32 m)
63487 {
63488 - return (x < m) ? x + m : x;
63489 + return (x <= m) ? x + m + 1 : x;
63490 }
63491
63492 /**
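The __seed() change above makes the helper return a value strictly greater than m even when x == m; the Tausworthe state words have small per-word minima, and seeding one exactly at its minimum would leave the generator in an invalid state. A standalone comparison of the old and new expressions at the boundary case (values illustrative):

#include <stdio.h>

typedef unsigned int u32;

static u32 seed_old(u32 x, u32 m) { return (x < m)  ? x + m     : x; }
static u32 seed_new(u32 x, u32 m) { return (x <= m) ? x + m + 1 : x; }

int main(void)
{
	u32 x = 1, m = 1;	/* boundary case for the first state word */

	printf("old: %u (== m, invalid state word)\n", seed_old(x, m));
	printf("new: %u (> m, valid state word)\n",   seed_new(x, m));
	return 0;
}
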
63493 diff -urNp linux-2.6.32.42/lib/vsprintf.c linux-2.6.32.42/lib/vsprintf.c
63494 --- linux-2.6.32.42/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
63495 +++ linux-2.6.32.42/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
63496 @@ -16,6 +16,9 @@
63497 * - scnprintf and vscnprintf
63498 */
63499
63500 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63501 +#define __INCLUDED_BY_HIDESYM 1
63502 +#endif
63503 #include <stdarg.h>
63504 #include <linux/module.h>
63505 #include <linux/types.h>
63506 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
63507 return buf;
63508 }
63509
63510 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
63511 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
63512 {
63513 int len, i;
63514
63515 if ((unsigned long)s < PAGE_SIZE)
63516 - s = "<NULL>";
63517 + s = "(null)";
63518
63519 len = strnlen(s, spec.precision);
63520
63521 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
63522 unsigned long value = (unsigned long) ptr;
63523 #ifdef CONFIG_KALLSYMS
63524 char sym[KSYM_SYMBOL_LEN];
63525 - if (ext != 'f' && ext != 's')
63526 + if (ext != 'f' && ext != 's' && ext != 'a')
63527 sprint_symbol(sym, value);
63528 else
63529 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63530 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
63531 * - 'f' For simple symbolic function names without offset
63532 * - 'S' For symbolic direct pointers with offset
63533 * - 's' For symbolic direct pointers without offset
63534 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63535 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63536 * - 'R' For a struct resource pointer, it prints the range of
63537 * addresses (not the name nor the flags)
63538 * - 'M' For a 6-byte MAC address, it prints the address in the
63539 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
63540 struct printf_spec spec)
63541 {
63542 if (!ptr)
63543 - return string(buf, end, "(null)", spec);
63544 + return string(buf, end, "(nil)", spec);
63545
63546 switch (*fmt) {
63547 case 'F':
63548 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
63549 case 's':
63550 /* Fallthrough */
63551 case 'S':
63552 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63553 + break;
63554 +#else
63555 + return symbol_string(buf, end, ptr, spec, *fmt);
63556 +#endif
63557 + case 'a':
63558 + /* Fallthrough */
63559 + case 'A':
63560 return symbol_string(buf, end, ptr, spec, *fmt);
63561 case 'R':
63562 return resource_string(buf, end, ptr, spec);
63563 @@ -1445,7 +1458,7 @@ do { \
63564 size_t len;
63565 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
63566 || (unsigned long)save_str < PAGE_SIZE)
63567 - save_str = "<NULL>";
63568 + save_str = "(null)";
63569 len = strlen(save_str);
63570 if (str + len + 1 < end)
63571 memcpy(str, save_str, len + 1);
63572 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
63573 typeof(type) value; \
63574 if (sizeof(type) == 8) { \
63575 args = PTR_ALIGN(args, sizeof(u32)); \
63576 - *(u32 *)&value = *(u32 *)args; \
63577 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63578 + *(u32 *)&value = *(const u32 *)args; \
63579 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63580 } else { \
63581 args = PTR_ALIGN(args, sizeof(type)); \
63582 - value = *(typeof(type) *)args; \
63583 + value = *(const typeof(type) *)args; \
63584 } \
63585 args += sizeof(type); \
63586 value; \
63587 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
63588 const char *str_arg = args;
63589 size_t len = strlen(str_arg);
63590 args += len + 1;
63591 - str = string(str, end, (char *)str_arg, spec);
63592 + str = string(str, end, str_arg, spec);
63593 break;
63594 }
63595
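The vsprintf.c hunks above add the %pa/%pA pointer extensions: under GRKERNSEC_HIDESYM the ordinary %ps/%pS specifiers stop resolving symbols, while callers explicitly approved to reveal them (kmemleak's %pA below, for instance) keep working. A simplified userspace sketch of that dispatch; HIDESYM and lookup_symbol() stand in for the config option and kallsyms, and the hidden case is reduced to printing a null pointer rather than the kernel's generic %p fallback:

#include <stdio.h>

#define HIDESYM 1			/* pretend CONFIG_GRKERNSEC_HIDESYM=y */

static const char *lookup_symbol(const void *ptr)
{
	(void)ptr;
	return "do_settimeofday+0x0/0x120";	/* illustrative result */
}

static void print_pointer(char ext, const void *ptr)
{
	switch (ext) {
	case 's':
	case 'S':
#if HIDESYM
		printf("%p\n", (void *)0);	/* symbol hidden (simplified) */
		break;
#endif
		/* fall through when symbols are not hidden */
	case 'a':
	case 'A':
		printf("%s\n", lookup_symbol(ptr));
		break;
	default:
		printf("%p\n", ptr);
	}
}

int main(void)
{
	int dummy;

	print_pointer('S', &dummy);	/* hidden under HIDESYM */
	print_pointer('A', &dummy);	/* always resolved */
	return 0;
}
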
63596 diff -urNp linux-2.6.32.42/localversion-grsec linux-2.6.32.42/localversion-grsec
63597 --- linux-2.6.32.42/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63598 +++ linux-2.6.32.42/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
63599 @@ -0,0 +1 @@
63600 +-grsec
63601 diff -urNp linux-2.6.32.42/Makefile linux-2.6.32.42/Makefile
63602 --- linux-2.6.32.42/Makefile 2011-06-25 12:55:34.000000000 -0400
63603 +++ linux-2.6.32.42/Makefile 2011-06-25 12:56:37.000000000 -0400
63604 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63605
63606 HOSTCC = gcc
63607 HOSTCXX = g++
63608 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63609 -HOSTCXXFLAGS = -O2
63610 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63611 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63612 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63613
63614 # Decide whether to build built-in, modular, or both.
63615 # Normally, just do built-in.
63616 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
63617 KBUILD_CPPFLAGS := -D__KERNEL__
63618
63619 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63620 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
63621 -fno-strict-aliasing -fno-common \
63622 -Werror-implicit-function-declaration \
63623 -Wno-format-security \
63624 -fno-delete-null-pointer-checks
63625 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63626 KBUILD_AFLAGS := -D__ASSEMBLY__
63627
63628 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63629 @@ -403,7 +406,7 @@ endif
63630 # of make so .config is not included in this case either (for *config).
63631
63632 no-dot-config-targets := clean mrproper distclean \
63633 - cscope TAGS tags help %docs check% \
63634 + cscope gtags TAGS tags help %docs check% \
63635 include/linux/version.h headers_% \
63636 kernelrelease kernelversion
63637
63638 @@ -644,7 +647,7 @@ export mod_strip_cmd
63639
63640
63641 ifeq ($(KBUILD_EXTMOD),)
63642 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63643 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63644
63645 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63646 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63647 @@ -949,7 +952,19 @@ include/config/kernel.release: include/c
63648 # version.h and scripts_basic is processed / created.
63649
63650 # Listed in dependency order
63651 -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
63652 +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 pax-plugin
63653 +
63654 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63655 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63656 +endif
63657 +pax-plugin:
63658 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63659 + $(Q)$(MAKE) $(build)=tools/gcc
63660 +else
63661 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63662 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63663 +endif
63664 +endif
63665
63666 # prepare3 is used to check if we are building in a separate output directory,
63667 # and if so do:
63668 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
63669 endif
63670
63671 # prepare2 creates a makefile if using a separate output directory
63672 -prepare2: prepare3 outputmakefile
63673 +prepare2: prepare3 outputmakefile pax-plugin
63674
63675 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63676 include/asm include/config/auto.conf
63677 @@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63678 include/linux/autoconf.h include/linux/version.h \
63679 include/linux/utsrelease.h \
63680 include/linux/bounds.h include/asm*/asm-offsets.h \
63681 - Module.symvers Module.markers tags TAGS cscope*
63682 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63683
63684 # clean - Delete most, but leave enough to build external modules
63685 #
63686 @@ -1289,6 +1304,7 @@ help:
63687 @echo ' modules_prepare - Set up for building external modules'
63688 @echo ' tags/TAGS - Generate tags file for editors'
63689 @echo ' cscope - Generate cscope index'
63690 + @echo ' gtags - Generate GNU GLOBAL index'
63691 @echo ' kernelrelease - Output the release version string'
63692 @echo ' kernelversion - Output the version stored in Makefile'
63693 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63694 @@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63695 quiet_cmd_tags = GEN $@
63696 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63697
63698 -tags TAGS cscope: FORCE
63699 +tags TAGS cscope gtags: FORCE
63700 $(call cmd,tags)
63701
63702 # Scripts to check various things for consistency
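The Makefile hunks above wire a gcc plugin (tools/gcc/pax_plugin.so) into the build when PAX_MEMORY_STACKLEAK is enabled and the compiler is new enough to accept -fplugin. For orientation only, here is a skeleton of what such a plugin looks like in C; it assumes gcc's plugin headers are installed (gcc >= 4.5) and does nothing beyond registering an info string — the real pax_plugin instruments functions to track the lowest stack pointer:

#include "gcc-plugin.h"
#include "plugin-version.h"

int plugin_is_GPL_compatible;

static struct plugin_info pax_sketch_info = {
	.version = "sketch",
	.help = "illustrative skeleton, not the real pax_plugin",
};

int plugin_init(struct plugin_name_args *plugin_info,
		struct plugin_gcc_version *version)
{
	/* refuse to load into a gcc we were not built against */
	if (!plugin_default_version_check(version, &gcc_version))
		return 1;

	register_callback(plugin_info->base_name, PLUGIN_INFO, NULL,
			  &pax_sketch_info);
	return 0;
}
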
63703 diff -urNp linux-2.6.32.42/mm/backing-dev.c linux-2.6.32.42/mm/backing-dev.c
63704 --- linux-2.6.32.42/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63705 +++ linux-2.6.32.42/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63706 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63707 * Add the default flusher task that gets created for any bdi
63708 * that has dirty data pending writeout
63709 */
63710 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63711 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63712 {
63713 if (!bdi_cap_writeback_dirty(bdi))
63714 return;
63715 diff -urNp linux-2.6.32.42/mm/filemap.c linux-2.6.32.42/mm/filemap.c
63716 --- linux-2.6.32.42/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63717 +++ linux-2.6.32.42/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63718 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63719 struct address_space *mapping = file->f_mapping;
63720
63721 if (!mapping->a_ops->readpage)
63722 - return -ENOEXEC;
63723 + return -ENODEV;
63724 file_accessed(file);
63725 vma->vm_ops = &generic_file_vm_ops;
63726 vma->vm_flags |= VM_CAN_NONLINEAR;
63727 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63728 *pos = i_size_read(inode);
63729
63730 if (limit != RLIM_INFINITY) {
63731 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63732 if (*pos >= limit) {
63733 send_sig(SIGXFSZ, current, 0);
63734 return -EFBIG;
63735 diff -urNp linux-2.6.32.42/mm/fremap.c linux-2.6.32.42/mm/fremap.c
63736 --- linux-2.6.32.42/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63737 +++ linux-2.6.32.42/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63738 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63739 retry:
63740 vma = find_vma(mm, start);
63741
63742 +#ifdef CONFIG_PAX_SEGMEXEC
63743 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63744 + goto out;
63745 +#endif
63746 +
63747 /*
63748 * Make sure the vma is shared, that it supports prefaulting,
63749 * and that the remapped range is valid and fully within
63750 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63751 /*
63752 * drop PG_Mlocked flag for over-mapped range
63753 */
63754 - unsigned int saved_flags = vma->vm_flags;
63755 + unsigned long saved_flags = vma->vm_flags;
63756 munlock_vma_pages_range(vma, start, start + size);
63757 vma->vm_flags = saved_flags;
63758 }
63759 diff -urNp linux-2.6.32.42/mm/highmem.c linux-2.6.32.42/mm/highmem.c
63760 --- linux-2.6.32.42/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
63761 +++ linux-2.6.32.42/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
63762 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
63763 * So no dangers, even with speculative execution.
63764 */
63765 page = pte_page(pkmap_page_table[i]);
63766 + pax_open_kernel();
63767 pte_clear(&init_mm, (unsigned long)page_address(page),
63768 &pkmap_page_table[i]);
63769 -
63770 + pax_close_kernel();
63771 set_page_address(page, NULL);
63772 need_flush = 1;
63773 }
63774 @@ -177,9 +178,11 @@ start:
63775 }
63776 }
63777 vaddr = PKMAP_ADDR(last_pkmap_nr);
63778 +
63779 + pax_open_kernel();
63780 set_pte_at(&init_mm, vaddr,
63781 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63782 -
63783 + pax_close_kernel();
63784 pkmap_count[last_pkmap_nr] = 1;
63785 set_page_address(page, (void *)vaddr);
63786
63787 diff -urNp linux-2.6.32.42/mm/hugetlb.c linux-2.6.32.42/mm/hugetlb.c
63788 --- linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:55:35.000000000 -0400
63789 +++ linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:56:37.000000000 -0400
63790 @@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63791 return 1;
63792 }
63793
63794 +#ifdef CONFIG_PAX_SEGMEXEC
63795 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63796 +{
63797 + struct mm_struct *mm = vma->vm_mm;
63798 + struct vm_area_struct *vma_m;
63799 + unsigned long address_m;
63800 + pte_t *ptep_m;
63801 +
63802 + vma_m = pax_find_mirror_vma(vma);
63803 + if (!vma_m)
63804 + return;
63805 +
63806 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63807 + address_m = address + SEGMEXEC_TASK_SIZE;
63808 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63809 + get_page(page_m);
63810 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63811 +}
63812 +#endif
63813 +
63814 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
63815 unsigned long address, pte_t *ptep, pte_t pte,
63816 struct page *pagecache_page)
63817 @@ -1996,6 +2016,11 @@ retry_avoidcopy:
63818 huge_ptep_clear_flush(vma, address, ptep);
63819 set_huge_pte_at(mm, address, ptep,
63820 make_huge_pte(vma, new_page, 1));
63821 +
63822 +#ifdef CONFIG_PAX_SEGMEXEC
63823 + pax_mirror_huge_pte(vma, address, new_page);
63824 +#endif
63825 +
63826 /* Make the old page be freed below */
63827 new_page = old_page;
63828 }
63829 @@ -2127,6 +2152,10 @@ retry:
63830 && (vma->vm_flags & VM_SHARED)));
63831 set_huge_pte_at(mm, address, ptep, new_pte);
63832
63833 +#ifdef CONFIG_PAX_SEGMEXEC
63834 + pax_mirror_huge_pte(vma, address, page);
63835 +#endif
63836 +
63837 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63838 /* Optimization, do the COW without a second fault */
63839 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63840 @@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
63841 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63842 struct hstate *h = hstate_vma(vma);
63843
63844 +#ifdef CONFIG_PAX_SEGMEXEC
63845 + struct vm_area_struct *vma_m;
63846 +
63847 + vma_m = pax_find_mirror_vma(vma);
63848 + if (vma_m) {
63849 + unsigned long address_m;
63850 +
63851 + if (vma->vm_start > vma_m->vm_start) {
63852 + address_m = address;
63853 + address -= SEGMEXEC_TASK_SIZE;
63854 + vma = vma_m;
63855 + h = hstate_vma(vma);
63856 + } else
63857 + address_m = address + SEGMEXEC_TASK_SIZE;
63858 +
63859 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63860 + return VM_FAULT_OOM;
63861 + address_m &= HPAGE_MASK;
63862 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63863 + }
63864 +#endif
63865 +
63866 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63867 if (!ptep)
63868 return VM_FAULT_OOM;
63869 diff -urNp linux-2.6.32.42/mm/Kconfig linux-2.6.32.42/mm/Kconfig
63870 --- linux-2.6.32.42/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63871 +++ linux-2.6.32.42/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63872 @@ -228,7 +228,7 @@ config KSM
63873 config DEFAULT_MMAP_MIN_ADDR
63874 int "Low address space to protect from user allocation"
63875 depends on MMU
63876 - default 4096
63877 + default 65536
63878 help
63879 This is the portion of low virtual memory which should be protected
63880 from userspace allocation. Keeping a user from writing to low pages
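Raising DEFAULT_MMAP_MIN_ADDR from 4096 to 65536 above reserves the low 64 KiB of the address space, so an unprivileged process cannot place data at the addresses a kernel NULL-pointer dereference would read. A quick userspace check of the effect (run as a normal user):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *p = mmap((void *)0, pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("mapping page 0 refused: %s\n", strerror(errno));
	else
		printf("mapping page 0 succeeded at %p (mmap_min_addr is 0?)\n", p);
	return 0;
}
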
63881 diff -urNp linux-2.6.32.42/mm/kmemleak.c linux-2.6.32.42/mm/kmemleak.c
63882 --- linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
63883 +++ linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
63884 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63885
63886 for (i = 0; i < object->trace_len; i++) {
63887 void *ptr = (void *)object->trace[i];
63888 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63889 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63890 }
63891 }
63892
63893 diff -urNp linux-2.6.32.42/mm/ksm.c linux-2.6.32.42/mm/ksm.c
63894 --- linux-2.6.32.42/mm/ksm.c 2011-03-27 14:31:47.000000000 -0400
63895 +++ linux-2.6.32.42/mm/ksm.c 2011-06-20 19:38:36.000000000 -0400
63896 @@ -1215,6 +1215,12 @@ static struct rmap_item *scan_get_next_r
63897 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
63898 ksm_scan.mm_slot = slot;
63899 spin_unlock(&ksm_mmlist_lock);
63900 + /*
63901 + * Although we tested list_empty() above, a racing __ksm_exit
63902 + * of the last mm on the list may have removed it since then.
63903 + */
63904 + if (slot == &ksm_mm_head)
63905 + return NULL;
63906 next_mm:
63907 ksm_scan.address = 0;
63908 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
63909 diff -urNp linux-2.6.32.42/mm/maccess.c linux-2.6.32.42/mm/maccess.c
63910 --- linux-2.6.32.42/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63911 +++ linux-2.6.32.42/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63912 @@ -14,7 +14,7 @@
63913 * Safely read from address @src to the buffer at @dst. If a kernel fault
63914 * happens, handle that and return -EFAULT.
63915 */
63916 -long probe_kernel_read(void *dst, void *src, size_t size)
63917 +long probe_kernel_read(void *dst, const void *src, size_t size)
63918 {
63919 long ret;
63920 mm_segment_t old_fs = get_fs();
63921 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63922 * Safely write to address @dst from the buffer at @src. If a kernel fault
63923 * happens, handle that and return -EFAULT.
63924 */
63925 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63926 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63927 {
63928 long ret;
63929 mm_segment_t old_fs = get_fs();
63930 diff -urNp linux-2.6.32.42/mm/madvise.c linux-2.6.32.42/mm/madvise.c
63931 --- linux-2.6.32.42/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63932 +++ linux-2.6.32.42/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63933 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63934 pgoff_t pgoff;
63935 unsigned long new_flags = vma->vm_flags;
63936
63937 +#ifdef CONFIG_PAX_SEGMEXEC
63938 + struct vm_area_struct *vma_m;
63939 +#endif
63940 +
63941 switch (behavior) {
63942 case MADV_NORMAL:
63943 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63944 @@ -103,6 +107,13 @@ success:
63945 /*
63946 * vm_flags is protected by the mmap_sem held in write mode.
63947 */
63948 +
63949 +#ifdef CONFIG_PAX_SEGMEXEC
63950 + vma_m = pax_find_mirror_vma(vma);
63951 + if (vma_m)
63952 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63953 +#endif
63954 +
63955 vma->vm_flags = new_flags;
63956
63957 out:
63958 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63959 struct vm_area_struct ** prev,
63960 unsigned long start, unsigned long end)
63961 {
63962 +
63963 +#ifdef CONFIG_PAX_SEGMEXEC
63964 + struct vm_area_struct *vma_m;
63965 +#endif
63966 +
63967 *prev = vma;
63968 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63969 return -EINVAL;
63970 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63971 zap_page_range(vma, start, end - start, &details);
63972 } else
63973 zap_page_range(vma, start, end - start, NULL);
63974 +
63975 +#ifdef CONFIG_PAX_SEGMEXEC
63976 + vma_m = pax_find_mirror_vma(vma);
63977 + if (vma_m) {
63978 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63979 + struct zap_details details = {
63980 + .nonlinear_vma = vma_m,
63981 + .last_index = ULONG_MAX,
63982 + };
63983 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63984 + } else
63985 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63986 + }
63987 +#endif
63988 +
63989 return 0;
63990 }
63991
63992 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63993 if (end < start)
63994 goto out;
63995
63996 +#ifdef CONFIG_PAX_SEGMEXEC
63997 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63998 + if (end > SEGMEXEC_TASK_SIZE)
63999 + goto out;
64000 + } else
64001 +#endif
64002 +
64003 + if (end > TASK_SIZE)
64004 + goto out;
64005 +
64006 error = 0;
64007 if (end == start)
64008 goto out;
64009 diff -urNp linux-2.6.32.42/mm/memory.c linux-2.6.32.42/mm/memory.c
64010 --- linux-2.6.32.42/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
64011 +++ linux-2.6.32.42/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
64012 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
64013 return;
64014
64015 pmd = pmd_offset(pud, start);
64016 +
64017 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
64018 pud_clear(pud);
64019 pmd_free_tlb(tlb, pmd, start);
64020 +#endif
64021 +
64022 }
64023
64024 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
64025 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
64026 if (end - 1 > ceiling - 1)
64027 return;
64028
64029 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
64030 pud = pud_offset(pgd, start);
64031 pgd_clear(pgd);
64032 pud_free_tlb(tlb, pud, start);
64033 +#endif
64034 +
64035 }
64036
64037 /*
64038 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
64039 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
64040 i = 0;
64041
64042 - do {
64043 + while (nr_pages) {
64044 struct vm_area_struct *vma;
64045
64046 - vma = find_extend_vma(mm, start);
64047 + vma = find_vma(mm, start);
64048 if (!vma && in_gate_area(tsk, start)) {
64049 unsigned long pg = start & PAGE_MASK;
64050 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
64051 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
64052 continue;
64053 }
64054
64055 - if (!vma ||
64056 + if (!vma || start < vma->vm_start ||
64057 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
64058 !(vm_flags & vma->vm_flags))
64059 return i ? : -EFAULT;
64060 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
64061 start += PAGE_SIZE;
64062 nr_pages--;
64063 } while (nr_pages && start < vma->vm_end);
64064 - } while (nr_pages);
64065 + }
64066 return i;
64067 }
64068
64069 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
64070 page_add_file_rmap(page);
64071 set_pte_at(mm, addr, pte, mk_pte(page, prot));
64072
64073 +#ifdef CONFIG_PAX_SEGMEXEC
64074 + pax_mirror_file_pte(vma, addr, page, ptl);
64075 +#endif
64076 +
64077 retval = 0;
64078 pte_unmap_unlock(pte, ptl);
64079 return retval;
64080 @@ -1560,10 +1571,22 @@ out:
64081 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
64082 struct page *page)
64083 {
64084 +
64085 +#ifdef CONFIG_PAX_SEGMEXEC
64086 + struct vm_area_struct *vma_m;
64087 +#endif
64088 +
64089 if (addr < vma->vm_start || addr >= vma->vm_end)
64090 return -EFAULT;
64091 if (!page_count(page))
64092 return -EINVAL;
64093 +
64094 +#ifdef CONFIG_PAX_SEGMEXEC
64095 + vma_m = pax_find_mirror_vma(vma);
64096 + if (vma_m)
64097 + vma_m->vm_flags |= VM_INSERTPAGE;
64098 +#endif
64099 +
64100 vma->vm_flags |= VM_INSERTPAGE;
64101 return insert_page(vma, addr, page, vma->vm_page_prot);
64102 }
64103 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
64104 unsigned long pfn)
64105 {
64106 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
64107 + BUG_ON(vma->vm_mirror);
64108
64109 if (addr < vma->vm_start || addr >= vma->vm_end)
64110 return -EFAULT;
64111 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
64112 copy_user_highpage(dst, src, va, vma);
64113 }
64114
64115 +#ifdef CONFIG_PAX_SEGMEXEC
64116 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
64117 +{
64118 + struct mm_struct *mm = vma->vm_mm;
64119 + spinlock_t *ptl;
64120 + pte_t *pte, entry;
64121 +
64122 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
64123 + entry = *pte;
64124 + if (!pte_present(entry)) {
64125 + if (!pte_none(entry)) {
64126 + BUG_ON(pte_file(entry));
64127 + free_swap_and_cache(pte_to_swp_entry(entry));
64128 + pte_clear_not_present_full(mm, address, pte, 0);
64129 + }
64130 + } else {
64131 + struct page *page;
64132 +
64133 + flush_cache_page(vma, address, pte_pfn(entry));
64134 + entry = ptep_clear_flush(vma, address, pte);
64135 + BUG_ON(pte_dirty(entry));
64136 + page = vm_normal_page(vma, address, entry);
64137 + if (page) {
64138 + update_hiwater_rss(mm);
64139 + if (PageAnon(page))
64140 + dec_mm_counter(mm, anon_rss);
64141 + else
64142 + dec_mm_counter(mm, file_rss);
64143 + page_remove_rmap(page);
64144 + page_cache_release(page);
64145 + }
64146 + }
64147 + pte_unmap_unlock(pte, ptl);
64148 +}
64149 +
64150 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
64151 + *
64152 + * the ptl of the lower mapped page is held on entry and is not released on exit
64153 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
64154 + */
64155 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64156 +{
64157 + struct mm_struct *mm = vma->vm_mm;
64158 + unsigned long address_m;
64159 + spinlock_t *ptl_m;
64160 + struct vm_area_struct *vma_m;
64161 + pmd_t *pmd_m;
64162 + pte_t *pte_m, entry_m;
64163 +
64164 + BUG_ON(!page_m || !PageAnon(page_m));
64165 +
64166 + vma_m = pax_find_mirror_vma(vma);
64167 + if (!vma_m)
64168 + return;
64169 +
64170 + BUG_ON(!PageLocked(page_m));
64171 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64172 + address_m = address + SEGMEXEC_TASK_SIZE;
64173 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64174 + pte_m = pte_offset_map_nested(pmd_m, address_m);
64175 + ptl_m = pte_lockptr(mm, pmd_m);
64176 + if (ptl != ptl_m) {
64177 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64178 + if (!pte_none(*pte_m))
64179 + goto out;
64180 + }
64181 +
64182 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64183 + page_cache_get(page_m);
64184 + page_add_anon_rmap(page_m, vma_m, address_m);
64185 + inc_mm_counter(mm, anon_rss);
64186 + set_pte_at(mm, address_m, pte_m, entry_m);
64187 + update_mmu_cache(vma_m, address_m, entry_m);
64188 +out:
64189 + if (ptl != ptl_m)
64190 + spin_unlock(ptl_m);
64191 + pte_unmap_nested(pte_m);
64192 + unlock_page(page_m);
64193 +}
64194 +
64195 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64196 +{
64197 + struct mm_struct *mm = vma->vm_mm;
64198 + unsigned long address_m;
64199 + spinlock_t *ptl_m;
64200 + struct vm_area_struct *vma_m;
64201 + pmd_t *pmd_m;
64202 + pte_t *pte_m, entry_m;
64203 +
64204 + BUG_ON(!page_m || PageAnon(page_m));
64205 +
64206 + vma_m = pax_find_mirror_vma(vma);
64207 + if (!vma_m)
64208 + return;
64209 +
64210 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64211 + address_m = address + SEGMEXEC_TASK_SIZE;
64212 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64213 + pte_m = pte_offset_map_nested(pmd_m, address_m);
64214 + ptl_m = pte_lockptr(mm, pmd_m);
64215 + if (ptl != ptl_m) {
64216 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64217 + if (!pte_none(*pte_m))
64218 + goto out;
64219 + }
64220 +
64221 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64222 + page_cache_get(page_m);
64223 + page_add_file_rmap(page_m);
64224 + inc_mm_counter(mm, file_rss);
64225 + set_pte_at(mm, address_m, pte_m, entry_m);
64226 + update_mmu_cache(vma_m, address_m, entry_m);
64227 +out:
64228 + if (ptl != ptl_m)
64229 + spin_unlock(ptl_m);
64230 + pte_unmap_nested(pte_m);
64231 +}
64232 +
64233 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
64234 +{
64235 + struct mm_struct *mm = vma->vm_mm;
64236 + unsigned long address_m;
64237 + spinlock_t *ptl_m;
64238 + struct vm_area_struct *vma_m;
64239 + pmd_t *pmd_m;
64240 + pte_t *pte_m, entry_m;
64241 +
64242 + vma_m = pax_find_mirror_vma(vma);
64243 + if (!vma_m)
64244 + return;
64245 +
64246 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64247 + address_m = address + SEGMEXEC_TASK_SIZE;
64248 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64249 + pte_m = pte_offset_map_nested(pmd_m, address_m);
64250 + ptl_m = pte_lockptr(mm, pmd_m);
64251 + if (ptl != ptl_m) {
64252 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64253 + if (!pte_none(*pte_m))
64254 + goto out;
64255 + }
64256 +
64257 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
64258 + set_pte_at(mm, address_m, pte_m, entry_m);
64259 +out:
64260 + if (ptl != ptl_m)
64261 + spin_unlock(ptl_m);
64262 + pte_unmap_nested(pte_m);
64263 +}
64264 +
64265 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
64266 +{
64267 + struct page *page_m;
64268 + pte_t entry;
64269 +
64270 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
64271 + goto out;
64272 +
64273 + entry = *pte;
64274 + page_m = vm_normal_page(vma, address, entry);
64275 + if (!page_m)
64276 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
64277 + else if (PageAnon(page_m)) {
64278 + if (pax_find_mirror_vma(vma)) {
64279 + pte_unmap_unlock(pte, ptl);
64280 + lock_page(page_m);
64281 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
64282 + if (pte_same(entry, *pte))
64283 + pax_mirror_anon_pte(vma, address, page_m, ptl);
64284 + else
64285 + unlock_page(page_m);
64286 + }
64287 + } else
64288 + pax_mirror_file_pte(vma, address, page_m, ptl);
64289 +
64290 +out:
64291 + pte_unmap_unlock(pte, ptl);
64292 +}
64293 +#endif
64294 +
64295 /*
64296 * This routine handles present pages, when users try to write
64297 * to a shared page. It is done by copying the page to a new address
64298 @@ -2156,6 +2360,12 @@ gotten:
64299 */
64300 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64301 if (likely(pte_same(*page_table, orig_pte))) {
64302 +
64303 +#ifdef CONFIG_PAX_SEGMEXEC
64304 + if (pax_find_mirror_vma(vma))
64305 + BUG_ON(!trylock_page(new_page));
64306 +#endif
64307 +
64308 if (old_page) {
64309 if (!PageAnon(old_page)) {
64310 dec_mm_counter(mm, file_rss);
64311 @@ -2207,6 +2417,10 @@ gotten:
64312 page_remove_rmap(old_page);
64313 }
64314
64315 +#ifdef CONFIG_PAX_SEGMEXEC
64316 + pax_mirror_anon_pte(vma, address, new_page, ptl);
64317 +#endif
64318 +
64319 /* Free the old page.. */
64320 new_page = old_page;
64321 ret |= VM_FAULT_WRITE;
64322 @@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
64323 swap_free(entry);
64324 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
64325 try_to_free_swap(page);
64326 +
64327 +#ifdef CONFIG_PAX_SEGMEXEC
64328 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
64329 +#endif
64330 +
64331 unlock_page(page);
64332
64333 if (flags & FAULT_FLAG_WRITE) {
64334 @@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
64335
64336 /* No need to invalidate - it was non-present before */
64337 update_mmu_cache(vma, address, pte);
64338 +
64339 +#ifdef CONFIG_PAX_SEGMEXEC
64340 + pax_mirror_anon_pte(vma, address, page, ptl);
64341 +#endif
64342 +
64343 unlock:
64344 pte_unmap_unlock(page_table, ptl);
64345 out:
64346 @@ -2630,40 +2854,6 @@ out_release:
64347 }
64348
64349 /*
64350 - * This is like a special single-page "expand_{down|up}wards()",
64351 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
64352 - * doesn't hit another vma.
64353 - */
64354 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
64355 -{
64356 - address &= PAGE_MASK;
64357 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
64358 - struct vm_area_struct *prev = vma->vm_prev;
64359 -
64360 - /*
64361 - * Is there a mapping abutting this one below?
64362 - *
64363 - * That's only ok if it's the same stack mapping
64364 - * that has gotten split..
64365 - */
64366 - if (prev && prev->vm_end == address)
64367 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64368 -
64369 - expand_stack(vma, address - PAGE_SIZE);
64370 - }
64371 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64372 - struct vm_area_struct *next = vma->vm_next;
64373 -
64374 - /* As VM_GROWSDOWN but s/below/above/ */
64375 - if (next && next->vm_start == address + PAGE_SIZE)
64376 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64377 -
64378 - expand_upwards(vma, address + PAGE_SIZE);
64379 - }
64380 - return 0;
64381 -}
64382 -
64383 -/*
64384 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64385 * but allow concurrent faults), and pte mapped but not yet locked.
64386 * We return with mmap_sem still held, but pte unmapped and unlocked.
64387 @@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
64388 unsigned long address, pte_t *page_table, pmd_t *pmd,
64389 unsigned int flags)
64390 {
64391 - struct page *page;
64392 + struct page *page = NULL;
64393 spinlock_t *ptl;
64394 pte_t entry;
64395
64396 - pte_unmap(page_table);
64397 -
64398 - /* Check if we need to add a guard page to the stack */
64399 - if (check_stack_guard_page(vma, address) < 0)
64400 - return VM_FAULT_SIGBUS;
64401 -
64402 - /* Use the zero-page for reads */
64403 if (!(flags & FAULT_FLAG_WRITE)) {
64404 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64405 vma->vm_page_prot));
64406 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64407 + ptl = pte_lockptr(mm, pmd);
64408 + spin_lock(ptl);
64409 if (!pte_none(*page_table))
64410 goto unlock;
64411 goto setpte;
64412 }
64413
64414 /* Allocate our own private page. */
64415 + pte_unmap(page_table);
64416 +
64417 if (unlikely(anon_vma_prepare(vma)))
64418 goto oom;
64419 page = alloc_zeroed_user_highpage_movable(vma, address);
64420 @@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
64421 if (!pte_none(*page_table))
64422 goto release;
64423
64424 +#ifdef CONFIG_PAX_SEGMEXEC
64425 + if (pax_find_mirror_vma(vma))
64426 + BUG_ON(!trylock_page(page));
64427 +#endif
64428 +
64429 inc_mm_counter(mm, anon_rss);
64430 page_add_new_anon_rmap(page, vma, address);
64431 setpte:
64432 @@ -2718,6 +2909,12 @@ setpte:
64433
64434 /* No need to invalidate - it was non-present before */
64435 update_mmu_cache(vma, address, entry);
64436 +
64437 +#ifdef CONFIG_PAX_SEGMEXEC
64438 + if (page)
64439 + pax_mirror_anon_pte(vma, address, page, ptl);
64440 +#endif
64441 +
64442 unlock:
64443 pte_unmap_unlock(page_table, ptl);
64444 return 0;
64445 @@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
64446 */
64447 /* Only go through if we didn't race with anybody else... */
64448 if (likely(pte_same(*page_table, orig_pte))) {
64449 +
64450 +#ifdef CONFIG_PAX_SEGMEXEC
64451 + if (anon && pax_find_mirror_vma(vma))
64452 + BUG_ON(!trylock_page(page));
64453 +#endif
64454 +
64455 flush_icache_page(vma, page);
64456 entry = mk_pte(page, vma->vm_page_prot);
64457 if (flags & FAULT_FLAG_WRITE)
64458 @@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
64459
64460 /* no need to invalidate: a not-present page won't be cached */
64461 update_mmu_cache(vma, address, entry);
64462 +
64463 +#ifdef CONFIG_PAX_SEGMEXEC
64464 + if (anon)
64465 + pax_mirror_anon_pte(vma, address, page, ptl);
64466 + else
64467 + pax_mirror_file_pte(vma, address, page, ptl);
64468 +#endif
64469 +
64470 } else {
64471 if (charged)
64472 mem_cgroup_uncharge_page(page);
64473 @@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
64474 if (flags & FAULT_FLAG_WRITE)
64475 flush_tlb_page(vma, address);
64476 }
64477 +
64478 +#ifdef CONFIG_PAX_SEGMEXEC
64479 + pax_mirror_pte(vma, address, pte, pmd, ptl);
64480 + return 0;
64481 +#endif
64482 +
64483 unlock:
64484 pte_unmap_unlock(pte, ptl);
64485 return 0;
64486 @@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
64487 pmd_t *pmd;
64488 pte_t *pte;
64489
64490 +#ifdef CONFIG_PAX_SEGMEXEC
64491 + struct vm_area_struct *vma_m;
64492 +#endif
64493 +
64494 __set_current_state(TASK_RUNNING);
64495
64496 count_vm_event(PGFAULT);
64497 @@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
64498 if (unlikely(is_vm_hugetlb_page(vma)))
64499 return hugetlb_fault(mm, vma, address, flags);
64500
64501 +#ifdef CONFIG_PAX_SEGMEXEC
64502 + vma_m = pax_find_mirror_vma(vma);
64503 + if (vma_m) {
64504 + unsigned long address_m;
64505 + pgd_t *pgd_m;
64506 + pud_t *pud_m;
64507 + pmd_t *pmd_m;
64508 +
64509 + if (vma->vm_start > vma_m->vm_start) {
64510 + address_m = address;
64511 + address -= SEGMEXEC_TASK_SIZE;
64512 + vma = vma_m;
64513 + } else
64514 + address_m = address + SEGMEXEC_TASK_SIZE;
64515 +
64516 + pgd_m = pgd_offset(mm, address_m);
64517 + pud_m = pud_alloc(mm, pgd_m, address_m);
64518 + if (!pud_m)
64519 + return VM_FAULT_OOM;
64520 + pmd_m = pmd_alloc(mm, pud_m, address_m);
64521 + if (!pmd_m)
64522 + return VM_FAULT_OOM;
64523 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
64524 + return VM_FAULT_OOM;
64525 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64526 + }
64527 +#endif
64528 +
64529 pgd = pgd_offset(mm, address);
64530 pud = pud_alloc(mm, pgd, address);
64531 if (!pud)
64532 @@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
64533 gate_vma.vm_start = FIXADDR_USER_START;
64534 gate_vma.vm_end = FIXADDR_USER_END;
64535 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64536 - gate_vma.vm_page_prot = __P101;
64537 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64538 /*
64539 * Make sure the vDSO gets into every core dump.
64540 * Dumping its contents makes post-mortem fully interpretable later
64541 diff -urNp linux-2.6.32.42/mm/memory-failure.c linux-2.6.32.42/mm/memory-failure.c
64542 --- linux-2.6.32.42/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
64543 +++ linux-2.6.32.42/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
64544 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
64545
64546 int sysctl_memory_failure_recovery __read_mostly = 1;
64547
64548 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64549 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64550
64551 /*
64552 * Send all the processes who have the page mapped an ``action optional''
64553 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
64554 return 0;
64555 }
64556
64557 - atomic_long_add(1, &mce_bad_pages);
64558 + atomic_long_add_unchecked(1, &mce_bad_pages);
64559
64560 /*
64561 * We need/can do nothing about count=0 pages.
64562 diff -urNp linux-2.6.32.42/mm/mempolicy.c linux-2.6.32.42/mm/mempolicy.c
64563 --- linux-2.6.32.42/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
64564 +++ linux-2.6.32.42/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
64565 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
64566 struct vm_area_struct *next;
64567 int err;
64568
64569 +#ifdef CONFIG_PAX_SEGMEXEC
64570 + struct vm_area_struct *vma_m;
64571 +#endif
64572 +
64573 err = 0;
64574 for (; vma && vma->vm_start < end; vma = next) {
64575 next = vma->vm_next;
64576 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
64577 err = policy_vma(vma, new);
64578 if (err)
64579 break;
64580 +
64581 +#ifdef CONFIG_PAX_SEGMEXEC
64582 + vma_m = pax_find_mirror_vma(vma);
64583 + if (vma_m) {
64584 + err = policy_vma(vma_m, new);
64585 + if (err)
64586 + break;
64587 + }
64588 +#endif
64589 +
64590 }
64591 return err;
64592 }
64593 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
64594
64595 if (end < start)
64596 return -EINVAL;
64597 +
64598 +#ifdef CONFIG_PAX_SEGMEXEC
64599 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64600 + if (end > SEGMEXEC_TASK_SIZE)
64601 + return -EINVAL;
64602 + } else
64603 +#endif
64604 +
64605 + if (end > TASK_SIZE)
64606 + return -EINVAL;
64607 +
64608 if (end == start)
64609 return 0;
64610
64611 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64612 if (!mm)
64613 return -EINVAL;
64614
64615 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64616 + if (mm != current->mm &&
64617 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64618 + err = -EPERM;
64619 + goto out;
64620 + }
64621 +#endif
64622 +
64623 /*
64624 * Check if this process has the right to modify the specified
64625 * process. The right exists if the process has administrative
64626 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64627 rcu_read_lock();
64628 tcred = __task_cred(task);
64629 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64630 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64631 - !capable(CAP_SYS_NICE)) {
64632 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64633 rcu_read_unlock();
64634 err = -EPERM;
64635 goto out;
64636 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
64637
64638 if (file) {
64639 seq_printf(m, " file=");
64640 - seq_path(m, &file->f_path, "\n\t= ");
64641 + seq_path(m, &file->f_path, "\n\t\\= ");
64642 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64643 seq_printf(m, " heap");
64644 } else if (vma->vm_start <= mm->start_stack &&
64645 diff -urNp linux-2.6.32.42/mm/migrate.c linux-2.6.32.42/mm/migrate.c
64646 --- linux-2.6.32.42/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
64647 +++ linux-2.6.32.42/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
64648 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64649 unsigned long chunk_start;
64650 int err;
64651
64652 + pax_track_stack();
64653 +
64654 task_nodes = cpuset_mems_allowed(task);
64655
64656 err = -ENOMEM;
64657 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64658 if (!mm)
64659 return -EINVAL;
64660
64661 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64662 + if (mm != current->mm &&
64663 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64664 + err = -EPERM;
64665 + goto out;
64666 + }
64667 +#endif
64668 +
64669 /*
64670 * Check if this process has the right to modify the specified
64671 * process. The right exists if the process has administrative
64672 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64673 rcu_read_lock();
64674 tcred = __task_cred(task);
64675 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64676 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64677 - !capable(CAP_SYS_NICE)) {
64678 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64679 rcu_read_unlock();
64680 err = -EPERM;
64681 goto out;
64682 diff -urNp linux-2.6.32.42/mm/mlock.c linux-2.6.32.42/mm/mlock.c
64683 --- linux-2.6.32.42/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64684 +++ linux-2.6.32.42/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64685 @@ -13,6 +13,7 @@
64686 #include <linux/pagemap.h>
64687 #include <linux/mempolicy.h>
64688 #include <linux/syscalls.h>
64689 +#include <linux/security.h>
64690 #include <linux/sched.h>
64691 #include <linux/module.h>
64692 #include <linux/rmap.h>
64693 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64694 }
64695 }
64696
64697 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64698 -{
64699 - return (vma->vm_flags & VM_GROWSDOWN) &&
64700 - (vma->vm_start == addr) &&
64701 - !vma_stack_continue(vma->vm_prev, addr);
64702 -}
64703 -
64704 /**
64705 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64706 * @vma: target vma
64707 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64708 if (vma->vm_flags & VM_WRITE)
64709 gup_flags |= FOLL_WRITE;
64710
64711 - /* We don't try to access the guard page of a stack vma */
64712 - if (stack_guard_page(vma, start)) {
64713 - addr += PAGE_SIZE;
64714 - nr_pages--;
64715 - }
64716 -
64717 while (nr_pages > 0) {
64718 int i;
64719
64720 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64721 {
64722 unsigned long nstart, end, tmp;
64723 struct vm_area_struct * vma, * prev;
64724 - int error;
64725 + int error = -EINVAL;
64726
64727 len = PAGE_ALIGN(len);
64728 end = start + len;
64729 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64730 return -EINVAL;
64731 if (end == start)
64732 return 0;
64733 + if (end > TASK_SIZE)
64734 + return -EINVAL;
64735 +
64736 vma = find_vma_prev(current->mm, start, &prev);
64737 if (!vma || vma->vm_start > start)
64738 return -ENOMEM;
64739 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64740 for (nstart = start ; ; ) {
64741 unsigned int newflags;
64742
64743 +#ifdef CONFIG_PAX_SEGMEXEC
64744 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64745 + break;
64746 +#endif
64747 +
64748 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64749
64750 newflags = vma->vm_flags | VM_LOCKED;
64751 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64752 lock_limit >>= PAGE_SHIFT;
64753
64754 /* check against resource limits */
64755 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64756 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64757 error = do_mlock(start, len, 1);
64758 up_write(&current->mm->mmap_sem);
64759 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64760 static int do_mlockall(int flags)
64761 {
64762 struct vm_area_struct * vma, * prev = NULL;
64763 - unsigned int def_flags = 0;
64764
64765 if (flags & MCL_FUTURE)
64766 - def_flags = VM_LOCKED;
64767 - current->mm->def_flags = def_flags;
64768 + current->mm->def_flags |= VM_LOCKED;
64769 + else
64770 + current->mm->def_flags &= ~VM_LOCKED;
64771 if (flags == MCL_FUTURE)
64772 goto out;
64773
64774 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64775 - unsigned int newflags;
64776 + unsigned long newflags;
64777 +
64778 +#ifdef CONFIG_PAX_SEGMEXEC
64779 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64780 + break;
64781 +#endif
64782
64783 + BUG_ON(vma->vm_end > TASK_SIZE);
64784 newflags = vma->vm_flags | VM_LOCKED;
64785 if (!(flags & MCL_CURRENT))
64786 newflags &= ~VM_LOCKED;
64787 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64788 lock_limit >>= PAGE_SHIFT;
64789
64790 ret = -ENOMEM;
64791 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64792 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64793 capable(CAP_IPC_LOCK))
64794 ret = do_mlockall(flags);
64795 diff -urNp linux-2.6.32.42/mm/mmap.c linux-2.6.32.42/mm/mmap.c
64796 --- linux-2.6.32.42/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64797 +++ linux-2.6.32.42/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64798 @@ -45,6 +45,16 @@
64799 #define arch_rebalance_pgtables(addr, len) (addr)
64800 #endif
64801
64802 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64803 +{
64804 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64805 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64806 + up_read(&mm->mmap_sem);
64807 + BUG();
64808 + }
64809 +#endif
64810 +}
64811 +
64812 static void unmap_region(struct mm_struct *mm,
64813 struct vm_area_struct *vma, struct vm_area_struct *prev,
64814 unsigned long start, unsigned long end);
64815 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
64816 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64817 *
64818 */
64819 -pgprot_t protection_map[16] = {
64820 +pgprot_t protection_map[16] __read_only = {
64821 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64822 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64823 };
64824
64825 pgprot_t vm_get_page_prot(unsigned long vm_flags)
64826 {
64827 - return __pgprot(pgprot_val(protection_map[vm_flags &
64828 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64829 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64830 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64831 +
64832 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64833 + if (!nx_enabled &&
64834 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64835 + (vm_flags & (VM_READ | VM_WRITE)))
64836 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64837 +#endif
64838 +
64839 + return prot;
64840 }
64841 EXPORT_SYMBOL(vm_get_page_prot);
64842
64843 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
64844 int sysctl_overcommit_ratio = 50; /* default is 50% */
64845 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64846 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64847 struct percpu_counter vm_committed_as;
64848
64849 /*
64850 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
64851 struct vm_area_struct *next = vma->vm_next;
64852
64853 might_sleep();
64854 + BUG_ON(vma->vm_mirror);
64855 if (vma->vm_ops && vma->vm_ops->close)
64856 vma->vm_ops->close(vma);
64857 if (vma->vm_file) {
64858 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64859 * not page aligned -Ram Gupta
64860 */
64861 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64862 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64863 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64864 (mm->end_data - mm->start_data) > rlim)
64865 goto out;
64866 @@ -704,6 +726,12 @@ static int
64867 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64868 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64869 {
64870 +
64871 +#ifdef CONFIG_PAX_SEGMEXEC
64872 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64873 + return 0;
64874 +#endif
64875 +
64876 if (is_mergeable_vma(vma, file, vm_flags) &&
64877 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64878 if (vma->vm_pgoff == vm_pgoff)
64879 @@ -723,6 +751,12 @@ static int
64880 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64881 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64882 {
64883 +
64884 +#ifdef CONFIG_PAX_SEGMEXEC
64885 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64886 + return 0;
64887 +#endif
64888 +
64889 if (is_mergeable_vma(vma, file, vm_flags) &&
64890 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64891 pgoff_t vm_pglen;
64892 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64893 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64894 struct vm_area_struct *prev, unsigned long addr,
64895 unsigned long end, unsigned long vm_flags,
64896 - struct anon_vma *anon_vma, struct file *file,
64897 + struct anon_vma *anon_vma, struct file *file,
64898 pgoff_t pgoff, struct mempolicy *policy)
64899 {
64900 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64901 struct vm_area_struct *area, *next;
64902
64903 +#ifdef CONFIG_PAX_SEGMEXEC
64904 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64905 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64906 +
64907 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64908 +#endif
64909 +
64910 /*
64911 * We later require that vma->vm_flags == vm_flags,
64912 * so this tests vma->vm_flags & VM_SPECIAL, too.
64913 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64914 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64915 next = next->vm_next;
64916
64917 +#ifdef CONFIG_PAX_SEGMEXEC
64918 + if (prev)
64919 + prev_m = pax_find_mirror_vma(prev);
64920 + if (area)
64921 + area_m = pax_find_mirror_vma(area);
64922 + if (next)
64923 + next_m = pax_find_mirror_vma(next);
64924 +#endif
64925 +
64926 /*
64927 * Can it merge with the predecessor?
64928 */
64929 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64930 /* cases 1, 6 */
64931 vma_adjust(prev, prev->vm_start,
64932 next->vm_end, prev->vm_pgoff, NULL);
64933 - } else /* cases 2, 5, 7 */
64934 +
64935 +#ifdef CONFIG_PAX_SEGMEXEC
64936 + if (prev_m)
64937 + vma_adjust(prev_m, prev_m->vm_start,
64938 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64939 +#endif
64940 +
64941 + } else { /* cases 2, 5, 7 */
64942 vma_adjust(prev, prev->vm_start,
64943 end, prev->vm_pgoff, NULL);
64944 +
64945 +#ifdef CONFIG_PAX_SEGMEXEC
64946 + if (prev_m)
64947 + vma_adjust(prev_m, prev_m->vm_start,
64948 + end_m, prev_m->vm_pgoff, NULL);
64949 +#endif
64950 +
64951 + }
64952 return prev;
64953 }
64954
64955 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64956 mpol_equal(policy, vma_policy(next)) &&
64957 can_vma_merge_before(next, vm_flags,
64958 anon_vma, file, pgoff+pglen)) {
64959 - if (prev && addr < prev->vm_end) /* case 4 */
64960 + if (prev && addr < prev->vm_end) { /* case 4 */
64961 vma_adjust(prev, prev->vm_start,
64962 addr, prev->vm_pgoff, NULL);
64963 - else /* cases 3, 8 */
64964 +
64965 +#ifdef CONFIG_PAX_SEGMEXEC
64966 + if (prev_m)
64967 + vma_adjust(prev_m, prev_m->vm_start,
64968 + addr_m, prev_m->vm_pgoff, NULL);
64969 +#endif
64970 +
64971 + } else { /* cases 3, 8 */
64972 vma_adjust(area, addr, next->vm_end,
64973 next->vm_pgoff - pglen, NULL);
64974 +
64975 +#ifdef CONFIG_PAX_SEGMEXEC
64976 + if (area_m)
64977 + vma_adjust(area_m, addr_m, next_m->vm_end,
64978 + next_m->vm_pgoff - pglen, NULL);
64979 +#endif
64980 +
64981 + }
64982 return area;
64983 }
64984
64985 @@ -898,14 +978,11 @@ none:
64986 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64987 struct file *file, long pages)
64988 {
64989 - const unsigned long stack_flags
64990 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64991 -
64992 if (file) {
64993 mm->shared_vm += pages;
64994 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64995 mm->exec_vm += pages;
64996 - } else if (flags & stack_flags)
64997 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64998 mm->stack_vm += pages;
64999 if (flags & (VM_RESERVED|VM_IO))
65000 mm->reserved_vm += pages;
65001 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
65002 * (the exception is when the underlying filesystem is noexec
65003 * mounted, in which case we dont add PROT_EXEC.)
65004 */
65005 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65006 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65007 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
65008 prot |= PROT_EXEC;
65009
65010 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
65011 /* Obtain the address to map to. we verify (or select) it and ensure
65012 * that it represents a valid section of the address space.
65013 */
65014 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
65015 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
65016 if (addr & ~PAGE_MASK)
65017 return addr;
65018
65019 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
65020 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
65021 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
65022
65023 +#ifdef CONFIG_PAX_MPROTECT
65024 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65025 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65026 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
65027 + gr_log_rwxmmap(file);
65028 +
65029 +#ifdef CONFIG_PAX_EMUPLT
65030 + vm_flags &= ~VM_EXEC;
65031 +#else
65032 + return -EPERM;
65033 +#endif
65034 +
65035 + }
65036 +
65037 + if (!(vm_flags & VM_EXEC))
65038 + vm_flags &= ~VM_MAYEXEC;
65039 +#else
65040 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65041 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65042 +#endif
65043 + else
65044 + vm_flags &= ~VM_MAYWRITE;
65045 + }
65046 +#endif
65047 +
65048 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65049 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
65050 + vm_flags &= ~VM_PAGEEXEC;
65051 +#endif
65052 +
65053 if (flags & MAP_LOCKED)
65054 if (!can_do_mlock())
65055 return -EPERM;
65056 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
65057 locked += mm->locked_vm;
65058 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65059 lock_limit >>= PAGE_SHIFT;
65060 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65061 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
65062 return -EAGAIN;
65063 }
65064 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
65065 if (error)
65066 return error;
65067
65068 + if (!gr_acl_handle_mmap(file, prot))
65069 + return -EACCES;
65070 +
65071 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
65072 }
65073 EXPORT_SYMBOL(do_mmap_pgoff);
65074 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
65075 */
65076 int vma_wants_writenotify(struct vm_area_struct *vma)
65077 {
65078 - unsigned int vm_flags = vma->vm_flags;
65079 + unsigned long vm_flags = vma->vm_flags;
65080
65081 /* If it was private or non-writable, the write bit is already clear */
65082 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
65083 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
65084 return 0;
65085
65086 /* The backer wishes to know when pages are first written to? */
65087 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
65088 unsigned long charged = 0;
65089 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
65090
65091 +#ifdef CONFIG_PAX_SEGMEXEC
65092 + struct vm_area_struct *vma_m = NULL;
65093 +#endif
65094 +
65095 + /*
65096 + * mm->mmap_sem is required to protect against another thread
65097 + * changing the mappings in case we sleep.
65098 + */
65099 + verify_mm_writelocked(mm);
65100 +
65101 /* Clear old maps */
65102 error = -ENOMEM;
65103 -munmap_back:
65104 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65105 if (vma && vma->vm_start < addr + len) {
65106 if (do_munmap(mm, addr, len))
65107 return -ENOMEM;
65108 - goto munmap_back;
65109 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65110 + BUG_ON(vma && vma->vm_start < addr + len);
65111 }
65112
65113 /* Check against address space limit. */
65114 @@ -1173,6 +1294,16 @@ munmap_back:
65115 goto unacct_error;
65116 }
65117
65118 +#ifdef CONFIG_PAX_SEGMEXEC
65119 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
65120 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65121 + if (!vma_m) {
65122 + error = -ENOMEM;
65123 + goto free_vma;
65124 + }
65125 + }
65126 +#endif
65127 +
65128 vma->vm_mm = mm;
65129 vma->vm_start = addr;
65130 vma->vm_end = addr + len;
65131 @@ -1195,6 +1326,19 @@ munmap_back:
65132 error = file->f_op->mmap(file, vma);
65133 if (error)
65134 goto unmap_and_free_vma;
65135 +
65136 +#ifdef CONFIG_PAX_SEGMEXEC
65137 + if (vma_m && (vm_flags & VM_EXECUTABLE))
65138 + added_exe_file_vma(mm);
65139 +#endif
65140 +
65141 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65142 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
65143 + vma->vm_flags |= VM_PAGEEXEC;
65144 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65145 + }
65146 +#endif
65147 +
65148 if (vm_flags & VM_EXECUTABLE)
65149 added_exe_file_vma(mm);
65150
65151 @@ -1218,6 +1362,11 @@ munmap_back:
65152 vma_link(mm, vma, prev, rb_link, rb_parent);
65153 file = vma->vm_file;
65154
65155 +#ifdef CONFIG_PAX_SEGMEXEC
65156 + if (vma_m)
65157 + pax_mirror_vma(vma_m, vma);
65158 +#endif
65159 +
65160 /* Once vma denies write, undo our temporary denial count */
65161 if (correct_wcount)
65162 atomic_inc(&inode->i_writecount);
65163 @@ -1226,6 +1375,7 @@ out:
65164
65165 mm->total_vm += len >> PAGE_SHIFT;
65166 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
65167 + track_exec_limit(mm, addr, addr + len, vm_flags);
65168 if (vm_flags & VM_LOCKED) {
65169 /*
65170 * makes pages present; downgrades, drops, reacquires mmap_sem
65171 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
65172 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
65173 charged = 0;
65174 free_vma:
65175 +
65176 +#ifdef CONFIG_PAX_SEGMEXEC
65177 + if (vma_m)
65178 + kmem_cache_free(vm_area_cachep, vma_m);
65179 +#endif
65180 +
65181 kmem_cache_free(vm_area_cachep, vma);
65182 unacct_error:
65183 if (charged)
65184 @@ -1255,6 +1411,44 @@ unacct_error:
65185 return error;
65186 }
65187
65188 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
65189 +{
65190 + if (!vma) {
65191 +#ifdef CONFIG_STACK_GROWSUP
65192 + if (addr > sysctl_heap_stack_gap)
65193 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
65194 + else
65195 + vma = find_vma(current->mm, 0);
65196 + if (vma && (vma->vm_flags & VM_GROWSUP))
65197 + return false;
65198 +#endif
65199 + return true;
65200 + }
65201 +
65202 + if (addr + len > vma->vm_start)
65203 + return false;
65204 +
65205 + if (vma->vm_flags & VM_GROWSDOWN)
65206 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
65207 +#ifdef CONFIG_STACK_GROWSUP
65208 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
65209 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
65210 +#endif
65211 +
65212 + return true;
65213 +}
65214 +
65215 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
65216 +{
65217 + if (vma->vm_start < len)
65218 + return -ENOMEM;
65219 + if (!(vma->vm_flags & VM_GROWSDOWN))
65220 + return vma->vm_start - len;
65221 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
65222 + return vma->vm_start - len - sysctl_heap_stack_gap;
65223 + return -ENOMEM;
65224 +}
65225 +
65226 /* Get an address range which is currently unmapped.
65227 * For shmat() with addr=0.
65228 *
65229 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
65230 if (flags & MAP_FIXED)
65231 return addr;
65232
65233 +#ifdef CONFIG_PAX_RANDMMAP
65234 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65235 +#endif
65236 +
65237 if (addr) {
65238 addr = PAGE_ALIGN(addr);
65239 - vma = find_vma(mm, addr);
65240 - if (TASK_SIZE - len >= addr &&
65241 - (!vma || addr + len <= vma->vm_start))
65242 - return addr;
65243 + if (TASK_SIZE - len >= addr) {
65244 + vma = find_vma(mm, addr);
65245 + if (check_heap_stack_gap(vma, addr, len))
65246 + return addr;
65247 + }
65248 }
65249 if (len > mm->cached_hole_size) {
65250 - start_addr = addr = mm->free_area_cache;
65251 + start_addr = addr = mm->free_area_cache;
65252 } else {
65253 - start_addr = addr = TASK_UNMAPPED_BASE;
65254 - mm->cached_hole_size = 0;
65255 + start_addr = addr = mm->mmap_base;
65256 + mm->cached_hole_size = 0;
65257 }
65258
65259 full_search:
65260 @@ -1303,34 +1502,40 @@ full_search:
65261 * Start a new search - just in case we missed
65262 * some holes.
65263 */
65264 - if (start_addr != TASK_UNMAPPED_BASE) {
65265 - addr = TASK_UNMAPPED_BASE;
65266 - start_addr = addr;
65267 + if (start_addr != mm->mmap_base) {
65268 + start_addr = addr = mm->mmap_base;
65269 mm->cached_hole_size = 0;
65270 goto full_search;
65271 }
65272 return -ENOMEM;
65273 }
65274 - if (!vma || addr + len <= vma->vm_start) {
65275 - /*
65276 - * Remember the place where we stopped the search:
65277 - */
65278 - mm->free_area_cache = addr + len;
65279 - return addr;
65280 - }
65281 + if (check_heap_stack_gap(vma, addr, len))
65282 + break;
65283 if (addr + mm->cached_hole_size < vma->vm_start)
65284 mm->cached_hole_size = vma->vm_start - addr;
65285 addr = vma->vm_end;
65286 }
65287 +
65288 + /*
65289 + * Remember the place where we stopped the search:
65290 + */
65291 + mm->free_area_cache = addr + len;
65292 + return addr;
65293 }
65294 #endif
65295
65296 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
65297 {
65298 +
65299 +#ifdef CONFIG_PAX_SEGMEXEC
65300 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65301 + return;
65302 +#endif
65303 +
65304 /*
65305 * Is this a new hole at the lowest possible address?
65306 */
65307 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
65308 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
65309 mm->free_area_cache = addr;
65310 mm->cached_hole_size = ~0UL;
65311 }
65312 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
65313 {
65314 struct vm_area_struct *vma;
65315 struct mm_struct *mm = current->mm;
65316 - unsigned long addr = addr0;
65317 + unsigned long base = mm->mmap_base, addr = addr0;
65318
65319 /* requested length too big for entire address space */
65320 if (len > TASK_SIZE)
65321 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
65322 if (flags & MAP_FIXED)
65323 return addr;
65324
65325 +#ifdef CONFIG_PAX_RANDMMAP
65326 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65327 +#endif
65328 +
65329 /* requesting a specific address */
65330 if (addr) {
65331 addr = PAGE_ALIGN(addr);
65332 - vma = find_vma(mm, addr);
65333 - if (TASK_SIZE - len >= addr &&
65334 - (!vma || addr + len <= vma->vm_start))
65335 - return addr;
65336 + if (TASK_SIZE - len >= addr) {
65337 + vma = find_vma(mm, addr);
65338 + if (check_heap_stack_gap(vma, addr, len))
65339 + return addr;
65340 + }
65341 }
65342
65343 /* check if free_area_cache is useful for us */
65344 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
65345 /* make sure it can fit in the remaining address space */
65346 if (addr > len) {
65347 vma = find_vma(mm, addr-len);
65348 - if (!vma || addr <= vma->vm_start)
65349 + if (check_heap_stack_gap(vma, addr - len, len))
65350 /* remember the address as a hint for next time */
65351 return (mm->free_area_cache = addr-len);
65352 }
65353 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
65354 * return with success:
65355 */
65356 vma = find_vma(mm, addr);
65357 - if (!vma || addr+len <= vma->vm_start)
65358 + if (check_heap_stack_gap(vma, addr, len))
65359 /* remember the address as a hint for next time */
65360 return (mm->free_area_cache = addr);
65361
65362 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
65363 mm->cached_hole_size = vma->vm_start - addr;
65364
65365 /* try just below the current vma->vm_start */
65366 - addr = vma->vm_start-len;
65367 - } while (len < vma->vm_start);
65368 + addr = skip_heap_stack_gap(vma, len);
65369 + } while (!IS_ERR_VALUE(addr));
65370
65371 bottomup:
65372 /*
65373 @@ -1414,13 +1624,21 @@ bottomup:
65374 * can happen with large stack limits and large mmap()
65375 * allocations.
65376 */
65377 + mm->mmap_base = TASK_UNMAPPED_BASE;
65378 +
65379 +#ifdef CONFIG_PAX_RANDMMAP
65380 + if (mm->pax_flags & MF_PAX_RANDMMAP)
65381 + mm->mmap_base += mm->delta_mmap;
65382 +#endif
65383 +
65384 + mm->free_area_cache = mm->mmap_base;
65385 mm->cached_hole_size = ~0UL;
65386 - mm->free_area_cache = TASK_UNMAPPED_BASE;
65387 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65388 /*
65389 * Restore the topdown base:
65390 */
65391 - mm->free_area_cache = mm->mmap_base;
65392 + mm->mmap_base = base;
65393 + mm->free_area_cache = base;
65394 mm->cached_hole_size = ~0UL;
65395
65396 return addr;
65397 @@ -1429,6 +1647,12 @@ bottomup:
65398
65399 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65400 {
65401 +
65402 +#ifdef CONFIG_PAX_SEGMEXEC
65403 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65404 + return;
65405 +#endif
65406 +
65407 /*
65408 * Is this a new hole at the highest possible address?
65409 */
65410 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
65411 mm->free_area_cache = addr;
65412
65413 /* dont allow allocations above current base */
65414 - if (mm->free_area_cache > mm->mmap_base)
65415 + if (mm->free_area_cache > mm->mmap_base) {
65416 mm->free_area_cache = mm->mmap_base;
65417 + mm->cached_hole_size = ~0UL;
65418 + }
65419 }
65420
65421 unsigned long
65422 @@ -1545,6 +1771,27 @@ out:
65423 return prev ? prev->vm_next : vma;
65424 }
65425
65426 +#ifdef CONFIG_PAX_SEGMEXEC
65427 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65428 +{
65429 + struct vm_area_struct *vma_m;
65430 +
65431 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65432 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65433 + BUG_ON(vma->vm_mirror);
65434 + return NULL;
65435 + }
65436 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65437 + vma_m = vma->vm_mirror;
65438 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65439 + BUG_ON(vma->vm_file != vma_m->vm_file);
65440 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65441 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
65442 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65443 + return vma_m;
65444 +}
65445 +#endif
65446 +
65447 /*
65448 * Verify that the stack growth is acceptable and
65449 * update accounting. This is shared with both the
65450 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
65451 return -ENOMEM;
65452
65453 /* Stack limit test */
65454 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
65455 if (size > rlim[RLIMIT_STACK].rlim_cur)
65456 return -ENOMEM;
65457
65458 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
65459 unsigned long limit;
65460 locked = mm->locked_vm + grow;
65461 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
65462 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65463 if (locked > limit && !capable(CAP_IPC_LOCK))
65464 return -ENOMEM;
65465 }
65466 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
65467 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65468 * vma is the last one with address > vma->vm_end. Have to extend vma.
65469 */
65470 +#ifndef CONFIG_IA64
65471 +static
65472 +#endif
65473 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65474 {
65475 int error;
65476 + bool locknext;
65477
65478 if (!(vma->vm_flags & VM_GROWSUP))
65479 return -EFAULT;
65480
65481 + /* Also guard against wrapping around to address 0. */
65482 + if (address < PAGE_ALIGN(address+1))
65483 + address = PAGE_ALIGN(address+1);
65484 + else
65485 + return -ENOMEM;
65486 +
65487 /*
65488 * We must make sure the anon_vma is allocated
65489 * so that the anon_vma locking is not a noop.
65490 */
65491 if (unlikely(anon_vma_prepare(vma)))
65492 return -ENOMEM;
65493 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65494 + if (locknext && anon_vma_prepare(vma->vm_next))
65495 + return -ENOMEM;
65496 anon_vma_lock(vma);
65497 + if (locknext)
65498 + anon_vma_lock(vma->vm_next);
65499
65500 /*
65501 * vma->vm_start/vm_end cannot change under us because the caller
65502 * is required to hold the mmap_sem in read mode. We need the
65503 - * anon_vma lock to serialize against concurrent expand_stacks.
65504 - * Also guard against wrapping around to address 0.
65505 + * anon_vma locks to serialize against concurrent expand_stacks
65506 + * and expand_upwards.
65507 */
65508 - if (address < PAGE_ALIGN(address+4))
65509 - address = PAGE_ALIGN(address+4);
65510 - else {
65511 - anon_vma_unlock(vma);
65512 - return -ENOMEM;
65513 - }
65514 error = 0;
65515
65516 /* Somebody else might have raced and expanded it already */
65517 - if (address > vma->vm_end) {
65518 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65519 + error = -ENOMEM;
65520 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65521 unsigned long size, grow;
65522
65523 size = address - vma->vm_start;
65524 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
65525 if (!error)
65526 vma->vm_end = address;
65527 }
65528 + if (locknext)
65529 + anon_vma_unlock(vma->vm_next);
65530 anon_vma_unlock(vma);
65531 return error;
65532 }
65533 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
65534 unsigned long address)
65535 {
65536 int error;
65537 + bool lockprev = false;
65538 + struct vm_area_struct *prev;
65539
65540 /*
65541 * We must make sure the anon_vma is allocated
65542 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
65543 if (error)
65544 return error;
65545
65546 + prev = vma->vm_prev;
65547 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65548 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65549 +#endif
65550 + if (lockprev && anon_vma_prepare(prev))
65551 + return -ENOMEM;
65552 + if (lockprev)
65553 + anon_vma_lock(prev);
65554 +
65555 anon_vma_lock(vma);
65556
65557 /*
65558 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
65559 */
65560
65561 /* Somebody else might have raced and expanded it already */
65562 - if (address < vma->vm_start) {
65563 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65564 + error = -ENOMEM;
65565 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65566 unsigned long size, grow;
65567
65568 +#ifdef CONFIG_PAX_SEGMEXEC
65569 + struct vm_area_struct *vma_m;
65570 +
65571 + vma_m = pax_find_mirror_vma(vma);
65572 +#endif
65573 +
65574 size = vma->vm_end - address;
65575 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65576
65577 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
65578 if (!error) {
65579 vma->vm_start = address;
65580 vma->vm_pgoff -= grow;
65581 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65582 +
65583 +#ifdef CONFIG_PAX_SEGMEXEC
65584 + if (vma_m) {
65585 + vma_m->vm_start -= grow << PAGE_SHIFT;
65586 + vma_m->vm_pgoff -= grow;
65587 + }
65588 +#endif
65589 +
65590 }
65591 }
65592 anon_vma_unlock(vma);
65593 + if (lockprev)
65594 + anon_vma_unlock(prev);
65595 return error;
65596 }
65597
65598 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
65599 do {
65600 long nrpages = vma_pages(vma);
65601
65602 +#ifdef CONFIG_PAX_SEGMEXEC
65603 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65604 + vma = remove_vma(vma);
65605 + continue;
65606 + }
65607 +#endif
65608 +
65609 mm->total_vm -= nrpages;
65610 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65611 vma = remove_vma(vma);
65612 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65613 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65614 vma->vm_prev = NULL;
65615 do {
65616 +
65617 +#ifdef CONFIG_PAX_SEGMEXEC
65618 + if (vma->vm_mirror) {
65619 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65620 + vma->vm_mirror->vm_mirror = NULL;
65621 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
65622 + vma->vm_mirror = NULL;
65623 + }
65624 +#endif
65625 +
65626 rb_erase(&vma->vm_rb, &mm->mm_rb);
65627 mm->map_count--;
65628 tail_vma = vma;
65629 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
65630 struct mempolicy *pol;
65631 struct vm_area_struct *new;
65632
65633 +#ifdef CONFIG_PAX_SEGMEXEC
65634 + struct vm_area_struct *vma_m, *new_m = NULL;
65635 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65636 +#endif
65637 +
65638 if (is_vm_hugetlb_page(vma) && (addr &
65639 ~(huge_page_mask(hstate_vma(vma)))))
65640 return -EINVAL;
65641
65642 +#ifdef CONFIG_PAX_SEGMEXEC
65643 + vma_m = pax_find_mirror_vma(vma);
65644 +
65645 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65646 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65647 + if (mm->map_count >= sysctl_max_map_count-1)
65648 + return -ENOMEM;
65649 + } else
65650 +#endif
65651 +
65652 if (mm->map_count >= sysctl_max_map_count)
65653 return -ENOMEM;
65654
65655 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65656 if (!new)
65657 return -ENOMEM;
65658
65659 +#ifdef CONFIG_PAX_SEGMEXEC
65660 + if (vma_m) {
65661 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65662 + if (!new_m) {
65663 + kmem_cache_free(vm_area_cachep, new);
65664 + return -ENOMEM;
65665 + }
65666 + }
65667 +#endif
65668 +
65669 /* most fields are the same, copy all, and then fixup */
65670 *new = *vma;
65671
65672 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65673 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65674 }
65675
65676 +#ifdef CONFIG_PAX_SEGMEXEC
65677 + if (vma_m) {
65678 + *new_m = *vma_m;
65679 + new_m->vm_mirror = new;
65680 + new->vm_mirror = new_m;
65681 +
65682 + if (new_below)
65683 + new_m->vm_end = addr_m;
65684 + else {
65685 + new_m->vm_start = addr_m;
65686 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65687 + }
65688 + }
65689 +#endif
65690 +
65691 pol = mpol_dup(vma_policy(vma));
65692 if (IS_ERR(pol)) {
65693 +
65694 +#ifdef CONFIG_PAX_SEGMEXEC
65695 + if (new_m)
65696 + kmem_cache_free(vm_area_cachep, new_m);
65697 +#endif
65698 +
65699 kmem_cache_free(vm_area_cachep, new);
65700 return PTR_ERR(pol);
65701 }
65702 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65703 else
65704 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65705
65706 +#ifdef CONFIG_PAX_SEGMEXEC
65707 + if (vma_m) {
65708 + mpol_get(pol);
65709 + vma_set_policy(new_m, pol);
65710 +
65711 + if (new_m->vm_file) {
65712 + get_file(new_m->vm_file);
65713 + if (vma_m->vm_flags & VM_EXECUTABLE)
65714 + added_exe_file_vma(mm);
65715 + }
65716 +
65717 + if (new_m->vm_ops && new_m->vm_ops->open)
65718 + new_m->vm_ops->open(new_m);
65719 +
65720 + if (new_below)
65721 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65722 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65723 + else
65724 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65725 + }
65726 +#endif
65727 +
65728 return 0;
65729 }
65730
65731 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65732 * work. This now handles partial unmappings.
65733 * Jeremy Fitzhardinge <jeremy@goop.org>
65734 */
65735 +#ifdef CONFIG_PAX_SEGMEXEC
65736 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65737 +{
65738 + int ret = __do_munmap(mm, start, len);
65739 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65740 + return ret;
65741 +
65742 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65743 +}
65744 +
65745 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65746 +#else
65747 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65748 +#endif
65749 {
65750 unsigned long end;
65751 struct vm_area_struct *vma, *prev, *last;
65752
65753 + /*
65754 + * mm->mmap_sem is required to protect against another thread
65755 + * changing the mappings in case we sleep.
65756 + */
65757 + verify_mm_writelocked(mm);
65758 +
65759 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65760 return -EINVAL;
65761
65762 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65763 /* Fix up all other VM information */
65764 remove_vma_list(mm, vma);
65765
65766 + track_exec_limit(mm, start, end, 0UL);
65767 +
65768 return 0;
65769 }
65770
65771 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65772
65773 profile_munmap(addr);
65774
65775 +#ifdef CONFIG_PAX_SEGMEXEC
65776 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65777 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65778 + return -EINVAL;
65779 +#endif
65780 +
65781 down_write(&mm->mmap_sem);
65782 ret = do_munmap(mm, addr, len);
65783 up_write(&mm->mmap_sem);
65784 return ret;
65785 }
65786
65787 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65788 -{
65789 -#ifdef CONFIG_DEBUG_VM
65790 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65791 - WARN_ON(1);
65792 - up_read(&mm->mmap_sem);
65793 - }
65794 -#endif
65795 -}
65796 -
65797 /*
65798 * this is really a simplified "do_mmap". it only handles
65799 * anonymous maps. eventually we may be able to do some
65800 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65801 struct rb_node ** rb_link, * rb_parent;
65802 pgoff_t pgoff = addr >> PAGE_SHIFT;
65803 int error;
65804 + unsigned long charged;
65805
65806 len = PAGE_ALIGN(len);
65807 if (!len)
65808 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65809
65810 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65811
65812 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65813 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65814 + flags &= ~VM_EXEC;
65815 +
65816 +#ifdef CONFIG_PAX_MPROTECT
65817 + if (mm->pax_flags & MF_PAX_MPROTECT)
65818 + flags &= ~VM_MAYEXEC;
65819 +#endif
65820 +
65821 + }
65822 +#endif
65823 +
65824 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65825 if (error & ~PAGE_MASK)
65826 return error;
65827
65828 + charged = len >> PAGE_SHIFT;
65829 +
65830 /*
65831 * mlock MCL_FUTURE?
65832 */
65833 if (mm->def_flags & VM_LOCKED) {
65834 unsigned long locked, lock_limit;
65835 - locked = len >> PAGE_SHIFT;
65836 + locked = charged;
65837 locked += mm->locked_vm;
65838 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65839 lock_limit >>= PAGE_SHIFT;
65840 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
65841 /*
65842 * Clear old maps. this also does some error checking for us
65843 */
65844 - munmap_back:
65845 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65846 if (vma && vma->vm_start < addr + len) {
65847 if (do_munmap(mm, addr, len))
65848 return -ENOMEM;
65849 - goto munmap_back;
65850 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65851 + BUG_ON(vma && vma->vm_start < addr + len);
65852 }
65853
65854 /* Check against address space limits *after* clearing old maps... */
65855 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65856 + if (!may_expand_vm(mm, charged))
65857 return -ENOMEM;
65858
65859 if (mm->map_count > sysctl_max_map_count)
65860 return -ENOMEM;
65861
65862 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65863 + if (security_vm_enough_memory(charged))
65864 return -ENOMEM;
65865
65866 /* Can we just expand an old private anonymous mapping? */
65867 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
65868 */
65869 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65870 if (!vma) {
65871 - vm_unacct_memory(len >> PAGE_SHIFT);
65872 + vm_unacct_memory(charged);
65873 return -ENOMEM;
65874 }
65875
65876 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65877 vma->vm_page_prot = vm_get_page_prot(flags);
65878 vma_link(mm, vma, prev, rb_link, rb_parent);
65879 out:
65880 - mm->total_vm += len >> PAGE_SHIFT;
65881 + mm->total_vm += charged;
65882 if (flags & VM_LOCKED) {
65883 if (!mlock_vma_pages_range(vma, addr, addr + len))
65884 - mm->locked_vm += (len >> PAGE_SHIFT);
65885 + mm->locked_vm += charged;
65886 }
65887 + track_exec_limit(mm, addr, addr + len, flags);
65888 return addr;
65889 }
65890
65891 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65892 * Walk the list again, actually closing and freeing it,
65893 * with preemption enabled, without holding any MM locks.
65894 */
65895 - while (vma)
65896 + while (vma) {
65897 + vma->vm_mirror = NULL;
65898 vma = remove_vma(vma);
65899 + }
65900
65901 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65902 }
65903 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65904 struct vm_area_struct * __vma, * prev;
65905 struct rb_node ** rb_link, * rb_parent;
65906
65907 +#ifdef CONFIG_PAX_SEGMEXEC
65908 + struct vm_area_struct *vma_m = NULL;
65909 +#endif
65910 +
65911 /*
65912 * The vm_pgoff of a purely anonymous vma should be irrelevant
65913 * until its first write fault, when page's anon_vma and index
65914 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65915 if ((vma->vm_flags & VM_ACCOUNT) &&
65916 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65917 return -ENOMEM;
65918 +
65919 +#ifdef CONFIG_PAX_SEGMEXEC
65920 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65921 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65922 + if (!vma_m)
65923 + return -ENOMEM;
65924 + }
65925 +#endif
65926 +
65927 vma_link(mm, vma, prev, rb_link, rb_parent);
65928 +
65929 +#ifdef CONFIG_PAX_SEGMEXEC
65930 + if (vma_m)
65931 + pax_mirror_vma(vma_m, vma);
65932 +#endif
65933 +
65934 return 0;
65935 }
65936
65937 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65938 struct rb_node **rb_link, *rb_parent;
65939 struct mempolicy *pol;
65940
65941 + BUG_ON(vma->vm_mirror);
65942 +
65943 /*
65944 * If anonymous vma has not yet been faulted, update new pgoff
65945 * to match new location, to increase its chance of merging.
65946 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65947 return new_vma;
65948 }
65949
65950 +#ifdef CONFIG_PAX_SEGMEXEC
65951 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65952 +{
65953 + struct vm_area_struct *prev_m;
65954 + struct rb_node **rb_link_m, *rb_parent_m;
65955 + struct mempolicy *pol_m;
65956 +
65957 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65958 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65959 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65960 + *vma_m = *vma;
65961 + pol_m = vma_policy(vma_m);
65962 + mpol_get(pol_m);
65963 + vma_set_policy(vma_m, pol_m);
65964 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65965 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65966 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65967 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65968 + if (vma_m->vm_file)
65969 + get_file(vma_m->vm_file);
65970 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65971 + vma_m->vm_ops->open(vma_m);
65972 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65973 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65974 + vma_m->vm_mirror = vma;
65975 + vma->vm_mirror = vma_m;
65976 +}
65977 +#endif
65978 +
65979 /*
65980 * Return true if the calling process may expand its vm space by the passed
65981 * number of pages
65982 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65983 unsigned long lim;
65984
65985 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65986 -
65987 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65988 if (cur + npages > lim)
65989 return 0;
65990 return 1;
65991 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65992 vma->vm_start = addr;
65993 vma->vm_end = addr + len;
65994
65995 +#ifdef CONFIG_PAX_MPROTECT
65996 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65997 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65998 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65999 + return -EPERM;
66000 + if (!(vm_flags & VM_EXEC))
66001 + vm_flags &= ~VM_MAYEXEC;
66002 +#else
66003 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66004 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66005 +#endif
66006 + else
66007 + vm_flags &= ~VM_MAYWRITE;
66008 + }
66009 +#endif
66010 +
66011 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
66012 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66013
66014 diff -urNp linux-2.6.32.42/mm/mprotect.c linux-2.6.32.42/mm/mprotect.c
66015 --- linux-2.6.32.42/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
66016 +++ linux-2.6.32.42/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
66017 @@ -24,10 +24,16 @@
66018 #include <linux/mmu_notifier.h>
66019 #include <linux/migrate.h>
66020 #include <linux/perf_event.h>
66021 +
66022 +#ifdef CONFIG_PAX_MPROTECT
66023 +#include <linux/elf.h>
66024 +#endif
66025 +
66026 #include <asm/uaccess.h>
66027 #include <asm/pgtable.h>
66028 #include <asm/cacheflush.h>
66029 #include <asm/tlbflush.h>
66030 +#include <asm/mmu_context.h>
66031
66032 #ifndef pgprot_modify
66033 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
66034 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
66035 flush_tlb_range(vma, start, end);
66036 }
66037
66038 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66039 +/* called while holding the mmap semaphore for writing, except for stack expansion */
66040 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
66041 +{
66042 + unsigned long oldlimit, newlimit = 0UL;
66043 +
66044 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
66045 + return;
66046 +
66047 + spin_lock(&mm->page_table_lock);
66048 + oldlimit = mm->context.user_cs_limit;
66049 + if ((prot & VM_EXEC) && oldlimit < end)
66050 + /* USER_CS limit moved up */
66051 + newlimit = end;
66052 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
66053 + /* USER_CS limit moved down */
66054 + newlimit = start;
66055 +
66056 + if (newlimit) {
66057 + mm->context.user_cs_limit = newlimit;
66058 +
66059 +#ifdef CONFIG_SMP
66060 + wmb();
66061 + cpus_clear(mm->context.cpu_user_cs_mask);
66062 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
66063 +#endif
66064 +
66065 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
66066 + }
66067 + spin_unlock(&mm->page_table_lock);
66068 + if (newlimit == end) {
66069 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
66070 +
66071 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
66072 + if (is_vm_hugetlb_page(vma))
66073 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
66074 + else
66075 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
66076 + }
66077 +}
66078 +#endif
66079 +
66080 int
66081 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
66082 unsigned long start, unsigned long end, unsigned long newflags)
66083 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
66084 int error;
66085 int dirty_accountable = 0;
66086
66087 +#ifdef CONFIG_PAX_SEGMEXEC
66088 + struct vm_area_struct *vma_m = NULL;
66089 + unsigned long start_m, end_m;
66090 +
66091 + start_m = start + SEGMEXEC_TASK_SIZE;
66092 + end_m = end + SEGMEXEC_TASK_SIZE;
66093 +#endif
66094 +
66095 if (newflags == oldflags) {
66096 *pprev = vma;
66097 return 0;
66098 }
66099
66100 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
66101 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
66102 +
66103 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
66104 + return -ENOMEM;
66105 +
66106 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
66107 + return -ENOMEM;
66108 + }
66109 +
66110 /*
66111 * If we make a private mapping writable we increase our commit;
66112 * but (without finer accounting) cannot reduce our commit if we
66113 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
66114 }
66115 }
66116
66117 +#ifdef CONFIG_PAX_SEGMEXEC
66118 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
66119 + if (start != vma->vm_start) {
66120 + error = split_vma(mm, vma, start, 1);
66121 + if (error)
66122 + goto fail;
66123 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
66124 + *pprev = (*pprev)->vm_next;
66125 + }
66126 +
66127 + if (end != vma->vm_end) {
66128 + error = split_vma(mm, vma, end, 0);
66129 + if (error)
66130 + goto fail;
66131 + }
66132 +
66133 + if (pax_find_mirror_vma(vma)) {
66134 + error = __do_munmap(mm, start_m, end_m - start_m);
66135 + if (error)
66136 + goto fail;
66137 + } else {
66138 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66139 + if (!vma_m) {
66140 + error = -ENOMEM;
66141 + goto fail;
66142 + }
66143 + vma->vm_flags = newflags;
66144 + pax_mirror_vma(vma_m, vma);
66145 + }
66146 + }
66147 +#endif
66148 +
66149 /*
66150 * First try to merge with previous and/or next vma.
66151 */
66152 @@ -195,9 +293,21 @@ success:
66153 * vm_flags and vm_page_prot are protected by the mmap_sem
66154 * held in write mode.
66155 */
66156 +
66157 +#ifdef CONFIG_PAX_SEGMEXEC
66158 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
66159 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
66160 +#endif
66161 +
66162 vma->vm_flags = newflags;
66163 +
66164 +#ifdef CONFIG_PAX_MPROTECT
66165 + if (mm->binfmt && mm->binfmt->handle_mprotect)
66166 + mm->binfmt->handle_mprotect(vma, newflags);
66167 +#endif
66168 +
66169 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
66170 - vm_get_page_prot(newflags));
66171 + vm_get_page_prot(vma->vm_flags));
66172
66173 if (vma_wants_writenotify(vma)) {
66174 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
66175 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66176 end = start + len;
66177 if (end <= start)
66178 return -ENOMEM;
66179 +
66180 +#ifdef CONFIG_PAX_SEGMEXEC
66181 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66182 + if (end > SEGMEXEC_TASK_SIZE)
66183 + return -EINVAL;
66184 + } else
66185 +#endif
66186 +
66187 + if (end > TASK_SIZE)
66188 + return -EINVAL;
66189 +
66190 if (!arch_validate_prot(prot))
66191 return -EINVAL;
66192
66193 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66194 /*
66195 * Does the application expect PROT_READ to imply PROT_EXEC:
66196 */
66197 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66198 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66199 prot |= PROT_EXEC;
66200
66201 vm_flags = calc_vm_prot_bits(prot);
66202 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66203 if (start > vma->vm_start)
66204 prev = vma;
66205
66206 +#ifdef CONFIG_PAX_MPROTECT
66207 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
66208 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
66209 +#endif
66210 +
66211 for (nstart = start ; ; ) {
66212 unsigned long newflags;
66213
66214 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66215
66216 /* newflags >> 4 shift VM_MAY% in place of VM_% */
66217 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
66218 + if (prot & (PROT_WRITE | PROT_EXEC))
66219 + gr_log_rwxmprotect(vma->vm_file);
66220 +
66221 + error = -EACCES;
66222 + goto out;
66223 + }
66224 +
66225 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
66226 error = -EACCES;
66227 goto out;
66228 }
66229 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66230 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
66231 if (error)
66232 goto out;
66233 +
66234 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
66235 +
66236 nstart = tmp;
66237
66238 if (nstart < prev->vm_end)
66239 diff -urNp linux-2.6.32.42/mm/mremap.c linux-2.6.32.42/mm/mremap.c
66240 --- linux-2.6.32.42/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
66241 +++ linux-2.6.32.42/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
66242 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
66243 continue;
66244 pte = ptep_clear_flush(vma, old_addr, old_pte);
66245 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
66246 +
66247 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66248 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
66249 + pte = pte_exprotect(pte);
66250 +#endif
66251 +
66252 set_pte_at(mm, new_addr, new_pte, pte);
66253 }
66254
66255 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
66256 if (is_vm_hugetlb_page(vma))
66257 goto Einval;
66258
66259 +#ifdef CONFIG_PAX_SEGMEXEC
66260 + if (pax_find_mirror_vma(vma))
66261 + goto Einval;
66262 +#endif
66263 +
66264 /* We can't remap across vm area boundaries */
66265 if (old_len > vma->vm_end - addr)
66266 goto Efault;
66267 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
66268 unsigned long ret = -EINVAL;
66269 unsigned long charged = 0;
66270 unsigned long map_flags;
66271 + unsigned long pax_task_size = TASK_SIZE;
66272
66273 if (new_addr & ~PAGE_MASK)
66274 goto out;
66275
66276 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
66277 +#ifdef CONFIG_PAX_SEGMEXEC
66278 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
66279 + pax_task_size = SEGMEXEC_TASK_SIZE;
66280 +#endif
66281 +
66282 + pax_task_size -= PAGE_SIZE;
66283 +
66284 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
66285 goto out;
66286
66287 /* Check if the location we're moving into overlaps the
66288 * old location at all, and fail if it does.
66289 */
66290 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
66291 - goto out;
66292 -
66293 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
66294 + if (addr + old_len > new_addr && new_addr + new_len > addr)
66295 goto out;
66296
66297 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66298 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
66299 struct vm_area_struct *vma;
66300 unsigned long ret = -EINVAL;
66301 unsigned long charged = 0;
66302 + unsigned long pax_task_size = TASK_SIZE;
66303
66304 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
66305 goto out;
66306 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
66307 if (!new_len)
66308 goto out;
66309
66310 +#ifdef CONFIG_PAX_SEGMEXEC
66311 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
66312 + pax_task_size = SEGMEXEC_TASK_SIZE;
66313 +#endif
66314 +
66315 + pax_task_size -= PAGE_SIZE;
66316 +
66317 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
66318 + old_len > pax_task_size || addr > pax_task_size-old_len)
66319 + goto out;
66320 +
66321 if (flags & MREMAP_FIXED) {
66322 if (flags & MREMAP_MAYMOVE)
66323 ret = mremap_to(addr, old_len, new_addr, new_len);
66324 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
66325 addr + new_len);
66326 }
66327 ret = addr;
66328 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
66329 goto out;
66330 }
66331 }
66332 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
66333 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66334 if (ret)
66335 goto out;
66336 +
66337 + map_flags = vma->vm_flags;
66338 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66339 + if (!(ret & ~PAGE_MASK)) {
66340 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66341 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66342 + }
66343 }
66344 out:
66345 if (ret & ~PAGE_MASK)
66346 diff -urNp linux-2.6.32.42/mm/nommu.c linux-2.6.32.42/mm/nommu.c
66347 --- linux-2.6.32.42/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
66348 +++ linux-2.6.32.42/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
66349 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66350 int sysctl_overcommit_ratio = 50; /* default is 50% */
66351 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66352 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66353 -int heap_stack_gap = 0;
66354
66355 atomic_long_t mmap_pages_allocated;
66356
66357 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
66358 EXPORT_SYMBOL(find_vma);
66359
66360 /*
66361 - * find a VMA
66362 - * - we don't extend stack VMAs under NOMMU conditions
66363 - */
66364 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66365 -{
66366 - return find_vma(mm, addr);
66367 -}
66368 -
66369 -/*
66370 * expand a stack to a given address
66371 * - not supported under NOMMU conditions
66372 */
66373 diff -urNp linux-2.6.32.42/mm/page_alloc.c linux-2.6.32.42/mm/page_alloc.c
66374 --- linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
66375 +++ linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:56:37.000000000 -0400
66376 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
66377 int bad = 0;
66378 int wasMlocked = __TestClearPageMlocked(page);
66379
66380 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66381 + unsigned long index = 1UL << order;
66382 +#endif
66383 +
66384 kmemcheck_free_shadow(page, order);
66385
66386 for (i = 0 ; i < (1 << order) ; ++i)
66387 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
66388 debug_check_no_obj_freed(page_address(page),
66389 PAGE_SIZE << order);
66390 }
66391 +
66392 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66393 + for (; index; --index)
66394 + sanitize_highpage(page + index - 1);
66395 +#endif
66396 +
66397 arch_free_page(page, order);
66398 kernel_map_pages(page, 1 << order, 0);
66399
66400 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
66401 arch_alloc_page(page, order);
66402 kernel_map_pages(page, 1 << order, 1);
66403
66404 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
66405 if (gfp_flags & __GFP_ZERO)
66406 prep_zero_page(page, order, gfp_flags);
66407 +#endif
66408
66409 if (order && (gfp_flags & __GFP_COMP))
66410 prep_compound_page(page, order);
66411 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
66412 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
66413 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
66414 }
66415 +
66416 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66417 + sanitize_highpage(page);
66418 +#endif
66419 +
66420 arch_free_page(page, 0);
66421 kernel_map_pages(page, 1, 0);
66422
66423 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
66424 int cpu;
66425 struct zone *zone;
66426
66427 + pax_track_stack();
66428 +
66429 for_each_populated_zone(zone) {
66430 show_node(zone);
66431 printk("%s per-cpu:\n", zone->name);
66432 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
66433 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
66434 }
66435 #else
66436 -static void inline setup_usemap(struct pglist_data *pgdat,
66437 +static inline void setup_usemap(struct pglist_data *pgdat,
66438 struct zone *zone, unsigned long zonesize) {}
66439 #endif /* CONFIG_SPARSEMEM */
66440
66441 diff -urNp linux-2.6.32.42/mm/percpu.c linux-2.6.32.42/mm/percpu.c
66442 --- linux-2.6.32.42/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
66443 +++ linux-2.6.32.42/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
66444 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
66445 static unsigned int pcpu_last_unit_cpu __read_mostly;
66446
66447 /* the address of the first chunk which starts with the kernel static area */
66448 -void *pcpu_base_addr __read_mostly;
66449 +void *pcpu_base_addr __read_only;
66450 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66451
66452 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66453 diff -urNp linux-2.6.32.42/mm/rmap.c linux-2.6.32.42/mm/rmap.c
66454 --- linux-2.6.32.42/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
66455 +++ linux-2.6.32.42/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
66456 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
66457 /* page_table_lock to protect against threads */
66458 spin_lock(&mm->page_table_lock);
66459 if (likely(!vma->anon_vma)) {
66460 +
66461 +#ifdef CONFIG_PAX_SEGMEXEC
66462 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66463 +
66464 + if (vma_m) {
66465 + BUG_ON(vma_m->anon_vma);
66466 + vma_m->anon_vma = anon_vma;
66467 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
66468 + }
66469 +#endif
66470 +
66471 vma->anon_vma = anon_vma;
66472 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
66473 allocated = NULL;
66474 diff -urNp linux-2.6.32.42/mm/shmem.c linux-2.6.32.42/mm/shmem.c
66475 --- linux-2.6.32.42/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
66476 +++ linux-2.6.32.42/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
66477 @@ -31,7 +31,7 @@
66478 #include <linux/swap.h>
66479 #include <linux/ima.h>
66480
66481 -static struct vfsmount *shm_mnt;
66482 +struct vfsmount *shm_mnt;
66483
66484 #ifdef CONFIG_SHMEM
66485 /*
66486 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
66487 goto unlock;
66488 }
66489 entry = shmem_swp_entry(info, index, NULL);
66490 + if (!entry)
66491 + goto unlock;
66492 if (entry->val) {
66493 /*
66494 * The more uptodate page coming down from a stacked
66495 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
66496 struct vm_area_struct pvma;
66497 struct page *page;
66498
66499 + pax_track_stack();
66500 +
66501 spol = mpol_cond_copy(&mpol,
66502 mpol_shared_policy_lookup(&info->policy, idx));
66503
66504 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
66505
66506 info = SHMEM_I(inode);
66507 inode->i_size = len-1;
66508 - if (len <= (char *)inode - (char *)info) {
66509 + if (len <= (char *)inode - (char *)info && len <= 64) {
66510 /* do it inline */
66511 memcpy(info, symname, len);
66512 inode->i_op = &shmem_symlink_inline_operations;
66513 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
66514 int err = -ENOMEM;
66515
66516 /* Round up to L1_CACHE_BYTES to resist false sharing */
66517 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66518 - L1_CACHE_BYTES), GFP_KERNEL);
66519 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66520 if (!sbinfo)
66521 return -ENOMEM;
66522
66523 diff -urNp linux-2.6.32.42/mm/slab.c linux-2.6.32.42/mm/slab.c
66524 --- linux-2.6.32.42/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
66525 +++ linux-2.6.32.42/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
66526 @@ -174,7 +174,7 @@
66527
66528 /* Legal flag mask for kmem_cache_create(). */
66529 #if DEBUG
66530 -# define CREATE_MASK (SLAB_RED_ZONE | \
66531 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66532 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66533 SLAB_CACHE_DMA | \
66534 SLAB_STORE_USER | \
66535 @@ -182,7 +182,7 @@
66536 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66537 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66538 #else
66539 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66540 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66541 SLAB_CACHE_DMA | \
66542 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66543 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66544 @@ -308,7 +308,7 @@ struct kmem_list3 {
66545 * Need this for bootstrapping a per node allocator.
66546 */
66547 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66548 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66549 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66550 #define CACHE_CACHE 0
66551 #define SIZE_AC MAX_NUMNODES
66552 #define SIZE_L3 (2 * MAX_NUMNODES)
66553 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
66554 if ((x)->max_freeable < i) \
66555 (x)->max_freeable = i; \
66556 } while (0)
66557 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66558 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66559 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66560 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66561 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66562 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66563 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66564 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66565 #else
66566 #define STATS_INC_ACTIVE(x) do { } while (0)
66567 #define STATS_DEC_ACTIVE(x) do { } while (0)
66568 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
66569 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66570 */
66571 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66572 - const struct slab *slab, void *obj)
66573 + const struct slab *slab, const void *obj)
66574 {
66575 u32 offset = (obj - slab->s_mem);
66576 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66577 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
66578 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66579 sizes[INDEX_AC].cs_size,
66580 ARCH_KMALLOC_MINALIGN,
66581 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66582 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66583 NULL);
66584
66585 if (INDEX_AC != INDEX_L3) {
66586 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
66587 kmem_cache_create(names[INDEX_L3].name,
66588 sizes[INDEX_L3].cs_size,
66589 ARCH_KMALLOC_MINALIGN,
66590 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66591 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66592 NULL);
66593 }
66594
66595 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
66596 sizes->cs_cachep = kmem_cache_create(names->name,
66597 sizes->cs_size,
66598 ARCH_KMALLOC_MINALIGN,
66599 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66600 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66601 NULL);
66602 }
66603 #ifdef CONFIG_ZONE_DMA
66604 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
66605 }
66606 /* cpu stats */
66607 {
66608 - unsigned long allochit = atomic_read(&cachep->allochit);
66609 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66610 - unsigned long freehit = atomic_read(&cachep->freehit);
66611 - unsigned long freemiss = atomic_read(&cachep->freemiss);
66612 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66613 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66614 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66615 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66616
66617 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66618 allochit, allocmiss, freehit, freemiss);
66619 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
66620
66621 static int __init slab_proc_init(void)
66622 {
66623 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66624 + mode_t gr_mode = S_IRUGO;
66625 +
66626 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66627 + gr_mode = S_IRUSR;
66628 +#endif
66629 +
66630 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66631 #ifdef CONFIG_DEBUG_SLAB_LEAK
66632 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66633 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66634 #endif
66635 return 0;
66636 }
66637 module_init(slab_proc_init);
66638 #endif
66639
66640 +void check_object_size(const void *ptr, unsigned long n, bool to)
66641 +{
66642 +
66643 +#ifdef CONFIG_PAX_USERCOPY
66644 + struct page *page;
66645 + struct kmem_cache *cachep = NULL;
66646 + struct slab *slabp;
66647 + unsigned int objnr;
66648 + unsigned long offset;
66649 +
66650 + if (!n)
66651 + return;
66652 +
66653 + if (ZERO_OR_NULL_PTR(ptr))
66654 + goto report;
66655 +
66656 + if (!virt_addr_valid(ptr))
66657 + return;
66658 +
66659 + page = virt_to_head_page(ptr);
66660 +
66661 + if (!PageSlab(page)) {
66662 + if (object_is_on_stack(ptr, n) == -1)
66663 + goto report;
66664 + return;
66665 + }
66666 +
66667 + cachep = page_get_cache(page);
66668 + if (!(cachep->flags & SLAB_USERCOPY))
66669 + goto report;
66670 +
66671 + slabp = page_get_slab(page);
66672 + objnr = obj_to_index(cachep, slabp, ptr);
66673 + BUG_ON(objnr >= cachep->num);
66674 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66675 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66676 + return;
66677 +
66678 +report:
66679 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66680 +#endif
66681 +
66682 +}
66683 +EXPORT_SYMBOL(check_object_size);
66684 +
66685 /**
66686 * ksize - get the actual amount of memory allocated for a given object
66687 * @objp: Pointer to the object
66688 diff -urNp linux-2.6.32.42/mm/slob.c linux-2.6.32.42/mm/slob.c
66689 --- linux-2.6.32.42/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66690 +++ linux-2.6.32.42/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
66691 @@ -29,7 +29,7 @@
66692 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66693 * alloc_pages() directly, allocating compound pages so the page order
66694 * does not have to be separately tracked, and also stores the exact
66695 - * allocation size in page->private so that it can be used to accurately
66696 + * allocation size in slob_page->size so that it can be used to accurately
66697 * provide ksize(). These objects are detected in kfree() because slob_page()
66698 * is false for them.
66699 *
66700 @@ -58,6 +58,7 @@
66701 */
66702
66703 #include <linux/kernel.h>
66704 +#include <linux/sched.h>
66705 #include <linux/slab.h>
66706 #include <linux/mm.h>
66707 #include <linux/swap.h> /* struct reclaim_state */
66708 @@ -100,7 +101,8 @@ struct slob_page {
66709 unsigned long flags; /* mandatory */
66710 atomic_t _count; /* mandatory */
66711 slobidx_t units; /* free units left in page */
66712 - unsigned long pad[2];
66713 + unsigned long pad[1];
66714 + unsigned long size; /* size when >=PAGE_SIZE */
66715 slob_t *free; /* first free slob_t in page */
66716 struct list_head list; /* linked list of free pages */
66717 };
66718 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66719 */
66720 static inline int is_slob_page(struct slob_page *sp)
66721 {
66722 - return PageSlab((struct page *)sp);
66723 + return PageSlab((struct page *)sp) && !sp->size;
66724 }
66725
66726 static inline void set_slob_page(struct slob_page *sp)
66727 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66728
66729 static inline struct slob_page *slob_page(const void *addr)
66730 {
66731 - return (struct slob_page *)virt_to_page(addr);
66732 + return (struct slob_page *)virt_to_head_page(addr);
66733 }
66734
66735 /*
66736 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66737 /*
66738 * Return the size of a slob block.
66739 */
66740 -static slobidx_t slob_units(slob_t *s)
66741 +static slobidx_t slob_units(const slob_t *s)
66742 {
66743 if (s->units > 0)
66744 return s->units;
66745 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66746 /*
66747 * Return the next free slob block pointer after this one.
66748 */
66749 -static slob_t *slob_next(slob_t *s)
66750 +static slob_t *slob_next(const slob_t *s)
66751 {
66752 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66753 slobidx_t next;
66754 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66755 /*
66756 * Returns true if s is the last free block in its page.
66757 */
66758 -static int slob_last(slob_t *s)
66759 +static int slob_last(const slob_t *s)
66760 {
66761 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66762 }
66763 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
66764 if (!page)
66765 return NULL;
66766
66767 + set_slob_page(page);
66768 return page_address(page);
66769 }
66770
66771 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
66772 if (!b)
66773 return NULL;
66774 sp = slob_page(b);
66775 - set_slob_page(sp);
66776
66777 spin_lock_irqsave(&slob_lock, flags);
66778 sp->units = SLOB_UNITS(PAGE_SIZE);
66779 sp->free = b;
66780 + sp->size = 0;
66781 INIT_LIST_HEAD(&sp->list);
66782 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66783 set_slob_page_free(sp, slob_list);
66784 @@ -475,10 +478,9 @@ out:
66785 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66786 #endif
66787
66788 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66789 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66790 {
66791 - unsigned int *m;
66792 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66793 + slob_t *m;
66794 void *ret;
66795
66796 lockdep_trace_alloc(gfp);
66797 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66798
66799 if (!m)
66800 return NULL;
66801 - *m = size;
66802 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66803 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66804 + m[0].units = size;
66805 + m[1].units = align;
66806 ret = (void *)m + align;
66807
66808 trace_kmalloc_node(_RET_IP_, ret,
66809 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
66810
66811 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
66812 if (ret) {
66813 - struct page *page;
66814 - page = virt_to_page(ret);
66815 - page->private = size;
66816 + struct slob_page *sp;
66817 + sp = slob_page(ret);
66818 + sp->size = size;
66819 }
66820
66821 trace_kmalloc_node(_RET_IP_, ret,
66822 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
66823 kmemleak_alloc(ret, size, 1, gfp);
66824 return ret;
66825 }
66826 +
66827 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66828 +{
66829 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66830 +
66831 + return __kmalloc_node_align(size, gfp, node, align);
66832 +}
66833 EXPORT_SYMBOL(__kmalloc_node);
66834
66835 void kfree(const void *block)
66836 @@ -528,13 +540,81 @@ void kfree(const void *block)
66837 sp = slob_page(block);
66838 if (is_slob_page(sp)) {
66839 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66840 - unsigned int *m = (unsigned int *)(block - align);
66841 - slob_free(m, *m + align);
66842 - } else
66843 + slob_t *m = (slob_t *)(block - align);
66844 + slob_free(m, m[0].units + align);
66845 + } else {
66846 + clear_slob_page(sp);
66847 + free_slob_page(sp);
66848 + sp->size = 0;
66849 put_page(&sp->page);
66850 + }
66851 }
66852 EXPORT_SYMBOL(kfree);
66853
66854 +void check_object_size(const void *ptr, unsigned long n, bool to)
66855 +{
66856 +
66857 +#ifdef CONFIG_PAX_USERCOPY
66858 + struct slob_page *sp;
66859 + const slob_t *free;
66860 + const void *base;
66861 +
66862 + if (!n)
66863 + return;
66864 +
66865 + if (ZERO_OR_NULL_PTR(ptr))
66866 + goto report;
66867 +
66868 + if (!virt_addr_valid(ptr))
66869 + return;
66870 +
66871 + sp = slob_page(ptr);
66872 + if (!PageSlab((struct page*)sp)) {
66873 + if (object_is_on_stack(ptr, n) == -1)
66874 + goto report;
66875 + return;
66876 + }
66877 +
66878 + if (sp->size) {
66879 + base = page_address(&sp->page);
66880 + if (base <= ptr && n <= sp->size - (ptr - base))
66881 + return;
66882 + goto report;
66883 + }
66884 +
66885 + /* some tricky double walking to find the chunk */
66886 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66887 + free = sp->free;
66888 +
66889 + while (!slob_last(free) && (void *)free <= ptr) {
66890 + base = free + slob_units(free);
66891 + free = slob_next(free);
66892 + }
66893 +
66894 + while (base < (void *)free) {
66895 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66896 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66897 + int offset;
66898 +
66899 + if (ptr < base + align)
66900 + goto report;
66901 +
66902 + offset = ptr - base - align;
66903 + if (offset < m) {
66904 + if (n <= m - offset)
66905 + return;
66906 + goto report;
66907 + }
66908 + base += size;
66909 + }
66910 +
66911 +report:
66912 + pax_report_usercopy(ptr, n, to, NULL);
66913 +#endif
66914 +
66915 +}
66916 +EXPORT_SYMBOL(check_object_size);
66917 +
66918 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66919 size_t ksize(const void *block)
66920 {
66921 @@ -547,10 +627,10 @@ size_t ksize(const void *block)
66922 sp = slob_page(block);
66923 if (is_slob_page(sp)) {
66924 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66925 - unsigned int *m = (unsigned int *)(block - align);
66926 - return SLOB_UNITS(*m) * SLOB_UNIT;
66927 + slob_t *m = (slob_t *)(block - align);
66928 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66929 } else
66930 - return sp->page.private;
66931 + return sp->size;
66932 }
66933 EXPORT_SYMBOL(ksize);
66934
66935 @@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66936 {
66937 void *b;
66938
66939 +#ifdef CONFIG_PAX_USERCOPY
66940 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66941 +#else
66942 if (c->size < PAGE_SIZE) {
66943 b = slob_alloc(c->size, flags, c->align, node);
66944 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66945 SLOB_UNITS(c->size) * SLOB_UNIT,
66946 flags, node);
66947 } else {
66948 + struct slob_page *sp;
66949 +
66950 b = slob_new_pages(flags, get_order(c->size), node);
66951 + sp = slob_page(b);
66952 + sp->size = c->size;
66953 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66954 PAGE_SIZE << get_order(c->size),
66955 flags, node);
66956 }
66957 +#endif
66958
66959 if (c->ctor)
66960 c->ctor(b);
66961 @@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66962
66963 static void __kmem_cache_free(void *b, int size)
66964 {
66965 - if (size < PAGE_SIZE)
66966 + struct slob_page *sp = slob_page(b);
66967 +
66968 + if (is_slob_page(sp))
66969 slob_free(b, size);
66970 - else
66971 + else {
66972 + clear_slob_page(sp);
66973 + free_slob_page(sp);
66974 + sp->size = 0;
66975 slob_free_pages(b, get_order(size));
66976 + }
66977 }
66978
66979 static void kmem_rcu_free(struct rcu_head *head)
66980 @@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66981
66982 void kmem_cache_free(struct kmem_cache *c, void *b)
66983 {
66984 + int size = c->size;
66985 +
66986 +#ifdef CONFIG_PAX_USERCOPY
66987 + if (size + c->align < PAGE_SIZE) {
66988 + size += c->align;
66989 + b -= c->align;
66990 + }
66991 +#endif
66992 +
66993 kmemleak_free_recursive(b, c->flags);
66994 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66995 struct slob_rcu *slob_rcu;
66996 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66997 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66998 INIT_RCU_HEAD(&slob_rcu->head);
66999 - slob_rcu->size = c->size;
67000 + slob_rcu->size = size;
67001 call_rcu(&slob_rcu->head, kmem_rcu_free);
67002 } else {
67003 - __kmem_cache_free(b, c->size);
67004 + __kmem_cache_free(b, size);
67005 }
67006
67007 trace_kmem_cache_free(_RET_IP_, b);
67008 diff -urNp linux-2.6.32.42/mm/slub.c linux-2.6.32.42/mm/slub.c
67009 --- linux-2.6.32.42/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
67010 +++ linux-2.6.32.42/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
67011 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
67012 if (!t->addr)
67013 return;
67014
67015 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
67016 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
67017 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
67018 }
67019
67020 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
67021
67022 page = virt_to_head_page(x);
67023
67024 + BUG_ON(!PageSlab(page));
67025 +
67026 slab_free(s, page, x, _RET_IP_);
67027
67028 trace_kmem_cache_free(_RET_IP_, x);
67029 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
67030 * Merge control. If this is set then no merging of slab caches will occur.
67031 * (Could be removed. This was introduced to pacify the merge skeptics.)
67032 */
67033 -static int slub_nomerge;
67034 +static int slub_nomerge = 1;
67035
67036 /*
67037 * Calculate the order of allocation given an slab object size.
67038 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
67039 * list to avoid pounding the page allocator excessively.
67040 */
67041 set_min_partial(s, ilog2(s->size));
67042 - s->refcount = 1;
67043 + atomic_set(&s->refcount, 1);
67044 #ifdef CONFIG_NUMA
67045 s->remote_node_defrag_ratio = 1000;
67046 #endif
67047 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
67048 void kmem_cache_destroy(struct kmem_cache *s)
67049 {
67050 down_write(&slub_lock);
67051 - s->refcount--;
67052 - if (!s->refcount) {
67053 + if (atomic_dec_and_test(&s->refcount)) {
67054 list_del(&s->list);
67055 up_write(&slub_lock);
67056 if (kmem_cache_close(s)) {
67057 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
67058 __setup("slub_nomerge", setup_slub_nomerge);
67059
67060 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
67061 - const char *name, int size, gfp_t gfp_flags)
67062 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
67063 {
67064 - unsigned int flags = 0;
67065 -
67066 if (gfp_flags & SLUB_DMA)
67067 - flags = SLAB_CACHE_DMA;
67068 + flags |= SLAB_CACHE_DMA;
67069
67070 /*
67071 * This function is called with IRQs disabled during early-boot on
67072 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
67073 EXPORT_SYMBOL(__kmalloc_node);
67074 #endif
67075
67076 +void check_object_size(const void *ptr, unsigned long n, bool to)
67077 +{
67078 +
67079 +#ifdef CONFIG_PAX_USERCOPY
67080 + struct page *page;
67081 + struct kmem_cache *s = NULL;
67082 + unsigned long offset;
67083 +
67084 + if (!n)
67085 + return;
67086 +
67087 + if (ZERO_OR_NULL_PTR(ptr))
67088 + goto report;
67089 +
67090 + if (!virt_addr_valid(ptr))
67091 + return;
67092 +
67093 + page = get_object_page(ptr);
67094 +
67095 + if (!page) {
67096 + if (object_is_on_stack(ptr, n) == -1)
67097 + goto report;
67098 + return;
67099 + }
67100 +
67101 + s = page->slab;
67102 + if (!(s->flags & SLAB_USERCOPY))
67103 + goto report;
67104 +
67105 + offset = (ptr - page_address(page)) % s->size;
67106 + if (offset <= s->objsize && n <= s->objsize - offset)
67107 + return;
67108 +
67109 +report:
67110 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
67111 +#endif
67112 +
67113 +}
67114 +EXPORT_SYMBOL(check_object_size);
67115 +
67116 size_t ksize(const void *object)
67117 {
67118 struct page *page;
67119 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
67120 * kmem_cache_open for slab_state == DOWN.
67121 */
67122 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
67123 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
67124 - kmalloc_caches[0].refcount = -1;
67125 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
67126 + atomic_set(&kmalloc_caches[0].refcount, -1);
67127 caches++;
67128
67129 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
67130 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
67131 /* Caches that are not of the two-to-the-power-of size */
67132 if (KMALLOC_MIN_SIZE <= 32) {
67133 create_kmalloc_cache(&kmalloc_caches[1],
67134 - "kmalloc-96", 96, GFP_NOWAIT);
67135 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
67136 caches++;
67137 }
67138 if (KMALLOC_MIN_SIZE <= 64) {
67139 create_kmalloc_cache(&kmalloc_caches[2],
67140 - "kmalloc-192", 192, GFP_NOWAIT);
67141 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
67142 caches++;
67143 }
67144
67145 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
67146 create_kmalloc_cache(&kmalloc_caches[i],
67147 - "kmalloc", 1 << i, GFP_NOWAIT);
67148 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
67149 caches++;
67150 }
67151
67152 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
67153 /*
67154 * We may have set a slab to be unmergeable during bootstrap.
67155 */
67156 - if (s->refcount < 0)
67157 + if (atomic_read(&s->refcount) < 0)
67158 return 1;
67159
67160 return 0;
67161 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
67162 if (s) {
67163 int cpu;
67164
67165 - s->refcount++;
67166 + atomic_inc(&s->refcount);
67167 /*
67168 * Adjust the object sizes so that we clear
67169 * the complete object on kzalloc.
67170 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
67171
67172 if (sysfs_slab_alias(s, name)) {
67173 down_write(&slub_lock);
67174 - s->refcount--;
67175 + atomic_dec(&s->refcount);
67176 up_write(&slub_lock);
67177 goto err;
67178 }
67179 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
67180
67181 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
67182 {
67183 - return sprintf(buf, "%d\n", s->refcount - 1);
67184 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
67185 }
67186 SLAB_ATTR_RO(aliases);
67187
67188 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
67189 kfree(s);
67190 }
67191
67192 -static struct sysfs_ops slab_sysfs_ops = {
67193 +static const struct sysfs_ops slab_sysfs_ops = {
67194 .show = slab_attr_show,
67195 .store = slab_attr_store,
67196 };
67197 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
67198 return 0;
67199 }
67200
67201 -static struct kset_uevent_ops slab_uevent_ops = {
67202 +static const struct kset_uevent_ops slab_uevent_ops = {
67203 .filter = uevent_filter,
67204 };
67205
67206 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
67207
67208 static int __init slab_proc_init(void)
67209 {
67210 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
67211 + mode_t gr_mode = S_IRUGO;
67212 +
67213 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67214 + gr_mode = S_IRUSR;
67215 +#endif
67216 +
67217 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
67218 return 0;
67219 }
67220 module_init(slab_proc_init);
67221 diff -urNp linux-2.6.32.42/mm/util.c linux-2.6.32.42/mm/util.c
67222 --- linux-2.6.32.42/mm/util.c 2011-03-27 14:31:47.000000000 -0400
67223 +++ linux-2.6.32.42/mm/util.c 2011-04-17 15:56:46.000000000 -0400
67224 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
67225 void arch_pick_mmap_layout(struct mm_struct *mm)
67226 {
67227 mm->mmap_base = TASK_UNMAPPED_BASE;
67228 +
67229 +#ifdef CONFIG_PAX_RANDMMAP
67230 + if (mm->pax_flags & MF_PAX_RANDMMAP)
67231 + mm->mmap_base += mm->delta_mmap;
67232 +#endif
67233 +
67234 mm->get_unmapped_area = arch_get_unmapped_area;
67235 mm->unmap_area = arch_unmap_area;
67236 }
67237 diff -urNp linux-2.6.32.42/mm/vmalloc.c linux-2.6.32.42/mm/vmalloc.c
67238 --- linux-2.6.32.42/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
67239 +++ linux-2.6.32.42/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
67240 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67241
67242 pte = pte_offset_kernel(pmd, addr);
67243 do {
67244 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67245 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67246 +
67247 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67248 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67249 + BUG_ON(!pte_exec(*pte));
67250 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67251 + continue;
67252 + }
67253 +#endif
67254 +
67255 + {
67256 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67257 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67258 + }
67259 } while (pte++, addr += PAGE_SIZE, addr != end);
67260 }
67261
67262 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67263 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67264 {
67265 pte_t *pte;
67266 + int ret = -ENOMEM;
67267
67268 /*
67269 * nr is a running index into the array which helps higher level
67270 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
67271 pte = pte_alloc_kernel(pmd, addr);
67272 if (!pte)
67273 return -ENOMEM;
67274 +
67275 + pax_open_kernel();
67276 do {
67277 struct page *page = pages[*nr];
67278
67279 - if (WARN_ON(!pte_none(*pte)))
67280 - return -EBUSY;
67281 - if (WARN_ON(!page))
67282 - return -ENOMEM;
67283 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67284 + if (!(pgprot_val(prot) & _PAGE_NX))
67285 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
67286 + else
67287 +#endif
67288 +
67289 + if (WARN_ON(!pte_none(*pte))) {
67290 + ret = -EBUSY;
67291 + goto out;
67292 + }
67293 + if (WARN_ON(!page)) {
67294 + ret = -ENOMEM;
67295 + goto out;
67296 + }
67297 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67298 (*nr)++;
67299 } while (pte++, addr += PAGE_SIZE, addr != end);
67300 - return 0;
67301 + ret = 0;
67302 +out:
67303 + pax_close_kernel();
67304 + return ret;
67305 }
67306
67307 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67308 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
67309 * and fall back on vmalloc() if that fails. Others
67310 * just put it in the vmalloc space.
67311 */
67312 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67313 +#ifdef CONFIG_MODULES
67314 +#ifdef MODULES_VADDR
67315 unsigned long addr = (unsigned long)x;
67316 if (addr >= MODULES_VADDR && addr < MODULES_END)
67317 return 1;
67318 #endif
67319 +
67320 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67321 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67322 + return 1;
67323 +#endif
67324 +
67325 +#endif
67326 +
67327 return is_vmalloc_addr(x);
67328 }
67329
67330 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
67331
67332 if (!pgd_none(*pgd)) {
67333 pud_t *pud = pud_offset(pgd, addr);
67334 +#ifdef CONFIG_X86
67335 + if (!pud_large(*pud))
67336 +#endif
67337 if (!pud_none(*pud)) {
67338 pmd_t *pmd = pmd_offset(pud, addr);
67339 +#ifdef CONFIG_X86
67340 + if (!pmd_large(*pmd))
67341 +#endif
67342 if (!pmd_none(*pmd)) {
67343 pte_t *ptep, pte;
67344
67345 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
67346 struct rb_node *tmp;
67347
67348 while (*p) {
67349 - struct vmap_area *tmp;
67350 + struct vmap_area *varea;
67351
67352 parent = *p;
67353 - tmp = rb_entry(parent, struct vmap_area, rb_node);
67354 - if (va->va_start < tmp->va_end)
67355 + varea = rb_entry(parent, struct vmap_area, rb_node);
67356 + if (va->va_start < varea->va_end)
67357 p = &(*p)->rb_left;
67358 - else if (va->va_end > tmp->va_start)
67359 + else if (va->va_end > varea->va_start)
67360 p = &(*p)->rb_right;
67361 else
67362 BUG();
67363 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
67364 struct vm_struct *area;
67365
67366 BUG_ON(in_interrupt());
67367 +
67368 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67369 + if (flags & VM_KERNEXEC) {
67370 + if (start != VMALLOC_START || end != VMALLOC_END)
67371 + return NULL;
67372 + start = (unsigned long)MODULES_EXEC_VADDR;
67373 + end = (unsigned long)MODULES_EXEC_END;
67374 + }
67375 +#endif
67376 +
67377 if (flags & VM_IOREMAP) {
67378 int bit = fls(size);
67379
67380 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
67381 if (count > totalram_pages)
67382 return NULL;
67383
67384 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67385 + if (!(pgprot_val(prot) & _PAGE_NX))
67386 + flags |= VM_KERNEXEC;
67387 +#endif
67388 +
67389 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67390 __builtin_return_address(0));
67391 if (!area)
67392 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
67393 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67394 return NULL;
67395
67396 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67397 + if (!(pgprot_val(prot) & _PAGE_NX))
67398 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67399 + node, gfp_mask, caller);
67400 + else
67401 +#endif
67402 +
67403 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
67404 VMALLOC_END, node, gfp_mask, caller);
67405
67406 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
67407 return addr;
67408 }
67409
67410 +#undef __vmalloc
67411 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67412 {
67413 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67414 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
67415 * For tight control over page level allocator and protection flags
67416 * use __vmalloc() instead.
67417 */
67418 +#undef vmalloc
67419 void *vmalloc(unsigned long size)
67420 {
67421 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67422 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
67423 * The resulting memory area is zeroed so it can be mapped to userspace
67424 * without leaking data.
67425 */
67426 +#undef vmalloc_user
67427 void *vmalloc_user(unsigned long size)
67428 {
67429 struct vm_struct *area;
67430 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
67431 * For tight control over page level allocator and protection flags
67432 * use __vmalloc() instead.
67433 */
67434 +#undef vmalloc_node
67435 void *vmalloc_node(unsigned long size, int node)
67436 {
67437 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67438 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
67439 * For tight control over page level allocator and protection flags
67440 * use __vmalloc() instead.
67441 */
67442 -
67443 +#undef vmalloc_exec
67444 void *vmalloc_exec(unsigned long size)
67445 {
67446 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67447 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67448 -1, __builtin_return_address(0));
67449 }
67450
67451 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
67452 * Allocate enough 32bit PA addressable pages to cover @size from the
67453 * page level allocator and map them into contiguous kernel virtual space.
67454 */
67455 +#undef vmalloc_32
67456 void *vmalloc_32(unsigned long size)
67457 {
67458 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67459 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
67460 * The resulting memory area is 32bit addressable and zeroed so it can be
67461 * mapped to userspace without leaking data.
67462 */
67463 +#undef vmalloc_32_user
67464 void *vmalloc_32_user(unsigned long size)
67465 {
67466 struct vm_struct *area;
67467 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
67468 unsigned long uaddr = vma->vm_start;
67469 unsigned long usize = vma->vm_end - vma->vm_start;
67470
67471 + BUG_ON(vma->vm_mirror);
67472 +
67473 if ((PAGE_SIZE-1) & (unsigned long)addr)
67474 return -EINVAL;
67475
67476 diff -urNp linux-2.6.32.42/mm/vmstat.c linux-2.6.32.42/mm/vmstat.c
67477 --- linux-2.6.32.42/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
67478 +++ linux-2.6.32.42/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
67479 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
67480 *
67481 * vm_stat contains the global counters
67482 */
67483 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67484 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67485 EXPORT_SYMBOL(vm_stat);
67486
67487 #ifdef CONFIG_SMP
67488 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
67489 v = p->vm_stat_diff[i];
67490 p->vm_stat_diff[i] = 0;
67491 local_irq_restore(flags);
67492 - atomic_long_add(v, &zone->vm_stat[i]);
67493 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67494 global_diff[i] += v;
67495 #ifdef CONFIG_NUMA
67496 /* 3 seconds idle till flush */
67497 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
67498
67499 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67500 if (global_diff[i])
67501 - atomic_long_add(global_diff[i], &vm_stat[i]);
67502 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67503 }
67504
67505 #endif
67506 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
67507 start_cpu_timer(cpu);
67508 #endif
67509 #ifdef CONFIG_PROC_FS
67510 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67511 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67512 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67513 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67514 + {
67515 + mode_t gr_mode = S_IRUGO;
67516 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67517 + gr_mode = S_IRUSR;
67518 +#endif
67519 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67520 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67521 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67522 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67523 +#else
67524 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67525 +#endif
67526 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67527 + }
67528 #endif
67529 return 0;
67530 }
67531 diff -urNp linux-2.6.32.42/net/8021q/vlan.c linux-2.6.32.42/net/8021q/vlan.c
67532 --- linux-2.6.32.42/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
67533 +++ linux-2.6.32.42/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
67534 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
67535 err = -EPERM;
67536 if (!capable(CAP_NET_ADMIN))
67537 break;
67538 - if ((args.u.name_type >= 0) &&
67539 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67540 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67541 struct vlan_net *vn;
67542
67543 vn = net_generic(net, vlan_net_id);
67544 diff -urNp linux-2.6.32.42/net/atm/atm_misc.c linux-2.6.32.42/net/atm/atm_misc.c
67545 --- linux-2.6.32.42/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
67546 +++ linux-2.6.32.42/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
67547 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
67548 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67549 return 1;
67550 atm_return(vcc,truesize);
67551 - atomic_inc(&vcc->stats->rx_drop);
67552 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67553 return 0;
67554 }
67555
67556 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
67557 }
67558 }
67559 atm_return(vcc,guess);
67560 - atomic_inc(&vcc->stats->rx_drop);
67561 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67562 return NULL;
67563 }
67564
67565 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
67566
67567 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67568 {
67569 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67570 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67571 __SONET_ITEMS
67572 #undef __HANDLE_ITEM
67573 }
67574 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
67575
67576 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67577 {
67578 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
67579 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67580 __SONET_ITEMS
67581 #undef __HANDLE_ITEM
67582 }
67583 diff -urNp linux-2.6.32.42/net/atm/mpoa_caches.c linux-2.6.32.42/net/atm/mpoa_caches.c
67584 --- linux-2.6.32.42/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
67585 +++ linux-2.6.32.42/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
67586 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
67587 struct timeval now;
67588 struct k_message msg;
67589
67590 + pax_track_stack();
67591 +
67592 do_gettimeofday(&now);
67593
67594 write_lock_irq(&client->egress_lock);
67595 diff -urNp linux-2.6.32.42/net/atm/proc.c linux-2.6.32.42/net/atm/proc.c
67596 --- linux-2.6.32.42/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
67597 +++ linux-2.6.32.42/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
67598 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
67599 const struct k_atm_aal_stats *stats)
67600 {
67601 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67602 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
67603 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
67604 - atomic_read(&stats->rx_drop));
67605 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67606 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67607 + atomic_read_unchecked(&stats->rx_drop));
67608 }
67609
67610 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67611 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
67612 {
67613 struct sock *sk = sk_atm(vcc);
67614
67615 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67616 + seq_printf(seq, "%p ", NULL);
67617 +#else
67618 seq_printf(seq, "%p ", vcc);
67619 +#endif
67620 +
67621 if (!vcc->dev)
67622 seq_printf(seq, "Unassigned ");
67623 else
67624 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
67625 {
67626 if (!vcc->dev)
67627 seq_printf(seq, sizeof(void *) == 4 ?
67628 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67629 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
67630 +#else
67631 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
67632 +#endif
67633 else
67634 seq_printf(seq, "%3d %3d %5d ",
67635 vcc->dev->number, vcc->vpi, vcc->vci);
67636 diff -urNp linux-2.6.32.42/net/atm/resources.c linux-2.6.32.42/net/atm/resources.c
67637 --- linux-2.6.32.42/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
67638 +++ linux-2.6.32.42/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
67639 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
67640 static void copy_aal_stats(struct k_atm_aal_stats *from,
67641 struct atm_aal_stats *to)
67642 {
67643 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67644 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67645 __AAL_STAT_ITEMS
67646 #undef __HANDLE_ITEM
67647 }
67648 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67649 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67650 struct atm_aal_stats *to)
67651 {
67652 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67653 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67654 __AAL_STAT_ITEMS
67655 #undef __HANDLE_ITEM
67656 }
67657 diff -urNp linux-2.6.32.42/net/bluetooth/l2cap.c linux-2.6.32.42/net/bluetooth/l2cap.c
67658 --- linux-2.6.32.42/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67659 +++ linux-2.6.32.42/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
67660 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67661 err = -ENOTCONN;
67662 break;
67663 }
67664 -
67665 + memset(&cinfo, 0, sizeof(cinfo));
67666 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
67667 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
67668
67669 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
67670
67671 /* Reject if config buffer is too small. */
67672 len = cmd_len - sizeof(*req);
67673 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67674 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67675 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
67676 l2cap_build_conf_rsp(sk, rsp,
67677 L2CAP_CONF_REJECT, flags), rsp);
67678 diff -urNp linux-2.6.32.42/net/bluetooth/rfcomm/sock.c linux-2.6.32.42/net/bluetooth/rfcomm/sock.c
67679 --- linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
67680 +++ linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
67681 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
67682
67683 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
67684
67685 + memset(&cinfo, 0, sizeof(cinfo));
67686 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
67687 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
67688
67689 diff -urNp linux-2.6.32.42/net/bridge/br_private.h linux-2.6.32.42/net/bridge/br_private.h
67690 --- linux-2.6.32.42/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
67691 +++ linux-2.6.32.42/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
67692 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
67693
67694 #ifdef CONFIG_SYSFS
67695 /* br_sysfs_if.c */
67696 -extern struct sysfs_ops brport_sysfs_ops;
67697 +extern const struct sysfs_ops brport_sysfs_ops;
67698 extern int br_sysfs_addif(struct net_bridge_port *p);
67699
67700 /* br_sysfs_br.c */
67701 diff -urNp linux-2.6.32.42/net/bridge/br_stp_if.c linux-2.6.32.42/net/bridge/br_stp_if.c
67702 --- linux-2.6.32.42/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67703 +++ linux-2.6.32.42/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
67704 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
67705 char *envp[] = { NULL };
67706
67707 if (br->stp_enabled == BR_USER_STP) {
67708 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
67709 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
67710 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
67711 br->dev->name, r);
67712
67713 diff -urNp linux-2.6.32.42/net/bridge/br_sysfs_if.c linux-2.6.32.42/net/bridge/br_sysfs_if.c
67714 --- linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
67715 +++ linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
67716 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
67717 return ret;
67718 }
67719
67720 -struct sysfs_ops brport_sysfs_ops = {
67721 +const struct sysfs_ops brport_sysfs_ops = {
67722 .show = brport_show,
67723 .store = brport_store,
67724 };
67725 diff -urNp linux-2.6.32.42/net/bridge/netfilter/ebtables.c linux-2.6.32.42/net/bridge/netfilter/ebtables.c
67726 --- linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
67727 +++ linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
67728 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
67729 unsigned int entries_size, nentries;
67730 char *entries;
67731
67732 + pax_track_stack();
67733 +
67734 if (cmd == EBT_SO_GET_ENTRIES) {
67735 entries_size = t->private->entries_size;
67736 nentries = t->private->nentries;
67737 diff -urNp linux-2.6.32.42/net/can/bcm.c linux-2.6.32.42/net/can/bcm.c
67738 --- linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
67739 +++ linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
67740 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
67741 struct bcm_sock *bo = bcm_sk(sk);
67742 struct bcm_op *op;
67743
67744 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67745 + seq_printf(m, ">>> socket %p", NULL);
67746 + seq_printf(m, " / sk %p", NULL);
67747 + seq_printf(m, " / bo %p", NULL);
67748 +#else
67749 seq_printf(m, ">>> socket %p", sk->sk_socket);
67750 seq_printf(m, " / sk %p", sk);
67751 seq_printf(m, " / bo %p", bo);
67752 +#endif
67753 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
67754 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
67755 seq_printf(m, " <<<\n");
67756 diff -urNp linux-2.6.32.42/net/core/dev.c linux-2.6.32.42/net/core/dev.c
67757 --- linux-2.6.32.42/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
67758 +++ linux-2.6.32.42/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
67759 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
67760 if (no_module && capable(CAP_NET_ADMIN))
67761 no_module = request_module("netdev-%s", name);
67762 if (no_module && capable(CAP_SYS_MODULE)) {
67763 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67764 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
67765 +#else
67766 if (!request_module("%s", name))
67767 pr_err("Loading kernel module for a network device "
67768 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67769 "instead\n", name);
67770 +#endif
67771 }
67772 }
67773 EXPORT_SYMBOL(dev_load);
67774 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
67775 }
67776 EXPORT_SYMBOL(netif_rx_ni);
67777
67778 -static void net_tx_action(struct softirq_action *h)
67779 +static void net_tx_action(void)
67780 {
67781 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67782
67783 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
67784 EXPORT_SYMBOL(netif_napi_del);
67785
67786
67787 -static void net_rx_action(struct softirq_action *h)
67788 +static void net_rx_action(void)
67789 {
67790 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
67791 unsigned long time_limit = jiffies + 2;
67792 diff -urNp linux-2.6.32.42/net/core/flow.c linux-2.6.32.42/net/core/flow.c
67793 --- linux-2.6.32.42/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
67794 +++ linux-2.6.32.42/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
67795 @@ -35,11 +35,11 @@ struct flow_cache_entry {
67796 atomic_t *object_ref;
67797 };
67798
67799 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67800 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67801
67802 static u32 flow_hash_shift;
67803 #define flow_hash_size (1 << flow_hash_shift)
67804 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
67805 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
67806
67807 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
67808
67809 @@ -52,7 +52,7 @@ struct flow_percpu_info {
67810 u32 hash_rnd;
67811 int count;
67812 };
67813 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
67814 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
67815
67816 #define flow_hash_rnd_recalc(cpu) \
67817 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
67818 @@ -69,7 +69,7 @@ struct flow_flush_info {
67819 atomic_t cpuleft;
67820 struct completion completion;
67821 };
67822 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
67823 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
67824
67825 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
67826
67827 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
67828 if (fle->family == family &&
67829 fle->dir == dir &&
67830 flow_key_compare(key, &fle->key) == 0) {
67831 - if (fle->genid == atomic_read(&flow_cache_genid)) {
67832 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
67833 void *ret = fle->object;
67834
67835 if (ret)
67836 @@ -228,7 +228,7 @@ nocache:
67837 err = resolver(net, key, family, dir, &obj, &obj_ref);
67838
67839 if (fle && !err) {
67840 - fle->genid = atomic_read(&flow_cache_genid);
67841 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67842
67843 if (fle->object)
67844 atomic_dec(fle->object_ref);
67845 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
67846
67847 fle = flow_table(cpu)[i];
67848 for (; fle; fle = fle->next) {
67849 - unsigned genid = atomic_read(&flow_cache_genid);
67850 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
67851
67852 if (!fle->object || fle->genid == genid)
67853 continue;
67854 diff -urNp linux-2.6.32.42/net/core/skbuff.c linux-2.6.32.42/net/core/skbuff.c
67855 --- linux-2.6.32.42/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
67856 +++ linux-2.6.32.42/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
67857 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
67858 struct sk_buff *frag_iter;
67859 struct sock *sk = skb->sk;
67860
67861 + pax_track_stack();
67862 +
67863 /*
67864 * __skb_splice_bits() only fails if the output has no room left,
67865 * so no point in going over the frag_list for the error case.
67866 diff -urNp linux-2.6.32.42/net/core/sock.c linux-2.6.32.42/net/core/sock.c
67867 --- linux-2.6.32.42/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
67868 +++ linux-2.6.32.42/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
67869 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
67870 break;
67871
67872 case SO_PEERCRED:
67873 + {
67874 + struct ucred peercred;
67875 if (len > sizeof(sk->sk_peercred))
67876 len = sizeof(sk->sk_peercred);
67877 - if (copy_to_user(optval, &sk->sk_peercred, len))
67878 + peercred = sk->sk_peercred;
67879 + if (copy_to_user(optval, &peercred, len))
67880 return -EFAULT;
67881 goto lenout;
67882 + }
67883
67884 case SO_PEERNAME:
67885 {
67886 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
67887 */
67888 smp_wmb();
67889 atomic_set(&sk->sk_refcnt, 1);
67890 - atomic_set(&sk->sk_drops, 0);
67891 + atomic_set_unchecked(&sk->sk_drops, 0);
67892 }
67893 EXPORT_SYMBOL(sock_init_data);
67894
67895 diff -urNp linux-2.6.32.42/net/decnet/sysctl_net_decnet.c linux-2.6.32.42/net/decnet/sysctl_net_decnet.c
67896 --- linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
67897 +++ linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
67898 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
67899
67900 if (len > *lenp) len = *lenp;
67901
67902 - if (copy_to_user(buffer, addr, len))
67903 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67904 return -EFAULT;
67905
67906 *lenp = len;
67907 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67908
67909 if (len > *lenp) len = *lenp;
67910
67911 - if (copy_to_user(buffer, devname, len))
67912 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67913 return -EFAULT;
67914
67915 *lenp = len;
67916 diff -urNp linux-2.6.32.42/net/econet/Kconfig linux-2.6.32.42/net/econet/Kconfig
67917 --- linux-2.6.32.42/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67918 +++ linux-2.6.32.42/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67919 @@ -4,7 +4,7 @@
67920
67921 config ECONET
67922 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67923 - depends on EXPERIMENTAL && INET
67924 + depends on EXPERIMENTAL && INET && BROKEN
67925 ---help---
67926 Econet is a fairly old and slow networking protocol mainly used by
67927 Acorn computers to access file and print servers. It uses native
67928 diff -urNp linux-2.6.32.42/net/ieee802154/dgram.c linux-2.6.32.42/net/ieee802154/dgram.c
67929 --- linux-2.6.32.42/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67930 +++ linux-2.6.32.42/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67931 @@ -318,7 +318,7 @@ out:
67932 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67933 {
67934 if (sock_queue_rcv_skb(sk, skb) < 0) {
67935 - atomic_inc(&sk->sk_drops);
67936 + atomic_inc_unchecked(&sk->sk_drops);
67937 kfree_skb(skb);
67938 return NET_RX_DROP;
67939 }
67940 diff -urNp linux-2.6.32.42/net/ieee802154/raw.c linux-2.6.32.42/net/ieee802154/raw.c
67941 --- linux-2.6.32.42/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67942 +++ linux-2.6.32.42/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67943 @@ -206,7 +206,7 @@ out:
67944 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67945 {
67946 if (sock_queue_rcv_skb(sk, skb) < 0) {
67947 - atomic_inc(&sk->sk_drops);
67948 + atomic_inc_unchecked(&sk->sk_drops);
67949 kfree_skb(skb);
67950 return NET_RX_DROP;
67951 }
67952 diff -urNp linux-2.6.32.42/net/ipv4/inet_diag.c linux-2.6.32.42/net/ipv4/inet_diag.c
67953 --- linux-2.6.32.42/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67954 +++ linux-2.6.32.42/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
67955 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67956 r->idiag_retrans = 0;
67957
67958 r->id.idiag_if = sk->sk_bound_dev_if;
67959 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67960 + r->id.idiag_cookie[0] = 0;
67961 + r->id.idiag_cookie[1] = 0;
67962 +#else
67963 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67964 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67965 +#endif
67966
67967 r->id.idiag_sport = inet->sport;
67968 r->id.idiag_dport = inet->dport;
67969 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67970 r->idiag_family = tw->tw_family;
67971 r->idiag_retrans = 0;
67972 r->id.idiag_if = tw->tw_bound_dev_if;
67973 +
67974 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67975 + r->id.idiag_cookie[0] = 0;
67976 + r->id.idiag_cookie[1] = 0;
67977 +#else
67978 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67979 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67980 +#endif
67981 +
67982 r->id.idiag_sport = tw->tw_sport;
67983 r->id.idiag_dport = tw->tw_dport;
67984 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67985 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67986 if (sk == NULL)
67987 goto unlock;
67988
67989 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67990 err = -ESTALE;
67991 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67992 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67993 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67994 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67995 goto out;
67996 +#endif
67997
67998 err = -ENOMEM;
67999 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68000 @@ -436,7 +450,7 @@ static int valid_cc(const void *bc, int
68001 return 0;
68002 if (cc == len)
68003 return 1;
68004 - if (op->yes < 4)
68005 + if (op->yes < 4 || op->yes & 3)
68006 return 0;
68007 len -= op->yes;
68008 bc += op->yes;
68009 @@ -446,11 +460,11 @@ static int valid_cc(const void *bc, int
68010
68011 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
68012 {
68013 - const unsigned char *bc = bytecode;
68014 + const void *bc = bytecode;
68015 int len = bytecode_len;
68016
68017 while (len > 0) {
68018 - struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
68019 + const struct inet_diag_bc_op *op = bc;
68020
68021 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
68022 switch (op->code) {
68023 @@ -461,22 +475,20 @@ static int inet_diag_bc_audit(const void
68024 case INET_DIAG_BC_S_LE:
68025 case INET_DIAG_BC_D_GE:
68026 case INET_DIAG_BC_D_LE:
68027 - if (op->yes < 4 || op->yes > len + 4)
68028 - return -EINVAL;
68029 case INET_DIAG_BC_JMP:
68030 - if (op->no < 4 || op->no > len + 4)
68031 + if (op->no < 4 || op->no > len + 4 || op->no & 3)
68032 return -EINVAL;
68033 if (op->no < len &&
68034 !valid_cc(bytecode, bytecode_len, len - op->no))
68035 return -EINVAL;
68036 break;
68037 case INET_DIAG_BC_NOP:
68038 - if (op->yes < 4 || op->yes > len + 4)
68039 - return -EINVAL;
68040 break;
68041 default:
68042 return -EINVAL;
68043 }
68044 + if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
68045 + return -EINVAL;
68046 bc += op->yes;
68047 len -= op->yes;
68048 }
68049 @@ -581,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
68050 r->idiag_retrans = req->retrans;
68051
68052 r->id.idiag_if = sk->sk_bound_dev_if;
68053 +
68054 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68055 + r->id.idiag_cookie[0] = 0;
68056 + r->id.idiag_cookie[1] = 0;
68057 +#else
68058 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68059 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68060 +#endif
68061
68062 tmo = req->expires - jiffies;
68063 if (tmo < 0)
68064 diff -urNp linux-2.6.32.42/net/ipv4/inet_hashtables.c linux-2.6.32.42/net/ipv4/inet_hashtables.c
68065 --- linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68066 +++ linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
68067 @@ -18,11 +18,14 @@
68068 #include <linux/sched.h>
68069 #include <linux/slab.h>
68070 #include <linux/wait.h>
68071 +#include <linux/security.h>
68072
68073 #include <net/inet_connection_sock.h>
68074 #include <net/inet_hashtables.h>
68075 #include <net/ip.h>
68076
68077 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68078 +
68079 /*
68080 * Allocate and initialize a new local port bind bucket.
68081 * The bindhash mutex for snum's hash chain must be held here.
68082 @@ -490,6 +493,8 @@ ok:
68083 }
68084 spin_unlock(&head->lock);
68085
68086 + gr_update_task_in_ip_table(current, inet_sk(sk));
68087 +
68088 if (tw) {
68089 inet_twsk_deschedule(tw, death_row);
68090 inet_twsk_put(tw);
68091 diff -urNp linux-2.6.32.42/net/ipv4/inetpeer.c linux-2.6.32.42/net/ipv4/inetpeer.c
68092 --- linux-2.6.32.42/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
68093 +++ linux-2.6.32.42/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
68094 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
68095 struct inet_peer *p, *n;
68096 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
68097
68098 + pax_track_stack();
68099 +
68100 /* Look up for the address quickly. */
68101 read_lock_bh(&peer_pool_lock);
68102 p = lookup(daddr, NULL);
68103 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
68104 return NULL;
68105 n->v4daddr = daddr;
68106 atomic_set(&n->refcnt, 1);
68107 - atomic_set(&n->rid, 0);
68108 + atomic_set_unchecked(&n->rid, 0);
68109 n->ip_id_count = secure_ip_id(daddr);
68110 n->tcp_ts_stamp = 0;
68111
68112 diff -urNp linux-2.6.32.42/net/ipv4/ip_fragment.c linux-2.6.32.42/net/ipv4/ip_fragment.c
68113 --- linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
68114 +++ linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
68115 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
68116 return 0;
68117
68118 start = qp->rid;
68119 - end = atomic_inc_return(&peer->rid);
68120 + end = atomic_inc_return_unchecked(&peer->rid);
68121 qp->rid = end;
68122
68123 rc = qp->q.fragments && (end - start) > max;
68124 diff -urNp linux-2.6.32.42/net/ipv4/ip_sockglue.c linux-2.6.32.42/net/ipv4/ip_sockglue.c
68125 --- linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68126 +++ linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68127 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
68128 int val;
68129 int len;
68130
68131 + pax_track_stack();
68132 +
68133 if (level != SOL_IP)
68134 return -EOPNOTSUPP;
68135
68136 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c
68137 --- linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
68138 +++ linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
68139 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
68140 private = &tmp;
68141 }
68142 #endif
68143 + memset(&info, 0, sizeof(info));
68144 info.valid_hooks = t->valid_hooks;
68145 memcpy(info.hook_entry, private->hook_entry,
68146 sizeof(info.hook_entry));
68147 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c
68148 --- linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
68149 +++ linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
68150 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
68151 private = &tmp;
68152 }
68153 #endif
68154 + memset(&info, 0, sizeof(info));
68155 info.valid_hooks = t->valid_hooks;
68156 memcpy(info.hook_entry, private->hook_entry,
68157 sizeof(info.hook_entry));
68158 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c
68159 --- linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
68160 +++ linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
68161 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
68162
68163 *len = 0;
68164
68165 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68166 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68167 if (*octets == NULL) {
68168 if (net_ratelimit())
68169 printk("OOM in bsalg (%d)\n", __LINE__);
68170 diff -urNp linux-2.6.32.42/net/ipv4/raw.c linux-2.6.32.42/net/ipv4/raw.c
68171 --- linux-2.6.32.42/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
68172 +++ linux-2.6.32.42/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
68173 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
68174 /* Charge it to the socket. */
68175
68176 if (sock_queue_rcv_skb(sk, skb) < 0) {
68177 - atomic_inc(&sk->sk_drops);
68178 + atomic_inc_unchecked(&sk->sk_drops);
68179 kfree_skb(skb);
68180 return NET_RX_DROP;
68181 }
68182 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
68183 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68184 {
68185 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68186 - atomic_inc(&sk->sk_drops);
68187 + atomic_inc_unchecked(&sk->sk_drops);
68188 kfree_skb(skb);
68189 return NET_RX_DROP;
68190 }
68191 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
68192
68193 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68194 {
68195 + struct icmp_filter filter;
68196 +
68197 + if (optlen < 0)
68198 + return -EINVAL;
68199 if (optlen > sizeof(struct icmp_filter))
68200 optlen = sizeof(struct icmp_filter);
68201 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68202 + if (copy_from_user(&filter, optval, optlen))
68203 return -EFAULT;
68204 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
68205 +
68206 return 0;
68207 }
68208
68209 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68210 {
68211 + struct icmp_filter filter;
68212 int len, ret = -EFAULT;
68213
68214 if (get_user(len, optlen))
68215 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
68216 if (len > sizeof(struct icmp_filter))
68217 len = sizeof(struct icmp_filter);
68218 ret = -EFAULT;
68219 + memcpy(&filter, &raw_sk(sk)->filter, len);
68220 if (put_user(len, optlen) ||
68221 - copy_to_user(optval, &raw_sk(sk)->filter, len))
68222 + copy_to_user(optval, &filter, len))
68223 goto out;
68224 ret = 0;
68225 out: return ret;
68226 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
68227 sk_wmem_alloc_get(sp),
68228 sk_rmem_alloc_get(sp),
68229 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68230 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68231 + atomic_read(&sp->sk_refcnt),
68232 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68233 + NULL,
68234 +#else
68235 + sp,
68236 +#endif
68237 + atomic_read_unchecked(&sp->sk_drops));
68238 }
68239
68240 static int raw_seq_show(struct seq_file *seq, void *v)
68241 diff -urNp linux-2.6.32.42/net/ipv4/route.c linux-2.6.32.42/net/ipv4/route.c
68242 --- linux-2.6.32.42/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
68243 +++ linux-2.6.32.42/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
68244 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
68245
68246 static inline int rt_genid(struct net *net)
68247 {
68248 - return atomic_read(&net->ipv4.rt_genid);
68249 + return atomic_read_unchecked(&net->ipv4.rt_genid);
68250 }
68251
68252 #ifdef CONFIG_PROC_FS
68253 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
68254 unsigned char shuffle;
68255
68256 get_random_bytes(&shuffle, sizeof(shuffle));
68257 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68258 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68259 }
68260
68261 /*
68262 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
68263
68264 static __net_init int rt_secret_timer_init(struct net *net)
68265 {
68266 - atomic_set(&net->ipv4.rt_genid,
68267 + atomic_set_unchecked(&net->ipv4.rt_genid,
68268 (int) ((num_physpages ^ (num_physpages>>8)) ^
68269 (jiffies ^ (jiffies >> 7))));
68270
68271 diff -urNp linux-2.6.32.42/net/ipv4/tcp.c linux-2.6.32.42/net/ipv4/tcp.c
68272 --- linux-2.6.32.42/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
68273 +++ linux-2.6.32.42/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
68274 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
68275 int val;
68276 int err = 0;
68277
68278 + pax_track_stack();
68279 +
68280 /* This is a string value all the others are int's */
68281 if (optname == TCP_CONGESTION) {
68282 char name[TCP_CA_NAME_MAX];
68283 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
68284 struct tcp_sock *tp = tcp_sk(sk);
68285 int val, len;
68286
68287 + pax_track_stack();
68288 +
68289 if (get_user(len, optlen))
68290 return -EFAULT;
68291
68292 diff -urNp linux-2.6.32.42/net/ipv4/tcp_ipv4.c linux-2.6.32.42/net/ipv4/tcp_ipv4.c
68293 --- linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
68294 +++ linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
68295 @@ -84,6 +84,9 @@
68296 int sysctl_tcp_tw_reuse __read_mostly;
68297 int sysctl_tcp_low_latency __read_mostly;
68298
68299 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68300 +extern int grsec_enable_blackhole;
68301 +#endif
68302
68303 #ifdef CONFIG_TCP_MD5SIG
68304 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68305 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68306 return 0;
68307
68308 reset:
68309 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68310 + if (!grsec_enable_blackhole)
68311 +#endif
68312 tcp_v4_send_reset(rsk, skb);
68313 discard:
68314 kfree_skb(skb);
68315 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
68316 TCP_SKB_CB(skb)->sacked = 0;
68317
68318 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68319 - if (!sk)
68320 + if (!sk) {
68321 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68322 + ret = 1;
68323 +#endif
68324 goto no_tcp_socket;
68325 + }
68326
68327 process:
68328 - if (sk->sk_state == TCP_TIME_WAIT)
68329 + if (sk->sk_state == TCP_TIME_WAIT) {
68330 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68331 + ret = 2;
68332 +#endif
68333 goto do_time_wait;
68334 + }
68335
68336 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
68337 goto discard_and_relse;
68338 @@ -1650,6 +1664,10 @@ no_tcp_socket:
68339 bad_packet:
68340 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68341 } else {
68342 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68343 + if (!grsec_enable_blackhole || (ret == 1 &&
68344 + (skb->dev->flags & IFF_LOOPBACK)))
68345 +#endif
68346 tcp_v4_send_reset(NULL, skb);
68347 }
68348
68349 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
68350 0, /* non standard timer */
68351 0, /* open_requests have no inode */
68352 atomic_read(&sk->sk_refcnt),
68353 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68354 + NULL,
68355 +#else
68356 req,
68357 +#endif
68358 len);
68359 }
68360
68361 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
68362 sock_i_uid(sk),
68363 icsk->icsk_probes_out,
68364 sock_i_ino(sk),
68365 - atomic_read(&sk->sk_refcnt), sk,
68366 + atomic_read(&sk->sk_refcnt),
68367 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68368 + NULL,
68369 +#else
68370 + sk,
68371 +#endif
68372 jiffies_to_clock_t(icsk->icsk_rto),
68373 jiffies_to_clock_t(icsk->icsk_ack.ato),
68374 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68375 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
68376 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
68377 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68378 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68379 - atomic_read(&tw->tw_refcnt), tw, len);
68380 + atomic_read(&tw->tw_refcnt),
68381 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68382 + NULL,
68383 +#else
68384 + tw,
68385 +#endif
68386 + len);
68387 }
68388
68389 #define TMPSZ 150
68390 diff -urNp linux-2.6.32.42/net/ipv4/tcp_minisocks.c linux-2.6.32.42/net/ipv4/tcp_minisocks.c
68391 --- linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
68392 +++ linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
68393 @@ -26,6 +26,10 @@
68394 #include <net/inet_common.h>
68395 #include <net/xfrm.h>
68396
68397 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68398 +extern int grsec_enable_blackhole;
68399 +#endif
68400 +
68401 #ifdef CONFIG_SYSCTL
68402 #define SYNC_INIT 0 /* let the user enable it */
68403 #else
68404 @@ -672,6 +676,10 @@ listen_overflow:
68405
68406 embryonic_reset:
68407 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68408 +
68409 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68410 + if (!grsec_enable_blackhole)
68411 +#endif
68412 if (!(flg & TCP_FLAG_RST))
68413 req->rsk_ops->send_reset(sk, skb);
68414
68415 diff -urNp linux-2.6.32.42/net/ipv4/tcp_output.c linux-2.6.32.42/net/ipv4/tcp_output.c
68416 --- linux-2.6.32.42/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
68417 +++ linux-2.6.32.42/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
68418 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
68419 __u8 *md5_hash_location;
68420 int mss;
68421
68422 + pax_track_stack();
68423 +
68424 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
68425 if (skb == NULL)
68426 return NULL;
68427 diff -urNp linux-2.6.32.42/net/ipv4/tcp_probe.c linux-2.6.32.42/net/ipv4/tcp_probe.c
68428 --- linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
68429 +++ linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
68430 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
68431 if (cnt + width >= len)
68432 break;
68433
68434 - if (copy_to_user(buf + cnt, tbuf, width))
68435 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68436 return -EFAULT;
68437 cnt += width;
68438 }
68439 diff -urNp linux-2.6.32.42/net/ipv4/tcp_timer.c linux-2.6.32.42/net/ipv4/tcp_timer.c
68440 --- linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
68441 +++ linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
68442 @@ -21,6 +21,10 @@
68443 #include <linux/module.h>
68444 #include <net/tcp.h>
68445
68446 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68447 +extern int grsec_lastack_retries;
68448 +#endif
68449 +
68450 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68451 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68452 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68453 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
68454 }
68455 }
68456
68457 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68458 + if ((sk->sk_state == TCP_LAST_ACK) &&
68459 + (grsec_lastack_retries > 0) &&
68460 + (grsec_lastack_retries < retry_until))
68461 + retry_until = grsec_lastack_retries;
68462 +#endif
68463 +
68464 if (retransmits_timed_out(sk, retry_until)) {
68465 /* Has it gone just too far? */
68466 tcp_write_err(sk);
68467 diff -urNp linux-2.6.32.42/net/ipv4/udp.c linux-2.6.32.42/net/ipv4/udp.c
68468 --- linux-2.6.32.42/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
68469 +++ linux-2.6.32.42/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
68470 @@ -86,6 +86,7 @@
68471 #include <linux/types.h>
68472 #include <linux/fcntl.h>
68473 #include <linux/module.h>
68474 +#include <linux/security.h>
68475 #include <linux/socket.h>
68476 #include <linux/sockios.h>
68477 #include <linux/igmp.h>
68478 @@ -106,6 +107,10 @@
68479 #include <net/xfrm.h>
68480 #include "udp_impl.h"
68481
68482 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68483 +extern int grsec_enable_blackhole;
68484 +#endif
68485 +
68486 struct udp_table udp_table;
68487 EXPORT_SYMBOL(udp_table);
68488
68489 @@ -371,6 +376,9 @@ found:
68490 return s;
68491 }
68492
68493 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68494 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68495 +
68496 /*
68497 * This routine is called by the ICMP module when it gets some
68498 * sort of error condition. If err < 0 then the socket should
68499 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68500 dport = usin->sin_port;
68501 if (dport == 0)
68502 return -EINVAL;
68503 +
68504 + err = gr_search_udp_sendmsg(sk, usin);
68505 + if (err)
68506 + return err;
68507 } else {
68508 if (sk->sk_state != TCP_ESTABLISHED)
68509 return -EDESTADDRREQ;
68510 +
68511 + err = gr_search_udp_sendmsg(sk, NULL);
68512 + if (err)
68513 + return err;
68514 +
68515 daddr = inet->daddr;
68516 dport = inet->dport;
68517 /* Open fast path for connected socket.
68518 @@ -945,6 +962,10 @@ try_again:
68519 if (!skb)
68520 goto out;
68521
68522 + err = gr_search_udp_recvmsg(sk, skb);
68523 + if (err)
68524 + goto out_free;
68525 +
68526 ulen = skb->len - sizeof(struct udphdr);
68527 copied = len;
68528 if (copied > ulen)
68529 @@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
68530 if (rc == -ENOMEM) {
68531 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68532 is_udplite);
68533 - atomic_inc(&sk->sk_drops);
68534 + atomic_inc_unchecked(&sk->sk_drops);
68535 }
68536 goto drop;
68537 }
68538 @@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68539 goto csum_error;
68540
68541 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68542 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68543 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68544 +#endif
68545 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68546
68547 /*
68548 @@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
68549 sk_wmem_alloc_get(sp),
68550 sk_rmem_alloc_get(sp),
68551 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68552 - atomic_read(&sp->sk_refcnt), sp,
68553 - atomic_read(&sp->sk_drops), len);
68554 + atomic_read(&sp->sk_refcnt),
68555 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68556 + NULL,
68557 +#else
68558 + sp,
68559 +#endif
68560 + atomic_read_unchecked(&sp->sk_drops), len);
68561 }
68562
68563 int udp4_seq_show(struct seq_file *seq, void *v)
68564 diff -urNp linux-2.6.32.42/net/ipv6/inet6_connection_sock.c linux-2.6.32.42/net/ipv6/inet6_connection_sock.c
68565 --- linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
68566 +++ linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
68567 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
68568 #ifdef CONFIG_XFRM
68569 {
68570 struct rt6_info *rt = (struct rt6_info *)dst;
68571 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68572 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68573 }
68574 #endif
68575 }
68576 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
68577 #ifdef CONFIG_XFRM
68578 if (dst) {
68579 struct rt6_info *rt = (struct rt6_info *)dst;
68580 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68581 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68582 sk->sk_dst_cache = NULL;
68583 dst_release(dst);
68584 dst = NULL;
68585 diff -urNp linux-2.6.32.42/net/ipv6/inet6_hashtables.c linux-2.6.32.42/net/ipv6/inet6_hashtables.c
68586 --- linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68587 +++ linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
68588 @@ -118,7 +118,7 @@ out:
68589 }
68590 EXPORT_SYMBOL(__inet6_lookup_established);
68591
68592 -static int inline compute_score(struct sock *sk, struct net *net,
68593 +static inline int compute_score(struct sock *sk, struct net *net,
68594 const unsigned short hnum,
68595 const struct in6_addr *daddr,
68596 const int dif)
68597 diff -urNp linux-2.6.32.42/net/ipv6/ipv6_sockglue.c linux-2.6.32.42/net/ipv6/ipv6_sockglue.c
68598 --- linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68599 +++ linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68600 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
68601 int val, valbool;
68602 int retv = -ENOPROTOOPT;
68603
68604 + pax_track_stack();
68605 +
68606 if (optval == NULL)
68607 val=0;
68608 else {
68609 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
68610 int len;
68611 int val;
68612
68613 + pax_track_stack();
68614 +
68615 if (ip6_mroute_opt(optname))
68616 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68617
68618 diff -urNp linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c
68619 --- linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
68620 +++ linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
68621 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
68622 private = &tmp;
68623 }
68624 #endif
68625 + memset(&info, 0, sizeof(info));
68626 info.valid_hooks = t->valid_hooks;
68627 memcpy(info.hook_entry, private->hook_entry,
68628 sizeof(info.hook_entry));
68629 diff -urNp linux-2.6.32.42/net/ipv6/raw.c linux-2.6.32.42/net/ipv6/raw.c
68630 --- linux-2.6.32.42/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
68631 +++ linux-2.6.32.42/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
68632 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
68633 {
68634 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
68635 skb_checksum_complete(skb)) {
68636 - atomic_inc(&sk->sk_drops);
68637 + atomic_inc_unchecked(&sk->sk_drops);
68638 kfree_skb(skb);
68639 return NET_RX_DROP;
68640 }
68641
68642 /* Charge it to the socket. */
68643 if (sock_queue_rcv_skb(sk,skb)<0) {
68644 - atomic_inc(&sk->sk_drops);
68645 + atomic_inc_unchecked(&sk->sk_drops);
68646 kfree_skb(skb);
68647 return NET_RX_DROP;
68648 }
68649 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68650 struct raw6_sock *rp = raw6_sk(sk);
68651
68652 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68653 - atomic_inc(&sk->sk_drops);
68654 + atomic_inc_unchecked(&sk->sk_drops);
68655 kfree_skb(skb);
68656 return NET_RX_DROP;
68657 }
68658 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68659
68660 if (inet->hdrincl) {
68661 if (skb_checksum_complete(skb)) {
68662 - atomic_inc(&sk->sk_drops);
68663 + atomic_inc_unchecked(&sk->sk_drops);
68664 kfree_skb(skb);
68665 return NET_RX_DROP;
68666 }
68667 @@ -518,7 +518,7 @@ csum_copy_err:
68668 as some normal condition.
68669 */
68670 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
68671 - atomic_inc(&sk->sk_drops);
68672 + atomic_inc_unchecked(&sk->sk_drops);
68673 goto out;
68674 }
68675
68676 @@ -600,7 +600,7 @@ out:
68677 return err;
68678 }
68679
68680 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68681 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68682 struct flowi *fl, struct rt6_info *rt,
68683 unsigned int flags)
68684 {
68685 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
68686 u16 proto;
68687 int err;
68688
68689 + pax_track_stack();
68690 +
68691 /* Rough check on arithmetic overflow,
68692 better check is made in ip6_append_data().
68693 */
68694 @@ -916,12 +918,17 @@ do_confirm:
68695 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68696 char __user *optval, int optlen)
68697 {
68698 + struct icmp6_filter filter;
68699 +
68700 switch (optname) {
68701 case ICMPV6_FILTER:
68702 + if (optlen < 0)
68703 + return -EINVAL;
68704 if (optlen > sizeof(struct icmp6_filter))
68705 optlen = sizeof(struct icmp6_filter);
68706 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68707 + if (copy_from_user(&filter, optval, optlen))
68708 return -EFAULT;
68709 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68710 return 0;
68711 default:
68712 return -ENOPROTOOPT;
68713 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
68714 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
68715 char __user *optval, int __user *optlen)
68716 {
68717 + struct icmp6_filter filter;
68718 int len;
68719
68720 switch (optname) {
68721 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
68722 len = sizeof(struct icmp6_filter);
68723 if (put_user(len, optlen))
68724 return -EFAULT;
68725 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68726 + memcpy(&filter, &raw6_sk(sk)->filter, len);
68727 + if (copy_to_user(optval, &filter, len))
68728 return -EFAULT;
68729 return 0;
68730 default:
68731 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
68732 0, 0L, 0,
68733 sock_i_uid(sp), 0,
68734 sock_i_ino(sp),
68735 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68736 + atomic_read(&sp->sk_refcnt),
68737 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68738 + NULL,
68739 +#else
68740 + sp,
68741 +#endif
68742 + atomic_read_unchecked(&sp->sk_drops));
68743 }
68744
68745 static int raw6_seq_show(struct seq_file *seq, void *v)
68746 diff -urNp linux-2.6.32.42/net/ipv6/tcp_ipv6.c linux-2.6.32.42/net/ipv6/tcp_ipv6.c
68747 --- linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
68748 +++ linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
68749 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68750 }
68751 #endif
68752
68753 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68754 +extern int grsec_enable_blackhole;
68755 +#endif
68756 +
68757 static void tcp_v6_hash(struct sock *sk)
68758 {
68759 if (sk->sk_state != TCP_CLOSE) {
68760 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68761 return 0;
68762
68763 reset:
68764 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68765 + if (!grsec_enable_blackhole)
68766 +#endif
68767 tcp_v6_send_reset(sk, skb);
68768 discard:
68769 if (opt_skb)
68770 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68771 TCP_SKB_CB(skb)->sacked = 0;
68772
68773 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68774 - if (!sk)
68775 + if (!sk) {
68776 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68777 + ret = 1;
68778 +#endif
68779 goto no_tcp_socket;
68780 + }
68781
68782 process:
68783 - if (sk->sk_state == TCP_TIME_WAIT)
68784 + if (sk->sk_state == TCP_TIME_WAIT) {
68785 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68786 + ret = 2;
68787 +#endif
68788 goto do_time_wait;
68789 + }
68790
68791 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
68792 goto discard_and_relse;
68793 @@ -1700,6 +1715,10 @@ no_tcp_socket:
68794 bad_packet:
68795 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68796 } else {
68797 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68798 + if (!grsec_enable_blackhole || (ret == 1 &&
68799 + (skb->dev->flags & IFF_LOOPBACK)))
68800 +#endif
68801 tcp_v6_send_reset(NULL, skb);
68802 }
68803
68804 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
68805 uid,
68806 0, /* non standard timer */
68807 0, /* open_requests have no inode */
68808 - 0, req);
68809 + 0,
68810 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68811 + NULL
68812 +#else
68813 + req
68814 +#endif
68815 + );
68816 }
68817
68818 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68819 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
68820 sock_i_uid(sp),
68821 icsk->icsk_probes_out,
68822 sock_i_ino(sp),
68823 - atomic_read(&sp->sk_refcnt), sp,
68824 + atomic_read(&sp->sk_refcnt),
68825 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68826 + NULL,
68827 +#else
68828 + sp,
68829 +#endif
68830 jiffies_to_clock_t(icsk->icsk_rto),
68831 jiffies_to_clock_t(icsk->icsk_ack.ato),
68832 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68833 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
68834 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68835 tw->tw_substate, 0, 0,
68836 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68837 - atomic_read(&tw->tw_refcnt), tw);
68838 + atomic_read(&tw->tw_refcnt),
68839 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68840 + NULL
68841 +#else
68842 + tw
68843 +#endif
68844 + );
68845 }
68846
68847 static int tcp6_seq_show(struct seq_file *seq, void *v)
68848 diff -urNp linux-2.6.32.42/net/ipv6/udp.c linux-2.6.32.42/net/ipv6/udp.c
68849 --- linux-2.6.32.42/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
68850 +++ linux-2.6.32.42/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
68851 @@ -49,6 +49,10 @@
68852 #include <linux/seq_file.h>
68853 #include "udp_impl.h"
68854
68855 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68856 +extern int grsec_enable_blackhole;
68857 +#endif
68858 +
68859 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68860 {
68861 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68862 @@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68863 if (rc == -ENOMEM) {
68864 UDP6_INC_STATS_BH(sock_net(sk),
68865 UDP_MIB_RCVBUFERRORS, is_udplite);
68866 - atomic_inc(&sk->sk_drops);
68867 + atomic_inc_unchecked(&sk->sk_drops);
68868 }
68869 goto drop;
68870 }
68871 @@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68872 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68873 proto == IPPROTO_UDPLITE);
68874
68875 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68876 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68877 +#endif
68878 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
68879
68880 kfree_skb(skb);
68881 @@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
68882 0, 0L, 0,
68883 sock_i_uid(sp), 0,
68884 sock_i_ino(sp),
68885 - atomic_read(&sp->sk_refcnt), sp,
68886 - atomic_read(&sp->sk_drops));
68887 + atomic_read(&sp->sk_refcnt),
68888 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68889 + NULL,
68890 +#else
68891 + sp,
68892 +#endif
68893 + atomic_read_unchecked(&sp->sk_drops));
68894 }
68895
68896 int udp6_seq_show(struct seq_file *seq, void *v)
68897 diff -urNp linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c
68898 --- linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
68899 +++ linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
68900 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
68901 add_wait_queue(&self->open_wait, &wait);
68902
68903 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68904 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68905 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68906
68907 /* As far as I can see, we protect open_count - Jean II */
68908 spin_lock_irqsave(&self->spinlock, flags);
68909 if (!tty_hung_up_p(filp)) {
68910 extra_count = 1;
68911 - self->open_count--;
68912 + local_dec(&self->open_count);
68913 }
68914 spin_unlock_irqrestore(&self->spinlock, flags);
68915 - self->blocked_open++;
68916 + local_inc(&self->blocked_open);
68917
68918 while (1) {
68919 if (tty->termios->c_cflag & CBAUD) {
68920 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
68921 }
68922
68923 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68924 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68925 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68926
68927 schedule();
68928 }
68929 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
68930 if (extra_count) {
68931 /* ++ is not atomic, so this should be protected - Jean II */
68932 spin_lock_irqsave(&self->spinlock, flags);
68933 - self->open_count++;
68934 + local_inc(&self->open_count);
68935 spin_unlock_irqrestore(&self->spinlock, flags);
68936 }
68937 - self->blocked_open--;
68938 + local_dec(&self->blocked_open);
68939
68940 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68941 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68942 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68943
68944 if (!retval)
68945 self->flags |= ASYNC_NORMAL_ACTIVE;
68946 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
68947 }
68948 /* ++ is not atomic, so this should be protected - Jean II */
68949 spin_lock_irqsave(&self->spinlock, flags);
68950 - self->open_count++;
68951 + local_inc(&self->open_count);
68952
68953 tty->driver_data = self;
68954 self->tty = tty;
68955 spin_unlock_irqrestore(&self->spinlock, flags);
68956
68957 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68958 - self->line, self->open_count);
68959 + self->line, local_read(&self->open_count));
68960
68961 /* Not really used by us, but lets do it anyway */
68962 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68963 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68964 return;
68965 }
68966
68967 - if ((tty->count == 1) && (self->open_count != 1)) {
68968 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68969 /*
68970 * Uh, oh. tty->count is 1, which means that the tty
68971 * structure will be freed. state->count should always
68972 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68973 */
68974 IRDA_DEBUG(0, "%s(), bad serial port count; "
68975 "tty->count is 1, state->count is %d\n", __func__ ,
68976 - self->open_count);
68977 - self->open_count = 1;
68978 + local_read(&self->open_count));
68979 + local_set(&self->open_count, 1);
68980 }
68981
68982 - if (--self->open_count < 0) {
68983 + if (local_dec_return(&self->open_count) < 0) {
68984 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68985 - __func__, self->line, self->open_count);
68986 - self->open_count = 0;
68987 + __func__, self->line, local_read(&self->open_count));
68988 + local_set(&self->open_count, 0);
68989 }
68990 - if (self->open_count) {
68991 + if (local_read(&self->open_count)) {
68992 spin_unlock_irqrestore(&self->spinlock, flags);
68993
68994 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68995 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68996 tty->closing = 0;
68997 self->tty = NULL;
68998
68999 - if (self->blocked_open) {
69000 + if (local_read(&self->blocked_open)) {
69001 if (self->close_delay)
69002 schedule_timeout_interruptible(self->close_delay);
69003 wake_up_interruptible(&self->open_wait);
69004 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
69005 spin_lock_irqsave(&self->spinlock, flags);
69006 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69007 self->tty = NULL;
69008 - self->open_count = 0;
69009 + local_set(&self->open_count, 0);
69010 spin_unlock_irqrestore(&self->spinlock, flags);
69011
69012 wake_up_interruptible(&self->open_wait);
69013 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
69014 seq_putc(m, '\n');
69015
69016 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69017 - seq_printf(m, "Open count: %d\n", self->open_count);
69018 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69019 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69020 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69021
69022 diff -urNp linux-2.6.32.42/net/iucv/af_iucv.c linux-2.6.32.42/net/iucv/af_iucv.c
69023 --- linux-2.6.32.42/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
69024 +++ linux-2.6.32.42/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
69025 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
69026
69027 write_lock_bh(&iucv_sk_list.lock);
69028
69029 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69030 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69031 while (__iucv_get_sock_by_name(name)) {
69032 sprintf(name, "%08x",
69033 - atomic_inc_return(&iucv_sk_list.autobind_name));
69034 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69035 }
69036
69037 write_unlock_bh(&iucv_sk_list.lock);
69038 diff -urNp linux-2.6.32.42/net/key/af_key.c linux-2.6.32.42/net/key/af_key.c
69039 --- linux-2.6.32.42/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
69040 +++ linux-2.6.32.42/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
69041 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
69042 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69043 struct xfrm_kmaddress k;
69044
69045 + pax_track_stack();
69046 +
69047 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69048 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69049 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69050 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
69051 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
69052 else
69053 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
69054 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69055 + NULL,
69056 +#else
69057 s,
69058 +#endif
69059 atomic_read(&s->sk_refcnt),
69060 sk_rmem_alloc_get(s),
69061 sk_wmem_alloc_get(s),
69062 diff -urNp linux-2.6.32.42/net/mac80211/cfg.c linux-2.6.32.42/net/mac80211/cfg.c
69063 --- linux-2.6.32.42/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
69064 +++ linux-2.6.32.42/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
69065 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
69066 return err;
69067 }
69068
69069 -struct cfg80211_ops mac80211_config_ops = {
69070 +const struct cfg80211_ops mac80211_config_ops = {
69071 .add_virtual_intf = ieee80211_add_iface,
69072 .del_virtual_intf = ieee80211_del_iface,
69073 .change_virtual_intf = ieee80211_change_iface,
69074 diff -urNp linux-2.6.32.42/net/mac80211/cfg.h linux-2.6.32.42/net/mac80211/cfg.h
69075 --- linux-2.6.32.42/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
69076 +++ linux-2.6.32.42/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
69077 @@ -4,6 +4,6 @@
69078 #ifndef __CFG_H
69079 #define __CFG_H
69080
69081 -extern struct cfg80211_ops mac80211_config_ops;
69082 +extern const struct cfg80211_ops mac80211_config_ops;
69083
69084 #endif /* __CFG_H */
69085 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_key.c linux-2.6.32.42/net/mac80211/debugfs_key.c
69086 --- linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
69087 +++ linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
69088 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
69089 size_t count, loff_t *ppos)
69090 {
69091 struct ieee80211_key *key = file->private_data;
69092 - int i, res, bufsize = 2 * key->conf.keylen + 2;
69093 + int i, bufsize = 2 * key->conf.keylen + 2;
69094 char *buf = kmalloc(bufsize, GFP_KERNEL);
69095 char *p = buf;
69096 + ssize_t res;
69097 +
69098 + if (buf == NULL)
69099 + return -ENOMEM;
69100
69101 for (i = 0; i < key->conf.keylen; i++)
69102 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
69103 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_sta.c linux-2.6.32.42/net/mac80211/debugfs_sta.c
69104 --- linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
69105 +++ linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
69106 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
69107 int i;
69108 struct sta_info *sta = file->private_data;
69109
69110 + pax_track_stack();
69111 +
69112 spin_lock_bh(&sta->lock);
69113 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
69114 sta->ampdu_mlme.dialog_token_allocator + 1);
69115 diff -urNp linux-2.6.32.42/net/mac80211/ieee80211_i.h linux-2.6.32.42/net/mac80211/ieee80211_i.h
69116 --- linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
69117 +++ linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
69118 @@ -25,6 +25,7 @@
69119 #include <linux/etherdevice.h>
69120 #include <net/cfg80211.h>
69121 #include <net/mac80211.h>
69122 +#include <asm/local.h>
69123 #include "key.h"
69124 #include "sta_info.h"
69125
69126 @@ -635,7 +636,7 @@ struct ieee80211_local {
69127 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69128 spinlock_t queue_stop_reason_lock;
69129
69130 - int open_count;
69131 + local_t open_count;
69132 int monitors, cooked_mntrs;
69133 /* number of interfaces with corresponding FIF_ flags */
69134 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
69135 diff -urNp linux-2.6.32.42/net/mac80211/iface.c linux-2.6.32.42/net/mac80211/iface.c
69136 --- linux-2.6.32.42/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
69137 +++ linux-2.6.32.42/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
69138 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
69139 break;
69140 }
69141
69142 - if (local->open_count == 0) {
69143 + if (local_read(&local->open_count) == 0) {
69144 res = drv_start(local);
69145 if (res)
69146 goto err_del_bss;
69147 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
69148 * Validate the MAC address for this device.
69149 */
69150 if (!is_valid_ether_addr(dev->dev_addr)) {
69151 - if (!local->open_count)
69152 + if (!local_read(&local->open_count))
69153 drv_stop(local);
69154 return -EADDRNOTAVAIL;
69155 }
69156 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
69157
69158 hw_reconf_flags |= __ieee80211_recalc_idle(local);
69159
69160 - local->open_count++;
69161 + local_inc(&local->open_count);
69162 if (hw_reconf_flags) {
69163 ieee80211_hw_config(local, hw_reconf_flags);
69164 /*
69165 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
69166 err_del_interface:
69167 drv_remove_interface(local, &conf);
69168 err_stop:
69169 - if (!local->open_count)
69170 + if (!local_read(&local->open_count))
69171 drv_stop(local);
69172 err_del_bss:
69173 sdata->bss = NULL;
69174 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
69175 WARN_ON(!list_empty(&sdata->u.ap.vlans));
69176 }
69177
69178 - local->open_count--;
69179 + local_dec(&local->open_count);
69180
69181 switch (sdata->vif.type) {
69182 case NL80211_IFTYPE_AP_VLAN:
69183 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
69184
69185 ieee80211_recalc_ps(local, -1);
69186
69187 - if (local->open_count == 0) {
69188 + if (local_read(&local->open_count) == 0) {
69189 ieee80211_clear_tx_pending(local);
69190 ieee80211_stop_device(local);
69191
69192 diff -urNp linux-2.6.32.42/net/mac80211/main.c linux-2.6.32.42/net/mac80211/main.c
69193 --- linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
69194 +++ linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
69195 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
69196 local->hw.conf.power_level = power;
69197 }
69198
69199 - if (changed && local->open_count) {
69200 + if (changed && local_read(&local->open_count)) {
69201 ret = drv_config(local, changed);
69202 /*
69203 * Goal:
69204 diff -urNp linux-2.6.32.42/net/mac80211/mlme.c linux-2.6.32.42/net/mac80211/mlme.c
69205 --- linux-2.6.32.42/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
69206 +++ linux-2.6.32.42/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
69207 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
69208 bool have_higher_than_11mbit = false, newsta = false;
69209 u16 ap_ht_cap_flags;
69210
69211 + pax_track_stack();
69212 +
69213 /*
69214 * AssocResp and ReassocResp have identical structure, so process both
69215 * of them in this function.
69216 diff -urNp linux-2.6.32.42/net/mac80211/pm.c linux-2.6.32.42/net/mac80211/pm.c
69217 --- linux-2.6.32.42/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
69218 +++ linux-2.6.32.42/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
69219 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
69220 }
69221
69222 /* stop hardware - this must stop RX */
69223 - if (local->open_count)
69224 + if (local_read(&local->open_count))
69225 ieee80211_stop_device(local);
69226
69227 local->suspended = true;
69228 diff -urNp linux-2.6.32.42/net/mac80211/rate.c linux-2.6.32.42/net/mac80211/rate.c
69229 --- linux-2.6.32.42/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
69230 +++ linux-2.6.32.42/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
69231 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
69232 struct rate_control_ref *ref, *old;
69233
69234 ASSERT_RTNL();
69235 - if (local->open_count)
69236 + if (local_read(&local->open_count))
69237 return -EBUSY;
69238
69239 ref = rate_control_alloc(name, local);
69240 diff -urNp linux-2.6.32.42/net/mac80211/tx.c linux-2.6.32.42/net/mac80211/tx.c
69241 --- linux-2.6.32.42/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
69242 +++ linux-2.6.32.42/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
69243 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
69244 return cpu_to_le16(dur);
69245 }
69246
69247 -static int inline is_ieee80211_device(struct ieee80211_local *local,
69248 +static inline int is_ieee80211_device(struct ieee80211_local *local,
69249 struct net_device *dev)
69250 {
69251 return local == wdev_priv(dev->ieee80211_ptr);
69252 diff -urNp linux-2.6.32.42/net/mac80211/util.c linux-2.6.32.42/net/mac80211/util.c
69253 --- linux-2.6.32.42/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
69254 +++ linux-2.6.32.42/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
69255 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
69256 local->resuming = true;
69257
69258 /* restart hardware */
69259 - if (local->open_count) {
69260 + if (local_read(&local->open_count)) {
69261 /*
69262 * Upon resume hardware can sometimes be goofy due to
69263 * various platform / driver / bus issues, so restarting
69264 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c
69265 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
69266 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
69267 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
69268 .open = ip_vs_app_open,
69269 .read = seq_read,
69270 .llseek = seq_lseek,
69271 - .release = seq_release,
69272 + .release = seq_release_net,
69273 };
69274 #endif
69275
69276 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c
69277 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
69278 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
69279 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69280 /* if the connection is not template and is created
69281 * by sync, preserve the activity flag.
69282 */
69283 - cp->flags |= atomic_read(&dest->conn_flags) &
69284 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
69285 (~IP_VS_CONN_F_INACTIVE);
69286 else
69287 - cp->flags |= atomic_read(&dest->conn_flags);
69288 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
69289 cp->dest = dest;
69290
69291 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
69292 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
69293 atomic_set(&cp->refcnt, 1);
69294
69295 atomic_set(&cp->n_control, 0);
69296 - atomic_set(&cp->in_pkts, 0);
69297 + atomic_set_unchecked(&cp->in_pkts, 0);
69298
69299 atomic_inc(&ip_vs_conn_count);
69300 if (flags & IP_VS_CONN_F_NO_CPORT)
69301 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
69302 .open = ip_vs_conn_open,
69303 .read = seq_read,
69304 .llseek = seq_lseek,
69305 - .release = seq_release,
69306 + .release = seq_release_net,
69307 };
69308
69309 static const char *ip_vs_origin_name(unsigned flags)
69310 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
69311 .open = ip_vs_conn_sync_open,
69312 .read = seq_read,
69313 .llseek = seq_lseek,
69314 - .release = seq_release,
69315 + .release = seq_release_net,
69316 };
69317
69318 #endif
69319 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
69320
69321 /* Don't drop the entry if its number of incoming packets is not
69322 located in [0, 8] */
69323 - i = atomic_read(&cp->in_pkts);
69324 + i = atomic_read_unchecked(&cp->in_pkts);
69325 if (i > 8 || i < 0) return 0;
69326
69327 if (!todrop_rate[i]) return 0;
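[Editorial illustration, not part of the patch] The in_pkts and conn_flags hunks above show a pattern repeated throughout this patch: counters whose wraparound is harmless are moved from atomic_t to PaX's atomic_unchecked_t so that, when PAX_REFCOUNT instruments ordinary atomic_t operations with overflow detection, these statistics do not trigger it. A rough userspace model of the distinction, with all names invented for illustration:

/* model_atomic_unchecked.c -- sketch only; the real types live in the PaX
 * headers, these names are made up. */
#include <stdio.h>
#include <limits.h>

typedef struct { int counter; } model_atomic_t;
typedef struct { unsigned int counter; } model_atomic_unchecked_t;

/* "Checked" increment: under PAX_REFCOUNT the kernel detects the overflow
 * instead of silently wrapping a reference count. */
static int model_atomic_inc(model_atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		return -1;                 /* stand-in for the PaX overflow handler */
	}
	v->counter++;
	return 0;
}

/* "Unchecked" increment: plain wrapping arithmetic, appropriate for packet
 * counters such as cp->in_pkts where wraparound is benign. */
static void model_atomic_inc_unchecked(model_atomic_unchecked_t *v)
{
	v->counter++;
}

int main(void)
{
	model_atomic_t ref = { INT_MAX };
	model_atomic_unchecked_t pkts = { 0xffffffffu };

	model_atomic_inc(&ref);                /* caught: a checked counter must not wrap */
	model_atomic_inc_unchecked(&pkts);     /* wraps to 0: fine for a statistic */
	printf("ref=%d pkts=%u\n", ref.counter, pkts.counter);
	return 0;
}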
69328 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c
69329 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
69330 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
69331 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69332 ret = cp->packet_xmit(skb, cp, pp);
69333 /* do not touch skb anymore */
69334
69335 - atomic_inc(&cp->in_pkts);
69336 + atomic_inc_unchecked(&cp->in_pkts);
69337 ip_vs_conn_put(cp);
69338 return ret;
69339 }
69340 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69341 * Sync connection if it is about to close to
69342 * encorage the standby servers to update the connections timeout
69343 */
69344 - pkts = atomic_add_return(1, &cp->in_pkts);
69345 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69346 if (af == AF_INET &&
69347 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
69348 (((cp->protocol != IPPROTO_TCP ||
69349 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c
69350 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
69351 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
69352 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
69353 ip_vs_rs_hash(dest);
69354 write_unlock_bh(&__ip_vs_rs_lock);
69355 }
69356 - atomic_set(&dest->conn_flags, conn_flags);
69357 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
69358
69359 /* bind the service */
69360 if (!dest->svc) {
69361 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
69362 " %-7s %-6d %-10d %-10d\n",
69363 &dest->addr.in6,
69364 ntohs(dest->port),
69365 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69366 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69367 atomic_read(&dest->weight),
69368 atomic_read(&dest->activeconns),
69369 atomic_read(&dest->inactconns));
69370 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
69371 "%-7s %-6d %-10d %-10d\n",
69372 ntohl(dest->addr.ip),
69373 ntohs(dest->port),
69374 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69375 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69376 atomic_read(&dest->weight),
69377 atomic_read(&dest->activeconns),
69378 atomic_read(&dest->inactconns));
69379 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
69380 .open = ip_vs_info_open,
69381 .read = seq_read,
69382 .llseek = seq_lseek,
69383 - .release = seq_release_private,
69384 + .release = seq_release_net,
69385 };
69386
69387 #endif
69388 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
69389 .open = ip_vs_stats_seq_open,
69390 .read = seq_read,
69391 .llseek = seq_lseek,
69392 - .release = single_release,
69393 + .release = single_release_net,
69394 };
69395
69396 #endif
69397 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
69398
69399 entry.addr = dest->addr.ip;
69400 entry.port = dest->port;
69401 - entry.conn_flags = atomic_read(&dest->conn_flags);
69402 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69403 entry.weight = atomic_read(&dest->weight);
69404 entry.u_threshold = dest->u_threshold;
69405 entry.l_threshold = dest->l_threshold;
69406 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
69407 unsigned char arg[128];
69408 int ret = 0;
69409
69410 + pax_track_stack();
69411 +
69412 if (!capable(CAP_NET_ADMIN))
69413 return -EPERM;
69414
69415 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
69416 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69417
69418 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69419 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69420 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69421 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69422 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69423 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69424 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c
69425 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
69426 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
69427 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
69428
69429 if (opt)
69430 memcpy(&cp->in_seq, opt, sizeof(*opt));
69431 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69432 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69433 cp->state = state;
69434 cp->old_state = cp->state;
69435 /*
69436 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c
69437 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
69438 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
69439 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69440 else
69441 rc = NF_ACCEPT;
69442 /* do not touch skb anymore */
69443 - atomic_inc(&cp->in_pkts);
69444 + atomic_inc_unchecked(&cp->in_pkts);
69445 goto out;
69446 }
69447
69448 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69449 else
69450 rc = NF_ACCEPT;
69451 /* do not touch skb anymore */
69452 - atomic_inc(&cp->in_pkts);
69453 + atomic_inc_unchecked(&cp->in_pkts);
69454 goto out;
69455 }
69456
69457 diff -urNp linux-2.6.32.42/net/netfilter/Kconfig linux-2.6.32.42/net/netfilter/Kconfig
69458 --- linux-2.6.32.42/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
69459 +++ linux-2.6.32.42/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
69460 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
69461
69462 To compile it as a module, choose M here. If unsure, say N.
69463
69464 +config NETFILTER_XT_MATCH_GRADM
69465 + tristate '"gradm" match support'
69466 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69467 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69468 + ---help---
69469 +	  The gradm match allows matching on whether grsecurity RBAC is enabled.
69470 + It is useful when iptables rules are applied early on bootup to
69471 + prevent connections to the machine (except from a trusted host)
69472 + while the RBAC system is disabled.
69473 +
69474 config NETFILTER_XT_MATCH_HASHLIMIT
69475 tristate '"hashlimit" match support'
69476 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
69477 diff -urNp linux-2.6.32.42/net/netfilter/Makefile linux-2.6.32.42/net/netfilter/Makefile
69478 --- linux-2.6.32.42/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
69479 +++ linux-2.6.32.42/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
69480 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
69481 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
69482 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69483 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69484 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69485 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69486 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69487 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69488 diff -urNp linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c
69489 --- linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
69490 +++ linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
69491 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
69492 static int
69493 ctnetlink_parse_tuple(const struct nlattr * const cda[],
69494 struct nf_conntrack_tuple *tuple,
69495 - enum ctattr_tuple type, u_int8_t l3num)
69496 + enum ctattr_type type, u_int8_t l3num)
69497 {
69498 struct nlattr *tb[CTA_TUPLE_MAX+1];
69499 int err;
69500 diff -urNp linux-2.6.32.42/net/netfilter/nfnetlink_log.c linux-2.6.32.42/net/netfilter/nfnetlink_log.c
69501 --- linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
69502 +++ linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
69503 @@ -68,7 +68,7 @@ struct nfulnl_instance {
69504 };
69505
69506 static DEFINE_RWLOCK(instances_lock);
69507 -static atomic_t global_seq;
69508 +static atomic_unchecked_t global_seq;
69509
69510 #define INSTANCE_BUCKETS 16
69511 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69512 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
69513 /* global sequence number */
69514 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69515 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69516 - htonl(atomic_inc_return(&global_seq)));
69517 + htonl(atomic_inc_return_unchecked(&global_seq)));
69518
69519 if (data_len) {
69520 struct nlattr *nla;
69521 diff -urNp linux-2.6.32.42/net/netfilter/xt_gradm.c linux-2.6.32.42/net/netfilter/xt_gradm.c
69522 --- linux-2.6.32.42/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69523 +++ linux-2.6.32.42/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
69524 @@ -0,0 +1,51 @@
69525 +/*
69526 + * gradm match for netfilter
69527 + * Copyright © Zbigniew Krzystolik, 2010
69528 + *
69529 + * This program is free software; you can redistribute it and/or modify
69530 + * it under the terms of the GNU General Public License; either version
69531 + * 2 or 3 as published by the Free Software Foundation.
69532 + */
69533 +#include <linux/module.h>
69534 +#include <linux/moduleparam.h>
69535 +#include <linux/skbuff.h>
69536 +#include <linux/netfilter/x_tables.h>
69537 +#include <linux/grsecurity.h>
69538 +#include <linux/netfilter/xt_gradm.h>
69539 +
69540 +static bool
69541 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
69542 +{
69543 + const struct xt_gradm_mtinfo *info = par->matchinfo;
69544 + bool retval = false;
69545 + if (gr_acl_is_enabled())
69546 + retval = true;
69547 + return retval ^ info->invflags;
69548 +}
69549 +
69550 +static struct xt_match gradm_mt_reg __read_mostly = {
69551 + .name = "gradm",
69552 + .revision = 0,
69553 + .family = NFPROTO_UNSPEC,
69554 + .match = gradm_mt,
69555 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
69556 + .me = THIS_MODULE,
69557 +};
69558 +
69559 +static int __init gradm_mt_init(void)
69560 +{
69561 + return xt_register_match(&gradm_mt_reg);
69562 +}
69563 +
69564 +static void __exit gradm_mt_exit(void)
69565 +{
69566 + xt_unregister_match(&gradm_mt_reg);
69567 +}
69568 +
69569 +module_init(gradm_mt_init);
69570 +module_exit(gradm_mt_exit);
69571 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
69572 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
69573 +MODULE_LICENSE("GPL");
69574 +MODULE_ALIAS("ipt_gradm");
69575 +MODULE_ALIAS("ip6t_gradm");
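[Editorial illustration, not part of the patch] The new match reduces to a single XOR: a packet matches when RBAC is enabled and the rule is not inverted, or when RBAC is disabled and the rule is inverted. The Kconfig help above describes the intended use: rules loaded early at boot that drop traffic while RBAC is still disabled. A standalone sketch of that decision follows; the userspace rule syntax is deliberately omitted because this patch does not define it.

/* gradm_decision.c -- prints the match outcome for the four combinations of
 * gr_acl_is_enabled() and the rule's invert flag. */
#include <stdio.h>
#include <stdbool.h>

static bool gradm_matches(bool rbac_enabled, bool invert)
{
	/* mirrors "retval ^ info->invflags" in gradm_mt() above */
	return rbac_enabled ^ invert;
}

int main(void)
{
	for (int rbac = 0; rbac <= 1; rbac++)
		for (int inv = 0; inv <= 1; inv++)
			printf("RBAC %-9s invert=%d -> %s\n",
			       rbac ? "enabled," : "disabled,", inv,
			       gradm_matches(rbac, inv) ? "match" : "no match");
	return 0;
}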
69576 diff -urNp linux-2.6.32.42/net/netlink/af_netlink.c linux-2.6.32.42/net/netlink/af_netlink.c
69577 --- linux-2.6.32.42/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
69578 +++ linux-2.6.32.42/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
69579 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
69580 sk->sk_error_report(sk);
69581 }
69582 }
69583 - atomic_inc(&sk->sk_drops);
69584 + atomic_inc_unchecked(&sk->sk_drops);
69585 }
69586
69587 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
69588 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
69589 struct netlink_sock *nlk = nlk_sk(s);
69590
69591 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
69592 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69593 + NULL,
69594 +#else
69595 s,
69596 +#endif
69597 s->sk_protocol,
69598 nlk->pid,
69599 nlk->groups ? (u32)nlk->groups[0] : 0,
69600 sk_rmem_alloc_get(s),
69601 sk_wmem_alloc_get(s),
69602 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69603 + NULL,
69604 +#else
69605 nlk->cb,
69606 +#endif
69607 atomic_read(&s->sk_refcnt),
69608 - atomic_read(&s->sk_drops)
69609 + atomic_read_unchecked(&s->sk_drops)
69610 );
69611
69612 }
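[Editorial illustration, not part of the patch] This seq_printf change, and the matching ones in net/packet, net/phonet, net/sctp and net/unix further down, implement the same GRKERNSEC_HIDESYM policy: the %p columns that used to print socket and callback addresses in /proc are fed NULL instead, so the files keep their layout but stop disclosing kernel pointers. A minimal model of the substitution, names hypothetical:

/* hidesym_model.c -- print a record with or without the address column;
 * the hide flag stands in for CONFIG_GRKERNSEC_HIDESYM. */
#include <stdio.h>

static void print_sock_line(const void *sk, int hide)
{
	const void *shown = hide ? NULL : sk;   /* same format string, no address disclosed */
	printf("%p %-6d\n", shown, 42);
}

int main(void)
{
	int dummy;
	print_sock_line(&dummy, 0);   /* address visible */
	print_sock_line(&dummy, 1);   /* printed as a null pointer */
	return 0;
}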
69613 diff -urNp linux-2.6.32.42/net/netrom/af_netrom.c linux-2.6.32.42/net/netrom/af_netrom.c
69614 --- linux-2.6.32.42/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
69615 +++ linux-2.6.32.42/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
69616 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
69617 struct sock *sk = sock->sk;
69618 struct nr_sock *nr = nr_sk(sk);
69619
69620 + memset(sax, 0, sizeof(*sax));
69621 lock_sock(sk);
69622 if (peer != 0) {
69623 if (sk->sk_state != TCP_ESTABLISHED) {
69624 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
69625 *uaddr_len = sizeof(struct full_sockaddr_ax25);
69626 } else {
69627 sax->fsa_ax25.sax25_family = AF_NETROM;
69628 - sax->fsa_ax25.sax25_ndigis = 0;
69629 sax->fsa_ax25.sax25_call = nr->source_addr;
69630 *uaddr_len = sizeof(struct sockaddr_ax25);
69631 }
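[Editorial illustration, not part of the patch] Zeroing the whole sockaddr at the top of nr_getname() replaces the piecemeal sax25_ndigis = 0: every byte of the structure, including padding and the fields that only one branch fills in, now reaches user space initialized, so stale kernel stack contents cannot leak through the shorter sockaddr_ax25 copy. The shape of the fix, with a made-up structure:

/* zero_before_fill.c -- clear the output record before partially filling it,
 * so untouched bytes are 0 rather than leftover stack data. */
#include <string.h>
#include <stdio.h>

struct out_addr {
	unsigned short family;
	char           callsign[7];
	char           digipeaters[8][7];   /* only filled on the "peer" branch */
};

static void fill_local(struct out_addr *out, const char *call)
{
	memset(out, 0, sizeof(*out));                    /* the added memset */
	out->family = 1;                                 /* placeholder family value */
	strncpy(out->callsign, call, sizeof(out->callsign) - 1);
}

int main(void)
{
	struct out_addr a;
	fill_local(&a, "NOCALL");
	printf("family=%u call=%s\n", a.family, a.callsign);
	return 0;
}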
69632 diff -urNp linux-2.6.32.42/net/packet/af_packet.c linux-2.6.32.42/net/packet/af_packet.c
69633 --- linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 17:00:52.000000000 -0400
69634 +++ linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 15:56:46.000000000 -0400
69635 @@ -2427,7 +2427,11 @@ static int packet_seq_show(struct seq_fi
69636
69637 seq_printf(seq,
69638 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
69639 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69640 + NULL,
69641 +#else
69642 s,
69643 +#endif
69644 atomic_read(&s->sk_refcnt),
69645 s->sk_type,
69646 ntohs(po->num),
69647 diff -urNp linux-2.6.32.42/net/phonet/af_phonet.c linux-2.6.32.42/net/phonet/af_phonet.c
69648 --- linux-2.6.32.42/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
69649 +++ linux-2.6.32.42/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
69650 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
69651 {
69652 struct phonet_protocol *pp;
69653
69654 - if (protocol >= PHONET_NPROTO)
69655 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69656 return NULL;
69657
69658 spin_lock(&proto_tab_lock);
69659 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
69660 {
69661 int err = 0;
69662
69663 - if (protocol >= PHONET_NPROTO)
69664 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69665 return -EINVAL;
69666
69667 err = proto_register(pp->prot, 1);
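[Editorial illustration, not part of the patch] Both phonet hunks add the missing lower bound: protocol is a signed int, so a negative value passed the old "protocol >= PHONET_NPROTO" test and indexed proto_tab[] before its start. The general rule, sketched with hypothetical names:

/* signed_index.c -- a signed index must be checked on both sides (or cast to
 * an unsigned type of the same width) before it is used as an array index. */
#include <stdio.h>

#define NPROTO 4
static const char *proto_tab[NPROTO] = { "a", "b", "c", "d" };

static const char *proto_lookup(int protocol)
{
	if (protocol < 0 || protocol >= NPROTO)   /* the added check */
		return NULL;
	return proto_tab[protocol];
}

int main(void)
{
	printf("%s\n", proto_lookup(2)  ? proto_lookup(2)  : "(rejected)");
	printf("%s\n", proto_lookup(-1) ? proto_lookup(-1) : "(rejected)");
	return 0;
}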
69668 diff -urNp linux-2.6.32.42/net/phonet/datagram.c linux-2.6.32.42/net/phonet/datagram.c
69669 --- linux-2.6.32.42/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
69670 +++ linux-2.6.32.42/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
69671 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
69672 if (err < 0) {
69673 kfree_skb(skb);
69674 if (err == -ENOMEM)
69675 - atomic_inc(&sk->sk_drops);
69676 + atomic_inc_unchecked(&sk->sk_drops);
69677 }
69678 return err ? NET_RX_DROP : NET_RX_SUCCESS;
69679 }
69680 diff -urNp linux-2.6.32.42/net/phonet/pep.c linux-2.6.32.42/net/phonet/pep.c
69681 --- linux-2.6.32.42/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
69682 +++ linux-2.6.32.42/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
69683 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
69684
69685 case PNS_PEP_CTRL_REQ:
69686 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
69687 - atomic_inc(&sk->sk_drops);
69688 + atomic_inc_unchecked(&sk->sk_drops);
69689 break;
69690 }
69691 __skb_pull(skb, 4);
69692 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
69693 if (!err)
69694 return 0;
69695 if (err == -ENOMEM)
69696 - atomic_inc(&sk->sk_drops);
69697 + atomic_inc_unchecked(&sk->sk_drops);
69698 break;
69699 }
69700
69701 if (pn->rx_credits == 0) {
69702 - atomic_inc(&sk->sk_drops);
69703 + atomic_inc_unchecked(&sk->sk_drops);
69704 err = -ENOBUFS;
69705 break;
69706 }
69707 diff -urNp linux-2.6.32.42/net/phonet/socket.c linux-2.6.32.42/net/phonet/socket.c
69708 --- linux-2.6.32.42/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
69709 +++ linux-2.6.32.42/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
69710 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
69711 sk->sk_state,
69712 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69713 sock_i_uid(sk), sock_i_ino(sk),
69714 - atomic_read(&sk->sk_refcnt), sk,
69715 - atomic_read(&sk->sk_drops), &len);
69716 + atomic_read(&sk->sk_refcnt),
69717 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69718 + NULL,
69719 +#else
69720 + sk,
69721 +#endif
69722 + atomic_read_unchecked(&sk->sk_drops), &len);
69723 }
69724 seq_printf(seq, "%*s\n", 127 - len, "");
69725 return 0;
69726 diff -urNp linux-2.6.32.42/net/rds/cong.c linux-2.6.32.42/net/rds/cong.c
69727 --- linux-2.6.32.42/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
69728 +++ linux-2.6.32.42/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
69729 @@ -77,7 +77,7 @@
69730 * finds that the saved generation number is smaller than the global generation
69731 * number, it wakes up the process.
69732 */
69733 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
69734 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
69735
69736 /*
69737 * Congestion monitoring
69738 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69739 rdsdebug("waking map %p for %pI4\n",
69740 map, &map->m_addr);
69741 rds_stats_inc(s_cong_update_received);
69742 - atomic_inc(&rds_cong_generation);
69743 + atomic_inc_unchecked(&rds_cong_generation);
69744 if (waitqueue_active(&map->m_waitq))
69745 wake_up(&map->m_waitq);
69746 if (waitqueue_active(&rds_poll_waitq))
69747 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69748
69749 int rds_cong_updated_since(unsigned long *recent)
69750 {
69751 - unsigned long gen = atomic_read(&rds_cong_generation);
69752 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69753
69754 if (likely(*recent == gen))
69755 return 0;
69756 diff -urNp linux-2.6.32.42/net/rds/iw_rdma.c linux-2.6.32.42/net/rds/iw_rdma.c
69757 --- linux-2.6.32.42/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
69758 +++ linux-2.6.32.42/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
69759 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69760 struct rdma_cm_id *pcm_id;
69761 int rc;
69762
69763 + pax_track_stack();
69764 +
69765 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69766 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69767
69768 diff -urNp linux-2.6.32.42/net/rds/Kconfig linux-2.6.32.42/net/rds/Kconfig
69769 --- linux-2.6.32.42/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
69770 +++ linux-2.6.32.42/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
69771 @@ -1,7 +1,7 @@
69772
69773 config RDS
69774 tristate "The RDS Protocol (EXPERIMENTAL)"
69775 - depends on INET && EXPERIMENTAL
69776 + depends on INET && EXPERIMENTAL && BROKEN
69777 ---help---
69778 The RDS (Reliable Datagram Sockets) protocol provides reliable,
69779 sequenced delivery of datagrams over Infiniband, iWARP,
69780 diff -urNp linux-2.6.32.42/net/rxrpc/af_rxrpc.c linux-2.6.32.42/net/rxrpc/af_rxrpc.c
69781 --- linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
69782 +++ linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
69783 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
69784 __be32 rxrpc_epoch;
69785
69786 /* current debugging ID */
69787 -atomic_t rxrpc_debug_id;
69788 +atomic_unchecked_t rxrpc_debug_id;
69789
69790 /* count of skbs currently in use */
69791 atomic_t rxrpc_n_skbs;
69792 diff -urNp linux-2.6.32.42/net/rxrpc/ar-ack.c linux-2.6.32.42/net/rxrpc/ar-ack.c
69793 --- linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
69794 +++ linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
69795 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
69796
69797 _enter("{%d,%d,%d,%d},",
69798 call->acks_hard, call->acks_unacked,
69799 - atomic_read(&call->sequence),
69800 + atomic_read_unchecked(&call->sequence),
69801 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69802
69803 stop = 0;
69804 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
69805
69806 /* each Tx packet has a new serial number */
69807 sp->hdr.serial =
69808 - htonl(atomic_inc_return(&call->conn->serial));
69809 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
69810
69811 hdr = (struct rxrpc_header *) txb->head;
69812 hdr->serial = sp->hdr.serial;
69813 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
69814 */
69815 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69816 {
69817 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69818 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69819 }
69820
69821 /*
69822 @@ -627,7 +627,7 @@ process_further:
69823
69824 latest = ntohl(sp->hdr.serial);
69825 hard = ntohl(ack.firstPacket);
69826 - tx = atomic_read(&call->sequence);
69827 + tx = atomic_read_unchecked(&call->sequence);
69828
69829 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69830 latest,
69831 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
69832 u32 abort_code = RX_PROTOCOL_ERROR;
69833 u8 *acks = NULL;
69834
69835 + pax_track_stack();
69836 +
69837 //printk("\n--------------------\n");
69838 _enter("{%d,%s,%lx} [%lu]",
69839 call->debug_id, rxrpc_call_states[call->state], call->events,
69840 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
69841 goto maybe_reschedule;
69842
69843 send_ACK_with_skew:
69844 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
69845 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
69846 ntohl(ack.serial));
69847 send_ACK:
69848 mtu = call->conn->trans->peer->if_mtu;
69849 @@ -1171,7 +1173,7 @@ send_ACK:
69850 ackinfo.rxMTU = htonl(5692);
69851 ackinfo.jumbo_max = htonl(4);
69852
69853 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69854 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69855 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69856 ntohl(hdr.serial),
69857 ntohs(ack.maxSkew),
69858 @@ -1189,7 +1191,7 @@ send_ACK:
69859 send_message:
69860 _debug("send message");
69861
69862 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69863 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69864 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
69865 send_message_2:
69866
69867 diff -urNp linux-2.6.32.42/net/rxrpc/ar-call.c linux-2.6.32.42/net/rxrpc/ar-call.c
69868 --- linux-2.6.32.42/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
69869 +++ linux-2.6.32.42/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
69870 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
69871 spin_lock_init(&call->lock);
69872 rwlock_init(&call->state_lock);
69873 atomic_set(&call->usage, 1);
69874 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
69875 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69876 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
69877
69878 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
69879 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connection.c linux-2.6.32.42/net/rxrpc/ar-connection.c
69880 --- linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
69881 +++ linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
69882 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
69883 rwlock_init(&conn->lock);
69884 spin_lock_init(&conn->state_lock);
69885 atomic_set(&conn->usage, 1);
69886 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
69887 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69888 conn->avail_calls = RXRPC_MAXCALLS;
69889 conn->size_align = 4;
69890 conn->header_size = sizeof(struct rxrpc_header);
69891 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connevent.c linux-2.6.32.42/net/rxrpc/ar-connevent.c
69892 --- linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
69893 +++ linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
69894 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
69895
69896 len = iov[0].iov_len + iov[1].iov_len;
69897
69898 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69899 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69900 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
69901
69902 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69903 diff -urNp linux-2.6.32.42/net/rxrpc/ar-input.c linux-2.6.32.42/net/rxrpc/ar-input.c
69904 --- linux-2.6.32.42/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
69905 +++ linux-2.6.32.42/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
69906 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
69907 /* track the latest serial number on this connection for ACK packet
69908 * information */
69909 serial = ntohl(sp->hdr.serial);
69910 - hi_serial = atomic_read(&call->conn->hi_serial);
69911 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
69912 while (serial > hi_serial)
69913 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
69914 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
69915 serial);
69916
69917 /* request ACK generation for any ACK or DATA packet that requests
69918 diff -urNp linux-2.6.32.42/net/rxrpc/ar-internal.h linux-2.6.32.42/net/rxrpc/ar-internal.h
69919 --- linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
69920 +++ linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
69921 @@ -272,8 +272,8 @@ struct rxrpc_connection {
69922 int error; /* error code for local abort */
69923 int debug_id; /* debug ID for printks */
69924 unsigned call_counter; /* call ID counter */
69925 - atomic_t serial; /* packet serial number counter */
69926 - atomic_t hi_serial; /* highest serial number received */
69927 + atomic_unchecked_t serial; /* packet serial number counter */
69928 + atomic_unchecked_t hi_serial; /* highest serial number received */
69929 u8 avail_calls; /* number of calls available */
69930 u8 size_align; /* data size alignment (for security) */
69931 u8 header_size; /* rxrpc + security header size */
69932 @@ -346,7 +346,7 @@ struct rxrpc_call {
69933 spinlock_t lock;
69934 rwlock_t state_lock; /* lock for state transition */
69935 atomic_t usage;
69936 - atomic_t sequence; /* Tx data packet sequence counter */
69937 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
69938 u32 abort_code; /* local/remote abort code */
69939 enum { /* current state of call */
69940 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
69941 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
69942 */
69943 extern atomic_t rxrpc_n_skbs;
69944 extern __be32 rxrpc_epoch;
69945 -extern atomic_t rxrpc_debug_id;
69946 +extern atomic_unchecked_t rxrpc_debug_id;
69947 extern struct workqueue_struct *rxrpc_workqueue;
69948
69949 /*
69950 diff -urNp linux-2.6.32.42/net/rxrpc/ar-key.c linux-2.6.32.42/net/rxrpc/ar-key.c
69951 --- linux-2.6.32.42/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
69952 +++ linux-2.6.32.42/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
69953 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
69954 return ret;
69955
69956 plen -= sizeof(*token);
69957 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69958 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69959 if (!token)
69960 return -ENOMEM;
69961
69962 - token->kad = kmalloc(plen, GFP_KERNEL);
69963 + token->kad = kzalloc(plen, GFP_KERNEL);
69964 if (!token->kad) {
69965 kfree(token);
69966 return -ENOMEM;
69967 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
69968 goto error;
69969
69970 ret = -ENOMEM;
69971 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69972 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69973 if (!token)
69974 goto error;
69975 - token->kad = kmalloc(plen, GFP_KERNEL);
69976 + token->kad = kzalloc(plen, GFP_KERNEL);
69977 if (!token->kad)
69978 goto error_free;
69979
69980 diff -urNp linux-2.6.32.42/net/rxrpc/ar-local.c linux-2.6.32.42/net/rxrpc/ar-local.c
69981 --- linux-2.6.32.42/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
69982 +++ linux-2.6.32.42/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
69983 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
69984 spin_lock_init(&local->lock);
69985 rwlock_init(&local->services_lock);
69986 atomic_set(&local->usage, 1);
69987 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
69988 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69989 memcpy(&local->srx, srx, sizeof(*srx));
69990 }
69991
69992 diff -urNp linux-2.6.32.42/net/rxrpc/ar-output.c linux-2.6.32.42/net/rxrpc/ar-output.c
69993 --- linux-2.6.32.42/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
69994 +++ linux-2.6.32.42/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
69995 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
69996 sp->hdr.cid = call->cid;
69997 sp->hdr.callNumber = call->call_id;
69998 sp->hdr.seq =
69999 - htonl(atomic_inc_return(&call->sequence));
70000 + htonl(atomic_inc_return_unchecked(&call->sequence));
70001 sp->hdr.serial =
70002 - htonl(atomic_inc_return(&conn->serial));
70003 + htonl(atomic_inc_return_unchecked(&conn->serial));
70004 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
70005 sp->hdr.userStatus = 0;
70006 sp->hdr.securityIndex = conn->security_ix;
70007 diff -urNp linux-2.6.32.42/net/rxrpc/ar-peer.c linux-2.6.32.42/net/rxrpc/ar-peer.c
70008 --- linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
70009 +++ linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
70010 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
70011 INIT_LIST_HEAD(&peer->error_targets);
70012 spin_lock_init(&peer->lock);
70013 atomic_set(&peer->usage, 1);
70014 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
70015 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70016 memcpy(&peer->srx, srx, sizeof(*srx));
70017
70018 rxrpc_assess_MTU_size(peer);
70019 diff -urNp linux-2.6.32.42/net/rxrpc/ar-proc.c linux-2.6.32.42/net/rxrpc/ar-proc.c
70020 --- linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
70021 +++ linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
70022 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
70023 atomic_read(&conn->usage),
70024 rxrpc_conn_states[conn->state],
70025 key_serial(conn->key),
70026 - atomic_read(&conn->serial),
70027 - atomic_read(&conn->hi_serial));
70028 + atomic_read_unchecked(&conn->serial),
70029 + atomic_read_unchecked(&conn->hi_serial));
70030
70031 return 0;
70032 }
70033 diff -urNp linux-2.6.32.42/net/rxrpc/ar-transport.c linux-2.6.32.42/net/rxrpc/ar-transport.c
70034 --- linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
70035 +++ linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
70036 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
70037 spin_lock_init(&trans->client_lock);
70038 rwlock_init(&trans->conn_lock);
70039 atomic_set(&trans->usage, 1);
70040 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
70041 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70042
70043 if (peer->srx.transport.family == AF_INET) {
70044 switch (peer->srx.transport_type) {
70045 diff -urNp linux-2.6.32.42/net/rxrpc/rxkad.c linux-2.6.32.42/net/rxrpc/rxkad.c
70046 --- linux-2.6.32.42/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
70047 +++ linux-2.6.32.42/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
70048 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
70049 u16 check;
70050 int nsg;
70051
70052 + pax_track_stack();
70053 +
70054 sp = rxrpc_skb(skb);
70055
70056 _enter("");
70057 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
70058 u16 check;
70059 int nsg;
70060
70061 + pax_track_stack();
70062 +
70063 _enter("");
70064
70065 sp = rxrpc_skb(skb);
70066 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
70067
70068 len = iov[0].iov_len + iov[1].iov_len;
70069
70070 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
70071 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70072 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
70073
70074 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70075 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
70076
70077 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
70078
70079 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
70080 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70081 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
70082
70083 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
70084 diff -urNp linux-2.6.32.42/net/sctp/proc.c linux-2.6.32.42/net/sctp/proc.c
70085 --- linux-2.6.32.42/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
70086 +++ linux-2.6.32.42/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
70087 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
70088 sctp_for_each_hentry(epb, node, &head->chain) {
70089 ep = sctp_ep(epb);
70090 sk = epb->sk;
70091 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
70092 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
70093 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70094 + NULL, NULL,
70095 +#else
70096 + ep, sk,
70097 +#endif
70098 sctp_sk(sk)->type, sk->sk_state, hash,
70099 epb->bind_addr.port,
70100 sock_i_uid(sk), sock_i_ino(sk));
70101 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
70102 seq_printf(seq,
70103 "%8p %8p %-3d %-3d %-2d %-4d "
70104 "%4d %8d %8d %7d %5lu %-5d %5d ",
70105 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
70106 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70107 + NULL, NULL,
70108 +#else
70109 + assoc, sk,
70110 +#endif
70111 + sctp_sk(sk)->type, sk->sk_state,
70112 assoc->state, hash,
70113 assoc->assoc_id,
70114 assoc->sndbuf_used,
70115 diff -urNp linux-2.6.32.42/net/sctp/socket.c linux-2.6.32.42/net/sctp/socket.c
70116 --- linux-2.6.32.42/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
70117 +++ linux-2.6.32.42/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
70118 @@ -5802,7 +5802,6 @@ pp_found:
70119 */
70120 int reuse = sk->sk_reuse;
70121 struct sock *sk2;
70122 - struct hlist_node *node;
70123
70124 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
70125 if (pp->fastreuse && sk->sk_reuse &&
70126 diff -urNp linux-2.6.32.42/net/socket.c linux-2.6.32.42/net/socket.c
70127 --- linux-2.6.32.42/net/socket.c 2011-03-27 14:31:47.000000000 -0400
70128 +++ linux-2.6.32.42/net/socket.c 2011-05-16 21:46:57.000000000 -0400
70129 @@ -87,6 +87,7 @@
70130 #include <linux/wireless.h>
70131 #include <linux/nsproxy.h>
70132 #include <linux/magic.h>
70133 +#include <linux/in.h>
70134
70135 #include <asm/uaccess.h>
70136 #include <asm/unistd.h>
70137 @@ -97,6 +98,21 @@
70138 #include <net/sock.h>
70139 #include <linux/netfilter.h>
70140
70141 +extern void gr_attach_curr_ip(const struct sock *sk);
70142 +extern int gr_handle_sock_all(const int family, const int type,
70143 + const int protocol);
70144 +extern int gr_handle_sock_server(const struct sockaddr *sck);
70145 +extern int gr_handle_sock_server_other(const struct sock *sck);
70146 +extern int gr_handle_sock_client(const struct sockaddr *sck);
70147 +extern int gr_search_connect(struct socket * sock,
70148 + struct sockaddr_in * addr);
70149 +extern int gr_search_bind(struct socket * sock,
70150 + struct sockaddr_in * addr);
70151 +extern int gr_search_listen(struct socket * sock);
70152 +extern int gr_search_accept(struct socket * sock);
70153 +extern int gr_search_socket(const int domain, const int type,
70154 + const int protocol);
70155 +
70156 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
70157 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
70158 unsigned long nr_segs, loff_t pos);
70159 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
70160 mnt);
70161 }
70162
70163 -static struct vfsmount *sock_mnt __read_mostly;
70164 +struct vfsmount *sock_mnt __read_mostly;
70165
70166 static struct file_system_type sock_fs_type = {
70167 .name = "sockfs",
70168 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
70169 return -EAFNOSUPPORT;
70170 if (type < 0 || type >= SOCK_MAX)
70171 return -EINVAL;
70172 + if (protocol < 0)
70173 + return -EINVAL;
70174
70175 /* Compatibility.
70176
70177 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
70178 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
70179 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
70180
70181 + if(!gr_search_socket(family, type, protocol)) {
70182 + retval = -EACCES;
70183 + goto out;
70184 + }
70185 +
70186 + if (gr_handle_sock_all(family, type, protocol)) {
70187 + retval = -EACCES;
70188 + goto out;
70189 + }
70190 +
70191 retval = sock_create(family, type, protocol, &sock);
70192 if (retval < 0)
70193 goto out;
70194 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70195 if (sock) {
70196 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
70197 if (err >= 0) {
70198 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
70199 + err = -EACCES;
70200 + goto error;
70201 + }
70202 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
70203 + if (err)
70204 + goto error;
70205 +
70206 err = security_socket_bind(sock,
70207 (struct sockaddr *)&address,
70208 addrlen);
70209 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70210 (struct sockaddr *)
70211 &address, addrlen);
70212 }
70213 +error:
70214 fput_light(sock->file, fput_needed);
70215 }
70216 return err;
70217 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
70218 if ((unsigned)backlog > somaxconn)
70219 backlog = somaxconn;
70220
70221 + if (gr_handle_sock_server_other(sock->sk)) {
70222 + err = -EPERM;
70223 + goto error;
70224 + }
70225 +
70226 + err = gr_search_listen(sock);
70227 + if (err)
70228 + goto error;
70229 +
70230 err = security_socket_listen(sock, backlog);
70231 if (!err)
70232 err = sock->ops->listen(sock, backlog);
70233
70234 +error:
70235 fput_light(sock->file, fput_needed);
70236 }
70237 return err;
70238 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70239 newsock->type = sock->type;
70240 newsock->ops = sock->ops;
70241
70242 + if (gr_handle_sock_server_other(sock->sk)) {
70243 + err = -EPERM;
70244 + sock_release(newsock);
70245 + goto out_put;
70246 + }
70247 +
70248 + err = gr_search_accept(sock);
70249 + if (err) {
70250 + sock_release(newsock);
70251 + goto out_put;
70252 + }
70253 +
70254 /*
70255 * We don't need try_module_get here, as the listening socket (sock)
70256 * has the protocol module (sock->ops->owner) held.
70257 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70258 fd_install(newfd, newfile);
70259 err = newfd;
70260
70261 + gr_attach_curr_ip(newsock->sk);
70262 +
70263 out_put:
70264 fput_light(sock->file, fput_needed);
70265 out:
70266 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70267 int, addrlen)
70268 {
70269 struct socket *sock;
70270 + struct sockaddr *sck;
70271 struct sockaddr_storage address;
70272 int err, fput_needed;
70273
70274 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70275 if (err < 0)
70276 goto out_put;
70277
70278 + sck = (struct sockaddr *)&address;
70279 +
70280 + if (gr_handle_sock_client(sck)) {
70281 + err = -EACCES;
70282 + goto out_put;
70283 + }
70284 +
70285 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
70286 + if (err)
70287 + goto out_put;
70288 +
70289 err =
70290 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
70291 if (err)
70292 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
70293 int err, ctl_len, iov_size, total_len;
70294 int fput_needed;
70295
70296 + pax_track_stack();
70297 +
70298 err = -EFAULT;
70299 if (MSG_CMSG_COMPAT & flags) {
70300 if (get_compat_msghdr(&msg_sys, msg_compat))
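[Editorial illustration, not part of the patch] The socket(), bind(), listen(), accept4() and connect() hunks above all follow one pattern: the gr_* policy checks run after the address has been copied in but before the LSM hook and the protocol operation, and a denial unwinds through the existing error path (fput_light, sock_release) so nothing is left half-done. The control flow, reduced to a sketch with invented names:

/* guarded_syscall.c -- shape of a syscall body with a policy check that must
 * run before the operation and still release what was acquired. */
#include <stdio.h>

static int policy_allows(int arg) { return arg >= 0; }   /* stands in for gr_search_*() */
static int do_operation(int arg)  { return arg * 2; }

static int guarded_syscall(int arg)
{
	int err;
	/* acquire resources here (e.g. sockfd_lookup_light) */
	if (!policy_allows(arg)) {
		err = -13;                /* -EACCES, before the operation runs */
		goto out;
	}
	err = do_operation(arg);
out:
	/* release resources here (e.g. fput_light) on every path */
	return err;
}

int main(void)
{
	printf("%d %d\n", guarded_syscall(3), guarded_syscall(-1));
	return 0;
}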
70301 diff -urNp linux-2.6.32.42/net/sunrpc/sched.c linux-2.6.32.42/net/sunrpc/sched.c
70302 --- linux-2.6.32.42/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
70303 +++ linux-2.6.32.42/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
70304 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
70305 #ifdef RPC_DEBUG
70306 static void rpc_task_set_debuginfo(struct rpc_task *task)
70307 {
70308 - static atomic_t rpc_pid;
70309 + static atomic_unchecked_t rpc_pid;
70310
70311 task->tk_magic = RPC_TASK_MAGIC_ID;
70312 - task->tk_pid = atomic_inc_return(&rpc_pid);
70313 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
70314 }
70315 #else
70316 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
70317 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c
70318 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
70319 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
70320 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
70321 static unsigned int min_max_inline = 4096;
70322 static unsigned int max_max_inline = 65536;
70323
70324 -atomic_t rdma_stat_recv;
70325 -atomic_t rdma_stat_read;
70326 -atomic_t rdma_stat_write;
70327 -atomic_t rdma_stat_sq_starve;
70328 -atomic_t rdma_stat_rq_starve;
70329 -atomic_t rdma_stat_rq_poll;
70330 -atomic_t rdma_stat_rq_prod;
70331 -atomic_t rdma_stat_sq_poll;
70332 -atomic_t rdma_stat_sq_prod;
70333 +atomic_unchecked_t rdma_stat_recv;
70334 +atomic_unchecked_t rdma_stat_read;
70335 +atomic_unchecked_t rdma_stat_write;
70336 +atomic_unchecked_t rdma_stat_sq_starve;
70337 +atomic_unchecked_t rdma_stat_rq_starve;
70338 +atomic_unchecked_t rdma_stat_rq_poll;
70339 +atomic_unchecked_t rdma_stat_rq_prod;
70340 +atomic_unchecked_t rdma_stat_sq_poll;
70341 +atomic_unchecked_t rdma_stat_sq_prod;
70342
70343 /* Temporary NFS request map and context caches */
70344 struct kmem_cache *svc_rdma_map_cachep;
70345 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
70346 len -= *ppos;
70347 if (len > *lenp)
70348 len = *lenp;
70349 - if (len && copy_to_user(buffer, str_buf, len))
70350 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
70351 return -EFAULT;
70352 *lenp = len;
70353 *ppos += len;
70354 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
70355 {
70356 .procname = "rdma_stat_read",
70357 .data = &rdma_stat_read,
70358 - .maxlen = sizeof(atomic_t),
70359 + .maxlen = sizeof(atomic_unchecked_t),
70360 .mode = 0644,
70361 .proc_handler = &read_reset_stat,
70362 },
70363 {
70364 .procname = "rdma_stat_recv",
70365 .data = &rdma_stat_recv,
70366 - .maxlen = sizeof(atomic_t),
70367 + .maxlen = sizeof(atomic_unchecked_t),
70368 .mode = 0644,
70369 .proc_handler = &read_reset_stat,
70370 },
70371 {
70372 .procname = "rdma_stat_write",
70373 .data = &rdma_stat_write,
70374 - .maxlen = sizeof(atomic_t),
70375 + .maxlen = sizeof(atomic_unchecked_t),
70376 .mode = 0644,
70377 .proc_handler = &read_reset_stat,
70378 },
70379 {
70380 .procname = "rdma_stat_sq_starve",
70381 .data = &rdma_stat_sq_starve,
70382 - .maxlen = sizeof(atomic_t),
70383 + .maxlen = sizeof(atomic_unchecked_t),
70384 .mode = 0644,
70385 .proc_handler = &read_reset_stat,
70386 },
70387 {
70388 .procname = "rdma_stat_rq_starve",
70389 .data = &rdma_stat_rq_starve,
70390 - .maxlen = sizeof(atomic_t),
70391 + .maxlen = sizeof(atomic_unchecked_t),
70392 .mode = 0644,
70393 .proc_handler = &read_reset_stat,
70394 },
70395 {
70396 .procname = "rdma_stat_rq_poll",
70397 .data = &rdma_stat_rq_poll,
70398 - .maxlen = sizeof(atomic_t),
70399 + .maxlen = sizeof(atomic_unchecked_t),
70400 .mode = 0644,
70401 .proc_handler = &read_reset_stat,
70402 },
70403 {
70404 .procname = "rdma_stat_rq_prod",
70405 .data = &rdma_stat_rq_prod,
70406 - .maxlen = sizeof(atomic_t),
70407 + .maxlen = sizeof(atomic_unchecked_t),
70408 .mode = 0644,
70409 .proc_handler = &read_reset_stat,
70410 },
70411 {
70412 .procname = "rdma_stat_sq_poll",
70413 .data = &rdma_stat_sq_poll,
70414 - .maxlen = sizeof(atomic_t),
70415 + .maxlen = sizeof(atomic_unchecked_t),
70416 .mode = 0644,
70417 .proc_handler = &read_reset_stat,
70418 },
70419 {
70420 .procname = "rdma_stat_sq_prod",
70421 .data = &rdma_stat_sq_prod,
70422 - .maxlen = sizeof(atomic_t),
70423 + .maxlen = sizeof(atomic_unchecked_t),
70424 .mode = 0644,
70425 .proc_handler = &read_reset_stat,
70426 },
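[Editorial illustration, not part of the patch] Besides widening the ctl_table entries to atomic_unchecked_t, the hunk above bounds len before copy_to_user(): *lenp comes from the sysctl caller, so without the "len > sizeof str_buf" test an oversized read could copy bytes past the on-stack buffer. The guard in isolation, with made-up names:

/* bounded_copy.c -- reject a caller-supplied length that exceeds the source
 * buffer before copying anything out. */
#include <stdio.h>
#include <string.h>

static long bounded_copy(char *dst, const char *src_buf, size_t src_buf_size, size_t len)
{
	if (len > src_buf_size)       /* the added check */
		return -1;            /* stand-in for -EFAULT */
	memcpy(dst, src_buf, len);    /* copy_to_user() in the kernel */
	return (long)len;
}

int main(void)
{
	char str_buf[8] = "12345\n";
	char out[64];

	printf("%ld\n", bounded_copy(out, str_buf, sizeof(str_buf), 6));    /* copied */
	printf("%ld\n", bounded_copy(out, str_buf, sizeof(str_buf), 100));  /* rejected */
	return 0;
}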
70427 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
70428 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
70429 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
70430 @@ -495,7 +495,7 @@ next_sge:
70431 svc_rdma_put_context(ctxt, 0);
70432 goto out;
70433 }
70434 - atomic_inc(&rdma_stat_read);
70435 + atomic_inc_unchecked(&rdma_stat_read);
70436
70437 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
70438 chl_map->ch[ch_no].count -= read_wr.num_sge;
70439 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70440 dto_q);
70441 list_del_init(&ctxt->dto_q);
70442 } else {
70443 - atomic_inc(&rdma_stat_rq_starve);
70444 + atomic_inc_unchecked(&rdma_stat_rq_starve);
70445 clear_bit(XPT_DATA, &xprt->xpt_flags);
70446 ctxt = NULL;
70447 }
70448 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70449 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
70450 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
70451 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
70452 - atomic_inc(&rdma_stat_recv);
70453 + atomic_inc_unchecked(&rdma_stat_recv);
70454
70455 /* Build up the XDR from the receive buffers. */
70456 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
70457 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c
70458 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
70459 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
70460 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
70461 write_wr.wr.rdma.remote_addr = to;
70462
70463 /* Post It */
70464 - atomic_inc(&rdma_stat_write);
70465 + atomic_inc_unchecked(&rdma_stat_write);
70466 if (svc_rdma_send(xprt, &write_wr))
70467 goto err;
70468 return 0;
70469 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c
70470 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
70471 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
70472 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
70473 return;
70474
70475 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
70476 - atomic_inc(&rdma_stat_rq_poll);
70477 + atomic_inc_unchecked(&rdma_stat_rq_poll);
70478
70479 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
70480 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
70481 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
70482 }
70483
70484 if (ctxt)
70485 - atomic_inc(&rdma_stat_rq_prod);
70486 + atomic_inc_unchecked(&rdma_stat_rq_prod);
70487
70488 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
70489 /*
70490 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
70491 return;
70492
70493 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
70494 - atomic_inc(&rdma_stat_sq_poll);
70495 + atomic_inc_unchecked(&rdma_stat_sq_poll);
70496 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
70497 if (wc.status != IB_WC_SUCCESS)
70498 /* Close the transport */
70499 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
70500 }
70501
70502 if (ctxt)
70503 - atomic_inc(&rdma_stat_sq_prod);
70504 + atomic_inc_unchecked(&rdma_stat_sq_prod);
70505 }
70506
70507 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
70508 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
70509 spin_lock_bh(&xprt->sc_lock);
70510 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
70511 spin_unlock_bh(&xprt->sc_lock);
70512 - atomic_inc(&rdma_stat_sq_starve);
70513 + atomic_inc_unchecked(&rdma_stat_sq_starve);
70514
70515 /* See if we can opportunistically reap SQ WR to make room */
70516 sq_cq_reap(xprt);
70517 diff -urNp linux-2.6.32.42/net/sysctl_net.c linux-2.6.32.42/net/sysctl_net.c
70518 --- linux-2.6.32.42/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
70519 +++ linux-2.6.32.42/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
70520 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
70521 struct ctl_table *table)
70522 {
70523 /* Allow network administrator to have same access as root. */
70524 - if (capable(CAP_NET_ADMIN)) {
70525 + if (capable_nolog(CAP_NET_ADMIN)) {
70526 int mode = (table->mode >> 6) & 7;
70527 return (mode << 6) | (mode << 3) | mode;
70528 }
70529 diff -urNp linux-2.6.32.42/net/unix/af_unix.c linux-2.6.32.42/net/unix/af_unix.c
70530 --- linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
70531 +++ linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
70532 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
70533 err = -ECONNREFUSED;
70534 if (!S_ISSOCK(inode->i_mode))
70535 goto put_fail;
70536 +
70537 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
70538 + err = -EACCES;
70539 + goto put_fail;
70540 + }
70541 +
70542 u = unix_find_socket_byinode(net, inode);
70543 if (!u)
70544 goto put_fail;
70545 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
70546 if (u) {
70547 struct dentry *dentry;
70548 dentry = unix_sk(u)->dentry;
70549 +
70550 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
70551 + err = -EPERM;
70552 + sock_put(u);
70553 + goto fail;
70554 + }
70555 +
70556 if (dentry)
70557 touch_atime(unix_sk(u)->mnt, dentry);
70558 } else
70559 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
70560 err = security_path_mknod(&nd.path, dentry, mode, 0);
70561 if (err)
70562 goto out_mknod_drop_write;
70563 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70564 + err = -EACCES;
70565 + goto out_mknod_drop_write;
70566 + }
70567 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
70568 out_mknod_drop_write:
70569 mnt_drop_write(nd.path.mnt);
70570 if (err)
70571 goto out_mknod_dput;
70572 +
70573 + gr_handle_create(dentry, nd.path.mnt);
70574 +
70575 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
70576 dput(nd.path.dentry);
70577 nd.path.dentry = dentry;
70578 @@ -872,6 +892,10 @@ out_mknod_drop_write:
70579 goto out_unlock;
70580 }
70581
70582 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70583 + sk->sk_peercred.pid = current->pid;
70584 +#endif
70585 +
70586 list = &unix_socket_table[addr->hash];
70587 } else {
70588 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
70589 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
70590 unix_state_lock(s);
70591
70592 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
70593 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70594 + NULL,
70595 +#else
70596 s,
70597 +#endif
70598 atomic_read(&s->sk_refcnt),
70599 0,
70600 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
70601 diff -urNp linux-2.6.32.42/net/wireless/wext.c linux-2.6.32.42/net/wireless/wext.c
70602 --- linux-2.6.32.42/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
70603 +++ linux-2.6.32.42/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
70604 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
70605 */
70606
70607 /* Support for very large requests */
70608 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
70609 - (user_length > descr->max_tokens)) {
70610 + if (user_length > descr->max_tokens) {
70611 /* Allow userspace to GET more than max so
70612 * we can support any size GET requests.
70613 * There is still a limit : -ENOMEM.
70614 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
70615 }
70616 }
70617
70618 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
70619 - /*
70620 - * If this is a GET, but not NOMAX, it means that the extra
70621 - * data is not bounded by userspace, but by max_tokens. Thus
70622 - * set the length to max_tokens. This matches the extra data
70623 - * allocation.
70624 - * The driver should fill it with the number of tokens it
70625 - * provided, and it may check iwp->length rather than having
70626 - * knowledge of max_tokens. If the driver doesn't change the
70627 - * iwp->length, this ioctl just copies back max_token tokens
70628 - * filled with zeroes. Hopefully the driver isn't claiming
70629 - * them to be valid data.
70630 - */
70631 - iwp->length = descr->max_tokens;
70632 - }
70633 -
70634 err = handler(dev, info, (union iwreq_data *) iwp, extra);
70635
70636 iwp->length += essid_compat;
70637 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_policy.c linux-2.6.32.42/net/xfrm/xfrm_policy.c
70638 --- linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
70639 +++ linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
70640 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
70641 hlist_add_head(&policy->bydst, chain);
70642 xfrm_pol_hold(policy);
70643 net->xfrm.policy_count[dir]++;
70644 - atomic_inc(&flow_cache_genid);
70645 + atomic_inc_unchecked(&flow_cache_genid);
70646 if (delpol)
70647 __xfrm_policy_unlink(delpol, dir);
70648 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
70649 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
70650 write_unlock_bh(&xfrm_policy_lock);
70651
70652 if (ret && delete) {
70653 - atomic_inc(&flow_cache_genid);
70654 + atomic_inc_unchecked(&flow_cache_genid);
70655 xfrm_policy_kill(ret);
70656 }
70657 return ret;
70658 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
70659 write_unlock_bh(&xfrm_policy_lock);
70660
70661 if (ret && delete) {
70662 - atomic_inc(&flow_cache_genid);
70663 + atomic_inc_unchecked(&flow_cache_genid);
70664 xfrm_policy_kill(ret);
70665 }
70666 return ret;
70667 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
70668 }
70669
70670 }
70671 - atomic_inc(&flow_cache_genid);
70672 + atomic_inc_unchecked(&flow_cache_genid);
70673 out:
70674 write_unlock_bh(&xfrm_policy_lock);
70675 return err;
70676 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
70677 write_unlock_bh(&xfrm_policy_lock);
70678 if (pol) {
70679 if (dir < XFRM_POLICY_MAX)
70680 - atomic_inc(&flow_cache_genid);
70681 + atomic_inc_unchecked(&flow_cache_genid);
70682 xfrm_policy_kill(pol);
70683 return 0;
70684 }
70685 @@ -1477,7 +1477,7 @@ free_dst:
70686 goto out;
70687 }
70688
70689 -static int inline
70690 +static inline int
70691 xfrm_dst_alloc_copy(void **target, void *src, int size)
70692 {
70693 if (!*target) {
70694 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
70695 return 0;
70696 }
70697
70698 -static int inline
70699 +static inline int
70700 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
70701 {
70702 #ifdef CONFIG_XFRM_SUB_POLICY
70703 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
70704 #endif
70705 }
70706
70707 -static int inline
70708 +static inline int
70709 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
70710 {
70711 #ifdef CONFIG_XFRM_SUB_POLICY
70712 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
70713 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
70714
70715 restart:
70716 - genid = atomic_read(&flow_cache_genid);
70717 + genid = atomic_read_unchecked(&flow_cache_genid);
70718 policy = NULL;
70719 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
70720 pols[pi] = NULL;
70721 @@ -1680,7 +1680,7 @@ restart:
70722 goto error;
70723 }
70724 if (nx == -EAGAIN ||
70725 - genid != atomic_read(&flow_cache_genid)) {
70726 + genid != atomic_read_unchecked(&flow_cache_genid)) {
70727 xfrm_pols_put(pols, npols);
70728 goto restart;
70729 }
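Every atomic_inc()/atomic_read() on flow_cache_genid in the xfrm_policy.c hunks above is switched to the *_unchecked variant. Under the PAX_REFCOUNT option added later in this patch, plain atomic_t operations gain overflow detection; counters whose wraparound is deliberate and harmless, such as this cache generation number, are moved to the unchecked helpers so they are exempt. A minimal sketch of that pattern follows (hypothetical names, not part of the diff, assuming the atomic_unchecked_t helpers this patch adds to the atomic headers):

    #include <linux/types.h>
    #include <asm/atomic.h>     /* atomic_unchecked_t comes from this patch */

    static atomic_unchecked_t example_genid;

    /* bump the generation; wrapping past INT_MAX is harmless here, which is
     * exactly why the unchecked variant is used */
    static void example_invalidate(void)
    {
            atomic_inc_unchecked(&example_genid);
    }

    /* a cached entry is stale if the generation moved since it was snapshotted */
    static bool example_is_stale(int snapshot)
    {
            return snapshot != atomic_read_unchecked(&example_genid);
    }
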
70730 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_user.c linux-2.6.32.42/net/xfrm/xfrm_user.c
70731 --- linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
70732 +++ linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
70733 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
70734 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70735 int i;
70736
70737 + pax_track_stack();
70738 +
70739 if (xp->xfrm_nr == 0)
70740 return 0;
70741
70742 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
70743 int err;
70744 int n = 0;
70745
70746 + pax_track_stack();
70747 +
70748 if (attrs[XFRMA_MIGRATE] == NULL)
70749 return -EINVAL;
70750
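The two xfrm_user.c hunks above only add pax_track_stack() at the top of functions that put large arrays on the stack (the first one visibly declares struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]). The call is the hook used by the PAX_MEMORY_STACKLEAK feature configured later in this section. A minimal sketch of how the annotation is applied (hypothetical function, not part of the diff; pax_track_stack() itself is provided elsewhere in this patch):

    #include <linux/string.h>

    static int example_large_frame(const char *src, size_t len)
    {
            char scratch[1024];             /* large automatic buffer */

            pax_track_stack();              /* let STACKLEAK account for this frame */

            if (len > sizeof(scratch))
                    len = sizeof(scratch);
            memcpy(scratch, src, len);
            return scratch[0];
    }
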
70751 diff -urNp linux-2.6.32.42/samples/kobject/kset-example.c linux-2.6.32.42/samples/kobject/kset-example.c
70752 --- linux-2.6.32.42/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
70753 +++ linux-2.6.32.42/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
70754 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
70755 }
70756
70757 /* Our custom sysfs_ops that we will associate with our ktype later on */
70758 -static struct sysfs_ops foo_sysfs_ops = {
70759 +static const struct sysfs_ops foo_sysfs_ops = {
70760 .show = foo_attr_show,
70761 .store = foo_attr_store,
70762 };
70763 diff -urNp linux-2.6.32.42/scripts/basic/fixdep.c linux-2.6.32.42/scripts/basic/fixdep.c
70764 --- linux-2.6.32.42/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
70765 +++ linux-2.6.32.42/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
70766 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
70767
70768 static void parse_config_file(char *map, size_t len)
70769 {
70770 - int *end = (int *) (map + len);
70771 + unsigned int *end = (unsigned int *) (map + len);
70772 /* start at +1, so that p can never be < map */
70773 - int *m = (int *) map + 1;
70774 + unsigned int *m = (unsigned int *) map + 1;
70775 char *p, *q;
70776
70777 for (; m < end; m++) {
70778 @@ -371,7 +371,7 @@ static void print_deps(void)
70779 static void traps(void)
70780 {
70781 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70782 - int *p = (int *)test;
70783 + unsigned int *p = (unsigned int *)test;
70784
70785 if (*p != INT_CONF) {
70786 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
70787 diff -urNp linux-2.6.32.42/scripts/Makefile.build linux-2.6.32.42/scripts/Makefile.build
70788 --- linux-2.6.32.42/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
70789 +++ linux-2.6.32.42/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
70790 @@ -59,7 +59,7 @@ endif
70791 endif
70792
70793 # Do not include host rules unless needed
70794 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70795 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70796 include scripts/Makefile.host
70797 endif
70798
70799 diff -urNp linux-2.6.32.42/scripts/Makefile.clean linux-2.6.32.42/scripts/Makefile.clean
70800 --- linux-2.6.32.42/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
70801 +++ linux-2.6.32.42/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
70802 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
70803 __clean-files := $(extra-y) $(always) \
70804 $(targets) $(clean-files) \
70805 $(host-progs) \
70806 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70807 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70808 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70809
70810 # as clean-files is given relative to the current directory, this adds
70811 # a $(obj) prefix, except for absolute paths
70812 diff -urNp linux-2.6.32.42/scripts/Makefile.host linux-2.6.32.42/scripts/Makefile.host
70813 --- linux-2.6.32.42/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
70814 +++ linux-2.6.32.42/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
70815 @@ -31,6 +31,7 @@
70816 # Note: Shared libraries consisting of C++ files are not supported
70817
70818 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70819 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70820
70821 # C code
70822 # Executables compiled from a single .c file
70823 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
70824 # Shared libaries (only .c supported)
70825 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70826 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
70827 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
70828 # Remove .so files from "xxx-objs"
70829 host-cobjs := $(filter-out %.so,$(host-cobjs))
70830
70831 diff -urNp linux-2.6.32.42/scripts/mod/file2alias.c linux-2.6.32.42/scripts/mod/file2alias.c
70832 --- linux-2.6.32.42/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
70833 +++ linux-2.6.32.42/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
70834 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70835 unsigned long size, unsigned long id_size,
70836 void *symval)
70837 {
70838 - int i;
70839 + unsigned int i;
70840
70841 if (size % id_size || size < id_size) {
70842 if (cross_build != 0)
70843 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70844 /* USB is special because the bcdDevice can be matched against a numeric range */
70845 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70846 static void do_usb_entry(struct usb_device_id *id,
70847 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70848 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70849 unsigned char range_lo, unsigned char range_hi,
70850 struct module *mod)
70851 {
70852 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
70853 for (i = 0; i < count; i++) {
70854 const char *id = (char *)devs[i].id;
70855 char acpi_id[sizeof(devs[0].id)];
70856 - int j;
70857 + unsigned int j;
70858
70859 buf_printf(&mod->dev_table_buf,
70860 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70861 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
70862
70863 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70864 const char *id = (char *)card->devs[j].id;
70865 - int i2, j2;
70866 + unsigned int i2, j2;
70867 int dup = 0;
70868
70869 if (!id[0])
70870 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
70871 /* add an individual alias for every device entry */
70872 if (!dup) {
70873 char acpi_id[sizeof(card->devs[0].id)];
70874 - int k;
70875 + unsigned int k;
70876
70877 buf_printf(&mod->dev_table_buf,
70878 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70879 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
70880 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70881 char *alias)
70882 {
70883 - int i, j;
70884 + unsigned int i, j;
70885
70886 sprintf(alias, "dmi*");
70887
70888 diff -urNp linux-2.6.32.42/scripts/mod/modpost.c linux-2.6.32.42/scripts/mod/modpost.c
70889 --- linux-2.6.32.42/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
70890 +++ linux-2.6.32.42/scripts/mod/modpost.c 2011-04-17 15:56:46.000000000 -0400
70891 @@ -835,6 +835,7 @@ enum mismatch {
70892 INIT_TO_EXIT,
70893 EXIT_TO_INIT,
70894 EXPORT_TO_INIT_EXIT,
70895 + DATA_TO_TEXT
70896 };
70897
70898 struct sectioncheck {
70899 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
70900 .fromsec = { "__ksymtab*", NULL },
70901 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70902 .mismatch = EXPORT_TO_INIT_EXIT
70903 +},
70904 +/* Do not reference code from writable data */
70905 +{
70906 + .fromsec = { DATA_SECTIONS, NULL },
70907 + .tosec = { TEXT_SECTIONS, NULL },
70908 + .mismatch = DATA_TO_TEXT
70909 }
70910 };
70911
70912 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
70913 continue;
70914 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70915 continue;
70916 - if (sym->st_value == addr)
70917 - return sym;
70918 /* Find a symbol nearby - addr are maybe negative */
70919 d = sym->st_value - addr;
70920 + if (d == 0)
70921 + return sym;
70922 if (d < 0)
70923 d = addr - sym->st_value;
70924 if (d < distance) {
70925 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
70926 "Fix this by removing the %sannotation of %s "
70927 "or drop the export.\n",
70928 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
70929 + case DATA_TO_TEXT:
70930 +/*
70931 + fprintf(stderr,
70932 + "The variable %s references\n"
70933 + "the %s %s%s%s\n",
70934 + fromsym, to, sec2annotation(tosec), tosym, to_p);
70935 +*/
70936 + break;
70937 case NO_MISMATCH:
70938 /* To get warnings on missing members */
70939 break;
70940 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
70941 va_end(ap);
70942 }
70943
70944 -void buf_write(struct buffer *buf, const char *s, int len)
70945 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
70946 {
70947 if (buf->size - buf->pos < len) {
70948 buf->size += len + SZ;
70949 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
70950 if (fstat(fileno(file), &st) < 0)
70951 goto close_write;
70952
70953 - if (st.st_size != b->pos)
70954 + if (st.st_size != (off_t)b->pos)
70955 goto close_write;
70956
70957 tmp = NOFAIL(malloc(b->pos));
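The new section-check entry above makes modpost treat references from writable data sections into text sections as a DATA_TO_TEXT mismatch (the report itself is stubbed out for now). The construct it is aimed at is a non-const table of function pointers that ends up in .data rather than .rodata. An illustrative sketch, not part of the diff:

    static void example_handler(void)
    {
    }

    /* writable table in .data that points into .text: the kind of reference
     * the new DATA_TO_TEXT rule is meant to flag */
    static void (*example_table[])(void) = {
            example_handler,
    };

    /* the const equivalent lands in .rodata and is not affected */
    static void (* const example_table_const[])(void) = {
            example_handler,
    };
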
70958 diff -urNp linux-2.6.32.42/scripts/mod/modpost.h linux-2.6.32.42/scripts/mod/modpost.h
70959 --- linux-2.6.32.42/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
70960 +++ linux-2.6.32.42/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
70961 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
70962
70963 struct buffer {
70964 char *p;
70965 - int pos;
70966 - int size;
70967 + unsigned int pos;
70968 + unsigned int size;
70969 };
70970
70971 void __attribute__((format(printf, 2, 3)))
70972 buf_printf(struct buffer *buf, const char *fmt, ...);
70973
70974 void
70975 -buf_write(struct buffer *buf, const char *s, int len);
70976 +buf_write(struct buffer *buf, const char *s, unsigned int len);
70977
70978 struct module {
70979 struct module *next;
70980 diff -urNp linux-2.6.32.42/scripts/mod/sumversion.c linux-2.6.32.42/scripts/mod/sumversion.c
70981 --- linux-2.6.32.42/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
70982 +++ linux-2.6.32.42/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
70983 @@ -455,7 +455,7 @@ static void write_version(const char *fi
70984 goto out;
70985 }
70986
70987 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
70988 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
70989 warn("writing sum in %s failed: %s\n",
70990 filename, strerror(errno));
70991 goto out;
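The fixdep, file2alias, modpost and sumversion changes above share one theme: sizes, loop indices and buffer positions move from int to unsigned int, and comparisons against unsigned values gain explicit casts (e.g. (ssize_t)strlen(sum)+1 and (off_t)b->pos). Mixing signedness in a comparison silently promotes the signed operand to unsigned, which is a classic source of warnings and subtle bugs in host tools. A small standalone illustration, not part of the diff:

    #include <stdio.h>

    int main(void)
    {
            int n = -1;                     /* e.g. an error return */
            unsigned int limit = 1;

            /* n is converted to unsigned here, so -1 becomes UINT_MAX and the
             * "obvious" range check does the wrong thing */
            if (n < limit)
                    puts("never reached");
            else
                    puts("-1 compared as unsigned is larger than 1");

            /* keeping both operands in the same signedness, as the patch does
             * with its explicit casts, restores the expected result */
            if (n < (int)limit)
                    puts("-1 compared as signed is smaller than 1");

            return 0;
    }
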
70992 diff -urNp linux-2.6.32.42/scripts/pnmtologo.c linux-2.6.32.42/scripts/pnmtologo.c
70993 --- linux-2.6.32.42/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
70994 +++ linux-2.6.32.42/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
70995 @@ -237,14 +237,14 @@ static void write_header(void)
70996 fprintf(out, " * Linux logo %s\n", logoname);
70997 fputs(" */\n\n", out);
70998 fputs("#include <linux/linux_logo.h>\n\n", out);
70999 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
71000 + fprintf(out, "static unsigned char %s_data[] = {\n",
71001 logoname);
71002 }
71003
71004 static void write_footer(void)
71005 {
71006 fputs("\n};\n\n", out);
71007 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
71008 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
71009 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
71010 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
71011 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
71012 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
71013 fputs("\n};\n\n", out);
71014
71015 /* write logo clut */
71016 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
71017 + fprintf(out, "static unsigned char %s_clut[] = {\n",
71018 logoname);
71019 write_hex_cnt = 0;
71020 for (i = 0; i < logo_clutsize; i++) {
71021 diff -urNp linux-2.6.32.42/scripts/tags.sh linux-2.6.32.42/scripts/tags.sh
71022 --- linux-2.6.32.42/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
71023 +++ linux-2.6.32.42/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
71024 @@ -93,6 +93,11 @@ docscope()
71025 cscope -b -f cscope.out
71026 }
71027
71028 +dogtags()
71029 +{
71030 + all_sources | gtags -f -
71031 +}
71032 +
71033 exuberant()
71034 {
71035 all_sources | xargs $1 -a \
71036 @@ -164,6 +169,10 @@ case "$1" in
71037 docscope
71038 ;;
71039
71040 + "gtags")
71041 + dogtags
71042 + ;;
71043 +
71044 "tags")
71045 rm -f tags
71046 xtags ctags
71047 diff -urNp linux-2.6.32.42/security/capability.c linux-2.6.32.42/security/capability.c
71048 --- linux-2.6.32.42/security/capability.c 2011-03-27 14:31:47.000000000 -0400
71049 +++ linux-2.6.32.42/security/capability.c 2011-04-17 15:56:46.000000000 -0400
71050 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
71051 }
71052 #endif /* CONFIG_AUDIT */
71053
71054 -struct security_operations default_security_ops = {
71055 +struct security_operations default_security_ops __read_only = {
71056 .name = "default",
71057 };
71058
71059 diff -urNp linux-2.6.32.42/security/commoncap.c linux-2.6.32.42/security/commoncap.c
71060 --- linux-2.6.32.42/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
71061 +++ linux-2.6.32.42/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
71062 @@ -27,7 +27,7 @@
71063 #include <linux/sched.h>
71064 #include <linux/prctl.h>
71065 #include <linux/securebits.h>
71066 -
71067 +#include <net/sock.h>
71068 /*
71069 * If a non-root user executes a setuid-root binary in
71070 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
71071 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
71072 }
71073 }
71074
71075 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
71076 +
71077 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
71078 {
71079 - NETLINK_CB(skb).eff_cap = current_cap();
71080 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
71081 return 0;
71082 }
71083
71084 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
71085 {
71086 const struct cred *cred = current_cred();
71087
71088 + if (gr_acl_enable_at_secure())
71089 + return 1;
71090 +
71091 if (cred->uid != 0) {
71092 if (bprm->cap_effective)
71093 return 1;
71094 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_api.c linux-2.6.32.42/security/integrity/ima/ima_api.c
71095 --- linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
71096 +++ linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
71097 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
71098 int result;
71099
71100 /* can overflow, only indicator */
71101 - atomic_long_inc(&ima_htable.violations);
71102 + atomic_long_inc_unchecked(&ima_htable.violations);
71103
71104 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
71105 if (!entry) {
71106 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_fs.c linux-2.6.32.42/security/integrity/ima/ima_fs.c
71107 --- linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
71108 +++ linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
71109 @@ -27,12 +27,12 @@
71110 static int valid_policy = 1;
71111 #define TMPBUFLEN 12
71112 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
71113 - loff_t *ppos, atomic_long_t *val)
71114 + loff_t *ppos, atomic_long_unchecked_t *val)
71115 {
71116 char tmpbuf[TMPBUFLEN];
71117 ssize_t len;
71118
71119 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
71120 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
71121 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
71122 }
71123
71124 diff -urNp linux-2.6.32.42/security/integrity/ima/ima.h linux-2.6.32.42/security/integrity/ima/ima.h
71125 --- linux-2.6.32.42/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
71126 +++ linux-2.6.32.42/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
71127 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
71128 extern spinlock_t ima_queue_lock;
71129
71130 struct ima_h_table {
71131 - atomic_long_t len; /* number of stored measurements in the list */
71132 - atomic_long_t violations;
71133 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
71134 + atomic_long_unchecked_t violations;
71135 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
71136 };
71137 extern struct ima_h_table ima_htable;
71138 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_queue.c linux-2.6.32.42/security/integrity/ima/ima_queue.c
71139 --- linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
71140 +++ linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
71141 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
71142 INIT_LIST_HEAD(&qe->later);
71143 list_add_tail_rcu(&qe->later, &ima_measurements);
71144
71145 - atomic_long_inc(&ima_htable.len);
71146 + atomic_long_inc_unchecked(&ima_htable.len);
71147 key = ima_hash_key(entry->digest);
71148 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
71149 return 0;
71150 diff -urNp linux-2.6.32.42/security/Kconfig linux-2.6.32.42/security/Kconfig
71151 --- linux-2.6.32.42/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
71152 +++ linux-2.6.32.42/security/Kconfig 2011-06-29 20:55:36.000000000 -0400
71153 @@ -4,6 +4,555 @@
71154
71155 menu "Security options"
71156
71157 +source grsecurity/Kconfig
71158 +
71159 +menu "PaX"
71160 +
71161 + config ARCH_TRACK_EXEC_LIMIT
71162 + bool
71163 +
71164 + config PAX_PER_CPU_PGD
71165 + bool
71166 +
71167 + config TASK_SIZE_MAX_SHIFT
71168 + int
71169 + depends on X86_64
71170 + default 47 if !PAX_PER_CPU_PGD
71171 + default 42 if PAX_PER_CPU_PGD
71172 +
71173 + config PAX_ENABLE_PAE
71174 + bool
71175 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
71176 +
71177 +config PAX
71178 + bool "Enable various PaX features"
71179 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
71180 + help
71181 + This allows you to enable various PaX features. PaX adds
71182 + intrusion prevention mechanisms to the kernel that reduce
71183 + the risks posed by exploitable memory corruption bugs.
71184 +
71185 +menu "PaX Control"
71186 + depends on PAX
71187 +
71188 +config PAX_SOFTMODE
71189 + bool 'Support soft mode'
71190 + select PAX_PT_PAX_FLAGS
71191 + help
71192 + Enabling this option will allow you to run PaX in soft mode, that
71193 + is, PaX features will not be enforced by default, only on executables
71194 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
71195 + is the only way to mark executables for soft mode use.
71196 +
71197 + Soft mode can be activated by using the "pax_softmode=1" kernel command
71198 + line option on boot. Furthermore you can control various PaX features
71199 + at runtime via the entries in /proc/sys/kernel/pax.
71200 +
71201 +config PAX_EI_PAX
71202 + bool 'Use legacy ELF header marking'
71203 + help
71204 + Enabling this option will allow you to control PaX features on
71205 + a per executable basis via the 'chpax' utility available at
71206 + http://pax.grsecurity.net/. The control flags will be read from
71207 + an otherwise reserved part of the ELF header. This marking has
71208 + numerous drawbacks (no support for soft-mode, toolchain does not
71209 + know about the non-standard use of the ELF header) therefore it
71210 + has been deprecated in favour of PT_PAX_FLAGS support.
71211 +
71212 + Note that if you enable PT_PAX_FLAGS marking support as well,
71213 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
71214 +
71215 +config PAX_PT_PAX_FLAGS
71216 + bool 'Use ELF program header marking'
71217 + help
71218 + Enabling this option will allow you to control PaX features on
71219 + a per executable basis via the 'paxctl' utility available at
71220 + http://pax.grsecurity.net/. The control flags will be read from
71221 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
71222 + has the benefits of supporting both soft mode and being fully
71223 + integrated into the toolchain (the binutils patch is available
71224 + from http://pax.grsecurity.net).
71225 +
71226 + If your toolchain does not support PT_PAX_FLAGS markings,
71227 + you can create one in most cases with 'paxctl -C'.
71228 +
71229 + Note that if you enable the legacy EI_PAX marking support as well,
71230 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
71231 +
71232 +choice
71233 + prompt 'MAC system integration'
71234 + default PAX_HAVE_ACL_FLAGS
71235 + help
71236 + Mandatory Access Control systems have the option of controlling
71237 + PaX flags on a per executable basis, choose the method supported
71238 + by your particular system.
71239 +
71240 + - "none": if your MAC system does not interact with PaX,
71241 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
71242 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
71243 +
71244 + NOTE: this option is for developers/integrators only.
71245 +
71246 + config PAX_NO_ACL_FLAGS
71247 + bool 'none'
71248 +
71249 + config PAX_HAVE_ACL_FLAGS
71250 + bool 'direct'
71251 +
71252 + config PAX_HOOK_ACL_FLAGS
71253 + bool 'hook'
71254 +endchoice
71255 +
71256 +endmenu
71257 +
71258 +menu "Non-executable pages"
71259 + depends on PAX
71260 +
71261 +config PAX_NOEXEC
71262 + bool "Enforce non-executable pages"
71263 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
71264 + help
71265 + By design some architectures do not allow for protecting memory
71266 + pages against execution or even if they do, Linux does not make
71267 + use of this feature. In practice this means that if a page is
71268 + readable (such as the stack or heap) it is also executable.
71269 +
71270 + There is a well known exploit technique that makes use of this
71271 + fact and a common programming mistake where an attacker can
71272 + introduce code of his choice somewhere in the attacked program's
71273 + memory (typically the stack or the heap) and then execute it.
71274 +
71275 + If the attacked program was running with different (typically
71276 + higher) privileges than that of the attacker, then he can elevate
71277 + his own privilege level (e.g. get a root shell, write to files for
71278 + which he does not have write access to, etc).
71279 +
71280 + Enabling this option will let you choose from various features
71281 + that prevent the injection and execution of 'foreign' code in
71282 + a program.
71283 +
71284 + This will also break programs that rely on the old behaviour and
71285 + expect that dynamically allocated memory via the malloc() family
71286 + of functions is executable (which it is not). Notable examples
71287 + are the XFree86 4.x server, the java runtime and wine.
71288 +
71289 +config PAX_PAGEEXEC
71290 + bool "Paging based non-executable pages"
71291 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
71292 + select S390_SWITCH_AMODE if S390
71293 + select S390_EXEC_PROTECT if S390
71294 + select ARCH_TRACK_EXEC_LIMIT if X86_32
71295 + help
71296 + This implementation is based on the paging feature of the CPU.
71297 + On i386 without hardware non-executable bit support there is a
71298 + variable but usually low performance impact, however on Intel's
71299 + P4 core based CPUs it is very high so you should not enable this
71300 + for kernels meant to be used on such CPUs.
71301 +
71302 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
71303 + with hardware non-executable bit support there is no performance
71304 + impact, on ppc the impact is negligible.
71305 +
71306 + Note that several architectures require various emulations due to
71307 + badly designed userland ABIs, this will cause a performance impact
71308 + but will disappear as soon as userland is fixed. For example, ppc
71309 + userland MUST have been built with secure-plt by a recent toolchain.
71310 +
71311 +config PAX_SEGMEXEC
71312 + bool "Segmentation based non-executable pages"
71313 + depends on PAX_NOEXEC && X86_32
71314 + help
71315 + This implementation is based on the segmentation feature of the
71316 + CPU and has a very small performance impact, however applications
71317 + will be limited to a 1.5 GB address space instead of the normal
71318 + 3 GB.
71319 +
71320 +config PAX_EMUTRAMP
71321 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
71322 + default y if PARISC
71323 + help
71324 + There are some programs and libraries that for one reason or
71325 + another attempt to execute special small code snippets from
71326 + non-executable memory pages. Most notable examples are the
71327 + signal handler return code generated by the kernel itself and
71328 + the GCC trampolines.
71329 +
71330 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
71331 + such programs will no longer work under your kernel.
71332 +
71333 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
71334 + utilities to enable trampoline emulation for the affected programs
71335 + yet still have the protection provided by the non-executable pages.
71336 +
71337 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
71338 + your system will not even boot.
71339 +
71340 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
71341 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
71342 + for the affected files.
71343 +
71344 + NOTE: enabling this feature *may* open up a loophole in the
71345 + protection provided by non-executable pages that an attacker
71346 + could abuse. Therefore the best solution is to not have any
71347 + files on your system that would require this option. This can
71348 + be achieved by not using libc5 (which relies on the kernel
71349 + signal handler return code) and not using or rewriting programs
71350 + that make use of the nested function implementation of GCC.
71351 + Skilled users can just fix GCC itself so that it implements
71352 + nested function calls in a way that does not interfere with PaX.
71353 +
71354 +config PAX_EMUSIGRT
71355 + bool "Automatically emulate sigreturn trampolines"
71356 + depends on PAX_EMUTRAMP && PARISC
71357 + default y
71358 + help
71359 + Enabling this option will have the kernel automatically detect
71360 + and emulate signal return trampolines executing on the stack
71361 + that would otherwise lead to task termination.
71362 +
71363 + This solution is intended as a temporary one for users with
71364 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
71365 + Modula-3 runtime, etc) or executables linked to such, basically
71366 + everything that does not specify its own SA_RESTORER function in
71367 + normal executable memory like glibc 2.1+ does.
71368 +
71369 + On parisc you MUST enable this option, otherwise your system will
71370 + not even boot.
71371 +
71372 + NOTE: this feature cannot be disabled on a per executable basis
71373 + and since it *does* open up a loophole in the protection provided
71374 + by non-executable pages, the best solution is to not have any
71375 + files on your system that would require this option.
71376 +
71377 +config PAX_MPROTECT
71378 + bool "Restrict mprotect()"
71379 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
71380 + help
71381 + Enabling this option will prevent programs from
71382 + - changing the executable status of memory pages that were
71383 + not originally created as executable,
71384 + - making read-only executable pages writable again,
71385 + - creating executable pages from anonymous memory,
71386 + - making read-only-after-relocations (RELRO) data pages writable again.
71387 +
71388 + You should say Y here to complete the protection provided by
71389 + the enforcement of non-executable pages.
71390 +
71391 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71392 + this feature on a per file basis.
71393 +
71394 +config PAX_MPROTECT_COMPAT
71395 + bool "Use legacy/compat protection demoting (read help)"
71396 + depends on PAX_MPROTECT
71397 + default n
71398 + help
71399 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
71400 + by sending the proper error code to the application. For some broken
71401 + userland, this can cause problems with Python or other applications. The
71402 + current implementation however allows for applications like clamav to
71403 + detect if JIT compilation/execution is allowed and to fall back gracefully
71404 + to an interpreter-based mode if it does not. While we encourage everyone
71405 + to use the current implementation as-is and push upstream to fix broken
71406 + userland (note that the RWX logging option can assist with this), in some
71407 + environments this may not be possible. Having to disable MPROTECT
71408 + completely on certain binaries reduces the security benefit of PaX,
71409 + so this option is provided for those environments to revert to the old
71410 + behavior.
71411 +
71412 +config PAX_ELFRELOCS
71413 + bool "Allow ELF text relocations (read help)"
71414 + depends on PAX_MPROTECT
71415 + default n
71416 + help
71417 + Non-executable pages and mprotect() restrictions are effective
71418 + in preventing the introduction of new executable code into an
71419 + attacked task's address space. There remain only two venues
71420 + for this kind of attack: if the attacker can execute already
71421 + existing code in the attacked task then he can either have it
71422 + create and mmap() a file containing his code or have it mmap()
71423 + an already existing ELF library that does not have position
71424 + independent code in it and use mprotect() on it to make it
71425 + writable and copy his code there. While protecting against
71426 + the former approach is beyond PaX, the latter can be prevented
71427 + by having only PIC ELF libraries on one's system (which do not
71428 + need to relocate their code). If you are sure this is your case,
71429 + as is the case with all modern Linux distributions, then leave
71430 + this option disabled. You should say 'n' here.
71431 +
71432 +config PAX_ETEXECRELOCS
71433 + bool "Allow ELF ET_EXEC text relocations"
71434 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
71435 + select PAX_ELFRELOCS
71436 + default y
71437 + help
71438 + On some architectures there are incorrectly created applications
71439 + that require text relocations and would not work without enabling
71440 + this option. If you are an alpha, ia64 or parisc user, you should
71441 + enable this option and disable it once you have made sure that
71442 + none of your applications need it.
71443 +
71444 +config PAX_EMUPLT
71445 + bool "Automatically emulate ELF PLT"
71446 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
71447 + default y
71448 + help
71449 + Enabling this option will have the kernel automatically detect
71450 + and emulate the Procedure Linkage Table entries in ELF files.
71451 + On some architectures such entries are in writable memory, and
71452 + become non-executable leading to task termination. Therefore
71453 + it is mandatory that you enable this option on alpha, parisc,
71454 + sparc and sparc64, otherwise your system would not even boot.
71455 +
71456 + NOTE: this feature *does* open up a loophole in the protection
71457 + provided by the non-executable pages, therefore the proper
71458 + solution is to modify the toolchain to produce a PLT that does
71459 + not need to be writable.
71460 +
71461 +config PAX_DLRESOLVE
71462 + bool 'Emulate old glibc resolver stub'
71463 + depends on PAX_EMUPLT && SPARC
71464 + default n
71465 + help
71466 + This option is needed if userland has an old glibc (before 2.4)
71467 + that puts a 'save' instruction into the runtime generated resolver
71468 + stub that needs special emulation.
71469 +
71470 +config PAX_KERNEXEC
71471 + bool "Enforce non-executable kernel pages"
71472 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
71473 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
71474 + help
71475 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
71476 + that is, enabling this option will make it harder to inject
71477 + and execute 'foreign' code in kernel memory itself.
71478 +
71479 + Note that on x86_64 kernels there is a known regression when
71480 + this feature and KVM/VMX are both enabled in the host kernel.
71481 +
71482 +config PAX_KERNEXEC_MODULE_TEXT
71483 + int "Minimum amount of memory reserved for module code"
71484 + default "4"
71485 + depends on PAX_KERNEXEC && X86_32 && MODULES
71486 + help
71487 + Due to implementation details the kernel must reserve a fixed
71488 + amount of memory for module code at compile time that cannot be
71489 + changed at runtime. Here you can specify the minimum amount
71490 + in MB that will be reserved. Due to the same implementation
71491 + details this size will always be rounded up to the next 2/4 MB
71492 + boundary (depends on PAE) so the actually available memory for
71493 + module code will usually be more than this minimum.
71494 +
71495 + The default 4 MB should be enough for most users but if you have
71496 + an excessive number of modules (e.g., most distribution configs
71497 + compile many drivers as modules) or use huge modules such as
71498 + nvidia's kernel driver, you will need to adjust this amount.
71499 + A good rule of thumb is to look at your currently loaded kernel
71500 + modules and add up their sizes.
71501 +
71502 +endmenu
71503 +
71504 +menu "Address Space Layout Randomization"
71505 + depends on PAX
71506 +
71507 +config PAX_ASLR
71508 + bool "Address Space Layout Randomization"
71509 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
71510 + help
71511 + Many if not most exploit techniques rely on the knowledge of
71512 + certain addresses in the attacked program. The following options
71513 + will allow the kernel to apply a certain amount of randomization
71514 + to specific parts of the program thereby forcing an attacker to
71515 + guess them in most cases. Any failed guess will most likely crash
71516 + the attacked program which allows the kernel to detect such attempts
71517 + and react on them. PaX itself provides no reaction mechanisms,
71518 + instead it is strongly encouraged that you make use of Nergal's
71519 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
71520 + (http://www.grsecurity.net/) built-in crash detection features or
71521 + develop one yourself.
71522 +
71523 + By saying Y here you can choose to randomize the following areas:
71524 + - top of the task's kernel stack
71525 + - top of the task's userland stack
71526 + - base address for mmap() requests that do not specify one
71527 + (this includes all libraries)
71528 + - base address of the main executable
71529 +
71530 + It is strongly recommended to say Y here as address space layout
71531 + randomization has negligible impact on performance yet it provides
71532 + a very effective protection.
71533 +
71534 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71535 + this feature on a per file basis.
71536 +
71537 +config PAX_RANDKSTACK
71538 + bool "Randomize kernel stack base"
71539 + depends on PAX_ASLR && X86_TSC && X86
71540 + help
71541 + By saying Y here the kernel will randomize every task's kernel
71542 + stack on every system call. This will not only force an attacker
71543 + to guess it but also prevent him from making use of possible
71544 + leaked information about it.
71545 +
71546 + Since the kernel stack is a rather scarce resource, randomization
71547 + may cause unexpected stack overflows, therefore you should very
71548 + carefully test your system. Note that once enabled in the kernel
71549 + configuration, this feature cannot be disabled on a per file basis.
71550 +
71551 +config PAX_RANDUSTACK
71552 + bool "Randomize user stack base"
71553 + depends on PAX_ASLR
71554 + help
71555 + By saying Y here the kernel will randomize every task's userland
71556 + stack. The randomization is done in two steps where the second
71557 + one may apply a big amount of shift to the top of the stack and
71558 + cause problems for programs that want to use lots of memory (more
71559 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
71560 + For this reason the second step can be controlled by 'chpax' or
71561 + 'paxctl' on a per file basis.
71562 +
71563 +config PAX_RANDMMAP
71564 + bool "Randomize mmap() base"
71565 + depends on PAX_ASLR
71566 + help
71567 + By saying Y here the kernel will use a randomized base address for
71568 + mmap() requests that do not specify one themselves. As a result
71569 + all dynamically loaded libraries will appear at random addresses
71570 + and therefore be harder to exploit by a technique where an attacker
71571 + attempts to execute library code for his purposes (e.g. spawn a
71572 + shell from an exploited program that is running at an elevated
71573 + privilege level).
71574 +
71575 + Furthermore, if a program is relinked as a dynamic ELF file, its
71576 + base address will be randomized as well, completing the full
71577 + randomization of the address space layout. Attacking such programs
71578 + becomes a guess game. You can find an example of doing this at
71579 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
71580 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
71581 +
71582 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
71583 + feature on a per file basis.
71584 +
71585 +endmenu
71586 +
71587 +menu "Miscellaneous hardening features"
71588 +
71589 +config PAX_MEMORY_SANITIZE
71590 + bool "Sanitize all freed memory"
71591 + help
71592 + By saying Y here the kernel will erase memory pages as soon as they
71593 + are freed. This in turn reduces the lifetime of data stored in the
71594 + pages, making it less likely that sensitive information such as
71595 + passwords, cryptographic secrets, etc stay in memory for too long.
71596 +
71597 + This is especially useful for programs whose runtime is short, long
71598 + lived processes and the kernel itself benefit from this as long as
71599 + they operate on whole memory pages and ensure timely freeing of pages
71600 + that may hold sensitive information.
71601 +
71602 + The tradeoff is performance impact, on a single CPU system kernel
71603 + compilation sees a 3% slowdown, other systems and workloads may vary
71604 + and you are advised to test this feature on your expected workload
71605 + before deploying it.
71606 +
71607 + Note that this feature does not protect data stored in live pages,
71608 + e.g., process memory swapped to disk may stay there for a long time.
71609 +
71610 +config PAX_MEMORY_STACKLEAK
71611 + bool "Sanitize kernel stack"
71612 + depends on X86
71613 + help
71614 + By saying Y here the kernel will erase the kernel stack before it
71615 + returns from a system call. This in turn reduces the information
71616 + that a kernel stack leak bug can reveal.
71617 +
71618 + Note that such a bug can still leak information that was put on
71619 + the stack by the current system call (the one eventually triggering
71620 + the bug) but traces of earlier system calls on the kernel stack
71621 + cannot leak anymore.
71622 +
71623 + The tradeoff is performance impact, on a single CPU system kernel
71624 + compilation sees a 1% slowdown, other systems and workloads may vary
71625 + and you are advised to test this feature on your expected workload
71626 + before deploying it.
71627 +
71628 + Note: full support for this feature requires gcc with plugin support
71629 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
71630 + is not supported). Using older gcc versions means that functions
71631 + with large enough stack frames may leave uninitialized memory behind
71632 + that may be exposed to a later syscall leaking the stack.
71633 +
71634 +config PAX_MEMORY_UDEREF
71635 + bool "Prevent invalid userland pointer dereference"
71636 + depends on X86 && !UML_X86 && !XEN
71637 + select PAX_PER_CPU_PGD if X86_64
71638 + help
71639 + By saying Y here the kernel will be prevented from dereferencing
71640 + userland pointers in contexts where the kernel expects only kernel
71641 + pointers. This is both a useful runtime debugging feature and a
71642 + security measure that prevents exploiting a class of kernel bugs.
71643 +
71644 + The tradeoff is that some virtualization solutions may experience
71645 + a huge slowdown and therefore you should not enable this feature
71646 + for kernels meant to run in such environments. Whether a given VM
71647 + solution is affected or not is best determined by simply trying it
71648 + out, the performance impact will be obvious right on boot as this
71649 + mechanism engages from very early on. A good rule of thumb is that
71650 + VMs running on CPUs without hardware virtualization support (i.e.,
71651 + the majority of IA-32 CPUs) will likely experience the slowdown.
71652 +
71653 +config PAX_REFCOUNT
71654 + bool "Prevent various kernel object reference counter overflows"
71655 + depends on GRKERNSEC && (X86 || SPARC64)
71656 + help
71657 + By saying Y here the kernel will detect and prevent overflowing
71658 + various (but not all) kinds of object reference counters. Such
71659 + overflows can normally occur due to bugs only and are often, if
71660 + not always, exploitable.
71661 +
71662 + The tradeoff is that data structures protected by an overflowed
71663 + refcount will never be freed and therefore will leak memory. Note
71664 + that this leak also happens even without this protection but in
71665 + that case the overflow can eventually trigger the freeing of the
71666 + data structure while it is still being used elsewhere, resulting
71667 + in the exploitable situation that this feature prevents.
71668 +
71669 + Since this has a negligible performance impact, you should enable
71670 + this feature.
71671 +
71672 +config PAX_USERCOPY
71673 + bool "Harden heap object copies between kernel and userland"
71674 + depends on X86 || PPC || SPARC || ARM
71675 + depends on GRKERNSEC && (SLAB || SLUB)
71676 + help
71677 + By saying Y here the kernel will enforce the size of heap objects
71678 + when they are copied in either direction between the kernel and
71679 + userland, even if only a part of the heap object is copied.
71680 +
71681 + Specifically, this checking prevents information leaking from the
71682 + kernel heap during kernel to userland copies (if the kernel heap
71683 + object is otherwise fully initialized) and prevents kernel heap
71684 + overflows during userland to kernel copies.
71685 +
71686 + Note that the current implementation provides the strictest bounds
71687 + checks for the SLUB allocator.
71688 +
71689 + Enabling this option also enables per-slab cache protection against
71690 + data in a given cache being copied into/out of via userland
71691 + accessors. Though the whitelist of regions will be reduced over
71692 + time, it notably protects important data structures like task structs.
71693 +
71694 +
71695 + If frame pointers are enabled on x86, this option will also
71696 + restrict copies into and out of the kernel stack to local variables
71697 + within a single frame.
71698 +
71699 + Since this has a negligible performance impact, you should enable
71700 + this feature.
71701 +
71702 +endmenu
71703 +
71704 +endmenu
71705 +
71706 config KEYS
71707 bool "Enable access key retention support"
71708 help
71709 @@ -146,7 +695,7 @@ config INTEL_TXT
71710 config LSM_MMAP_MIN_ADDR
71711 int "Low address space for LSM to protect from user allocation"
71712 depends on SECURITY && SECURITY_SELINUX
71713 - default 65536
71714 + default 32768
71715 help
71716 This is the portion of low virtual memory which should be protected
71717 from userspace allocation. Keeping a user from writing to low pages
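The PAX_MPROTECT help text above describes the restrictions in terms of what programs may no longer do; from userspace, the most visible effect is that turning anonymous, originally non-executable memory into executable memory fails. A small standalone illustration of that behaviour (not part of the diff; the exact errno may vary):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;

            /* creating executable pages from anonymous memory is one of the
             * operations the help text says MPROTECT forbids */
            if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0)
                    printf("mprotect denied: %s\n", strerror(errno));
            else
                    printf("mprotect allowed (MPROTECT not enforced for this binary)\n");

            munmap(p, len);
            return 0;
    }
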
71718 diff -urNp linux-2.6.32.42/security/keys/keyring.c linux-2.6.32.42/security/keys/keyring.c
71719 --- linux-2.6.32.42/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
71720 +++ linux-2.6.32.42/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
71721 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
71722 ret = -EFAULT;
71723
71724 for (loop = 0; loop < klist->nkeys; loop++) {
71725 + key_serial_t serial;
71726 key = klist->keys[loop];
71727 + serial = key->serial;
71728
71729 tmp = sizeof(key_serial_t);
71730 if (tmp > buflen)
71731 tmp = buflen;
71732
71733 - if (copy_to_user(buffer,
71734 - &key->serial,
71735 - tmp) != 0)
71736 + if (copy_to_user(buffer, &serial, tmp))
71737 goto error;
71738
71739 buflen -= tmp;
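The keyring_read() change above stops copying the serial number straight out of the struct key and instead snapshots it into a stack variable, then hands that local to copy_to_user(). This matches the bounce-buffer pattern used throughout the patch, presumably so the PAX_USERCOPY heap-object checks see a small stack object rather than a pointer into a slab-allocated structure. A minimal sketch of the pattern (hypothetical names, not part of the diff):

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    struct example_obj {
            int serial;
            /* ... further slab-resident fields ... */
    };

    static long example_read(const struct example_obj *obj,
                             char __user *buffer, size_t buflen)
    {
            int serial = obj->serial;               /* snapshot onto the stack */
            size_t tmp = sizeof(serial);

            if (tmp > buflen)
                    tmp = buflen;

            if (copy_to_user(buffer, &serial, tmp))
                    return -EFAULT;

            return tmp;
    }
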
71740 diff -urNp linux-2.6.32.42/security/min_addr.c linux-2.6.32.42/security/min_addr.c
71741 --- linux-2.6.32.42/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
71742 +++ linux-2.6.32.42/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
71743 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
71744 */
71745 static void update_mmap_min_addr(void)
71746 {
71747 +#ifndef SPARC
71748 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
71749 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
71750 mmap_min_addr = dac_mmap_min_addr;
71751 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71752 #else
71753 mmap_min_addr = dac_mmap_min_addr;
71754 #endif
71755 +#endif
71756 }
71757
71758 /*
71759 diff -urNp linux-2.6.32.42/security/root_plug.c linux-2.6.32.42/security/root_plug.c
71760 --- linux-2.6.32.42/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
71761 +++ linux-2.6.32.42/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
71762 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
71763 return 0;
71764 }
71765
71766 -static struct security_operations rootplug_security_ops = {
71767 +static struct security_operations rootplug_security_ops __read_only = {
71768 .bprm_check_security = rootplug_bprm_check_security,
71769 };
71770
71771 diff -urNp linux-2.6.32.42/security/security.c linux-2.6.32.42/security/security.c
71772 --- linux-2.6.32.42/security/security.c 2011-03-27 14:31:47.000000000 -0400
71773 +++ linux-2.6.32.42/security/security.c 2011-04-17 15:56:46.000000000 -0400
71774 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
71775 extern struct security_operations default_security_ops;
71776 extern void security_fixup_ops(struct security_operations *ops);
71777
71778 -struct security_operations *security_ops; /* Initialized to NULL */
71779 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
71780
71781 static inline int verify(struct security_operations *ops)
71782 {
71783 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
71784 * If there is already a security module registered with the kernel,
71785 * an error will be returned. Otherwise %0 is returned on success.
71786 */
71787 -int register_security(struct security_operations *ops)
71788 +int __init register_security(struct security_operations *ops)
71789 {
71790 if (verify(ops)) {
71791 printk(KERN_DEBUG "%s could not verify "
71792 diff -urNp linux-2.6.32.42/security/selinux/hooks.c linux-2.6.32.42/security/selinux/hooks.c
71793 --- linux-2.6.32.42/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
71794 +++ linux-2.6.32.42/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
71795 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
71796 * Minimal support for a secondary security module,
71797 * just to allow the use of the capability module.
71798 */
71799 -static struct security_operations *secondary_ops;
71800 +static struct security_operations *secondary_ops __read_only;
71801
71802 /* Lists of inode and superblock security structures initialized
71803 before the policy was loaded. */
71804 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
71805
71806 #endif
71807
71808 -static struct security_operations selinux_ops = {
71809 +static struct security_operations selinux_ops __read_only = {
71810 .name = "selinux",
71811
71812 .ptrace_access_check = selinux_ptrace_access_check,
71813 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
71814 avc_disable();
71815
71816 /* Reset security_ops to the secondary module, dummy or capability. */
71817 + pax_open_kernel();
71818 security_ops = secondary_ops;
71819 + pax_close_kernel();
71820
71821 /* Unregister netfilter hooks. */
71822 selinux_nf_ip_exit();
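Throughout the security/ changes above, security_operations structures and pointers are marked __read_only, and the one legitimate runtime write left in selinux_disable() is bracketed with pax_open_kernel()/pax_close_kernel(). All three helpers come from other parts of this patch (the KERNEXEC constification support). A minimal sketch of the pattern (hypothetical names, not part of the diff):

    #include <linux/security.h>

    /* the ops pointer lives in read-only memory once the kernel is up */
    static struct security_operations *example_ops __read_only;

    static void example_switch_ops(struct security_operations *new_ops)
    {
            /* briefly lift the write protection for the intended update only */
            pax_open_kernel();
            example_ops = new_ops;
            pax_close_kernel();
    }
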
71823 diff -urNp linux-2.6.32.42/security/selinux/include/xfrm.h linux-2.6.32.42/security/selinux/include/xfrm.h
71824 --- linux-2.6.32.42/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
71825 +++ linux-2.6.32.42/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
71826 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71827
71828 static inline void selinux_xfrm_notify_policyload(void)
71829 {
71830 - atomic_inc(&flow_cache_genid);
71831 + atomic_inc_unchecked(&flow_cache_genid);
71832 }
71833 #else
71834 static inline int selinux_xfrm_enabled(void)
71835 diff -urNp linux-2.6.32.42/security/selinux/ss/services.c linux-2.6.32.42/security/selinux/ss/services.c
71836 --- linux-2.6.32.42/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
71837 +++ linux-2.6.32.42/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
71838 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
71839 int rc = 0;
71840 struct policy_file file = { data, len }, *fp = &file;
71841
71842 + pax_track_stack();
71843 +
71844 if (!ss_initialized) {
71845 avtab_cache_init();
71846 if (policydb_read(&policydb, fp)) {
71847 diff -urNp linux-2.6.32.42/security/smack/smack_lsm.c linux-2.6.32.42/security/smack/smack_lsm.c
71848 --- linux-2.6.32.42/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
71849 +++ linux-2.6.32.42/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
71850 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
71851 return 0;
71852 }
71853
71854 -struct security_operations smack_ops = {
71855 +struct security_operations smack_ops __read_only = {
71856 .name = "smack",
71857
71858 .ptrace_access_check = smack_ptrace_access_check,
71859 diff -urNp linux-2.6.32.42/security/tomoyo/tomoyo.c linux-2.6.32.42/security/tomoyo/tomoyo.c
71860 --- linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
71861 +++ linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
71862 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
71863 * tomoyo_security_ops is a "struct security_operations" which is used for
71864 * registering TOMOYO.
71865 */
71866 -static struct security_operations tomoyo_security_ops = {
71867 +static struct security_operations tomoyo_security_ops __read_only = {
71868 .name = "tomoyo",
71869 .cred_alloc_blank = tomoyo_cred_alloc_blank,
71870 .cred_prepare = tomoyo_cred_prepare,
71871 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.c linux-2.6.32.42/sound/aoa/codecs/onyx.c
71872 --- linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
71873 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
71874 @@ -53,7 +53,7 @@ struct onyx {
71875 spdif_locked:1,
71876 analog_locked:1,
71877 original_mute:2;
71878 - int open_count;
71879 + local_t open_count;
71880 struct codec_info *codec_info;
71881
71882 /* mutex serializes concurrent access to the device
71883 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
71884 struct onyx *onyx = cii->codec_data;
71885
71886 mutex_lock(&onyx->mutex);
71887 - onyx->open_count++;
71888 + local_inc(&onyx->open_count);
71889 mutex_unlock(&onyx->mutex);
71890
71891 return 0;
71892 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
71893 struct onyx *onyx = cii->codec_data;
71894
71895 mutex_lock(&onyx->mutex);
71896 - onyx->open_count--;
71897 - if (!onyx->open_count)
71898 + if (local_dec_and_test(&onyx->open_count))
71899 onyx->spdif_locked = onyx->analog_locked = 0;
71900 mutex_unlock(&onyx->mutex);
71901
71902 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.h linux-2.6.32.42/sound/aoa/codecs/onyx.h
71903 --- linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
71904 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
71905 @@ -11,6 +11,7 @@
71906 #include <linux/i2c.h>
71907 #include <asm/pmac_low_i2c.h>
71908 #include <asm/prom.h>
71909 +#include <asm/local.h>
71910
71911 /* PCM3052 register definitions */
71912
71913 diff -urNp linux-2.6.32.42/sound/drivers/mts64.c linux-2.6.32.42/sound/drivers/mts64.c
71914 --- linux-2.6.32.42/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
71915 +++ linux-2.6.32.42/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
71916 @@ -27,6 +27,7 @@
71917 #include <sound/initval.h>
71918 #include <sound/rawmidi.h>
71919 #include <sound/control.h>
71920 +#include <asm/local.h>
71921
71922 #define CARD_NAME "Miditerminal 4140"
71923 #define DRIVER_NAME "MTS64"
71924 @@ -65,7 +66,7 @@ struct mts64 {
71925 struct pardevice *pardev;
71926 int pardev_claimed;
71927
71928 - int open_count;
71929 + local_t open_count;
71930 int current_midi_output_port;
71931 int current_midi_input_port;
71932 u8 mode[MTS64_NUM_INPUT_PORTS];
71933 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
71934 {
71935 struct mts64 *mts = substream->rmidi->private_data;
71936
71937 - if (mts->open_count == 0) {
71938 + if (local_read(&mts->open_count) == 0) {
71939 /* We don't need a spinlock here, because this is just called
71940 if the device has not been opened before.
71941 So there aren't any IRQs from the device */
71942 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
71943
71944 msleep(50);
71945 }
71946 - ++(mts->open_count);
71947 + local_inc(&mts->open_count);
71948
71949 return 0;
71950 }
71951 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
71952 struct mts64 *mts = substream->rmidi->private_data;
71953 unsigned long flags;
71954
71955 - --(mts->open_count);
71956 - if (mts->open_count == 0) {
71957 + if (local_dec_return(&mts->open_count) == 0) {
71958 /* We need the spinlock_irqsave here because we can still
71959 have IRQs at this point */
71960 spin_lock_irqsave(&mts->lock, flags);
71961 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
71962
71963 msleep(500);
71964
71965 - } else if (mts->open_count < 0)
71966 - mts->open_count = 0;
71967 + } else if (local_read(&mts->open_count) < 0)
71968 + local_set(&mts->open_count, 0);
71969
71970 return 0;
71971 }
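In the onyx and mts64 drivers above (and portman2x4 below), the plain int open_count is replaced with a local_t from asm/local.h, manipulated with local_inc(), local_dec_and_test(), local_dec_return(), local_read() and local_set(). A minimal sketch of that counter API (hypothetical driver snippet, not part of the diff):

    #include <asm/local.h>

    static local_t example_open_count;

    static void example_open(void)
    {
            local_inc(&example_open_count);
    }

    static void example_close(void)
    {
            /* local_dec_and_test() returns true when the count hits zero */
            if (local_dec_and_test(&example_open_count)) {
                    /* last opener gone: release shared state here */
            } else if (local_read(&example_open_count) < 0) {
                    /* clamp an unexpected underflow, as mts64 does */
                    local_set(&example_open_count, 0);
            }
    }
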
71972 diff -urNp linux-2.6.32.42/sound/drivers/portman2x4.c linux-2.6.32.42/sound/drivers/portman2x4.c
71973 --- linux-2.6.32.42/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
71974 +++ linux-2.6.32.42/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
71975 @@ -46,6 +46,7 @@
71976 #include <sound/initval.h>
71977 #include <sound/rawmidi.h>
71978 #include <sound/control.h>
71979 +#include <asm/local.h>
71980
71981 #define CARD_NAME "Portman 2x4"
71982 #define DRIVER_NAME "portman"
71983 @@ -83,7 +84,7 @@ struct portman {
71984 struct pardevice *pardev;
71985 int pardev_claimed;
71986
71987 - int open_count;
71988 + local_t open_count;
71989 int mode[PORTMAN_NUM_INPUT_PORTS];
71990 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
71991 };
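
The hunks above (onyx, mts64 and portman2x4) convert plain `int open_count` fields to `local_t` and switch the ++/-- updates to the local_* helpers from <asm/local.h>. A minimal sketch of the resulting open/close pattern, using a hypothetical driver struct rather than the real ones, could look like this:

#include <asm/local.h>

struct demo_midi {                      /* hypothetical driver state, for illustration only */
        local_t open_count;
};

static int demo_open(struct demo_midi *m)
{
        if (local_read(&m->open_count) == 0) {
                /* first opener: bring the hardware up here */
        }
        local_inc(&m->open_count);
        return 0;
}

static int demo_close(struct demo_midi *m)
{
        if (local_dec_return(&m->open_count) == 0) {
                /* last closer: quiesce and power the hardware down */
        } else if (local_read(&m->open_count) < 0)
                local_set(&m->open_count, 0);   /* defensively clamp an underflow */
        return 0;
}

local_dec_return() returns the new value, so the last closer (result 0) is the one that shuts the hardware down, mirroring what the mts64 hunk does.
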
71992 diff -urNp linux-2.6.32.42/sound/oss/sb_audio.c linux-2.6.32.42/sound/oss/sb_audio.c
71993 --- linux-2.6.32.42/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
71994 +++ linux-2.6.32.42/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
71995 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
71996 buf16 = (signed short *)(localbuf + localoffs);
71997 while (c)
71998 {
71999 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72000 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72001 if (copy_from_user(lbuf8,
72002 userbuf+useroffs + p,
72003 locallen))
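
The sb_audio change only adds a cast, but it closes a signedness hole: with a plain signed compare a negative `c` fails the `>= LBUFCOPYSIZE` test and would be used directly as the copy length, whereas the unsigned compare clamps it to LBUFCOPYSIZE. A small self-contained illustration with a hypothetical value for c:

#include <stdio.h>

#define LBUFCOPYSIZE 1024

int main(void)
{
        int c = -4;     /* hypothetical bad byte count */

        /* signed compare: -4 >= 1024 is false, so -4 would be used as the length */
        int len_signed   = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);

        /* unsigned compare: (unsigned)-4 is huge, so the length is clamped */
        int len_unsigned = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);

        printf("%d %d\n", len_signed, len_unsigned);    /* prints: -4 1024 */
        return 0;
}
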
72004 diff -urNp linux-2.6.32.42/sound/oss/swarm_cs4297a.c linux-2.6.32.42/sound/oss/swarm_cs4297a.c
72005 --- linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
72006 +++ linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
72007 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
72008 {
72009 struct cs4297a_state *s;
72010 u32 pwr, id;
72011 - mm_segment_t fs;
72012 int rval;
72013 #ifndef CONFIG_BCM_CS4297A_CSWARM
72014 u64 cfg;
72015 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
72016 if (!rval) {
72017 char *sb1250_duart_present;
72018
72019 +#if 0
72020 + mm_segment_t fs;
72021 fs = get_fs();
72022 set_fs(KERNEL_DS);
72023 -#if 0
72024 val = SOUND_MASK_LINE;
72025 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
72026 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
72027 val = initvol[i].vol;
72028 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
72029 }
72030 + set_fs(fs);
72031 // cs4297a_write_ac97(s, 0x18, 0x0808);
72032 #else
72033 // cs4297a_write_ac97(s, 0x5e, 0x180);
72034 cs4297a_write_ac97(s, 0x02, 0x0808);
72035 cs4297a_write_ac97(s, 0x18, 0x0808);
72036 #endif
72037 - set_fs(fs);
72038
72039 list_add(&s->list, &cs4297a_devs);
72040
72041 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_codec.c linux-2.6.32.42/sound/pci/ac97/ac97_codec.c
72042 --- linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
72043 +++ linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
72044 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
72045 }
72046
72047 /* build_ops to do nothing */
72048 -static struct snd_ac97_build_ops null_build_ops;
72049 +static const struct snd_ac97_build_ops null_build_ops;
72050
72051 #ifdef CONFIG_SND_AC97_POWER_SAVE
72052 static void do_update_power(struct work_struct *work)
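
The ac97_codec.c hunk above and the ac97_patch.c hunks below mark the snd_ac97_build_ops tables const. These structures hold nothing but function pointers fixed at build time, so declaring them const lets them live in read-only data, where a stray or malicious runtime write to the callbacks faults instead of silently taking effect. A minimal sketch of the idea with a hypothetical ops type:

/* hypothetical ops table, for illustration only */
struct demo_build_ops {
        int (*build_spdif)(void *chip);
        int (*build_3d)(void *chip);
};

static int demo_build_spdif(void *chip) { return 0; }

/* const: the table is emitted into read-only data, so its function
 * pointers cannot be silently overwritten at run time */
static const struct demo_build_ops demo_ops = {
        .build_spdif = demo_build_spdif,
};
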
72053 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_patch.c linux-2.6.32.42/sound/pci/ac97/ac97_patch.c
72054 --- linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
72055 +++ linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
72056 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
72057 return 0;
72058 }
72059
72060 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
72061 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
72062 .build_spdif = patch_yamaha_ymf743_build_spdif,
72063 .build_3d = patch_yamaha_ymf7x3_3d,
72064 };
72065 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
72066 return 0;
72067 }
72068
72069 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
72070 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
72071 .build_3d = patch_yamaha_ymf7x3_3d,
72072 .build_post_spdif = patch_yamaha_ymf753_post_spdif
72073 };
72074 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
72075 return 0;
72076 }
72077
72078 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
72079 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
72080 .build_specific = patch_wolfson_wm9703_specific,
72081 };
72082
72083 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
72084 return 0;
72085 }
72086
72087 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
72088 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
72089 .build_specific = patch_wolfson_wm9704_specific,
72090 };
72091
72092 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
72093 return 0;
72094 }
72095
72096 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
72097 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
72098 .build_specific = patch_wolfson_wm9705_specific,
72099 };
72100
72101 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
72102 return 0;
72103 }
72104
72105 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
72106 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
72107 .build_specific = patch_wolfson_wm9711_specific,
72108 };
72109
72110 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
72111 }
72112 #endif
72113
72114 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
72115 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
72116 .build_specific = patch_wolfson_wm9713_specific,
72117 .build_3d = patch_wolfson_wm9713_3d,
72118 #ifdef CONFIG_PM
72119 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
72120 return 0;
72121 }
72122
72123 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
72124 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
72125 .build_3d = patch_sigmatel_stac9700_3d,
72126 .build_specific = patch_sigmatel_stac97xx_specific
72127 };
72128 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
72129 return patch_sigmatel_stac97xx_specific(ac97);
72130 }
72131
72132 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
72133 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
72134 .build_3d = patch_sigmatel_stac9708_3d,
72135 .build_specific = patch_sigmatel_stac9708_specific
72136 };
72137 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
72138 return 0;
72139 }
72140
72141 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
72142 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
72143 .build_3d = patch_sigmatel_stac9700_3d,
72144 .build_specific = patch_sigmatel_stac9758_specific
72145 };
72146 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
72147 return 0;
72148 }
72149
72150 -static struct snd_ac97_build_ops patch_cirrus_ops = {
72151 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
72152 .build_spdif = patch_cirrus_build_spdif
72153 };
72154
72155 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
72156 return 0;
72157 }
72158
72159 -static struct snd_ac97_build_ops patch_conexant_ops = {
72160 +static const struct snd_ac97_build_ops patch_conexant_ops = {
72161 .build_spdif = patch_conexant_build_spdif
72162 };
72163
72164 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
72165 }
72166 }
72167
72168 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
72169 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
72170 #ifdef CONFIG_PM
72171 .resume = ad18xx_resume
72172 #endif
72173 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
72174 return 0;
72175 }
72176
72177 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
72178 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
72179 .build_specific = &patch_ad1885_specific,
72180 #ifdef CONFIG_PM
72181 .resume = ad18xx_resume
72182 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
72183 return 0;
72184 }
72185
72186 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
72187 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
72188 .build_specific = &patch_ad1886_specific,
72189 #ifdef CONFIG_PM
72190 .resume = ad18xx_resume
72191 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
72192 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
72193 }
72194
72195 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
72196 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
72197 .build_post_spdif = patch_ad198x_post_spdif,
72198 .build_specific = patch_ad1981a_specific,
72199 #ifdef CONFIG_PM
72200 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
72201 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
72202 }
72203
72204 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
72205 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
72206 .build_post_spdif = patch_ad198x_post_spdif,
72207 .build_specific = patch_ad1981b_specific,
72208 #ifdef CONFIG_PM
72209 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
72210 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
72211 }
72212
72213 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
72214 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
72215 .build_post_spdif = patch_ad198x_post_spdif,
72216 .build_specific = patch_ad1888_specific,
72217 #ifdef CONFIG_PM
72218 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
72219 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
72220 }
72221
72222 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
72223 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
72224 .build_post_spdif = patch_ad198x_post_spdif,
72225 .build_specific = patch_ad1980_specific,
72226 #ifdef CONFIG_PM
72227 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
72228 ARRAY_SIZE(snd_ac97_ad1985_controls));
72229 }
72230
72231 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
72232 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
72233 .build_post_spdif = patch_ad198x_post_spdif,
72234 .build_specific = patch_ad1985_specific,
72235 #ifdef CONFIG_PM
72236 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
72237 ARRAY_SIZE(snd_ac97_ad1985_controls));
72238 }
72239
72240 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
72241 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
72242 .build_post_spdif = patch_ad198x_post_spdif,
72243 .build_specific = patch_ad1986_specific,
72244 #ifdef CONFIG_PM
72245 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
72246 return 0;
72247 }
72248
72249 -static struct snd_ac97_build_ops patch_alc650_ops = {
72250 +static const struct snd_ac97_build_ops patch_alc650_ops = {
72251 .build_specific = patch_alc650_specific,
72252 .update_jacks = alc650_update_jacks
72253 };
72254 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
72255 return 0;
72256 }
72257
72258 -static struct snd_ac97_build_ops patch_alc655_ops = {
72259 +static const struct snd_ac97_build_ops patch_alc655_ops = {
72260 .build_specific = patch_alc655_specific,
72261 .update_jacks = alc655_update_jacks
72262 };
72263 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
72264 return 0;
72265 }
72266
72267 -static struct snd_ac97_build_ops patch_alc850_ops = {
72268 +static const struct snd_ac97_build_ops patch_alc850_ops = {
72269 .build_specific = patch_alc850_specific,
72270 .update_jacks = alc850_update_jacks
72271 };
72272 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
72273 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
72274 }
72275
72276 -static struct snd_ac97_build_ops patch_cm9738_ops = {
72277 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
72278 .build_specific = patch_cm9738_specific,
72279 .update_jacks = cm9738_update_jacks
72280 };
72281 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
72282 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
72283 }
72284
72285 -static struct snd_ac97_build_ops patch_cm9739_ops = {
72286 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
72287 .build_specific = patch_cm9739_specific,
72288 .build_post_spdif = patch_cm9739_post_spdif,
72289 .update_jacks = cm9739_update_jacks
72290 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
72291 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
72292 }
72293
72294 -static struct snd_ac97_build_ops patch_cm9761_ops = {
72295 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
72296 .build_specific = patch_cm9761_specific,
72297 .build_post_spdif = patch_cm9761_post_spdif,
72298 .update_jacks = cm9761_update_jacks
72299 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
72300 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
72301 }
72302
72303 -static struct snd_ac97_build_ops patch_cm9780_ops = {
72304 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
72305 .build_specific = patch_cm9780_specific,
72306 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
72307 };
72308 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
72309 return 0;
72310 }
72311
72312 -static struct snd_ac97_build_ops patch_vt1616_ops = {
72313 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
72314 .build_specific = patch_vt1616_specific
72315 };
72316
72317 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
72318 return 0;
72319 }
72320
72321 -static struct snd_ac97_build_ops patch_it2646_ops = {
72322 +static const struct snd_ac97_build_ops patch_it2646_ops = {
72323 .build_specific = patch_it2646_specific,
72324 .update_jacks = it2646_update_jacks
72325 };
72326 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
72327 return 0;
72328 }
72329
72330 -static struct snd_ac97_build_ops patch_si3036_ops = {
72331 +static const struct snd_ac97_build_ops patch_si3036_ops = {
72332 .build_specific = patch_si3036_specific,
72333 };
72334
72335 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
72336 return 0;
72337 }
72338
72339 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
72340 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
72341 .build_specific = patch_ucb1400_specific,
72342 };
72343
72344 diff -urNp linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c
72345 --- linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
72346 +++ linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
72347 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
72348 cp_ready);
72349
72350 /* TODO */
72351 - if (cp_state)
72352 - ;
72353 - if (cp_ready)
72354 - ;
72355 + if (cp_state) {
72356 + }
72357 + if (cp_ready) {
72358 + }
72359 }
72360
72361
72362 diff -urNp linux-2.6.32.42/sound/pci/intel8x0m.c linux-2.6.32.42/sound/pci/intel8x0m.c
72363 --- linux-2.6.32.42/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
72364 +++ linux-2.6.32.42/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
72365 @@ -1264,7 +1264,7 @@ static struct shortname_table {
72366 { 0x5455, "ALi M5455" },
72367 { 0x746d, "AMD AMD8111" },
72368 #endif
72369 - { 0 },
72370 + { 0, },
72371 };
72372
72373 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
72374 diff -urNp linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c
72375 --- linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
72376 +++ linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
72377 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
72378 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
72379 break;
72380 }
72381 - if (atomic_read(&chip->interrupt_sleep_count)) {
72382 - atomic_set(&chip->interrupt_sleep_count, 0);
72383 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72384 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72385 wake_up(&chip->interrupt_sleep);
72386 }
72387 __end:
72388 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
72389 continue;
72390 init_waitqueue_entry(&wait, current);
72391 add_wait_queue(&chip->interrupt_sleep, &wait);
72392 - atomic_inc(&chip->interrupt_sleep_count);
72393 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
72394 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
72395 remove_wait_queue(&chip->interrupt_sleep, &wait);
72396 }
72397 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
72398 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
72399 spin_unlock(&chip->reg_lock);
72400
72401 - if (atomic_read(&chip->interrupt_sleep_count)) {
72402 - atomic_set(&chip->interrupt_sleep_count, 0);
72403 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72404 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72405 wake_up(&chip->interrupt_sleep);
72406 }
72407 }
72408 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
72409 spin_lock_init(&chip->reg_lock);
72410 spin_lock_init(&chip->voice_lock);
72411 init_waitqueue_head(&chip->interrupt_sleep);
72412 - atomic_set(&chip->interrupt_sleep_count, 0);
72413 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72414 chip->card = card;
72415 chip->pci = pci;
72416 chip->irq = -1;
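
The ymfpci hunks move interrupt_sleep_count over to the *_unchecked atomic helpers. Elsewhere in this patch the PAX_REFCOUNT hardening instruments the regular atomic_t operations to detect overflow; a wakeup counter like this one is not a reference count and may wrap harmlessly, so it is switched to atomic_unchecked_t to keep the old semantics and avoid false positives. A rough sketch of the split, assuming the unchecked type and helpers introduced by the rest of this patch:

#include <asm/atomic.h>

/* sketch only: atomic_unchecked_t and the *_unchecked helpers come
 * from other hunks of this patch (PAX_REFCOUNT) */
static atomic_t obj_refcount;            /* true reference count: keep overflow checking */
static atomic_unchecked_t wakeup_count;  /* plain event counter: wrapping is harmless */

static void demo_counters(void)
{
        atomic_inc(&obj_refcount);              /* overflow-checked under PAX_REFCOUNT */
        atomic_inc_unchecked(&wakeup_count);    /* keeps the traditional, unchecked behaviour */

        if (atomic_read_unchecked(&wakeup_count))
                atomic_set_unchecked(&wakeup_count, 0);
}
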
72417 diff -urNp linux-2.6.32.42/tools/gcc/Makefile linux-2.6.32.42/tools/gcc/Makefile
72418 --- linux-2.6.32.42/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
72419 +++ linux-2.6.32.42/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
72420 @@ -0,0 +1,11 @@
72421 +#CC := gcc
72422 +#PLUGIN_SOURCE_FILES := pax_plugin.c
72423 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
72424 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
72425 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
72426 +
72427 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
72428 +
72429 +hostlibs-y := pax_plugin.so
72430 +always := $(hostlibs-y)
72431 +pax_plugin-objs := pax_plugin.o
72432 diff -urNp linux-2.6.32.42/tools/gcc/pax_plugin.c linux-2.6.32.42/tools/gcc/pax_plugin.c
72433 --- linux-2.6.32.42/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
72434 +++ linux-2.6.32.42/tools/gcc/pax_plugin.c 2011-06-04 20:52:13.000000000 -0400
72435 @@ -0,0 +1,242 @@
72436 +/*
72437 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
72438 + * Licensed under the GPL v2
72439 + *
72440 + * Note: the choice of the license means that the compilation process is
72441 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
72442 + * but for the kernel it doesn't matter since it doesn't link against
72443 + * any of the gcc libraries
72444 + *
72445 + * gcc plugin to help implement various PaX features
72446 + *
72447 + * - track lowest stack pointer
72448 + *
72449 + * TODO:
72450 + * - initialize all local variables
72451 + *
72452 + * BUGS:
72453 + */
72454 +#include "gcc-plugin.h"
72455 +#include "plugin-version.h"
72456 +#include "config.h"
72457 +#include "system.h"
72458 +#include "coretypes.h"
72459 +#include "tm.h"
72460 +#include "toplev.h"
72461 +#include "basic-block.h"
72462 +#include "gimple.h"
72463 +//#include "expr.h" where are you...
72464 +#include "diagnostic.h"
72465 +#include "rtl.h"
72466 +#include "emit-rtl.h"
72467 +#include "function.h"
72468 +#include "tree.h"
72469 +#include "tree-pass.h"
72470 +#include "intl.h"
72471 +
72472 +int plugin_is_GPL_compatible;
72473 +
72474 +static int track_frame_size = -1;
72475 +static const char track_function[] = "pax_track_stack";
72476 +static bool init_locals;
72477 +
72478 +static struct plugin_info pax_plugin_info = {
72479 + .version = "201106030000",
72480 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
72481 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
72482 +};
72483 +
72484 +static bool gate_pax_track_stack(void);
72485 +static unsigned int execute_pax_tree_instrument(void);
72486 +static unsigned int execute_pax_final(void);
72487 +
72488 +static struct gimple_opt_pass pax_tree_instrument_pass = {
72489 + .pass = {
72490 + .type = GIMPLE_PASS,
72491 + .name = "pax_tree_instrument",
72492 + .gate = gate_pax_track_stack,
72493 + .execute = execute_pax_tree_instrument,
72494 + .sub = NULL,
72495 + .next = NULL,
72496 + .static_pass_number = 0,
72497 + .tv_id = TV_NONE,
72498 + .properties_required = PROP_gimple_leh | PROP_cfg,
72499 + .properties_provided = 0,
72500 + .properties_destroyed = 0,
72501 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
72502 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
72503 + }
72504 +};
72505 +
72506 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
72507 + .pass = {
72508 + .type = RTL_PASS,
72509 + .name = "pax_final",
72510 + .gate = gate_pax_track_stack,
72511 + .execute = execute_pax_final,
72512 + .sub = NULL,
72513 + .next = NULL,
72514 + .static_pass_number = 0,
72515 + .tv_id = TV_NONE,
72516 + .properties_required = 0,
72517 + .properties_provided = 0,
72518 + .properties_destroyed = 0,
72519 + .todo_flags_start = 0,
72520 + .todo_flags_finish = 0
72521 + }
72522 +};
72523 +
72524 +static bool gate_pax_track_stack(void)
72525 +{
72526 + return track_frame_size >= 0;
72527 +}
72528 +
72529 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
72530 +{
72531 + gimple call;
72532 + tree decl, type;
72533 +
72534 + // insert call to void pax_track_stack(void)
72535 + type = build_function_type_list(void_type_node, NULL_TREE);
72536 + decl = build_fn_decl(track_function, type);
72537 + DECL_ASSEMBLER_NAME(decl); // for LTO
72538 + call = gimple_build_call(decl, 0);
72539 + if (before)
72540 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
72541 + else
72542 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
72543 +}
72544 +
72545 +static unsigned int execute_pax_tree_instrument(void)
72546 +{
72547 + basic_block bb;
72548 + gimple_stmt_iterator gsi;
72549 +
72550 + // 1. loop through BBs and GIMPLE statements
72551 + FOR_EACH_BB(bb) {
72552 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
72553 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
72554 + tree decl;
72555 + gimple stmt = gsi_stmt(gsi);
72556 +
72557 + if (!is_gimple_call(stmt))
72558 + continue;
72559 + decl = gimple_call_fndecl(stmt);
72560 + if (!decl)
72561 + continue;
72562 + if (TREE_CODE(decl) != FUNCTION_DECL)
72563 + continue;
72564 + if (!DECL_BUILT_IN(decl))
72565 + continue;
72566 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
72567 + continue;
72568 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
72569 + continue;
72570 +
72571 + // 2. insert track call after each __builtin_alloca call
72572 + pax_add_instrumentation(&gsi, false);
72573 +// print_node(stderr, "pax", decl, 4);
72574 + }
72575 + }
72576 +
72577 + // 3. insert track call at the beginning
72578 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
72579 + gsi = gsi_start_bb(bb);
72580 + pax_add_instrumentation(&gsi, true);
72581 +
72582 + return 0;
72583 +}
72584 +
72585 +static unsigned int execute_pax_final(void)
72586 +{
72587 + rtx insn;
72588 +
72589 + if (cfun->calls_alloca)
72590 + return 0;
72591 +
72592 + // 1. find pax_track_stack calls
72593 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
72594 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
72595 + rtx body;
72596 +
72597 + if (!CALL_P(insn))
72598 + continue;
72599 + body = PATTERN(insn);
72600 + if (GET_CODE(body) != CALL)
72601 + continue;
72602 + body = XEXP(body, 0);
72603 + if (GET_CODE(body) != MEM)
72604 + continue;
72605 + body = XEXP(body, 0);
72606 + if (GET_CODE(body) != SYMBOL_REF)
72607 + continue;
72608 + if (strcmp(XSTR(body, 0), track_function))
72609 + continue;
72610 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72611 + // 2. delete call if function frame is not big enough
72612 + if (get_frame_size() >= track_frame_size)
72613 + continue;
72614 + delete_insn_and_edges(insn);
72615 + }
72616 +
72617 +// print_simple_rtl(stderr, get_insns());
72618 +// print_rtl(stderr, get_insns());
72619 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72620 +
72621 + return 0;
72622 +}
72623 +
72624 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72625 +{
72626 + const char * const plugin_name = plugin_info->base_name;
72627 + const int argc = plugin_info->argc;
72628 + const struct plugin_argument * const argv = plugin_info->argv;
72629 + int i;
72630 + struct register_pass_info pax_tree_instrument_pass_info = {
72631 + .pass = &pax_tree_instrument_pass.pass,
72632 +// .reference_pass_name = "tree_profile",
72633 + .reference_pass_name = "optimized",
72634 + .ref_pass_instance_number = 0,
72635 + .pos_op = PASS_POS_INSERT_AFTER
72636 + };
72637 + struct register_pass_info pax_final_pass_info = {
72638 + .pass = &pax_final_rtl_opt_pass.pass,
72639 + .reference_pass_name = "final",
72640 + .ref_pass_instance_number = 0,
72641 + .pos_op = PASS_POS_INSERT_BEFORE
72642 + };
72643 +
72644 + if (!plugin_default_version_check(version, &gcc_version)) {
72645 + error(G_("incompatible gcc/plugin versions"));
72646 + return 1;
72647 + }
72648 +
72649 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
72650 +
72651 + for (i = 0; i < argc; ++i) {
72652 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
72653 + if (!argv[i].value) {
72654 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72655 + continue;
72656 + }
72657 + track_frame_size = atoi(argv[i].value);
72658 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
72659 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72660 + continue;
72661 + }
72662 + if (!strcmp(argv[i].key, "initialize-locals")) {
72663 + if (argv[i].value) {
72664 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72665 + continue;
72666 + }
72667 + init_locals = true;
72668 + continue;
72669 + }
72670 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72671 + }
72672 +
72673 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
72674 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
72675 +
72676 + return 0;
72677 +}
72678 Binary files linux-2.6.32.42/tools/gcc/pax_plugin.so and linux-2.6.32.42/tools/gcc/pax_plugin.so differ
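
The plugin above only emits calls to pax_track_stack() at function entry and after each __builtin_alloca, and the RTL pass then deletes those calls again in functions whose frame is smaller than the track-lowest-sp threshold. The callee itself is provided by the kernel side of the patch, outside this excerpt; a very rough sketch of the idea, with the per-task field name invented purely for illustration, might be:

#include <linux/sched.h>

/* rough sketch of the kernel-side helper the plugin calls: record the
 * lowest stack pointer seen so far. The "lowest_stack" field used here
 * is made up for illustration and is not the patch's actual layout. */
void pax_track_stack(void)
{
        unsigned long sp = (unsigned long)&sp;  /* approximate current stack pointer */

        if (sp < current->lowest_stack)
                current->lowest_stack = sp;
}
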
72679 diff -urNp linux-2.6.32.42/usr/gen_init_cpio.c linux-2.6.32.42/usr/gen_init_cpio.c
72680 --- linux-2.6.32.42/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
72681 +++ linux-2.6.32.42/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
72682 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
72683 int retval;
72684 int rc = -1;
72685 int namesize;
72686 - int i;
72687 + unsigned int i;
72688
72689 mode |= S_IFREG;
72690
72691 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
72692 *env_var = *expanded = '\0';
72693 strncat(env_var, start + 2, end - start - 2);
72694 strncat(expanded, new_location, start - new_location);
72695 - strncat(expanded, getenv(env_var), PATH_MAX);
72696 - strncat(expanded, end + 1, PATH_MAX);
72697 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
72698 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
72699 strncpy(new_location, expanded, PATH_MAX);
72700 + new_location[PATH_MAX] = 0;
72701 } else
72702 break;
72703 }
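
The gen_init_cpio fix matters because strncat()'s size argument bounds how many bytes may be appended, not the total size of the destination, so passing PATH_MAX on every call can still overflow once the buffer already holds data. Subtracting the current length, as the hunk does, keeps the append within what actually remains. A small self-contained illustration with a shrunken buffer:

#include <stdio.h>
#include <string.h>

#define DEMO_MAX 16     /* shrunken stand-in for PATH_MAX */

int main(void)
{
        char expanded[DEMO_MAX + 1] = "/some/prefix";

        /* wrong: strncat(expanded, suffix, DEMO_MAX) would allow up to
         * DEMO_MAX extra bytes no matter what the buffer already holds */

        /* right: only append what still fits */
        strncat(expanded, "/and/a/long/suffix", DEMO_MAX - strlen(expanded));
        expanded[DEMO_MAX] = '\0';

        printf("%s\n", expanded);       /* prints: /some/prefix/and */
        return 0;
}
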
72704 diff -urNp linux-2.6.32.42/virt/kvm/kvm_main.c linux-2.6.32.42/virt/kvm/kvm_main.c
72705 --- linux-2.6.32.42/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
72706 +++ linux-2.6.32.42/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
72707 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
72708 return 0;
72709 }
72710
72711 +/* cannot be const */
72712 static struct file_operations kvm_vcpu_fops = {
72713 .release = kvm_vcpu_release,
72714 .unlocked_ioctl = kvm_vcpu_ioctl,
72715 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
72716 return 0;
72717 }
72718
72719 +/* cannot be const */
72720 static struct file_operations kvm_vm_fops = {
72721 .release = kvm_vm_release,
72722 .unlocked_ioctl = kvm_vm_ioctl,
72723 @@ -2431,6 +2433,7 @@ out:
72724 return r;
72725 }
72726
72727 +/* cannot be const */
72728 static struct file_operations kvm_chardev_ops = {
72729 .unlocked_ioctl = kvm_dev_ioctl,
72730 .compat_ioctl = kvm_dev_ioctl,
72731 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
72732 if (kvm_rebooting)
72733 /* spin while reset goes on */
72734 while (true)
72735 - ;
72736 + cpu_relax();
72737 /* Fault while not rebooting. We want the trace. */
72738 BUG();
72739 }
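
Replacing the empty statement with cpu_relax() keeps the "spin until the reboot completes" behaviour but tells the CPU it is busy-waiting (a PAUSE instruction on x86), which reduces power use and avoids starving an SMT sibling or a virtualized host. A minimal sketch of the pattern, with a hypothetical flag:

#include <asm/processor.h>      /* cpu_relax() */

static volatile int rebooting;  /* hypothetical flag, for illustration */

static void spin_until_reboot_done(void)
{
        /* busy-wait, but hint the CPU on every iteration */
        while (rebooting)
                cpu_relax();
}
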
72740 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
72741 kvm_arch_vcpu_put(vcpu);
72742 }
72743
72744 -int kvm_init(void *opaque, unsigned int vcpu_size,
72745 +int kvm_init(const void *opaque, unsigned int vcpu_size,
72746 struct module *module)
72747 {
72748 int r;
72749 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
72750 /* A kmem cache lets us meet the alignment requirements of fx_save. */
72751 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
72752 __alignof__(struct kvm_vcpu),
72753 - 0, NULL);
72754 + SLAB_USERCOPY, NULL);
72755 if (!kvm_vcpu_cache) {
72756 r = -ENOMEM;
72757 goto out_free_5;