test/grsecurity-2.2.2-2.6.32.42-201106292104.patch
1 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/elf.h linux-2.6.32.42/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/pgtable.h linux-2.6.32.42/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.42/arch/alpha/kernel/module.c linux-2.6.32.42/arch/alpha/kernel/module.c
40 --- linux-2.6.32.42/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.42/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.42/arch/alpha/kernel/osf_sys.c linux-2.6.32.42/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58 - if (namelen > 32)
59 + if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63 @@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67 - if (len > count)
68 + if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72 @@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76 - if (nbytes < sizeof(*hwrpb))
77 + if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81 @@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85 + unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89 @@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93 - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94 + ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95 + (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102 + err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106 @@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110 - if (!vma || addr + len <= vma->vm_start)
111 + if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115 @@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119 +#ifdef CONFIG_PAX_RANDMMAP
120 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121 +#endif
122 +
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126 @@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131 - len, limit);
132 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133 +
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
137 diff -urNp linux-2.6.32.42/arch/alpha/mm/fault.c linux-2.6.32.42/arch/alpha/mm/fault.c
138 --- linux-2.6.32.42/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139 +++ linux-2.6.32.42/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144 +#ifdef CONFIG_PAX_PAGEEXEC
145 +/*
146 + * PaX: decide what to do with offenders (regs->pc = fault address)
147 + *
148 + * returns 1 when task should be killed
149 + * 2 when patched PLT trampoline was detected
150 + * 3 when unpatched PLT trampoline was detected
151 + */
152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
153 +{
154 +
155 +#ifdef CONFIG_PAX_EMUPLT
156 + int err;
157 +
158 + do { /* PaX: patched PLT emulation #1 */
159 + unsigned int ldah, ldq, jmp;
160 +
161 + err = get_user(ldah, (unsigned int *)regs->pc);
162 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164 +
165 + if (err)
166 + break;
167 +
168 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170 + jmp == 0x6BFB0000U)
171 + {
172 + unsigned long r27, addr;
173 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175 +
176 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177 + err = get_user(r27, (unsigned long *)addr);
178 + if (err)
179 + break;
180 +
181 + regs->r27 = r27;
182 + regs->pc = r27;
183 + return 2;
184 + }
185 + } while (0);
186 +
187 + do { /* PaX: patched PLT emulation #2 */
188 + unsigned int ldah, lda, br;
189 +
190 + err = get_user(ldah, (unsigned int *)regs->pc);
191 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
192 + err |= get_user(br, (unsigned int *)(regs->pc+8));
193 +
194 + if (err)
195 + break;
196 +
197 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
199 + (br & 0xFFE00000U) == 0xC3E00000U)
200 + {
201 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204 +
205 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207 + return 2;
208 + }
209 + } while (0);
210 +
211 + do { /* PaX: unpatched PLT emulation */
212 + unsigned int br;
213 +
214 + err = get_user(br, (unsigned int *)regs->pc);
215 +
216 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217 + unsigned int br2, ldq, nop, jmp;
218 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219 +
220 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221 + err = get_user(br2, (unsigned int *)addr);
222 + err |= get_user(ldq, (unsigned int *)(addr+4));
223 + err |= get_user(nop, (unsigned int *)(addr+8));
224 + err |= get_user(jmp, (unsigned int *)(addr+12));
225 + err |= get_user(resolver, (unsigned long *)(addr+16));
226 +
227 + if (err)
228 + break;
229 +
230 + if (br2 == 0xC3600000U &&
231 + ldq == 0xA77B000CU &&
232 + nop == 0x47FF041FU &&
233 + jmp == 0x6B7B0000U)
234 + {
235 + regs->r28 = regs->pc+4;
236 + regs->r27 = addr+16;
237 + regs->pc = resolver;
238 + return 3;
239 + }
240 + }
241 + } while (0);
242 +#endif
243 +
244 + return 1;
245 +}
246 +
247 +void pax_report_insns(void *pc, void *sp)
248 +{
249 + unsigned long i;
250 +
251 + printk(KERN_ERR "PAX: bytes at PC: ");
252 + for (i = 0; i < 5; i++) {
253 + unsigned int c;
254 + if (get_user(c, (unsigned int *)pc+i))
255 + printk(KERN_CONT "???????? ");
256 + else
257 + printk(KERN_CONT "%08x ", c);
258 + }
259 + printk("\n");
260 +}
261 +#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269 - if (!(vma->vm_flags & VM_EXEC))
270 + if (!(vma->vm_flags & VM_EXEC)) {
271 +
272 +#ifdef CONFIG_PAX_PAGEEXEC
273 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274 + goto bad_area;
275 +
276 + up_read(&mm->mmap_sem);
277 + switch (pax_handle_fetch_fault(regs)) {
278 +
279 +#ifdef CONFIG_PAX_EMUPLT
280 + case 2:
281 + case 3:
282 + return;
283 +#endif
284 +
285 + }
286 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287 + do_group_exit(SIGKILL);
288 +#else
289 goto bad_area;
290 +#endif
291 +
292 + }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
296 diff -urNp linux-2.6.32.42/arch/arm/include/asm/elf.h linux-2.6.32.42/arch/arm/include/asm/elf.h
297 --- linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298 +++ linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305 +
306 +#ifdef CONFIG_PAX_ASLR
307 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308 +
309 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311 +#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315 diff -urNp linux-2.6.32.42/arch/arm/include/asm/kmap_types.h linux-2.6.32.42/arch/arm/include/asm/kmap_types.h
316 --- linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317 +++ linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318 @@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322 + KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
326 diff -urNp linux-2.6.32.42/arch/arm/include/asm/uaccess.h linux-2.6.32.42/arch/arm/include/asm/uaccess.h
327 --- linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328 +++ linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
329 @@ -22,6 +22,8 @@
330 #define VERIFY_READ 0
331 #define VERIFY_WRITE 1
332
333 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
334 +
335 /*
336 * The exception table consists of pairs of addresses: the first is the
337 * address of an instruction that is allowed to fault, and the second is
338 @@ -387,8 +389,23 @@ do { \
339
340
341 #ifdef CONFIG_MMU
342 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
343 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
344 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
345 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
346 +
347 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
348 +{
349 + if (!__builtin_constant_p(n))
350 + check_object_size(to, n, false);
351 + return ___copy_from_user(to, from, n);
352 +}
353 +
354 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
355 +{
356 + if (!__builtin_constant_p(n))
357 + check_object_size(from, n, true);
358 + return ___copy_to_user(to, from, n);
359 +}
360 +
361 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
362 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
363 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
364 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
365
366 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
367 {
368 + if ((long)n < 0)
369 + return n;
370 +
371 if (access_ok(VERIFY_READ, from, n))
372 n = __copy_from_user(to, from, n);
373 else /* security hole - plug it */
374 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
375
376 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
377 {
378 + if ((long)n < 0)
379 + return n;
380 +
381 if (access_ok(VERIFY_WRITE, to, n))
382 n = __copy_to_user(to, from, n);
383 return n;
384 diff -urNp linux-2.6.32.42/arch/arm/kernel/kgdb.c linux-2.6.32.42/arch/arm/kernel/kgdb.c
385 --- linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
386 +++ linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
387 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
388 * and we handle the normal undef case within the do_undefinstr
389 * handler.
390 */
391 -struct kgdb_arch arch_kgdb_ops = {
392 +const struct kgdb_arch arch_kgdb_ops = {
393 #ifndef __ARMEB__
394 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
395 #else /* ! __ARMEB__ */
396 diff -urNp linux-2.6.32.42/arch/arm/kernel/traps.c linux-2.6.32.42/arch/arm/kernel/traps.c
397 --- linux-2.6.32.42/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
398 +++ linux-2.6.32.42/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
399 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
400
401 DEFINE_SPINLOCK(die_lock);
402
403 +extern void gr_handle_kernel_exploit(void);
404 +
405 /*
406 * This function is protected against re-entrancy.
407 */
408 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
409 if (panic_on_oops)
410 panic("Fatal exception");
411
412 + gr_handle_kernel_exploit();
413 +
414 do_exit(SIGSEGV);
415 }
416
417 diff -urNp linux-2.6.32.42/arch/arm/lib/copy_from_user.S linux-2.6.32.42/arch/arm/lib/copy_from_user.S
418 --- linux-2.6.32.42/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
419 +++ linux-2.6.32.42/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
420 @@ -16,7 +16,7 @@
421 /*
422 * Prototype:
423 *
424 - * size_t __copy_from_user(void *to, const void *from, size_t n)
425 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
426 *
427 * Purpose:
428 *
429 @@ -84,11 +84,11 @@
430
431 .text
432
433 -ENTRY(__copy_from_user)
434 +ENTRY(___copy_from_user)
435
436 #include "copy_template.S"
437
438 -ENDPROC(__copy_from_user)
439 +ENDPROC(___copy_from_user)
440
441 .section .fixup,"ax"
442 .align 0
443 diff -urNp linux-2.6.32.42/arch/arm/lib/copy_to_user.S linux-2.6.32.42/arch/arm/lib/copy_to_user.S
444 --- linux-2.6.32.42/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
445 +++ linux-2.6.32.42/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
446 @@ -16,7 +16,7 @@
447 /*
448 * Prototype:
449 *
450 - * size_t __copy_to_user(void *to, const void *from, size_t n)
451 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
452 *
453 * Purpose:
454 *
455 @@ -88,11 +88,11 @@
456 .text
457
458 ENTRY(__copy_to_user_std)
459 -WEAK(__copy_to_user)
460 +WEAK(___copy_to_user)
461
462 #include "copy_template.S"
463
464 -ENDPROC(__copy_to_user)
465 +ENDPROC(___copy_to_user)
466
467 .section .fixup,"ax"
468 .align 0
469 diff -urNp linux-2.6.32.42/arch/arm/lib/uaccess.S linux-2.6.32.42/arch/arm/lib/uaccess.S
470 --- linux-2.6.32.42/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
471 +++ linux-2.6.32.42/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
472 @@ -19,7 +19,7 @@
473
474 #define PAGE_SHIFT 12
475
476 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
477 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
478 * Purpose : copy a block to user memory from kernel memory
479 * Params : to - user memory
480 * : from - kernel memory
481 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
482 sub r2, r2, ip
483 b .Lc2u_dest_aligned
484
485 -ENTRY(__copy_to_user)
486 +ENTRY(___copy_to_user)
487 stmfd sp!, {r2, r4 - r7, lr}
488 cmp r2, #4
489 blt .Lc2u_not_enough
490 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
491 ldrgtb r3, [r1], #0
492 USER( strgtbt r3, [r0], #1) @ May fault
493 b .Lc2u_finished
494 -ENDPROC(__copy_to_user)
495 +ENDPROC(___copy_to_user)
496
497 .section .fixup,"ax"
498 .align 0
499 9001: ldmfd sp!, {r0, r4 - r7, pc}
500 .previous
501
502 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
503 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
504 * Purpose : copy a block from user memory to kernel memory
505 * Params : to - kernel memory
506 * : from - user memory
507 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
508 sub r2, r2, ip
509 b .Lcfu_dest_aligned
510
511 -ENTRY(__copy_from_user)
512 +ENTRY(___copy_from_user)
513 stmfd sp!, {r0, r2, r4 - r7, lr}
514 cmp r2, #4
515 blt .Lcfu_not_enough
516 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
517 USER( ldrgtbt r3, [r1], #1) @ May fault
518 strgtb r3, [r0], #1
519 b .Lcfu_finished
520 -ENDPROC(__copy_from_user)
521 +ENDPROC(___copy_from_user)
522
523 .section .fixup,"ax"
524 .align 0
525 diff -urNp linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c
526 --- linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
527 +++ linux-2.6.32.42/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
528 @@ -97,7 +97,7 @@ out:
529 }
530
531 unsigned long
532 -__copy_to_user(void __user *to, const void *from, unsigned long n)
533 +___copy_to_user(void __user *to, const void *from, unsigned long n)
534 {
535 /*
536 * This test is stubbed out of the main function above to keep
537 diff -urNp linux-2.6.32.42/arch/arm/mach-at91/pm.c linux-2.6.32.42/arch/arm/mach-at91/pm.c
538 --- linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
539 +++ linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
540 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
541 }
542
543
544 -static struct platform_suspend_ops at91_pm_ops ={
545 +static const struct platform_suspend_ops at91_pm_ops ={
546 .valid = at91_pm_valid_state,
547 .begin = at91_pm_begin,
548 .enter = at91_pm_enter,
549 diff -urNp linux-2.6.32.42/arch/arm/mach-omap1/pm.c linux-2.6.32.42/arch/arm/mach-omap1/pm.c
550 --- linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
551 +++ linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
552 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
553
554
555
556 -static struct platform_suspend_ops omap_pm_ops ={
557 +static const struct platform_suspend_ops omap_pm_ops ={
558 .prepare = omap_pm_prepare,
559 .enter = omap_pm_enter,
560 .finish = omap_pm_finish,
561 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c
562 --- linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
563 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
564 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
565 enable_hlt();
566 }
567
568 -static struct platform_suspend_ops omap_pm_ops = {
569 +static const struct platform_suspend_ops omap_pm_ops = {
570 .prepare = omap2_pm_prepare,
571 .enter = omap2_pm_enter,
572 .finish = omap2_pm_finish,
573 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c
574 --- linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
575 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
576 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
577 return;
578 }
579
580 -static struct platform_suspend_ops omap_pm_ops = {
581 +static const struct platform_suspend_ops omap_pm_ops = {
582 .begin = omap3_pm_begin,
583 .end = omap3_pm_end,
584 .prepare = omap3_pm_prepare,
585 diff -urNp linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c
586 --- linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
587 +++ linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
588 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
589 (state == PM_SUSPEND_MEM);
590 }
591
592 -static struct platform_suspend_ops pnx4008_pm_ops = {
593 +static const struct platform_suspend_ops pnx4008_pm_ops = {
594 .enter = pnx4008_pm_enter,
595 .valid = pnx4008_pm_valid,
596 };
597 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/pm.c linux-2.6.32.42/arch/arm/mach-pxa/pm.c
598 --- linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
599 +++ linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
600 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
601 pxa_cpu_pm_fns->finish();
602 }
603
604 -static struct platform_suspend_ops pxa_pm_ops = {
605 +static const struct platform_suspend_ops pxa_pm_ops = {
606 .valid = pxa_pm_valid,
607 .enter = pxa_pm_enter,
608 .prepare = pxa_pm_prepare,
609 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c
610 --- linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
611 +++ linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
612 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
613 }
614
615 #ifdef CONFIG_PM
616 -static struct platform_suspend_ops sharpsl_pm_ops = {
617 +static const struct platform_suspend_ops sharpsl_pm_ops = {
618 .prepare = pxa_pm_prepare,
619 .finish = pxa_pm_finish,
620 .enter = corgi_pxa_pm_enter,
621 diff -urNp linux-2.6.32.42/arch/arm/mach-sa1100/pm.c linux-2.6.32.42/arch/arm/mach-sa1100/pm.c
622 --- linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
623 +++ linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
624 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
625 return virt_to_phys(sp);
626 }
627
628 -static struct platform_suspend_ops sa11x0_pm_ops = {
629 +static const struct platform_suspend_ops sa11x0_pm_ops = {
630 .enter = sa11x0_pm_enter,
631 .valid = suspend_valid_only_mem,
632 };
633 diff -urNp linux-2.6.32.42/arch/arm/mm/fault.c linux-2.6.32.42/arch/arm/mm/fault.c
634 --- linux-2.6.32.42/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
635 +++ linux-2.6.32.42/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
636 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
637 }
638 #endif
639
640 +#ifdef CONFIG_PAX_PAGEEXEC
641 + if (fsr & FSR_LNX_PF) {
642 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
643 + do_group_exit(SIGKILL);
644 + }
645 +#endif
646 +
647 tsk->thread.address = addr;
648 tsk->thread.error_code = fsr;
649 tsk->thread.trap_no = 14;
650 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
651 }
652 #endif /* CONFIG_MMU */
653
654 +#ifdef CONFIG_PAX_PAGEEXEC
655 +void pax_report_insns(void *pc, void *sp)
656 +{
657 + long i;
658 +
659 + printk(KERN_ERR "PAX: bytes at PC: ");
660 + for (i = 0; i < 20; i++) {
661 + unsigned char c;
662 + if (get_user(c, (__force unsigned char __user *)pc+i))
663 + printk(KERN_CONT "?? ");
664 + else
665 + printk(KERN_CONT "%02x ", c);
666 + }
667 + printk("\n");
668 +
669 + printk(KERN_ERR "PAX: bytes at SP-4: ");
670 + for (i = -1; i < 20; i++) {
671 + unsigned long c;
672 + if (get_user(c, (__force unsigned long __user *)sp+i))
673 + printk(KERN_CONT "???????? ");
674 + else
675 + printk(KERN_CONT "%08lx ", c);
676 + }
677 + printk("\n");
678 +}
679 +#endif
680 +
681 /*
682 * First Level Translation Fault Handler
683 *
684 diff -urNp linux-2.6.32.42/arch/arm/mm/mmap.c linux-2.6.32.42/arch/arm/mm/mmap.c
685 --- linux-2.6.32.42/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
686 +++ linux-2.6.32.42/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
687 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
688 if (len > TASK_SIZE)
689 return -ENOMEM;
690
691 +#ifdef CONFIG_PAX_RANDMMAP
692 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
693 +#endif
694 +
695 if (addr) {
696 if (do_align)
697 addr = COLOUR_ALIGN(addr, pgoff);
698 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
699 addr = PAGE_ALIGN(addr);
700
701 vma = find_vma(mm, addr);
702 - if (TASK_SIZE - len >= addr &&
703 - (!vma || addr + len <= vma->vm_start))
704 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
705 return addr;
706 }
707 if (len > mm->cached_hole_size) {
708 - start_addr = addr = mm->free_area_cache;
709 + start_addr = addr = mm->free_area_cache;
710 } else {
711 - start_addr = addr = TASK_UNMAPPED_BASE;
712 - mm->cached_hole_size = 0;
713 + start_addr = addr = mm->mmap_base;
714 + mm->cached_hole_size = 0;
715 }
716
717 full_search:
718 @@ -94,14 +97,14 @@ full_search:
719 * Start a new search - just in case we missed
720 * some holes.
721 */
722 - if (start_addr != TASK_UNMAPPED_BASE) {
723 - start_addr = addr = TASK_UNMAPPED_BASE;
724 + if (start_addr != mm->mmap_base) {
725 + start_addr = addr = mm->mmap_base;
726 mm->cached_hole_size = 0;
727 goto full_search;
728 }
729 return -ENOMEM;
730 }
731 - if (!vma || addr + len <= vma->vm_start) {
732 + if (check_heap_stack_gap(vma, addr, len)) {
733 /*
734 * Remember the place where we stopped the search:
735 */
736 diff -urNp linux-2.6.32.42/arch/arm/plat-s3c/pm.c linux-2.6.32.42/arch/arm/plat-s3c/pm.c
737 --- linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
738 +++ linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
739 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
740 s3c_pm_check_cleanup();
741 }
742
743 -static struct platform_suspend_ops s3c_pm_ops = {
744 +static const struct platform_suspend_ops s3c_pm_ops = {
745 .enter = s3c_pm_enter,
746 .prepare = s3c_pm_prepare,
747 .finish = s3c_pm_finish,
748 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/elf.h linux-2.6.32.42/arch/avr32/include/asm/elf.h
749 --- linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
750 +++ linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
751 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
752 the loader. We need to make sure that it is out of the way of the program
753 that it will "exec", and that there is sufficient room for the brk. */
754
755 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
756 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
757
758 +#ifdef CONFIG_PAX_ASLR
759 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
760 +
761 +#define PAX_DELTA_MMAP_LEN 15
762 +#define PAX_DELTA_STACK_LEN 15
763 +#endif
764
765 /* This yields a mask that user programs can use to figure out what
766 instruction set this CPU supports. This could be done in user space,
767 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h
768 --- linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
769 +++ linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
770 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
771 D(11) KM_IRQ1,
772 D(12) KM_SOFTIRQ0,
773 D(13) KM_SOFTIRQ1,
774 -D(14) KM_TYPE_NR
775 +D(14) KM_CLEARPAGE,
776 +D(15) KM_TYPE_NR
777 };
778
779 #undef D
780 diff -urNp linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c
781 --- linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
782 +++ linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
783 @@ -176,7 +176,7 @@ out:
784 return 0;
785 }
786
787 -static struct platform_suspend_ops avr32_pm_ops = {
788 +static const struct platform_suspend_ops avr32_pm_ops = {
789 .valid = avr32_pm_valid_state,
790 .enter = avr32_pm_enter,
791 };
792 diff -urNp linux-2.6.32.42/arch/avr32/mm/fault.c linux-2.6.32.42/arch/avr32/mm/fault.c
793 --- linux-2.6.32.42/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
794 +++ linux-2.6.32.42/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
795 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
796
797 int exception_trace = 1;
798
799 +#ifdef CONFIG_PAX_PAGEEXEC
800 +void pax_report_insns(void *pc, void *sp)
801 +{
802 + unsigned long i;
803 +
804 + printk(KERN_ERR "PAX: bytes at PC: ");
805 + for (i = 0; i < 20; i++) {
806 + unsigned char c;
807 + if (get_user(c, (unsigned char *)pc+i))
808 + printk(KERN_CONT "???????? ");
809 + else
810 + printk(KERN_CONT "%02x ", c);
811 + }
812 + printk("\n");
813 +}
814 +#endif
815 +
816 /*
817 * This routine handles page faults. It determines the address and the
818 * problem, and then passes it off to one of the appropriate routines.
819 @@ -157,6 +174,16 @@ bad_area:
820 up_read(&mm->mmap_sem);
821
822 if (user_mode(regs)) {
823 +
824 +#ifdef CONFIG_PAX_PAGEEXEC
825 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
826 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
827 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
828 + do_group_exit(SIGKILL);
829 + }
830 + }
831 +#endif
832 +
833 if (exception_trace && printk_ratelimit())
834 printk("%s%s[%d]: segfault at %08lx pc %08lx "
835 "sp %08lx ecr %lu\n",
836 diff -urNp linux-2.6.32.42/arch/blackfin/kernel/kgdb.c linux-2.6.32.42/arch/blackfin/kernel/kgdb.c
837 --- linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
838 +++ linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
839 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
840 return -1; /* this means that we do not want to exit from the handler */
841 }
842
843 -struct kgdb_arch arch_kgdb_ops = {
844 +const struct kgdb_arch arch_kgdb_ops = {
845 .gdb_bpt_instr = {0xa1},
846 #ifdef CONFIG_SMP
847 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
848 diff -urNp linux-2.6.32.42/arch/blackfin/mach-common/pm.c linux-2.6.32.42/arch/blackfin/mach-common/pm.c
849 --- linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
850 +++ linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
851 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
852 return 0;
853 }
854
855 -struct platform_suspend_ops bfin_pm_ops = {
856 +const struct platform_suspend_ops bfin_pm_ops = {
857 .enter = bfin_pm_enter,
858 .valid = bfin_pm_valid,
859 };
860 diff -urNp linux-2.6.32.42/arch/frv/include/asm/kmap_types.h linux-2.6.32.42/arch/frv/include/asm/kmap_types.h
861 --- linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
862 +++ linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
863 @@ -23,6 +23,7 @@ enum km_type {
864 KM_IRQ1,
865 KM_SOFTIRQ0,
866 KM_SOFTIRQ1,
867 + KM_CLEARPAGE,
868 KM_TYPE_NR
869 };
870
871 diff -urNp linux-2.6.32.42/arch/frv/mm/elf-fdpic.c linux-2.6.32.42/arch/frv/mm/elf-fdpic.c
872 --- linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
873 +++ linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
874 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
875 if (addr) {
876 addr = PAGE_ALIGN(addr);
877 vma = find_vma(current->mm, addr);
878 - if (TASK_SIZE - len >= addr &&
879 - (!vma || addr + len <= vma->vm_start))
880 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
881 goto success;
882 }
883
884 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
885 for (; vma; vma = vma->vm_next) {
886 if (addr > limit)
887 break;
888 - if (addr + len <= vma->vm_start)
889 + if (check_heap_stack_gap(vma, addr, len))
890 goto success;
891 addr = vma->vm_end;
892 }
893 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
894 for (; vma; vma = vma->vm_next) {
895 if (addr > limit)
896 break;
897 - if (addr + len <= vma->vm_start)
898 + if (check_heap_stack_gap(vma, addr, len))
899 goto success;
900 addr = vma->vm_end;
901 }
902 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c
903 --- linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
904 +++ linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
905 @@ -17,7 +17,7 @@
906 #include <linux/swiotlb.h>
907 #include <asm/machvec.h>
908
909 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
910 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
911
912 /* swiotlb declarations & definitions: */
913 extern int swiotlb_late_init_with_default_size (size_t size);
914 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
915 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
916 }
917
918 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
919 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
920 {
921 if (use_swiotlb(dev))
922 return &swiotlb_dma_ops;
923 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c
924 --- linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
925 +++ linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
926 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
927 },
928 };
929
930 -extern struct dma_map_ops swiotlb_dma_ops;
931 +extern const struct dma_map_ops swiotlb_dma_ops;
932
933 static int __init
934 sba_init(void)
935 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
936
937 __setup("sbapagesize=",sba_page_override);
938
939 -struct dma_map_ops sba_dma_ops = {
940 +const struct dma_map_ops sba_dma_ops = {
941 .alloc_coherent = sba_alloc_coherent,
942 .free_coherent = sba_free_coherent,
943 .map_page = sba_map_page,
944 diff -urNp linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c
945 --- linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
946 +++ linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
947 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
948
949 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
950
951 +#ifdef CONFIG_PAX_ASLR
952 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
953 +
954 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
955 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
956 +#endif
957 +
958 /* Ugly but avoids duplication */
959 #include "../../../fs/binfmt_elf.c"
960
961 diff -urNp linux-2.6.32.42/arch/ia64/ia32/ia32priv.h linux-2.6.32.42/arch/ia64/ia32/ia32priv.h
962 --- linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
963 +++ linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
964 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
965 #define ELF_DATA ELFDATA2LSB
966 #define ELF_ARCH EM_386
967
968 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
969 +#ifdef CONFIG_PAX_RANDUSTACK
970 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
971 +#else
972 +#define __IA32_DELTA_STACK 0UL
973 +#endif
974 +
975 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
976 +
977 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
978 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
979
980 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h
981 --- linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
982 +++ linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
983 @@ -12,7 +12,7 @@
984
985 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
986
987 -extern struct dma_map_ops *dma_ops;
988 +extern const struct dma_map_ops *dma_ops;
989 extern struct ia64_machine_vector ia64_mv;
990 extern void set_iommu_machvec(void);
991
992 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
993 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
994 dma_addr_t *daddr, gfp_t gfp)
995 {
996 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
997 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
998 void *caddr;
999
1000 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1001 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
1002 static inline void dma_free_coherent(struct device *dev, size_t size,
1003 void *caddr, dma_addr_t daddr)
1004 {
1005 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1006 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1007 debug_dma_free_coherent(dev, size, caddr, daddr);
1008 ops->free_coherent(dev, size, caddr, daddr);
1009 }
1010 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
1011
1012 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1013 {
1014 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1015 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1016 return ops->mapping_error(dev, daddr);
1017 }
1018
1019 static inline int dma_supported(struct device *dev, u64 mask)
1020 {
1021 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1022 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1023 return ops->dma_supported(dev, mask);
1024 }
1025
1026 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/elf.h linux-2.6.32.42/arch/ia64/include/asm/elf.h
1027 --- linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1028 +++ linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1029 @@ -43,6 +43,13 @@
1030 */
1031 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1032
1033 +#ifdef CONFIG_PAX_ASLR
1034 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1035 +
1036 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1037 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1038 +#endif
1039 +
1040 #define PT_IA_64_UNWIND 0x70000001
1041
1042 /* IA-64 relocations: */
1043 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/machvec.h linux-2.6.32.42/arch/ia64/include/asm/machvec.h
1044 --- linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1045 +++ linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1046 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1047 /* DMA-mapping interface: */
1048 typedef void ia64_mv_dma_init (void);
1049 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1050 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1051 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1052
1053 /*
1054 * WARNING: The legacy I/O space is _architected_. Platforms are
1055 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1056 # endif /* CONFIG_IA64_GENERIC */
1057
1058 extern void swiotlb_dma_init(void);
1059 -extern struct dma_map_ops *dma_get_ops(struct device *);
1060 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1061
1062 /*
1063 * Define default versions so we can extend machvec for new platforms without having
1064 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/pgtable.h linux-2.6.32.42/arch/ia64/include/asm/pgtable.h
1065 --- linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1066 +++ linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1067 @@ -12,7 +12,7 @@
1068 * David Mosberger-Tang <davidm@hpl.hp.com>
1069 */
1070
1071 -
1072 +#include <linux/const.h>
1073 #include <asm/mman.h>
1074 #include <asm/page.h>
1075 #include <asm/processor.h>
1076 @@ -143,6 +143,17 @@
1077 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1078 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1079 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1080 +
1081 +#ifdef CONFIG_PAX_PAGEEXEC
1082 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1083 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1084 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1085 +#else
1086 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1087 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1088 +# define PAGE_COPY_NOEXEC PAGE_COPY
1089 +#endif
1090 +
1091 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1092 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1093 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1094 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/spinlock.h linux-2.6.32.42/arch/ia64/include/asm/spinlock.h
1095 --- linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1096 +++ linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1097 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1098 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1099
1100 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1101 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1102 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1103 }
1104
1105 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1106 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/uaccess.h linux-2.6.32.42/arch/ia64/include/asm/uaccess.h
1107 --- linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1108 +++ linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1109 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1110 const void *__cu_from = (from); \
1111 long __cu_len = (n); \
1112 \
1113 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1114 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1115 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1116 __cu_len; \
1117 })
1118 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1119 long __cu_len = (n); \
1120 \
1121 __chk_user_ptr(__cu_from); \
1122 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1123 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1124 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1125 __cu_len; \
1126 })
1127 diff -urNp linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c
1128 --- linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1129 +++ linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1130 @@ -3,7 +3,7 @@
1131 /* Set this to 1 if there is a HW IOMMU in the system */
1132 int iommu_detected __read_mostly;
1133
1134 -struct dma_map_ops *dma_ops;
1135 +const struct dma_map_ops *dma_ops;
1136 EXPORT_SYMBOL(dma_ops);
1137
1138 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1139 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1140 }
1141 fs_initcall(dma_init);
1142
1143 -struct dma_map_ops *dma_get_ops(struct device *dev)
1144 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1145 {
1146 return dma_ops;
1147 }
1148 diff -urNp linux-2.6.32.42/arch/ia64/kernel/module.c linux-2.6.32.42/arch/ia64/kernel/module.c
1149 --- linux-2.6.32.42/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1150 +++ linux-2.6.32.42/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1151 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1152 void
1153 module_free (struct module *mod, void *module_region)
1154 {
1155 - if (mod && mod->arch.init_unw_table &&
1156 - module_region == mod->module_init) {
1157 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1158 unw_remove_unwind_table(mod->arch.init_unw_table);
1159 mod->arch.init_unw_table = NULL;
1160 }
1161 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1162 }
1163
1164 static inline int
1165 +in_init_rx (const struct module *mod, uint64_t addr)
1166 +{
1167 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1168 +}
1169 +
1170 +static inline int
1171 +in_init_rw (const struct module *mod, uint64_t addr)
1172 +{
1173 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1174 +}
1175 +
1176 +static inline int
1177 in_init (const struct module *mod, uint64_t addr)
1178 {
1179 - return addr - (uint64_t) mod->module_init < mod->init_size;
1180 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1181 +}
1182 +
1183 +static inline int
1184 +in_core_rx (const struct module *mod, uint64_t addr)
1185 +{
1186 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1187 +}
1188 +
1189 +static inline int
1190 +in_core_rw (const struct module *mod, uint64_t addr)
1191 +{
1192 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1193 }
1194
1195 static inline int
1196 in_core (const struct module *mod, uint64_t addr)
1197 {
1198 - return addr - (uint64_t) mod->module_core < mod->core_size;
1199 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1200 }
1201
1202 static inline int
1203 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1204 break;
1205
1206 case RV_BDREL:
1207 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1208 + if (in_init_rx(mod, val))
1209 + val -= (uint64_t) mod->module_init_rx;
1210 + else if (in_init_rw(mod, val))
1211 + val -= (uint64_t) mod->module_init_rw;
1212 + else if (in_core_rx(mod, val))
1213 + val -= (uint64_t) mod->module_core_rx;
1214 + else if (in_core_rw(mod, val))
1215 + val -= (uint64_t) mod->module_core_rw;
1216 break;
1217
1218 case RV_LTV:
1219 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1220 * addresses have been selected...
1221 */
1222 uint64_t gp;
1223 - if (mod->core_size > MAX_LTOFF)
1224 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1225 /*
1226 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1227 * at the end of the module.
1228 */
1229 - gp = mod->core_size - MAX_LTOFF / 2;
1230 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1231 else
1232 - gp = mod->core_size / 2;
1233 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1234 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1235 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1236 mod->arch.gp = gp;
1237 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1238 }
1239 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-dma.c linux-2.6.32.42/arch/ia64/kernel/pci-dma.c
1240 --- linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1241 +++ linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1242 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1243 .dma_mask = &fallback_dev.coherent_dma_mask,
1244 };
1245
1246 -extern struct dma_map_ops intel_dma_ops;
1247 +extern const struct dma_map_ops intel_dma_ops;
1248
1249 static int __init pci_iommu_init(void)
1250 {
1251 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1252 }
1253 EXPORT_SYMBOL(iommu_dma_supported);
1254
1255 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1256 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1257 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1258 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1259 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1260 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1261 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1262 +
1263 +static const struct dma_map_ops intel_iommu_dma_ops = {
1264 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1265 + .alloc_coherent = intel_alloc_coherent,
1266 + .free_coherent = intel_free_coherent,
1267 + .map_sg = intel_map_sg,
1268 + .unmap_sg = intel_unmap_sg,
1269 + .map_page = intel_map_page,
1270 + .unmap_page = intel_unmap_page,
1271 + .mapping_error = intel_mapping_error,
1272 +
1273 + .sync_single_for_cpu = machvec_dma_sync_single,
1274 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1275 + .sync_single_for_device = machvec_dma_sync_single,
1276 + .sync_sg_for_device = machvec_dma_sync_sg,
1277 + .dma_supported = iommu_dma_supported,
1278 +};
1279 +
1280 void __init pci_iommu_alloc(void)
1281 {
1282 - dma_ops = &intel_dma_ops;
1283 -
1284 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1285 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1286 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1287 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1288 - dma_ops->dma_supported = iommu_dma_supported;
1289 + dma_ops = &intel_iommu_dma_ops;
1290
1291 /*
1292 * The order of these functions is important for
1293 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c
1294 --- linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1295 +++ linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1296 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1297 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1298 }
1299
1300 -struct dma_map_ops swiotlb_dma_ops = {
1301 +const struct dma_map_ops swiotlb_dma_ops = {
1302 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1303 .free_coherent = swiotlb_free_coherent,
1304 .map_page = swiotlb_map_page,
1305 diff -urNp linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c
1306 --- linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1307 +++ linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1308 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1309 if (REGION_NUMBER(addr) == RGN_HPAGE)
1310 addr = 0;
1311 #endif
1312 +
1313 +#ifdef CONFIG_PAX_RANDMMAP
1314 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1315 + addr = mm->free_area_cache;
1316 + else
1317 +#endif
1318 +
1319 if (!addr)
1320 addr = mm->free_area_cache;
1321
1322 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1323 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1324 /* At this point: (!vma || addr < vma->vm_end). */
1325 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1326 - if (start_addr != TASK_UNMAPPED_BASE) {
1327 + if (start_addr != mm->mmap_base) {
1328 /* Start a new search --- just in case we missed some holes. */
1329 - addr = TASK_UNMAPPED_BASE;
1330 + addr = mm->mmap_base;
1331 goto full_search;
1332 }
1333 return -ENOMEM;
1334 }
1335 - if (!vma || addr + len <= vma->vm_start) {
1336 + if (check_heap_stack_gap(vma, addr, len)) {
1337 /* Remember the address where we stopped this search: */
1338 mm->free_area_cache = addr + len;
1339 return addr;
1340 diff -urNp linux-2.6.32.42/arch/ia64/kernel/topology.c linux-2.6.32.42/arch/ia64/kernel/topology.c
1341 --- linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1342 +++ linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1343 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1344 return ret;
1345 }
1346
1347 -static struct sysfs_ops cache_sysfs_ops = {
1348 +static const struct sysfs_ops cache_sysfs_ops = {
1349 .show = cache_show
1350 };
1351
1352 diff -urNp linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S
1353 --- linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1354 +++ linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1355 @@ -190,7 +190,7 @@ SECTIONS
1356 /* Per-cpu data: */
1357 . = ALIGN(PERCPU_PAGE_SIZE);
1358 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1359 - __phys_per_cpu_start = __per_cpu_load;
1360 + __phys_per_cpu_start = per_cpu_load;
1361 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1362 * into percpu page size
1363 */
1364 diff -urNp linux-2.6.32.42/arch/ia64/mm/fault.c linux-2.6.32.42/arch/ia64/mm/fault.c
1365 --- linux-2.6.32.42/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1366 +++ linux-2.6.32.42/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1367 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1368 return pte_present(pte);
1369 }
1370
1371 +#ifdef CONFIG_PAX_PAGEEXEC
1372 +void pax_report_insns(void *pc, void *sp)
1373 +{
1374 + unsigned long i;
1375 +
1376 + printk(KERN_ERR "PAX: bytes at PC: ");
1377 + for (i = 0; i < 8; i++) {
1378 + unsigned int c;
1379 + if (get_user(c, (unsigned int *)pc+i))
1380 + printk(KERN_CONT "???????? ");
1381 + else
1382 + printk(KERN_CONT "%08x ", c);
1383 + }
1384 + printk("\n");
1385 +}
1386 +#endif
1387 +
1388 void __kprobes
1389 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1390 {
1391 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1392 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1393 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1394
1395 - if ((vma->vm_flags & mask) != mask)
1396 + if ((vma->vm_flags & mask) != mask) {
1397 +
1398 +#ifdef CONFIG_PAX_PAGEEXEC
1399 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1400 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1401 + goto bad_area;
1402 +
1403 + up_read(&mm->mmap_sem);
1404 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1405 + do_group_exit(SIGKILL);
1406 + }
1407 +#endif
1408 +
1409 goto bad_area;
1410
1411 + }
1412 +
1413 survive:
1414 /*
1415 * If for any reason at all we couldn't handle the fault, make
1416 diff -urNp linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c
1417 --- linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1418 +++ linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1419 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1420 /* At this point: (!vmm || addr < vmm->vm_end). */
1421 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1422 return -ENOMEM;
1423 - if (!vmm || (addr + len) <= vmm->vm_start)
1424 + if (check_heap_stack_gap(vmm, addr, len))
1425 return addr;
1426 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1427 }
1428 diff -urNp linux-2.6.32.42/arch/ia64/mm/init.c linux-2.6.32.42/arch/ia64/mm/init.c
1429 --- linux-2.6.32.42/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1430 +++ linux-2.6.32.42/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1431 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1432 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1433 vma->vm_end = vma->vm_start + PAGE_SIZE;
1434 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1435 +
1436 +#ifdef CONFIG_PAX_PAGEEXEC
1437 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1438 + vma->vm_flags &= ~VM_EXEC;
1439 +
1440 +#ifdef CONFIG_PAX_MPROTECT
1441 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1442 + vma->vm_flags &= ~VM_MAYEXEC;
1443 +#endif
1444 +
1445 + }
1446 +#endif
1447 +
1448 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1449 down_write(&current->mm->mmap_sem);
1450 if (insert_vm_struct(current->mm, vma)) {
1451 diff -urNp linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c
1452 --- linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1453 +++ linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1454 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1455 return ret;
1456 }
1457
1458 -static struct dma_map_ops sn_dma_ops = {
1459 +static const struct dma_map_ops sn_dma_ops = {
1460 .alloc_coherent = sn_dma_alloc_coherent,
1461 .free_coherent = sn_dma_free_coherent,
1462 .map_page = sn_dma_map_page,
1463 diff -urNp linux-2.6.32.42/arch/m32r/lib/usercopy.c linux-2.6.32.42/arch/m32r/lib/usercopy.c
1464 --- linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1465 +++ linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1466 @@ -14,6 +14,9 @@
1467 unsigned long
1468 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1469 {
1470 + if ((long)n < 0)
1471 + return n;
1472 +
1473 prefetch(from);
1474 if (access_ok(VERIFY_WRITE, to, n))
1475 __copy_user(to,from,n);
1476 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1477 unsigned long
1478 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1479 {
1480 + if ((long)n < 0)
1481 + return n;
1482 +
1483 prefetchw(to);
1484 if (access_ok(VERIFY_READ, from, n))
1485 __copy_user_zeroing(to,from,n);
1486 diff -urNp linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c
1487 --- linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1488 +++ linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1489 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1490
1491 }
1492
1493 -static struct platform_suspend_ops db1x_pm_ops = {
1494 +static const struct platform_suspend_ops db1x_pm_ops = {
1495 .valid = suspend_valid_only_mem,
1496 .begin = db1x_pm_begin,
1497 .enter = db1x_pm_enter,
1498 diff -urNp linux-2.6.32.42/arch/mips/include/asm/elf.h linux-2.6.32.42/arch/mips/include/asm/elf.h
1499 --- linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1500 +++ linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1501 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1502 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1503 #endif
1504
1505 +#ifdef CONFIG_PAX_ASLR
1506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1507 +
1508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1510 +#endif
1511 +
1512 #endif /* _ASM_ELF_H */
1513 diff -urNp linux-2.6.32.42/arch/mips/include/asm/page.h linux-2.6.32.42/arch/mips/include/asm/page.h
1514 --- linux-2.6.32.42/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1515 +++ linux-2.6.32.42/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1516 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1517 #ifdef CONFIG_CPU_MIPS32
1518 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1519 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1520 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1521 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1522 #else
1523 typedef struct { unsigned long long pte; } pte_t;
1524 #define pte_val(x) ((x).pte)
1525 diff -urNp linux-2.6.32.42/arch/mips/include/asm/system.h linux-2.6.32.42/arch/mips/include/asm/system.h
1526 --- linux-2.6.32.42/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1527 +++ linux-2.6.32.42/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1528 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1529 */
1530 #define __ARCH_WANT_UNLOCKED_CTXSW
1531
1532 -extern unsigned long arch_align_stack(unsigned long sp);
1533 +#define arch_align_stack(x) ((x) & ~0xfUL)
1534
1535 #endif /* _ASM_SYSTEM_H */
1536 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c
1537 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1538 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1539 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1540 #undef ELF_ET_DYN_BASE
1541 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1542
1543 +#ifdef CONFIG_PAX_ASLR
1544 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1545 +
1546 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1547 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1548 +#endif
1549 +
1550 #include <asm/processor.h>
1551 #include <linux/module.h>
1552 #include <linux/elfcore.h>
1553 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c
1554 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1555 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1556 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1557 #undef ELF_ET_DYN_BASE
1558 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1559
1560 +#ifdef CONFIG_PAX_ASLR
1561 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1562 +
1563 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1564 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1565 +#endif
1566 +
1567 #include <asm/processor.h>
1568
1569 /*
1570 diff -urNp linux-2.6.32.42/arch/mips/kernel/kgdb.c linux-2.6.32.42/arch/mips/kernel/kgdb.c
1571 --- linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1572 +++ linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1573 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1574 return -1;
1575 }
1576
1577 +/* cannot be const */
1578 struct kgdb_arch arch_kgdb_ops;
1579
1580 /*
1581 diff -urNp linux-2.6.32.42/arch/mips/kernel/process.c linux-2.6.32.42/arch/mips/kernel/process.c
1582 --- linux-2.6.32.42/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1583 +++ linux-2.6.32.42/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1584 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1585 out:
1586 return pc;
1587 }
1588 -
1589 -/*
1590 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1591 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1592 - */
1593 -unsigned long arch_align_stack(unsigned long sp)
1594 -{
1595 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1596 - sp -= get_random_int() & ~PAGE_MASK;
1597 -
1598 - return sp & ALMASK;
1599 -}
1600 diff -urNp linux-2.6.32.42/arch/mips/kernel/syscall.c linux-2.6.32.42/arch/mips/kernel/syscall.c
1601 --- linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1602 +++ linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1603 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1604 do_color_align = 0;
1605 if (filp || (flags & MAP_SHARED))
1606 do_color_align = 1;
1607 +
1608 +#ifdef CONFIG_PAX_RANDMMAP
1609 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1610 +#endif
1611 +
1612 if (addr) {
1613 if (do_color_align)
1614 addr = COLOUR_ALIGN(addr, pgoff);
1615 else
1616 addr = PAGE_ALIGN(addr);
1617 vmm = find_vma(current->mm, addr);
1618 - if (task_size - len >= addr &&
1619 - (!vmm || addr + len <= vmm->vm_start))
1620 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1621 return addr;
1622 }
1623 - addr = TASK_UNMAPPED_BASE;
1624 + addr = current->mm->mmap_base;
1625 if (do_color_align)
1626 addr = COLOUR_ALIGN(addr, pgoff);
1627 else
1628 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1629 /* At this point: (!vmm || addr < vmm->vm_end). */
1630 if (task_size - len < addr)
1631 return -ENOMEM;
1632 - if (!vmm || addr + len <= vmm->vm_start)
1633 + if (check_heap_stack_gap(vmm, addr, len))
1634 return addr;
1635 addr = vmm->vm_end;
1636 if (do_color_align)
1637 diff -urNp linux-2.6.32.42/arch/mips/mm/fault.c linux-2.6.32.42/arch/mips/mm/fault.c
1638 --- linux-2.6.32.42/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1639 +++ linux-2.6.32.42/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1640 @@ -26,6 +26,23 @@
1641 #include <asm/ptrace.h>
1642 #include <asm/highmem.h> /* For VMALLOC_END */
1643
1644 +#ifdef CONFIG_PAX_PAGEEXEC
1645 +void pax_report_insns(void *pc, void *sp)
1646 +{
1647 + unsigned long i;
1648 +
1649 + printk(KERN_ERR "PAX: bytes at PC: ");
1650 + for (i = 0; i < 5; i++) {
1651 + unsigned int c;
1652 + if (get_user(c, (unsigned int *)pc+i))
1653 + printk(KERN_CONT "???????? ");
1654 + else
1655 + printk(KERN_CONT "%08x ", c);
1656 + }
1657 + printk("\n");
1658 +}
1659 +#endif
1660 +
1661 /*
1662 * This routine handles page faults. It determines the address,
1663 * and the problem, and then passes it off to one of the appropriate
1664 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/elf.h linux-2.6.32.42/arch/parisc/include/asm/elf.h
1665 --- linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1666 +++ linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1667 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1668
1669 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1670
1671 +#ifdef CONFIG_PAX_ASLR
1672 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1673 +
1674 +#define PAX_DELTA_MMAP_LEN 16
1675 +#define PAX_DELTA_STACK_LEN 16
1676 +#endif
1677 +
1678 /* This yields a mask that user programs can use to figure out what
1679 instruction set this CPU supports. This could be done in user space,
1680 but it's not easy, and we've already done it here. */
1681 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/pgtable.h linux-2.6.32.42/arch/parisc/include/asm/pgtable.h
1682 --- linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1683 +++ linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1684 @@ -207,6 +207,17 @@
1685 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1686 #define PAGE_COPY PAGE_EXECREAD
1687 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1688 +
1689 +#ifdef CONFIG_PAX_PAGEEXEC
1690 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1691 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1692 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1693 +#else
1694 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1695 +# define PAGE_COPY_NOEXEC PAGE_COPY
1696 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1697 +#endif
1698 +
1699 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1700 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1701 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1702 diff -urNp linux-2.6.32.42/arch/parisc/kernel/module.c linux-2.6.32.42/arch/parisc/kernel/module.c
1703 --- linux-2.6.32.42/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1704 +++ linux-2.6.32.42/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1705 @@ -95,16 +95,38 @@
1706
1707 /* three functions to determine where in the module core
1708 * or init pieces the location is */
1709 +static inline int in_init_rx(struct module *me, void *loc)
1710 +{
1711 + return (loc >= me->module_init_rx &&
1712 + loc < (me->module_init_rx + me->init_size_rx));
1713 +}
1714 +
1715 +static inline int in_init_rw(struct module *me, void *loc)
1716 +{
1717 + return (loc >= me->module_init_rw &&
1718 + loc < (me->module_init_rw + me->init_size_rw));
1719 +}
1720 +
1721 static inline int in_init(struct module *me, void *loc)
1722 {
1723 - return (loc >= me->module_init &&
1724 - loc <= (me->module_init + me->init_size));
1725 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1726 +}
1727 +
1728 +static inline int in_core_rx(struct module *me, void *loc)
1729 +{
1730 + return (loc >= me->module_core_rx &&
1731 + loc < (me->module_core_rx + me->core_size_rx));
1732 +}
1733 +
1734 +static inline int in_core_rw(struct module *me, void *loc)
1735 +{
1736 + return (loc >= me->module_core_rw &&
1737 + loc < (me->module_core_rw + me->core_size_rw));
1738 }
1739
1740 static inline int in_core(struct module *me, void *loc)
1741 {
1742 - return (loc >= me->module_core &&
1743 - loc <= (me->module_core + me->core_size));
1744 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1745 }
1746
1747 static inline int in_local(struct module *me, void *loc)
1748 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1749 }
1750
1751 /* align things a bit */
1752 - me->core_size = ALIGN(me->core_size, 16);
1753 - me->arch.got_offset = me->core_size;
1754 - me->core_size += gots * sizeof(struct got_entry);
1755 -
1756 - me->core_size = ALIGN(me->core_size, 16);
1757 - me->arch.fdesc_offset = me->core_size;
1758 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1759 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1760 + me->arch.got_offset = me->core_size_rw;
1761 + me->core_size_rw += gots * sizeof(struct got_entry);
1762 +
1763 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1764 + me->arch.fdesc_offset = me->core_size_rw;
1765 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1766
1767 me->arch.got_max = gots;
1768 me->arch.fdesc_max = fdescs;
1769 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1770
1771 BUG_ON(value == 0);
1772
1773 - got = me->module_core + me->arch.got_offset;
1774 + got = me->module_core_rw + me->arch.got_offset;
1775 for (i = 0; got[i].addr; i++)
1776 if (got[i].addr == value)
1777 goto out;
1778 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1779 #ifdef CONFIG_64BIT
1780 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1781 {
1782 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1783 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1784
1785 if (!value) {
1786 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1787 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1788
1789 /* Create new one */
1790 fdesc->addr = value;
1791 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1792 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1793 return (Elf_Addr)fdesc;
1794 }
1795 #endif /* CONFIG_64BIT */
1796 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1797
1798 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1799 end = table + sechdrs[me->arch.unwind_section].sh_size;
1800 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1801 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1802
1803 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1804 me->arch.unwind_section, table, end, gp);
1805 diff -urNp linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c
1806 --- linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1807 +++ linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1808 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1809 /* At this point: (!vma || addr < vma->vm_end). */
1810 if (TASK_SIZE - len < addr)
1811 return -ENOMEM;
1812 - if (!vma || addr + len <= vma->vm_start)
1813 + if (check_heap_stack_gap(vma, addr, len))
1814 return addr;
1815 addr = vma->vm_end;
1816 }
1817 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1818 /* At this point: (!vma || addr < vma->vm_end). */
1819 if (TASK_SIZE - len < addr)
1820 return -ENOMEM;
1821 - if (!vma || addr + len <= vma->vm_start)
1822 + if (check_heap_stack_gap(vma, addr, len))
1823 return addr;
1824 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1825 if (addr < vma->vm_end) /* handle wraparound */
1826 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1827 if (flags & MAP_FIXED)
1828 return addr;
1829 if (!addr)
1830 - addr = TASK_UNMAPPED_BASE;
1831 + addr = current->mm->mmap_base;
1832
1833 if (filp) {
1834 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1835 diff -urNp linux-2.6.32.42/arch/parisc/kernel/traps.c linux-2.6.32.42/arch/parisc/kernel/traps.c
1836 --- linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1837 +++ linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1838 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1839
1840 down_read(&current->mm->mmap_sem);
1841 vma = find_vma(current->mm,regs->iaoq[0]);
1842 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1843 - && (vma->vm_flags & VM_EXEC)) {
1844 -
1845 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1846 fault_address = regs->iaoq[0];
1847 fault_space = regs->iasq[0];
1848
1849 diff -urNp linux-2.6.32.42/arch/parisc/mm/fault.c linux-2.6.32.42/arch/parisc/mm/fault.c
1850 --- linux-2.6.32.42/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1851 +++ linux-2.6.32.42/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1852 @@ -15,6 +15,7 @@
1853 #include <linux/sched.h>
1854 #include <linux/interrupt.h>
1855 #include <linux/module.h>
1856 +#include <linux/unistd.h>
1857
1858 #include <asm/uaccess.h>
1859 #include <asm/traps.h>
1860 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1861 static unsigned long
1862 parisc_acctyp(unsigned long code, unsigned int inst)
1863 {
1864 - if (code == 6 || code == 16)
1865 + if (code == 6 || code == 7 || code == 16)
1866 return VM_EXEC;
1867
1868 switch (inst & 0xf0000000) {
1869 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1870 }
1871 #endif
1872
1873 +#ifdef CONFIG_PAX_PAGEEXEC
1874 +/*
1875 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1876 + *
1877 + * returns 1 when task should be killed
1878 + * 2 when rt_sigreturn trampoline was detected
1879 + * 3 when unpatched PLT trampoline was detected
1880 + */
1881 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1882 +{
1883 +
1884 +#ifdef CONFIG_PAX_EMUPLT
1885 + int err;
1886 +
1887 + do { /* PaX: unpatched PLT emulation */
1888 + unsigned int bl, depwi;
1889 +
1890 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1891 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1892 +
1893 + if (err)
1894 + break;
1895 +
1896 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1897 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1898 +
1899 + err = get_user(ldw, (unsigned int *)addr);
1900 + err |= get_user(bv, (unsigned int *)(addr+4));
1901 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1902 +
1903 + if (err)
1904 + break;
1905 +
1906 + if (ldw == 0x0E801096U &&
1907 + bv == 0xEAC0C000U &&
1908 + ldw2 == 0x0E881095U)
1909 + {
1910 + unsigned int resolver, map;
1911 +
1912 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1913 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1914 + if (err)
1915 + break;
1916 +
1917 + regs->gr[20] = instruction_pointer(regs)+8;
1918 + regs->gr[21] = map;
1919 + regs->gr[22] = resolver;
1920 + regs->iaoq[0] = resolver | 3UL;
1921 + regs->iaoq[1] = regs->iaoq[0] + 4;
1922 + return 3;
1923 + }
1924 + }
1925 + } while (0);
1926 +#endif
1927 +
1928 +#ifdef CONFIG_PAX_EMUTRAMP
1929 +
1930 +#ifndef CONFIG_PAX_EMUSIGRT
1931 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1932 + return 1;
1933 +#endif
1934 +
1935 + do { /* PaX: rt_sigreturn emulation */
1936 + unsigned int ldi1, ldi2, bel, nop;
1937 +
1938 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1939 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1940 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1941 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1942 +
1943 + if (err)
1944 + break;
1945 +
1946 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1947 + ldi2 == 0x3414015AU &&
1948 + bel == 0xE4008200U &&
1949 + nop == 0x08000240U)
1950 + {
1951 + regs->gr[25] = (ldi1 & 2) >> 1;
1952 + regs->gr[20] = __NR_rt_sigreturn;
1953 + regs->gr[31] = regs->iaoq[1] + 16;
1954 + regs->sr[0] = regs->iasq[1];
1955 + regs->iaoq[0] = 0x100UL;
1956 + regs->iaoq[1] = regs->iaoq[0] + 4;
1957 + regs->iasq[0] = regs->sr[2];
1958 + regs->iasq[1] = regs->sr[2];
1959 + return 2;
1960 + }
1961 + } while (0);
1962 +#endif
1963 +
1964 + return 1;
1965 +}
1966 +
1967 +void pax_report_insns(void *pc, void *sp)
1968 +{
1969 + unsigned long i;
1970 +
1971 + printk(KERN_ERR "PAX: bytes at PC: ");
1972 + for (i = 0; i < 5; i++) {
1973 + unsigned int c;
1974 + if (get_user(c, (unsigned int *)pc+i))
1975 + printk(KERN_CONT "???????? ");
1976 + else
1977 + printk(KERN_CONT "%08x ", c);
1978 + }
1979 + printk("\n");
1980 +}
1981 +#endif
1982 +
1983 int fixup_exception(struct pt_regs *regs)
1984 {
1985 const struct exception_table_entry *fix;
1986 @@ -192,8 +303,33 @@ good_area:
1987
1988 acc_type = parisc_acctyp(code,regs->iir);
1989
1990 - if ((vma->vm_flags & acc_type) != acc_type)
1991 + if ((vma->vm_flags & acc_type) != acc_type) {
1992 +
1993 +#ifdef CONFIG_PAX_PAGEEXEC
1994 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1995 + (address & ~3UL) == instruction_pointer(regs))
1996 + {
1997 + up_read(&mm->mmap_sem);
1998 + switch (pax_handle_fetch_fault(regs)) {
1999 +
2000 +#ifdef CONFIG_PAX_EMUPLT
2001 + case 3:
2002 + return;
2003 +#endif
2004 +
2005 +#ifdef CONFIG_PAX_EMUTRAMP
2006 + case 2:
2007 + return;
2008 +#endif
2009 +
2010 + }
2011 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2012 + do_group_exit(SIGKILL);
2013 + }
2014 +#endif
2015 +
2016 goto bad_area;
2017 + }
2018
2019 /*
2020 * If for any reason at all we couldn't handle the fault, make
2021 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/device.h linux-2.6.32.42/arch/powerpc/include/asm/device.h
2022 --- linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2023 +++ linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2024 @@ -14,7 +14,7 @@ struct dev_archdata {
2025 struct device_node *of_node;
2026
2027 /* DMA operations on that device */
2028 - struct dma_map_ops *dma_ops;
2029 + const struct dma_map_ops *dma_ops;
2030
2031 /*
2032 * When an iommu is in use, dma_data is used as a ptr to the base of the
2033 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h
2034 --- linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2035 +++ linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2036 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2037 #ifdef CONFIG_PPC64
2038 extern struct dma_map_ops dma_iommu_ops;
2039 #endif
2040 -extern struct dma_map_ops dma_direct_ops;
2041 +extern const struct dma_map_ops dma_direct_ops;
2042
2043 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2044 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2045 {
2046 /* We don't handle the NULL dev case for ISA for now. We could
2047 * do it via an out of line call but it is not needed for now. The
2048 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2049 return dev->archdata.dma_ops;
2050 }
2051
2052 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2053 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2054 {
2055 dev->archdata.dma_ops = ops;
2056 }
2057 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2058
2059 static inline int dma_supported(struct device *dev, u64 mask)
2060 {
2061 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2062 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2063
2064 if (unlikely(dma_ops == NULL))
2065 return 0;
2066 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2067
2068 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2069 {
2070 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2071 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2072
2073 if (unlikely(dma_ops == NULL))
2074 return -EIO;
2075 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2076 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2077 dma_addr_t *dma_handle, gfp_t flag)
2078 {
2079 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2080 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2081 void *cpu_addr;
2082
2083 BUG_ON(!dma_ops);
2084 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2085 static inline void dma_free_coherent(struct device *dev, size_t size,
2086 void *cpu_addr, dma_addr_t dma_handle)
2087 {
2088 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2089 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2090
2091 BUG_ON(!dma_ops);
2092
2093 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2094
2095 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2096 {
2097 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2098 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2099
2100 if (dma_ops->mapping_error)
2101 return dma_ops->mapping_error(dev, dma_addr);
2102 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/elf.h linux-2.6.32.42/arch/powerpc/include/asm/elf.h
2103 --- linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2104 +++ linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2105 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2106 the loader. We need to make sure that it is out of the way of the program
2107 that it will "exec", and that there is sufficient room for the brk. */
2108
2109 -extern unsigned long randomize_et_dyn(unsigned long base);
2110 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2111 +#define ELF_ET_DYN_BASE (0x20000000)
2112 +
2113 +#ifdef CONFIG_PAX_ASLR
2114 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2115 +
2116 +#ifdef __powerpc64__
2117 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2118 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2119 +#else
2120 +#define PAX_DELTA_MMAP_LEN 15
2121 +#define PAX_DELTA_STACK_LEN 15
2122 +#endif
2123 +#endif
2124
2125 /*
2126 * Our registers are always unsigned longs, whether we're a 32 bit
2127 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2128 (0x7ff >> (PAGE_SHIFT - 12)) : \
2129 (0x3ffff >> (PAGE_SHIFT - 12)))
2130
2131 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2132 -#define arch_randomize_brk arch_randomize_brk
2133 -
2134 #endif /* __KERNEL__ */
2135
2136 /*
2137 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/iommu.h linux-2.6.32.42/arch/powerpc/include/asm/iommu.h
2138 --- linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2139 +++ linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2140 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2141 extern void iommu_init_early_dart(void);
2142 extern void iommu_init_early_pasemi(void);
2143
2144 +/* dma-iommu.c */
2145 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2146 +
2147 #ifdef CONFIG_PCI
2148 extern void pci_iommu_init(void);
2149 extern void pci_direct_iommu_init(void);
2150 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h
2151 --- linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2152 +++ linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2153 @@ -26,6 +26,7 @@ enum km_type {
2154 KM_SOFTIRQ1,
2155 KM_PPC_SYNC_PAGE,
2156 KM_PPC_SYNC_ICACHE,
2157 + KM_CLEARPAGE,
2158 KM_TYPE_NR
2159 };
2160
2161 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page_64.h linux-2.6.32.42/arch/powerpc/include/asm/page_64.h
2162 --- linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2163 +++ linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2164 @@ -180,15 +180,18 @@ do { \
2165 * stack by default, so in the absense of a PT_GNU_STACK program header
2166 * we turn execute permission off.
2167 */
2168 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2169 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2170 +#define VM_STACK_DEFAULT_FLAGS32 \
2171 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2172 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2173
2174 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2175 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2176
2177 +#ifndef CONFIG_PAX_PAGEEXEC
2178 #define VM_STACK_DEFAULT_FLAGS \
2179 (test_thread_flag(TIF_32BIT) ? \
2180 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2181 +#endif
2182
2183 #include <asm-generic/getorder.h>
2184
2185 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page.h linux-2.6.32.42/arch/powerpc/include/asm/page.h
2186 --- linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2187 +++ linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2188 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2189 * and needs to be executable. This means the whole heap ends
2190 * up being executable.
2191 */
2192 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2193 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194 +#define VM_DATA_DEFAULT_FLAGS32 \
2195 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2196 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2199 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2200 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2201 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2202 #endif
2203
2204 +#define ktla_ktva(addr) (addr)
2205 +#define ktva_ktla(addr) (addr)
2206 +
2207 #ifndef __ASSEMBLY__
2208
2209 #undef STRICT_MM_TYPECHECKS
2210 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pci.h linux-2.6.32.42/arch/powerpc/include/asm/pci.h
2211 --- linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2212 +++ linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2213 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2214 }
2215
2216 #ifdef CONFIG_PCI
2217 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2218 -extern struct dma_map_ops *get_pci_dma_ops(void);
2219 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2220 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2221 #else /* CONFIG_PCI */
2222 #define set_pci_dma_ops(d)
2223 #define get_pci_dma_ops() NULL
2224 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h
2225 --- linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2226 +++ linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2227 @@ -2,6 +2,7 @@
2228 #define _ASM_POWERPC_PGTABLE_H
2229 #ifdef __KERNEL__
2230
2231 +#include <linux/const.h>
2232 #ifndef __ASSEMBLY__
2233 #include <asm/processor.h> /* For TASK_SIZE */
2234 #include <asm/mmu.h>
2235 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h
2236 --- linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2237 +++ linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2238 @@ -21,6 +21,7 @@
2239 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2240 #define _PAGE_USER 0x004 /* usermode access allowed */
2241 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2242 +#define _PAGE_EXEC _PAGE_GUARDED
2243 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2244 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2245 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2246 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/reg.h linux-2.6.32.42/arch/powerpc/include/asm/reg.h
2247 --- linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2248 +++ linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2249 @@ -191,6 +191,7 @@
2250 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2251 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2252 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2253 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2254 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2255 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2256 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2257 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h
2258 --- linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2259 +++ linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2260 @@ -13,7 +13,7 @@
2261
2262 #include <linux/swiotlb.h>
2263
2264 -extern struct dma_map_ops swiotlb_dma_ops;
2265 +extern const struct dma_map_ops swiotlb_dma_ops;
2266
2267 static inline void dma_mark_clean(void *addr, size_t size) {}
2268
2269 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/system.h linux-2.6.32.42/arch/powerpc/include/asm/system.h
2270 --- linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2271 +++ linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2272 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2273 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2274 #endif
2275
2276 -extern unsigned long arch_align_stack(unsigned long sp);
2277 +#define arch_align_stack(x) ((x) & ~0xfUL)
2278
2279 /* Used in very early kernel initialization. */
2280 extern unsigned long reloc_offset(void);
2281 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h
2282 --- linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2283 +++ linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2284 @@ -13,6 +13,8 @@
2285 #define VERIFY_READ 0
2286 #define VERIFY_WRITE 1
2287
2288 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2289 +
2290 /*
2291 * The fs value determines whether argument validity checking should be
2292 * performed or not. If get_fs() == USER_DS, checking is performed, with
2293 @@ -327,52 +329,6 @@ do { \
2294 extern unsigned long __copy_tofrom_user(void __user *to,
2295 const void __user *from, unsigned long size);
2296
2297 -#ifndef __powerpc64__
2298 -
2299 -static inline unsigned long copy_from_user(void *to,
2300 - const void __user *from, unsigned long n)
2301 -{
2302 - unsigned long over;
2303 -
2304 - if (access_ok(VERIFY_READ, from, n))
2305 - return __copy_tofrom_user((__force void __user *)to, from, n);
2306 - if ((unsigned long)from < TASK_SIZE) {
2307 - over = (unsigned long)from + n - TASK_SIZE;
2308 - return __copy_tofrom_user((__force void __user *)to, from,
2309 - n - over) + over;
2310 - }
2311 - return n;
2312 -}
2313 -
2314 -static inline unsigned long copy_to_user(void __user *to,
2315 - const void *from, unsigned long n)
2316 -{
2317 - unsigned long over;
2318 -
2319 - if (access_ok(VERIFY_WRITE, to, n))
2320 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2321 - if ((unsigned long)to < TASK_SIZE) {
2322 - over = (unsigned long)to + n - TASK_SIZE;
2323 - return __copy_tofrom_user(to, (__force void __user *)from,
2324 - n - over) + over;
2325 - }
2326 - return n;
2327 -}
2328 -
2329 -#else /* __powerpc64__ */
2330 -
2331 -#define __copy_in_user(to, from, size) \
2332 - __copy_tofrom_user((to), (from), (size))
2333 -
2334 -extern unsigned long copy_from_user(void *to, const void __user *from,
2335 - unsigned long n);
2336 -extern unsigned long copy_to_user(void __user *to, const void *from,
2337 - unsigned long n);
2338 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2339 - unsigned long n);
2340 -
2341 -#endif /* __powerpc64__ */
2342 -
2343 static inline unsigned long __copy_from_user_inatomic(void *to,
2344 const void __user *from, unsigned long n)
2345 {
2346 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2347 if (ret == 0)
2348 return 0;
2349 }
2350 +
2351 + if (!__builtin_constant_p(n))
2352 + check_object_size(to, n, false);
2353 +
2354 return __copy_tofrom_user((__force void __user *)to, from, n);
2355 }
2356
2357 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2358 if (ret == 0)
2359 return 0;
2360 }
2361 +
2362 + if (!__builtin_constant_p(n))
2363 + check_object_size(from, n, true);
2364 +
2365 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2366 }
2367
2368 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2369 return __copy_to_user_inatomic(to, from, size);
2370 }
2371
2372 +#ifndef __powerpc64__
2373 +
2374 +static inline unsigned long __must_check copy_from_user(void *to,
2375 + const void __user *from, unsigned long n)
2376 +{
2377 + unsigned long over;
2378 +
2379 + if ((long)n < 0)
2380 + return n;
2381 +
2382 + if (access_ok(VERIFY_READ, from, n)) {
2383 + if (!__builtin_constant_p(n))
2384 + check_object_size(to, n, false);
2385 + return __copy_tofrom_user((__force void __user *)to, from, n);
2386 + }
2387 + if ((unsigned long)from < TASK_SIZE) {
2388 + over = (unsigned long)from + n - TASK_SIZE;
2389 + if (!__builtin_constant_p(n - over))
2390 + check_object_size(to, n - over, false);
2391 + return __copy_tofrom_user((__force void __user *)to, from,
2392 + n - over) + over;
2393 + }
2394 + return n;
2395 +}
2396 +
2397 +static inline unsigned long __must_check copy_to_user(void __user *to,
2398 + const void *from, unsigned long n)
2399 +{
2400 + unsigned long over;
2401 +
2402 + if ((long)n < 0)
2403 + return n;
2404 +
2405 + if (access_ok(VERIFY_WRITE, to, n)) {
2406 + if (!__builtin_constant_p(n))
2407 + check_object_size(from, n, true);
2408 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2409 + }
2410 + if ((unsigned long)to < TASK_SIZE) {
2411 + over = (unsigned long)to + n - TASK_SIZE;
2412 + if (!__builtin_constant_p(n))
2413 + check_object_size(from, n - over, true);
2414 + return __copy_tofrom_user(to, (__force void __user *)from,
2415 + n - over) + over;
2416 + }
2417 + return n;
2418 +}
2419 +
2420 +#else /* __powerpc64__ */
2421 +
2422 +#define __copy_in_user(to, from, size) \
2423 + __copy_tofrom_user((to), (from), (size))
2424 +
2425 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2426 +{
2427 + if ((long)n < 0 || n > INT_MAX)
2428 + return n;
2429 +
2430 + if (!__builtin_constant_p(n))
2431 + check_object_size(to, n, false);
2432 +
2433 + if (likely(access_ok(VERIFY_READ, from, n)))
2434 + n = __copy_from_user(to, from, n);
2435 + else
2436 + memset(to, 0, n);
2437 + return n;
2438 +}
2439 +
2440 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2441 +{
2442 + if ((long)n < 0 || n > INT_MAX)
2443 + return n;
2444 +
2445 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2446 + if (!__builtin_constant_p(n))
2447 + check_object_size(from, n, true);
2448 + n = __copy_to_user(to, from, n);
2449 + }
2450 + return n;
2451 +}
2452 +
2453 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2454 + unsigned long n);
2455 +
2456 +#endif /* __powerpc64__ */
2457 +
2458 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2459
2460 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2461 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c
2462 --- linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2463 +++ linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2464 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2465 &cache_assoc_attr,
2466 };
2467
2468 -static struct sysfs_ops cache_index_ops = {
2469 +static const struct sysfs_ops cache_index_ops = {
2470 .show = cache_index_show,
2471 };
2472
2473 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma.c linux-2.6.32.42/arch/powerpc/kernel/dma.c
2474 --- linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2475 +++ linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2476 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2477 }
2478 #endif
2479
2480 -struct dma_map_ops dma_direct_ops = {
2481 +const struct dma_map_ops dma_direct_ops = {
2482 .alloc_coherent = dma_direct_alloc_coherent,
2483 .free_coherent = dma_direct_free_coherent,
2484 .map_sg = dma_direct_map_sg,
2485 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c
2486 --- linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2487 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2488 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2489 }
2490
2491 /* We support DMA to/from any memory page via the iommu */
2492 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2493 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2494 {
2495 struct iommu_table *tbl = get_iommu_table_base(dev);
2496
2497 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c
2498 --- linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2499 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2500 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2501 * map_page, and unmap_page on highmem, use normal dma_ops
2502 * for everything else.
2503 */
2504 -struct dma_map_ops swiotlb_dma_ops = {
2505 +const struct dma_map_ops swiotlb_dma_ops = {
2506 .alloc_coherent = dma_direct_alloc_coherent,
2507 .free_coherent = dma_direct_free_coherent,
2508 .map_sg = swiotlb_map_sg_attrs,
2509 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S
2510 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2511 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2512 @@ -455,6 +455,7 @@ storage_fault_common:
2513 std r14,_DAR(r1)
2514 std r15,_DSISR(r1)
2515 addi r3,r1,STACK_FRAME_OVERHEAD
2516 + bl .save_nvgprs
2517 mr r4,r14
2518 mr r5,r15
2519 ld r14,PACA_EXGEN+EX_R14(r13)
2520 @@ -464,8 +465,7 @@ storage_fault_common:
2521 cmpdi r3,0
2522 bne- 1f
2523 b .ret_from_except_lite
2524 -1: bl .save_nvgprs
2525 - mr r5,r3
2526 +1: mr r5,r3
2527 addi r3,r1,STACK_FRAME_OVERHEAD
2528 ld r4,_DAR(r1)
2529 bl .bad_page_fault
2530 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S
2531 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2532 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2533 @@ -818,10 +818,10 @@ handle_page_fault:
2534 11: ld r4,_DAR(r1)
2535 ld r5,_DSISR(r1)
2536 addi r3,r1,STACK_FRAME_OVERHEAD
2537 + bl .save_nvgprs
2538 bl .do_page_fault
2539 cmpdi r3,0
2540 beq+ 13f
2541 - bl .save_nvgprs
2542 mr r5,r3
2543 addi r3,r1,STACK_FRAME_OVERHEAD
2544 lwz r4,_DAR(r1)
2545 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c
2546 --- linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2547 +++ linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2548 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2549 return 1;
2550 }
2551
2552 -static struct dma_map_ops ibmebus_dma_ops = {
2553 +static const struct dma_map_ops ibmebus_dma_ops = {
2554 .alloc_coherent = ibmebus_alloc_coherent,
2555 .free_coherent = ibmebus_free_coherent,
2556 .map_sg = ibmebus_map_sg,
2557 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/kgdb.c linux-2.6.32.42/arch/powerpc/kernel/kgdb.c
2558 --- linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2559 +++ linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2560 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2561 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2562 return 0;
2563
2564 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2565 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2566 regs->nip += 4;
2567
2568 return 1;
2569 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2570 /*
2571 * Global data
2572 */
2573 -struct kgdb_arch arch_kgdb_ops = {
2574 +const struct kgdb_arch arch_kgdb_ops = {
2575 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2576 };
2577
2578 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module_32.c linux-2.6.32.42/arch/powerpc/kernel/module_32.c
2579 --- linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2580 +++ linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2581 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2582 me->arch.core_plt_section = i;
2583 }
2584 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2585 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2586 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2587 return -ENOEXEC;
2588 }
2589
2590 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2591
2592 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2593 /* Init, or core PLT? */
2594 - if (location >= mod->module_core
2595 - && location < mod->module_core + mod->core_size)
2596 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2597 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2598 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2599 - else
2600 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2601 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2602 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2603 + else {
2604 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2605 + return ~0UL;
2606 + }
2607
2608 /* Find this entry, or if that fails, the next avail. entry */
2609 while (entry->jump[0]) {
2610 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module.c linux-2.6.32.42/arch/powerpc/kernel/module.c
2611 --- linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2612 +++ linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2613 @@ -31,11 +31,24 @@
2614
2615 LIST_HEAD(module_bug_list);
2616
2617 +#ifdef CONFIG_PAX_KERNEXEC
2618 void *module_alloc(unsigned long size)
2619 {
2620 if (size == 0)
2621 return NULL;
2622
2623 + return vmalloc(size);
2624 +}
2625 +
2626 +void *module_alloc_exec(unsigned long size)
2627 +#else
2628 +void *module_alloc(unsigned long size)
2629 +#endif
2630 +
2631 +{
2632 + if (size == 0)
2633 + return NULL;
2634 +
2635 return vmalloc_exec(size);
2636 }
2637
2638 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2639 vfree(module_region);
2640 }
2641
2642 +#ifdef CONFIG_PAX_KERNEXEC
2643 +void module_free_exec(struct module *mod, void *module_region)
2644 +{
2645 + module_free(mod, module_region);
2646 +}
2647 +#endif
2648 +
2649 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2650 const Elf_Shdr *sechdrs,
2651 const char *name)
2652 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/pci-common.c linux-2.6.32.42/arch/powerpc/kernel/pci-common.c
2653 --- linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2654 +++ linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2655 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2656 unsigned int ppc_pci_flags = 0;
2657
2658
2659 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2660 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2661
2662 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2663 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2664 {
2665 pci_dma_ops = dma_ops;
2666 }
2667
2668 -struct dma_map_ops *get_pci_dma_ops(void)
2669 +const struct dma_map_ops *get_pci_dma_ops(void)
2670 {
2671 return pci_dma_ops;
2672 }
2673 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/process.c linux-2.6.32.42/arch/powerpc/kernel/process.c
2674 --- linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2675 +++ linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2676 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2677 * Lookup NIP late so we have the best change of getting the
2678 * above info out without failing
2679 */
2680 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2681 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2682 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2683 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2684 #endif
2685 show_stack(current, (unsigned long *) regs->gpr[1]);
2686 if (!user_mode(regs))
2687 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2688 newsp = stack[0];
2689 ip = stack[STACK_FRAME_LR_SAVE];
2690 if (!firstframe || ip != lr) {
2691 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2692 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2693 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2694 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2695 - printk(" (%pS)",
2696 + printk(" (%pA)",
2697 (void *)current->ret_stack[curr_frame].ret);
2698 curr_frame--;
2699 }
2700 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2701 struct pt_regs *regs = (struct pt_regs *)
2702 (sp + STACK_FRAME_OVERHEAD);
2703 lr = regs->link;
2704 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2705 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2706 regs->trap, (void *)regs->nip, (void *)lr);
2707 firstframe = 1;
2708 }
2709 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2710 }
2711
2712 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2713 -
2714 -unsigned long arch_align_stack(unsigned long sp)
2715 -{
2716 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2717 - sp -= get_random_int() & ~PAGE_MASK;
2718 - return sp & ~0xf;
2719 -}
2720 -
2721 -static inline unsigned long brk_rnd(void)
2722 -{
2723 - unsigned long rnd = 0;
2724 -
2725 - /* 8MB for 32bit, 1GB for 64bit */
2726 - if (is_32bit_task())
2727 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2728 - else
2729 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2730 -
2731 - return rnd << PAGE_SHIFT;
2732 -}
2733 -
2734 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2735 -{
2736 - unsigned long base = mm->brk;
2737 - unsigned long ret;
2738 -
2739 -#ifdef CONFIG_PPC_STD_MMU_64
2740 - /*
2741 - * If we are using 1TB segments and we are allowed to randomise
2742 - * the heap, we can put it above 1TB so it is backed by a 1TB
2743 - * segment. Otherwise the heap will be in the bottom 1TB
2744 - * which always uses 256MB segments and this may result in a
2745 - * performance penalty.
2746 - */
2747 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2748 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2749 -#endif
2750 -
2751 - ret = PAGE_ALIGN(base + brk_rnd());
2752 -
2753 - if (ret < mm->brk)
2754 - return mm->brk;
2755 -
2756 - return ret;
2757 -}
2758 -
2759 -unsigned long randomize_et_dyn(unsigned long base)
2760 -{
2761 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2762 -
2763 - if (ret < base)
2764 - return base;
2765 -
2766 - return ret;
2767 -}
2768 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_32.c linux-2.6.32.42/arch/powerpc/kernel/signal_32.c
2769 --- linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2770 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2771 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2772 /* Save user registers on the stack */
2773 frame = &rt_sf->uc.uc_mcontext;
2774 addr = frame;
2775 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2776 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2777 if (save_user_regs(regs, frame, 0, 1))
2778 goto badframe;
2779 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2780 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_64.c linux-2.6.32.42/arch/powerpc/kernel/signal_64.c
2781 --- linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2782 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2783 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2784 current->thread.fpscr.val = 0;
2785
2786 /* Set up to return from userspace. */
2787 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2788 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2789 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2790 } else {
2791 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2792 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c
2793 --- linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2794 +++ linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2795 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2796 if (oldlenp) {
2797 if (!error) {
2798 if (get_user(oldlen, oldlenp) ||
2799 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2800 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2801 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2802 error = -EFAULT;
2803 }
2804 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2805 }
2806 return error;
2807 }
2808 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/traps.c linux-2.6.32.42/arch/powerpc/kernel/traps.c
2809 --- linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2810 +++ linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2811 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2812 static inline void pmac_backlight_unblank(void) { }
2813 #endif
2814
2815 +extern void gr_handle_kernel_exploit(void);
2816 +
2817 int die(const char *str, struct pt_regs *regs, long err)
2818 {
2819 static struct {
2820 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2821 if (panic_on_oops)
2822 panic("Fatal exception");
2823
2824 + gr_handle_kernel_exploit();
2825 +
2826 oops_exit();
2827 do_exit(err);
2828
2829 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vdso.c linux-2.6.32.42/arch/powerpc/kernel/vdso.c
2830 --- linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2831 +++ linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2832 @@ -36,6 +36,7 @@
2833 #include <asm/firmware.h>
2834 #include <asm/vdso.h>
2835 #include <asm/vdso_datapage.h>
2836 +#include <asm/mman.h>
2837
2838 #include "setup.h"
2839
2840 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2841 vdso_base = VDSO32_MBASE;
2842 #endif
2843
2844 - current->mm->context.vdso_base = 0;
2845 + current->mm->context.vdso_base = ~0UL;
2846
2847 /* vDSO has a problem and was disabled, just don't "enable" it for the
2848 * process
2849 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2850 vdso_base = get_unmapped_area(NULL, vdso_base,
2851 (vdso_pages << PAGE_SHIFT) +
2852 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2853 - 0, 0);
2854 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2855 if (IS_ERR_VALUE(vdso_base)) {
2856 rc = vdso_base;
2857 goto fail_mmapsem;
2858 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vio.c linux-2.6.32.42/arch/powerpc/kernel/vio.c
2859 --- linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2860 +++ linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2861 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2862 vio_cmo_dealloc(viodev, alloc_size);
2863 }
2864
2865 -struct dma_map_ops vio_dma_mapping_ops = {
2866 +static const struct dma_map_ops vio_dma_mapping_ops = {
2867 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2868 .free_coherent = vio_dma_iommu_free_coherent,
2869 .map_sg = vio_dma_iommu_map_sg,
2870 .unmap_sg = vio_dma_iommu_unmap_sg,
2871 + .dma_supported = dma_iommu_dma_supported,
2872 .map_page = vio_dma_iommu_map_page,
2873 .unmap_page = vio_dma_iommu_unmap_page,
2874
2875 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2876
2877 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2878 {
2879 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2880 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2881 }
2882
2883 diff -urNp linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c
2884 --- linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2885 +++ linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2886 @@ -9,22 +9,6 @@
2887 #include <linux/module.h>
2888 #include <asm/uaccess.h>
2889
2890 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2891 -{
2892 - if (likely(access_ok(VERIFY_READ, from, n)))
2893 - n = __copy_from_user(to, from, n);
2894 - else
2895 - memset(to, 0, n);
2896 - return n;
2897 -}
2898 -
2899 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2900 -{
2901 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2902 - n = __copy_to_user(to, from, n);
2903 - return n;
2904 -}
2905 -
2906 unsigned long copy_in_user(void __user *to, const void __user *from,
2907 unsigned long n)
2908 {
2909 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2910 return n;
2911 }
2912
2913 -EXPORT_SYMBOL(copy_from_user);
2914 -EXPORT_SYMBOL(copy_to_user);
2915 EXPORT_SYMBOL(copy_in_user);
2916
2917 diff -urNp linux-2.6.32.42/arch/powerpc/mm/fault.c linux-2.6.32.42/arch/powerpc/mm/fault.c
2918 --- linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2919 +++ linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2920 @@ -30,6 +30,10 @@
2921 #include <linux/kprobes.h>
2922 #include <linux/kdebug.h>
2923 #include <linux/perf_event.h>
2924 +#include <linux/slab.h>
2925 +#include <linux/pagemap.h>
2926 +#include <linux/compiler.h>
2927 +#include <linux/unistd.h>
2928
2929 #include <asm/firmware.h>
2930 #include <asm/page.h>
2931 @@ -40,6 +44,7 @@
2932 #include <asm/uaccess.h>
2933 #include <asm/tlbflush.h>
2934 #include <asm/siginfo.h>
2935 +#include <asm/ptrace.h>
2936
2937
2938 #ifdef CONFIG_KPROBES
2939 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2940 }
2941 #endif
2942
2943 +#ifdef CONFIG_PAX_PAGEEXEC
2944 +/*
2945 + * PaX: decide what to do with offenders (regs->nip = fault address)
2946 + *
2947 + * returns 1 when task should be killed
2948 + */
2949 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2950 +{
2951 + return 1;
2952 +}
2953 +
2954 +void pax_report_insns(void *pc, void *sp)
2955 +{
2956 + unsigned long i;
2957 +
2958 + printk(KERN_ERR "PAX: bytes at PC: ");
2959 + for (i = 0; i < 5; i++) {
2960 + unsigned int c;
2961 + if (get_user(c, (unsigned int __user *)pc+i))
2962 + printk(KERN_CONT "???????? ");
2963 + else
2964 + printk(KERN_CONT "%08x ", c);
2965 + }
2966 + printk("\n");
2967 +}
2968 +#endif
2969 +
2970 /*
2971 * Check whether the instruction at regs->nip is a store using
2972 * an update addressing form which will update r1.
2973 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2974 * indicate errors in DSISR but can validly be set in SRR1.
2975 */
2976 if (trap == 0x400)
2977 - error_code &= 0x48200000;
2978 + error_code &= 0x58200000;
2979 else
2980 is_write = error_code & DSISR_ISSTORE;
2981 #else
2982 @@ -250,7 +282,7 @@ good_area:
2983 * "undefined". Of those that can be set, this is the only
2984 * one which seems bad.
2985 */
2986 - if (error_code & 0x10000000)
2987 + if (error_code & DSISR_GUARDED)
2988 /* Guarded storage error. */
2989 goto bad_area;
2990 #endif /* CONFIG_8xx */
2991 @@ -265,7 +297,7 @@ good_area:
2992 * processors use the same I/D cache coherency mechanism
2993 * as embedded.
2994 */
2995 - if (error_code & DSISR_PROTFAULT)
2996 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2997 goto bad_area;
2998 #endif /* CONFIG_PPC_STD_MMU */
2999
3000 @@ -335,6 +367,23 @@ bad_area:
3001 bad_area_nosemaphore:
3002 /* User mode accesses cause a SIGSEGV */
3003 if (user_mode(regs)) {
3004 +
3005 +#ifdef CONFIG_PAX_PAGEEXEC
3006 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3007 +#ifdef CONFIG_PPC_STD_MMU
3008 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3009 +#else
3010 + if (is_exec && regs->nip == address) {
3011 +#endif
3012 + switch (pax_handle_fetch_fault(regs)) {
3013 + }
3014 +
3015 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3016 + do_group_exit(SIGKILL);
3017 + }
3018 + }
3019 +#endif
3020 +
3021 _exception(SIGSEGV, regs, code, address);
3022 return 0;
3023 }
3024 diff -urNp linux-2.6.32.42/arch/powerpc/mm/mmap_64.c linux-2.6.32.42/arch/powerpc/mm/mmap_64.c
3025 --- linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3026 +++ linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3027 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3028 */
3029 if (mmap_is_legacy()) {
3030 mm->mmap_base = TASK_UNMAPPED_BASE;
3031 +
3032 +#ifdef CONFIG_PAX_RANDMMAP
3033 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3034 + mm->mmap_base += mm->delta_mmap;
3035 +#endif
3036 +
3037 mm->get_unmapped_area = arch_get_unmapped_area;
3038 mm->unmap_area = arch_unmap_area;
3039 } else {
3040 mm->mmap_base = mmap_base();
3041 +
3042 +#ifdef CONFIG_PAX_RANDMMAP
3043 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3044 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3045 +#endif
3046 +
3047 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3048 mm->unmap_area = arch_unmap_area_topdown;
3049 }
3050 diff -urNp linux-2.6.32.42/arch/powerpc/mm/slice.c linux-2.6.32.42/arch/powerpc/mm/slice.c
3051 --- linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3052 +++ linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3053 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3054 if ((mm->task_size - len) < addr)
3055 return 0;
3056 vma = find_vma(mm, addr);
3057 - return (!vma || (addr + len) <= vma->vm_start);
3058 + return check_heap_stack_gap(vma, addr, len);
3059 }
3060
3061 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3062 @@ -256,7 +256,7 @@ full_search:
3063 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3064 continue;
3065 }
3066 - if (!vma || addr + len <= vma->vm_start) {
3067 + if (check_heap_stack_gap(vma, addr, len)) {
3068 /*
3069 * Remember the place where we stopped the search:
3070 */
3071 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3072 }
3073 }
3074
3075 - addr = mm->mmap_base;
3076 - while (addr > len) {
3077 + if (mm->mmap_base < len)
3078 + addr = -ENOMEM;
3079 + else
3080 + addr = mm->mmap_base - len;
3081 +
3082 + while (!IS_ERR_VALUE(addr)) {
3083 /* Go down by chunk size */
3084 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3085 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3086
3087 /* Check for hit with different page size */
3088 mask = slice_range_to_mask(addr, len);
3089 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3090 * return with success:
3091 */
3092 vma = find_vma(mm, addr);
3093 - if (!vma || (addr + len) <= vma->vm_start) {
3094 + if (check_heap_stack_gap(vma, addr, len)) {
3095 /* remember the address as a hint for next time */
3096 if (use_cache)
3097 mm->free_area_cache = addr;
3098 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3099 mm->cached_hole_size = vma->vm_start - addr;
3100
3101 /* try just below the current vma->vm_start */
3102 - addr = vma->vm_start;
3103 + addr = skip_heap_stack_gap(vma, len);
3104 }
3105
3106 /*
3107 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3108 if (fixed && addr > (mm->task_size - len))
3109 return -EINVAL;
3110
3111 +#ifdef CONFIG_PAX_RANDMMAP
3112 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3113 + addr = 0;
3114 +#endif
3115 +
3116 /* If hint, make sure it matches our alignment restrictions */
3117 if (!fixed && addr) {
3118 addr = _ALIGN_UP(addr, 1ul << pshift);
3119 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c
3120 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3121 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3122 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3123 lite5200_pm_target_state = PM_SUSPEND_ON;
3124 }
3125
3126 -static struct platform_suspend_ops lite5200_pm_ops = {
3127 +static const struct platform_suspend_ops lite5200_pm_ops = {
3128 .valid = lite5200_pm_valid,
3129 .begin = lite5200_pm_begin,
3130 .prepare = lite5200_pm_prepare,
3131 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3132 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3133 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3134 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3135 iounmap(mbar);
3136 }
3137
3138 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3139 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3140 .valid = mpc52xx_pm_valid,
3141 .prepare = mpc52xx_pm_prepare,
3142 .enter = mpc52xx_pm_enter,
3143 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c
3144 --- linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3145 +++ linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3146 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3147 return ret;
3148 }
3149
3150 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3151 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3152 .valid = mpc83xx_suspend_valid,
3153 .begin = mpc83xx_suspend_begin,
3154 .enter = mpc83xx_suspend_enter,
3155 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c
3156 --- linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3157 +++ linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3158 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3159
3160 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3161
3162 -struct dma_map_ops dma_iommu_fixed_ops = {
3163 +const struct dma_map_ops dma_iommu_fixed_ops = {
3164 .alloc_coherent = dma_fixed_alloc_coherent,
3165 .free_coherent = dma_fixed_free_coherent,
3166 .map_sg = dma_fixed_map_sg,
3167 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c
3168 --- linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3169 +++ linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3170 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3171 return mask >= DMA_BIT_MASK(32);
3172 }
3173
3174 -static struct dma_map_ops ps3_sb_dma_ops = {
3175 +static const struct dma_map_ops ps3_sb_dma_ops = {
3176 .alloc_coherent = ps3_alloc_coherent,
3177 .free_coherent = ps3_free_coherent,
3178 .map_sg = ps3_sb_map_sg,
3179 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3180 .unmap_page = ps3_unmap_page,
3181 };
3182
3183 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3184 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3185 .alloc_coherent = ps3_alloc_coherent,
3186 .free_coherent = ps3_free_coherent,
3187 .map_sg = ps3_ioc0_map_sg,
3188 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig
3189 --- linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3190 +++ linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3191 @@ -2,6 +2,8 @@ config PPC_PSERIES
3192 depends on PPC64 && PPC_BOOK3S
3193 bool "IBM pSeries & new (POWER5-based) iSeries"
3194 select MPIC
3195 + select PCI_MSI
3196 + select XICS
3197 select PPC_I8259
3198 select PPC_RTAS
3199 select RTAS_ERROR_LOGGING
3200 diff -urNp linux-2.6.32.42/arch/s390/include/asm/elf.h linux-2.6.32.42/arch/s390/include/asm/elf.h
3201 --- linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3202 +++ linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3203 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3204 that it will "exec", and that there is sufficient room for the brk. */
3205 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3206
3207 +#ifdef CONFIG_PAX_ASLR
3208 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3209 +
3210 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3211 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3212 +#endif
3213 +
3214 /* This yields a mask that user programs can use to figure out what
3215 instruction set this CPU supports. */
3216
3217 diff -urNp linux-2.6.32.42/arch/s390/include/asm/setup.h linux-2.6.32.42/arch/s390/include/asm/setup.h
3218 --- linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3219 +++ linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3220 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3221 void detect_memory_layout(struct mem_chunk chunk[]);
3222
3223 #ifdef CONFIG_S390_SWITCH_AMODE
3224 -extern unsigned int switch_amode;
3225 +#define switch_amode (1)
3226 #else
3227 #define switch_amode (0)
3228 #endif
3229
3230 #ifdef CONFIG_S390_EXEC_PROTECT
3231 -extern unsigned int s390_noexec;
3232 +#define s390_noexec (1)
3233 #else
3234 #define s390_noexec (0)
3235 #endif
3236 diff -urNp linux-2.6.32.42/arch/s390/include/asm/uaccess.h linux-2.6.32.42/arch/s390/include/asm/uaccess.h
3237 --- linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3238 +++ linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3239 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3240 copy_to_user(void __user *to, const void *from, unsigned long n)
3241 {
3242 might_fault();
3243 +
3244 + if ((long)n < 0)
3245 + return n;
3246 +
3247 if (access_ok(VERIFY_WRITE, to, n))
3248 n = __copy_to_user(to, from, n);
3249 return n;
3250 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3251 static inline unsigned long __must_check
3252 __copy_from_user(void *to, const void __user *from, unsigned long n)
3253 {
3254 + if ((long)n < 0)
3255 + return n;
3256 +
3257 if (__builtin_constant_p(n) && (n <= 256))
3258 return uaccess.copy_from_user_small(n, from, to);
3259 else
3260 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3261 copy_from_user(void *to, const void __user *from, unsigned long n)
3262 {
3263 might_fault();
3264 +
3265 + if ((long)n < 0)
3266 + return n;
3267 +
3268 if (access_ok(VERIFY_READ, from, n))
3269 n = __copy_from_user(to, from, n);
3270 else
3271 diff -urNp linux-2.6.32.42/arch/s390/Kconfig linux-2.6.32.42/arch/s390/Kconfig
3272 --- linux-2.6.32.42/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3273 +++ linux-2.6.32.42/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3274 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3275
3276 config S390_SWITCH_AMODE
3277 bool "Switch kernel/user addressing modes"
3278 + default y
3279 help
3280 This option allows to switch the addressing modes of kernel and user
3281 - space. The kernel parameter switch_amode=on will enable this feature,
3282 - default is disabled. Enabling this (via kernel parameter) on machines
3283 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3284 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3285 + will reduce system performance.
3286
3287 Note that this option will also be selected by selecting the execute
3288 - protection option below. Enabling the execute protection via the
3289 - noexec kernel parameter will also switch the addressing modes,
3290 - independent of the switch_amode kernel parameter.
3291 + protection option below. Enabling the execute protection will also
3292 + switch the addressing modes, independent of this option.
3293
3294
3295 config S390_EXEC_PROTECT
3296 bool "Data execute protection"
3297 + default y
3298 select S390_SWITCH_AMODE
3299 help
3300 This option allows to enable a buffer overflow protection for user
3301 space programs and it also selects the addressing mode option above.
3302 - The kernel parameter noexec=on will enable this feature and also
3303 - switch the addressing modes, default is disabled. Enabling this (via
3304 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3305 - will reduce system performance.
3306 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3307 + reduce system performance.
3308
3309 comment "Code generation options"
3310
3311 diff -urNp linux-2.6.32.42/arch/s390/kernel/module.c linux-2.6.32.42/arch/s390/kernel/module.c
3312 --- linux-2.6.32.42/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3313 +++ linux-2.6.32.42/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3314 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3315
3316 /* Increase core size by size of got & plt and set start
3317 offsets for got and plt. */
3318 - me->core_size = ALIGN(me->core_size, 4);
3319 - me->arch.got_offset = me->core_size;
3320 - me->core_size += me->arch.got_size;
3321 - me->arch.plt_offset = me->core_size;
3322 - me->core_size += me->arch.plt_size;
3323 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3324 + me->arch.got_offset = me->core_size_rw;
3325 + me->core_size_rw += me->arch.got_size;
3326 + me->arch.plt_offset = me->core_size_rx;
3327 + me->core_size_rx += me->arch.plt_size;
3328 return 0;
3329 }
3330
3331 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3332 if (info->got_initialized == 0) {
3333 Elf_Addr *gotent;
3334
3335 - gotent = me->module_core + me->arch.got_offset +
3336 + gotent = me->module_core_rw + me->arch.got_offset +
3337 info->got_offset;
3338 *gotent = val;
3339 info->got_initialized = 1;
3340 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3341 else if (r_type == R_390_GOTENT ||
3342 r_type == R_390_GOTPLTENT)
3343 *(unsigned int *) loc =
3344 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3345 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3346 else if (r_type == R_390_GOT64 ||
3347 r_type == R_390_GOTPLT64)
3348 *(unsigned long *) loc = val;
3349 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3350 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3351 if (info->plt_initialized == 0) {
3352 unsigned int *ip;
3353 - ip = me->module_core + me->arch.plt_offset +
3354 + ip = me->module_core_rx + me->arch.plt_offset +
3355 info->plt_offset;
3356 #ifndef CONFIG_64BIT
3357 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3358 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3359 val - loc + 0xffffUL < 0x1ffffeUL) ||
3360 (r_type == R_390_PLT32DBL &&
3361 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3362 - val = (Elf_Addr) me->module_core +
3363 + val = (Elf_Addr) me->module_core_rx +
3364 me->arch.plt_offset +
3365 info->plt_offset;
3366 val += rela->r_addend - loc;
3367 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3368 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3369 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3370 val = val + rela->r_addend -
3371 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3372 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3373 if (r_type == R_390_GOTOFF16)
3374 *(unsigned short *) loc = val;
3375 else if (r_type == R_390_GOTOFF32)
3376 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3377 break;
3378 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3379 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3380 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3381 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3382 rela->r_addend - loc;
3383 if (r_type == R_390_GOTPC)
3384 *(unsigned int *) loc = val;
3385 diff -urNp linux-2.6.32.42/arch/s390/kernel/setup.c linux-2.6.32.42/arch/s390/kernel/setup.c
3386 --- linux-2.6.32.42/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3387 +++ linux-2.6.32.42/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3388 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3389 early_param("mem", early_parse_mem);
3390
3391 #ifdef CONFIG_S390_SWITCH_AMODE
3392 -unsigned int switch_amode = 0;
3393 -EXPORT_SYMBOL_GPL(switch_amode);
3394 -
3395 static int set_amode_and_uaccess(unsigned long user_amode,
3396 unsigned long user32_amode)
3397 {
3398 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3399 return 0;
3400 }
3401 }
3402 -
3403 -/*
3404 - * Switch kernel/user addressing modes?
3405 - */
3406 -static int __init early_parse_switch_amode(char *p)
3407 -{
3408 - switch_amode = 1;
3409 - return 0;
3410 -}
3411 -early_param("switch_amode", early_parse_switch_amode);
3412 -
3413 #else /* CONFIG_S390_SWITCH_AMODE */
3414 static inline int set_amode_and_uaccess(unsigned long user_amode,
3415 unsigned long user32_amode)
3416 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3417 }
3418 #endif /* CONFIG_S390_SWITCH_AMODE */
3419
3420 -#ifdef CONFIG_S390_EXEC_PROTECT
3421 -unsigned int s390_noexec = 0;
3422 -EXPORT_SYMBOL_GPL(s390_noexec);
3423 -
3424 -/*
3425 - * Enable execute protection?
3426 - */
3427 -static int __init early_parse_noexec(char *p)
3428 -{
3429 - if (!strncmp(p, "off", 3))
3430 - return 0;
3431 - switch_amode = 1;
3432 - s390_noexec = 1;
3433 - return 0;
3434 -}
3435 -early_param("noexec", early_parse_noexec);
3436 -#endif /* CONFIG_S390_EXEC_PROTECT */
3437 -
3438 static void setup_addressing_mode(void)
3439 {
3440 if (s390_noexec) {
3441 diff -urNp linux-2.6.32.42/arch/s390/mm/mmap.c linux-2.6.32.42/arch/s390/mm/mmap.c
3442 --- linux-2.6.32.42/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3443 +++ linux-2.6.32.42/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3444 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3445 */
3446 if (mmap_is_legacy()) {
3447 mm->mmap_base = TASK_UNMAPPED_BASE;
3448 +
3449 +#ifdef CONFIG_PAX_RANDMMAP
3450 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3451 + mm->mmap_base += mm->delta_mmap;
3452 +#endif
3453 +
3454 mm->get_unmapped_area = arch_get_unmapped_area;
3455 mm->unmap_area = arch_unmap_area;
3456 } else {
3457 mm->mmap_base = mmap_base();
3458 +
3459 +#ifdef CONFIG_PAX_RANDMMAP
3460 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3461 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3462 +#endif
3463 +
3464 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3465 mm->unmap_area = arch_unmap_area_topdown;
3466 }
3467 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3468 */
3469 if (mmap_is_legacy()) {
3470 mm->mmap_base = TASK_UNMAPPED_BASE;
3471 +
3472 +#ifdef CONFIG_PAX_RANDMMAP
3473 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3474 + mm->mmap_base += mm->delta_mmap;
3475 +#endif
3476 +
3477 mm->get_unmapped_area = s390_get_unmapped_area;
3478 mm->unmap_area = arch_unmap_area;
3479 } else {
3480 mm->mmap_base = mmap_base();
3481 +
3482 +#ifdef CONFIG_PAX_RANDMMAP
3483 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3484 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3485 +#endif
3486 +
3487 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3488 mm->unmap_area = arch_unmap_area_topdown;
3489 }
3490 diff -urNp linux-2.6.32.42/arch/score/include/asm/system.h linux-2.6.32.42/arch/score/include/asm/system.h
3491 --- linux-2.6.32.42/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3492 +++ linux-2.6.32.42/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3493 @@ -17,7 +17,7 @@ do { \
3494 #define finish_arch_switch(prev) do {} while (0)
3495
3496 typedef void (*vi_handler_t)(void);
3497 -extern unsigned long arch_align_stack(unsigned long sp);
3498 +#define arch_align_stack(x) (x)
3499
3500 #define mb() barrier()
3501 #define rmb() barrier()
3502 diff -urNp linux-2.6.32.42/arch/score/kernel/process.c linux-2.6.32.42/arch/score/kernel/process.c
3503 --- linux-2.6.32.42/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3504 +++ linux-2.6.32.42/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3505 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3506
3507 return task_pt_regs(task)->cp0_epc;
3508 }
3509 -
3510 -unsigned long arch_align_stack(unsigned long sp)
3511 -{
3512 - return sp;
3513 -}
3514 diff -urNp linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c
3515 --- linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3516 +++ linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3517 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3518 return 0;
3519 }
3520
3521 -static struct platform_suspend_ops hp6x0_pm_ops = {
3522 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3523 .enter = hp6x0_pm_enter,
3524 .valid = suspend_valid_only_mem,
3525 };
3526 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c
3527 --- linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3528 +++ linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3529 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3530 NULL,
3531 };
3532
3533 -static struct sysfs_ops sq_sysfs_ops = {
3534 +static const struct sysfs_ops sq_sysfs_ops = {
3535 .show = sq_sysfs_show,
3536 .store = sq_sysfs_store,
3537 };
3538 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c
3539 --- linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3540 +++ linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3541 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3542 return 0;
3543 }
3544
3545 -static struct platform_suspend_ops sh_pm_ops = {
3546 +static const struct platform_suspend_ops sh_pm_ops = {
3547 .enter = sh_pm_enter,
3548 .valid = suspend_valid_only_mem,
3549 };
3550 diff -urNp linux-2.6.32.42/arch/sh/kernel/kgdb.c linux-2.6.32.42/arch/sh/kernel/kgdb.c
3551 --- linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3552 +++ linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3553 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3554 {
3555 }
3556
3557 -struct kgdb_arch arch_kgdb_ops = {
3558 +const struct kgdb_arch arch_kgdb_ops = {
3559 /* Breakpoint instruction: trapa #0x3c */
3560 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3561 .gdb_bpt_instr = { 0x3c, 0xc3 },
3562 diff -urNp linux-2.6.32.42/arch/sh/mm/mmap.c linux-2.6.32.42/arch/sh/mm/mmap.c
3563 --- linux-2.6.32.42/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3564 +++ linux-2.6.32.42/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3565 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3566 addr = PAGE_ALIGN(addr);
3567
3568 vma = find_vma(mm, addr);
3569 - if (TASK_SIZE - len >= addr &&
3570 - (!vma || addr + len <= vma->vm_start))
3571 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3572 return addr;
3573 }
3574
3575 @@ -106,7 +105,7 @@ full_search:
3576 }
3577 return -ENOMEM;
3578 }
3579 - if (likely(!vma || addr + len <= vma->vm_start)) {
3580 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3581 /*
3582 * Remember the place where we stopped the search:
3583 */
3584 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3585 addr = PAGE_ALIGN(addr);
3586
3587 vma = find_vma(mm, addr);
3588 - if (TASK_SIZE - len >= addr &&
3589 - (!vma || addr + len <= vma->vm_start))
3590 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3591 return addr;
3592 }
3593
3594 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3595 /* make sure it can fit in the remaining address space */
3596 if (likely(addr > len)) {
3597 vma = find_vma(mm, addr-len);
3598 - if (!vma || addr <= vma->vm_start) {
3599 + if (check_heap_stack_gap(vma, addr - len, len)) {
3600 /* remember the address as a hint for next time */
3601 return (mm->free_area_cache = addr-len);
3602 }
3603 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3604 if (unlikely(mm->mmap_base < len))
3605 goto bottomup;
3606
3607 - addr = mm->mmap_base-len;
3608 - if (do_colour_align)
3609 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3610 + addr = mm->mmap_base - len;
3611
3612 do {
3613 + if (do_colour_align)
3614 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3615 /*
3616 * Lookup failure means no vma is above this address,
3617 * else if new region fits below vma->vm_start,
3618 * return with success:
3619 */
3620 vma = find_vma(mm, addr);
3621 - if (likely(!vma || addr+len <= vma->vm_start)) {
3622 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3623 /* remember the address as a hint for next time */
3624 return (mm->free_area_cache = addr);
3625 }
3626 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3627 mm->cached_hole_size = vma->vm_start - addr;
3628
3629 /* try just below the current vma->vm_start */
3630 - addr = vma->vm_start-len;
3631 - if (do_colour_align)
3632 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3633 - } while (likely(len < vma->vm_start));
3634 + addr = skip_heap_stack_gap(vma, len);
3635 + } while (!IS_ERR_VALUE(addr));
3636
3637 bottomup:
3638 /*
3639 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h
3640 --- linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3641 +++ linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3642 @@ -14,18 +14,40 @@
3643 #define ATOMIC64_INIT(i) { (i) }
3644
3645 #define atomic_read(v) ((v)->counter)
3646 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3647 +{
3648 + return v->counter;
3649 +}
3650 #define atomic64_read(v) ((v)->counter)
3651 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3652 +{
3653 + return v->counter;
3654 +}
3655
3656 #define atomic_set(v, i) (((v)->counter) = i)
3657 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3658 +{
3659 + v->counter = i;
3660 +}
3661 #define atomic64_set(v, i) (((v)->counter) = i)
3662 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3663 +{
3664 + v->counter = i;
3665 +}
3666
3667 extern void atomic_add(int, atomic_t *);
3668 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3669 extern void atomic64_add(long, atomic64_t *);
3670 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3671 extern void atomic_sub(int, atomic_t *);
3672 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3673 extern void atomic64_sub(long, atomic64_t *);
3674 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3675
3676 extern int atomic_add_ret(int, atomic_t *);
3677 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3678 extern long atomic64_add_ret(long, atomic64_t *);
3679 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3680 extern int atomic_sub_ret(int, atomic_t *);
3681 extern long atomic64_sub_ret(long, atomic64_t *);
3682
3683 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3684 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3685
3686 #define atomic_inc_return(v) atomic_add_ret(1, v)
3687 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3688 +{
3689 + return atomic_add_ret_unchecked(1, v);
3690 +}
3691 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3692 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3693 +{
3694 + return atomic64_add_ret_unchecked(1, v);
3695 +}
3696
3697 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3698 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3699 @@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3700 * other cases.
3701 */
3702 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3703 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3704 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3705
3706 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3707 @@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3708 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3709
3710 #define atomic_inc(v) atomic_add(1, v)
3711 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3712 +{
3713 + atomic_add_unchecked(1, v);
3714 +}
3715 #define atomic64_inc(v) atomic64_add(1, v)
3716 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3717 +{
3718 + atomic64_add_unchecked(1, v);
3719 +}
3720
3721 #define atomic_dec(v) atomic_sub(1, v)
3722 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3723 +{
3724 + atomic_sub_unchecked(1, v);
3725 +}
3726 #define atomic64_dec(v) atomic64_sub(1, v)
3727 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3728 +{
3729 + atomic64_sub_unchecked(1, v);
3730 +}
3731
3732 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3733 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3734
3735 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3736 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3737 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3738 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3739
3740 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3741 {
3742 - int c, old;
3743 + int c, old, new;
3744 c = atomic_read(v);
3745 for (;;) {
3746 - if (unlikely(c == (u)))
3747 + if (unlikely(c == u))
3748 break;
3749 - old = atomic_cmpxchg((v), c, c + (a));
3750 +
3751 + asm volatile("addcc %2, %0, %0\n"
3752 +
3753 +#ifdef CONFIG_PAX_REFCOUNT
3754 + "tvs %%icc, 6\n"
3755 +#endif
3756 +
3757 + : "=r" (new)
3758 + : "0" (c), "ir" (a)
3759 + : "cc");
3760 +
3761 + old = atomic_cmpxchg(v, c, new);
3762 if (likely(old == c))
3763 break;
3764 c = old;
3765 }
3766 - return c != (u);
3767 + return c != u;
3768 }
3769
3770 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3771 @@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3772
3773 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3774 {
3775 - long c, old;
3776 + long c, old, new;
3777 c = atomic64_read(v);
3778 for (;;) {
3779 - if (unlikely(c == (u)))
3780 + if (unlikely(c == u))
3781 break;
3782 - old = atomic64_cmpxchg((v), c, c + (a));
3783 +
3784 + asm volatile("addcc %2, %0, %0\n"
3785 +
3786 +#ifdef CONFIG_PAX_REFCOUNT
3787 + "tvs %%xcc, 6\n"
3788 +#endif
3789 +
3790 + : "=r" (new)
3791 + : "0" (c), "ir" (a)
3792 + : "cc");
3793 +
3794 + old = atomic64_cmpxchg(v, c, new);
3795 if (likely(old == c))
3796 break;
3797 c = old;
3798 }
3799 - return c != (u);
3800 + return c != u;
3801 }
3802
3803 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3804 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/cache.h linux-2.6.32.42/arch/sparc/include/asm/cache.h
3805 --- linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3806 +++ linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3807 @@ -8,7 +8,7 @@
3808 #define _SPARC_CACHE_H
3809
3810 #define L1_CACHE_SHIFT 5
3811 -#define L1_CACHE_BYTES 32
3812 +#define L1_CACHE_BYTES 32U
3813 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3814
3815 #ifdef CONFIG_SPARC32
3816 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h
3817 --- linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3818 +++ linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3819 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3820 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3821 #define dma_is_consistent(d, h) (1)
3822
3823 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3824 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3825 extern struct bus_type pci_bus_type;
3826
3827 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3828 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3829 {
3830 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3831 if (dev->bus == &pci_bus_type)
3832 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3833 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3834 dma_addr_t *dma_handle, gfp_t flag)
3835 {
3836 - struct dma_map_ops *ops = get_dma_ops(dev);
3837 + const struct dma_map_ops *ops = get_dma_ops(dev);
3838 void *cpu_addr;
3839
3840 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3841 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3842 static inline void dma_free_coherent(struct device *dev, size_t size,
3843 void *cpu_addr, dma_addr_t dma_handle)
3844 {
3845 - struct dma_map_ops *ops = get_dma_ops(dev);
3846 + const struct dma_map_ops *ops = get_dma_ops(dev);
3847
3848 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3849 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3850 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_32.h linux-2.6.32.42/arch/sparc/include/asm/elf_32.h
3851 --- linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3852 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3853 @@ -116,6 +116,13 @@ typedef struct {
3854
3855 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3856
3857 +#ifdef CONFIG_PAX_ASLR
3858 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3859 +
3860 +#define PAX_DELTA_MMAP_LEN 16
3861 +#define PAX_DELTA_STACK_LEN 16
3862 +#endif
3863 +
3864 /* This yields a mask that user programs can use to figure out what
3865 instruction set this cpu supports. This can NOT be done in userspace
3866 on Sparc. */
3867 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_64.h linux-2.6.32.42/arch/sparc/include/asm/elf_64.h
3868 --- linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3869 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3870 @@ -163,6 +163,12 @@ typedef struct {
3871 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3872 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3873
3874 +#ifdef CONFIG_PAX_ASLR
3875 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3876 +
3877 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3878 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3879 +#endif
3880
3881 /* This yields a mask that user programs can use to figure out what
3882 instruction set this cpu supports. */
3883 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h
3884 --- linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3885 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3886 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3887 BTFIXUPDEF_INT(page_none)
3888 BTFIXUPDEF_INT(page_copy)
3889 BTFIXUPDEF_INT(page_readonly)
3890 +
3891 +#ifdef CONFIG_PAX_PAGEEXEC
3892 +BTFIXUPDEF_INT(page_shared_noexec)
3893 +BTFIXUPDEF_INT(page_copy_noexec)
3894 +BTFIXUPDEF_INT(page_readonly_noexec)
3895 +#endif
3896 +
3897 BTFIXUPDEF_INT(page_kernel)
3898
3899 #define PMD_SHIFT SUN4C_PMD_SHIFT
3900 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3901 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3902 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3903
3904 +#ifdef CONFIG_PAX_PAGEEXEC
3905 +extern pgprot_t PAGE_SHARED_NOEXEC;
3906 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3907 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3908 +#else
3909 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3910 +# define PAGE_COPY_NOEXEC PAGE_COPY
3911 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3912 +#endif
3913 +
3914 extern unsigned long page_kernel;
3915
3916 #ifdef MODULE
3917 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h
3918 --- linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3919 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3920 @@ -115,6 +115,13 @@
3921 SRMMU_EXEC | SRMMU_REF)
3922 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3923 SRMMU_EXEC | SRMMU_REF)
3924 +
3925 +#ifdef CONFIG_PAX_PAGEEXEC
3926 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3927 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3928 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3929 +#endif
3930 +
3931 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3932 SRMMU_DIRTY | SRMMU_REF)
3933
3934 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h
3935 --- linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3936 +++ linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3937 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3938
3939 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3940
3941 -static void inline arch_read_lock(raw_rwlock_t *lock)
3942 +static inline void arch_read_lock(raw_rwlock_t *lock)
3943 {
3944 unsigned long tmp1, tmp2;
3945
3946 __asm__ __volatile__ (
3947 "1: ldsw [%2], %0\n"
3948 " brlz,pn %0, 2f\n"
3949 -"4: add %0, 1, %1\n"
3950 +"4: addcc %0, 1, %1\n"
3951 +
3952 +#ifdef CONFIG_PAX_REFCOUNT
3953 +" tvs %%icc, 6\n"
3954 +#endif
3955 +
3956 " cas [%2], %0, %1\n"
3957 " cmp %0, %1\n"
3958 " bne,pn %%icc, 1b\n"
3959 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3960 " .previous"
3961 : "=&r" (tmp1), "=&r" (tmp2)
3962 : "r" (lock)
3963 - : "memory");
3964 + : "memory", "cc");
3965 }
3966
3967 static int inline arch_read_trylock(raw_rwlock_t *lock)
3968 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3969 "1: ldsw [%2], %0\n"
3970 " brlz,a,pn %0, 2f\n"
3971 " mov 0, %0\n"
3972 -" add %0, 1, %1\n"
3973 +" addcc %0, 1, %1\n"
3974 +
3975 +#ifdef CONFIG_PAX_REFCOUNT
3976 +" tvs %%icc, 6\n"
3977 +#endif
3978 +
3979 " cas [%2], %0, %1\n"
3980 " cmp %0, %1\n"
3981 " bne,pn %%icc, 1b\n"
3982 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3983 return tmp1;
3984 }
3985
3986 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3987 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3988 {
3989 unsigned long tmp1, tmp2;
3990
3991 __asm__ __volatile__(
3992 "1: lduw [%2], %0\n"
3993 -" sub %0, 1, %1\n"
3994 +" subcc %0, 1, %1\n"
3995 +
3996 +#ifdef CONFIG_PAX_REFCOUNT
3997 +" tvs %%icc, 6\n"
3998 +#endif
3999 +
4000 " cas [%2], %0, %1\n"
4001 " cmp %0, %1\n"
4002 " bne,pn %%xcc, 1b\n"
4003 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4004 : "memory");
4005 }
4006
4007 -static void inline arch_write_lock(raw_rwlock_t *lock)
4008 +static inline void arch_write_lock(raw_rwlock_t *lock)
4009 {
4010 unsigned long mask, tmp1, tmp2;
4011
4012 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4013 : "memory");
4014 }
4015
4016 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4017 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4018 {
4019 __asm__ __volatile__(
4020 " stw %%g0, [%0]"
4021 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h
4022 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4023 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4024 @@ -50,6 +50,8 @@ struct thread_info {
4025 unsigned long w_saved;
4026
4027 struct restart_block restart_block;
4028 +
4029 + unsigned long lowest_stack;
4030 };
4031
4032 /*
4033 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h
4034 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4035 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4036 @@ -68,6 +68,8 @@ struct thread_info {
4037 struct pt_regs *kern_una_regs;
4038 unsigned int kern_una_insn;
4039
4040 + unsigned long lowest_stack;
4041 +
4042 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4043 };
4044
4045 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h
4046 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4047 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4048 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4049
4050 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4051 {
4052 - if (n && __access_ok((unsigned long) to, n))
4053 + if ((long)n < 0)
4054 + return n;
4055 +
4056 + if (n && __access_ok((unsigned long) to, n)) {
4057 + if (!__builtin_constant_p(n))
4058 + check_object_size(from, n, true);
4059 return __copy_user(to, (__force void __user *) from, n);
4060 - else
4061 + } else
4062 return n;
4063 }
4064
4065 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4066 {
4067 + if ((long)n < 0)
4068 + return n;
4069 +
4070 + if (!__builtin_constant_p(n))
4071 + check_object_size(from, n, true);
4072 +
4073 return __copy_user(to, (__force void __user *) from, n);
4074 }
4075
4076 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4077 {
4078 - if (n && __access_ok((unsigned long) from, n))
4079 + if ((long)n < 0)
4080 + return n;
4081 +
4082 + if (n && __access_ok((unsigned long) from, n)) {
4083 + if (!__builtin_constant_p(n))
4084 + check_object_size(to, n, false);
4085 return __copy_user((__force void __user *) to, from, n);
4086 - else
4087 + } else
4088 return n;
4089 }
4090
4091 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4092 {
4093 + if ((long)n < 0)
4094 + return n;
4095 +
4096 return __copy_user((__force void __user *) to, from, n);
4097 }
4098
4099 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h
4100 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4101 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4102 @@ -9,6 +9,7 @@
4103 #include <linux/compiler.h>
4104 #include <linux/string.h>
4105 #include <linux/thread_info.h>
4106 +#include <linux/kernel.h>
4107 #include <asm/asi.h>
4108 #include <asm/system.h>
4109 #include <asm/spitfire.h>
4110 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4111 static inline unsigned long __must_check
4112 copy_from_user(void *to, const void __user *from, unsigned long size)
4113 {
4114 - unsigned long ret = ___copy_from_user(to, from, size);
4115 + unsigned long ret;
4116
4117 + if ((long)size < 0 || size > INT_MAX)
4118 + return size;
4119 +
4120 + if (!__builtin_constant_p(size))
4121 + check_object_size(to, size, false);
4122 +
4123 + ret = ___copy_from_user(to, from, size);
4124 if (unlikely(ret))
4125 ret = copy_from_user_fixup(to, from, size);
4126 return ret;
4127 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4128 static inline unsigned long __must_check
4129 copy_to_user(void __user *to, const void *from, unsigned long size)
4130 {
4131 - unsigned long ret = ___copy_to_user(to, from, size);
4132 + unsigned long ret;
4133 +
4134 + if ((long)size < 0 || size > INT_MAX)
4135 + return size;
4136 +
4137 + if (!__builtin_constant_p(size))
4138 + check_object_size(from, size, true);
4139
4140 + ret = ___copy_to_user(to, from, size);
4141 if (unlikely(ret))
4142 ret = copy_to_user_fixup(to, from, size);
4143 return ret;
4144 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess.h linux-2.6.32.42/arch/sparc/include/asm/uaccess.h
4145 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4146 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4147 @@ -1,5 +1,13 @@
4148 #ifndef ___ASM_SPARC_UACCESS_H
4149 #define ___ASM_SPARC_UACCESS_H
4150 +
4151 +#ifdef __KERNEL__
4152 +#ifndef __ASSEMBLY__
4153 +#include <linux/types.h>
4154 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4155 +#endif
4156 +#endif
4157 +
4158 #if defined(__sparc__) && defined(__arch64__)
4159 #include <asm/uaccess_64.h>
4160 #else
4161 diff -urNp linux-2.6.32.42/arch/sparc/kernel/iommu.c linux-2.6.32.42/arch/sparc/kernel/iommu.c
4162 --- linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4163 +++ linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4164 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4165 spin_unlock_irqrestore(&iommu->lock, flags);
4166 }
4167
4168 -static struct dma_map_ops sun4u_dma_ops = {
4169 +static const struct dma_map_ops sun4u_dma_ops = {
4170 .alloc_coherent = dma_4u_alloc_coherent,
4171 .free_coherent = dma_4u_free_coherent,
4172 .map_page = dma_4u_map_page,
4173 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4174 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4175 };
4176
4177 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4178 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4179 EXPORT_SYMBOL(dma_ops);
4180
4181 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4182 diff -urNp linux-2.6.32.42/arch/sparc/kernel/ioport.c linux-2.6.32.42/arch/sparc/kernel/ioport.c
4183 --- linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4184 +++ linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4185 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4186 BUG();
4187 }
4188
4189 -struct dma_map_ops sbus_dma_ops = {
4190 +const struct dma_map_ops sbus_dma_ops = {
4191 .alloc_coherent = sbus_alloc_coherent,
4192 .free_coherent = sbus_free_coherent,
4193 .map_page = sbus_map_page,
4194 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4195 .sync_sg_for_device = sbus_sync_sg_for_device,
4196 };
4197
4198 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4199 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4200 EXPORT_SYMBOL(dma_ops);
4201
4202 static int __init sparc_register_ioport(void)
4203 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4204 }
4205 }
4206
4207 -struct dma_map_ops pci32_dma_ops = {
4208 +const struct dma_map_ops pci32_dma_ops = {
4209 .alloc_coherent = pci32_alloc_coherent,
4210 .free_coherent = pci32_free_coherent,
4211 .map_page = pci32_map_page,
4212 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c
4213 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4214 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4215 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4216 {
4217 }
4218
4219 -struct kgdb_arch arch_kgdb_ops = {
4220 +const struct kgdb_arch arch_kgdb_ops = {
4221 /* Breakpoint instruction: ta 0x7d */
4222 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4223 };
4224 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c
4225 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4226 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4227 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4228 {
4229 }
4230
4231 -struct kgdb_arch arch_kgdb_ops = {
4232 +const struct kgdb_arch arch_kgdb_ops = {
4233 /* Breakpoint instruction: ta 0x72 */
4234 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4235 };
4236 diff -urNp linux-2.6.32.42/arch/sparc/kernel/Makefile linux-2.6.32.42/arch/sparc/kernel/Makefile
4237 --- linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4238 +++ linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4239 @@ -3,7 +3,7 @@
4240 #
4241
4242 asflags-y := -ansi
4243 -ccflags-y := -Werror
4244 +#ccflags-y := -Werror
4245
4246 extra-y := head_$(BITS).o
4247 extra-y += init_task.o
4248 diff -urNp linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c
4249 --- linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4250 +++ linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4251 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4252 spin_unlock_irqrestore(&iommu->lock, flags);
4253 }
4254
4255 -static struct dma_map_ops sun4v_dma_ops = {
4256 +static const struct dma_map_ops sun4v_dma_ops = {
4257 .alloc_coherent = dma_4v_alloc_coherent,
4258 .free_coherent = dma_4v_free_coherent,
4259 .map_page = dma_4v_map_page,
4260 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_32.c linux-2.6.32.42/arch/sparc/kernel/process_32.c
4261 --- linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4262 +++ linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4263 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4264 rw->ins[4], rw->ins[5],
4265 rw->ins[6],
4266 rw->ins[7]);
4267 - printk("%pS\n", (void *) rw->ins[7]);
4268 + printk("%pA\n", (void *) rw->ins[7]);
4269 rw = (struct reg_window32 *) rw->ins[6];
4270 }
4271 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4272 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4273
4274 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4275 r->psr, r->pc, r->npc, r->y, print_tainted());
4276 - printk("PC: <%pS>\n", (void *) r->pc);
4277 + printk("PC: <%pA>\n", (void *) r->pc);
4278 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4279 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4280 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4281 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4282 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4283 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4284 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4285 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4286
4287 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4288 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4289 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4290 rw = (struct reg_window32 *) fp;
4291 pc = rw->ins[7];
4292 printk("[%08lx : ", pc);
4293 - printk("%pS ] ", (void *) pc);
4294 + printk("%pA ] ", (void *) pc);
4295 fp = rw->ins[6];
4296 } while (++count < 16);
4297 printk("\n");
4298 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_64.c linux-2.6.32.42/arch/sparc/kernel/process_64.c
4299 --- linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4300 +++ linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4301 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4302 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4303 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4304 if (regs->tstate & TSTATE_PRIV)
4305 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4306 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4307 }
4308
4309 void show_regs(struct pt_regs *regs)
4310 {
4311 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4312 regs->tpc, regs->tnpc, regs->y, print_tainted());
4313 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4314 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4315 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4316 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4317 regs->u_regs[3]);
4318 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4319 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4320 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4321 regs->u_regs[15]);
4322 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4323 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4324 show_regwindow(regs);
4325 }
4326
4327 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4328 ((tp && tp->task) ? tp->task->pid : -1));
4329
4330 if (gp->tstate & TSTATE_PRIV) {
4331 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4332 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4333 (void *) gp->tpc,
4334 (void *) gp->o7,
4335 (void *) gp->i7,
4336 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c
4337 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4338 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4339 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4340 if (ARCH_SUN4C && len > 0x20000000)
4341 return -ENOMEM;
4342 if (!addr)
4343 - addr = TASK_UNMAPPED_BASE;
4344 + addr = current->mm->mmap_base;
4345
4346 if (flags & MAP_SHARED)
4347 addr = COLOUR_ALIGN(addr);
4348 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4349 }
4350 if (TASK_SIZE - PAGE_SIZE - len < addr)
4351 return -ENOMEM;
4352 - if (!vmm || addr + len <= vmm->vm_start)
4353 + if (check_heap_stack_gap(vmm, addr, len))
4354 return addr;
4355 addr = vmm->vm_end;
4356 if (flags & MAP_SHARED)
4357 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c
4358 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4359 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4360 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4361 /* We do not accept a shared mapping if it would violate
4362 * cache aliasing constraints.
4363 */
4364 - if ((flags & MAP_SHARED) &&
4365 + if ((filp || (flags & MAP_SHARED)) &&
4366 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4367 return -EINVAL;
4368 return addr;
4369 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4370 if (filp || (flags & MAP_SHARED))
4371 do_color_align = 1;
4372
4373 +#ifdef CONFIG_PAX_RANDMMAP
4374 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4375 +#endif
4376 +
4377 if (addr) {
4378 if (do_color_align)
4379 addr = COLOUR_ALIGN(addr, pgoff);
4380 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4381 addr = PAGE_ALIGN(addr);
4382
4383 vma = find_vma(mm, addr);
4384 - if (task_size - len >= addr &&
4385 - (!vma || addr + len <= vma->vm_start))
4386 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4387 return addr;
4388 }
4389
4390 if (len > mm->cached_hole_size) {
4391 - start_addr = addr = mm->free_area_cache;
4392 + start_addr = addr = mm->free_area_cache;
4393 } else {
4394 - start_addr = addr = TASK_UNMAPPED_BASE;
4395 + start_addr = addr = mm->mmap_base;
4396 mm->cached_hole_size = 0;
4397 }
4398
4399 @@ -175,14 +178,14 @@ full_search:
4400 vma = find_vma(mm, VA_EXCLUDE_END);
4401 }
4402 if (unlikely(task_size < addr)) {
4403 - if (start_addr != TASK_UNMAPPED_BASE) {
4404 - start_addr = addr = TASK_UNMAPPED_BASE;
4405 + if (start_addr != mm->mmap_base) {
4406 + start_addr = addr = mm->mmap_base;
4407 mm->cached_hole_size = 0;
4408 goto full_search;
4409 }
4410 return -ENOMEM;
4411 }
4412 - if (likely(!vma || addr + len <= vma->vm_start)) {
4413 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4414 /*
4415 * Remember the place where we stopped the search:
4416 */
4417 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4418 /* We do not accept a shared mapping if it would violate
4419 * cache aliasing constraints.
4420 */
4421 - if ((flags & MAP_SHARED) &&
4422 + if ((filp || (flags & MAP_SHARED)) &&
4423 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4424 return -EINVAL;
4425 return addr;
4426 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4427 addr = PAGE_ALIGN(addr);
4428
4429 vma = find_vma(mm, addr);
4430 - if (task_size - len >= addr &&
4431 - (!vma || addr + len <= vma->vm_start))
4432 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4433 return addr;
4434 }
4435
4436 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4437 /* make sure it can fit in the remaining address space */
4438 if (likely(addr > len)) {
4439 vma = find_vma(mm, addr-len);
4440 - if (!vma || addr <= vma->vm_start) {
4441 + if (check_heap_stack_gap(vma, addr - len, len)) {
4442 /* remember the address as a hint for next time */
4443 return (mm->free_area_cache = addr-len);
4444 }
4445 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4446 if (unlikely(mm->mmap_base < len))
4447 goto bottomup;
4448
4449 - addr = mm->mmap_base-len;
4450 - if (do_color_align)
4451 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4452 + addr = mm->mmap_base - len;
4453
4454 do {
4455 + if (do_color_align)
4456 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4457 /*
4458 * Lookup failure means no vma is above this address,
4459 * else if new region fits below vma->vm_start,
4460 * return with success:
4461 */
4462 vma = find_vma(mm, addr);
4463 - if (likely(!vma || addr+len <= vma->vm_start)) {
4464 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4465 /* remember the address as a hint for next time */
4466 return (mm->free_area_cache = addr);
4467 }
4468 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4469 mm->cached_hole_size = vma->vm_start - addr;
4470
4471 /* try just below the current vma->vm_start */
4472 - addr = vma->vm_start-len;
4473 - if (do_color_align)
4474 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4475 - } while (likely(len < vma->vm_start));
4476 + addr = skip_heap_stack_gap(vma, len);
4477 + } while (!IS_ERR_VALUE(addr));
4478
4479 bottomup:
4480 /*
4481 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4482 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4483 sysctl_legacy_va_layout) {
4484 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4485 +
4486 +#ifdef CONFIG_PAX_RANDMMAP
4487 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4488 + mm->mmap_base += mm->delta_mmap;
4489 +#endif
4490 +
4491 mm->get_unmapped_area = arch_get_unmapped_area;
4492 mm->unmap_area = arch_unmap_area;
4493 } else {
4494 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4495 gap = (task_size / 6 * 5);
4496
4497 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4498 +
4499 +#ifdef CONFIG_PAX_RANDMMAP
4500 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4501 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4502 +#endif
4503 +
4504 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4505 mm->unmap_area = arch_unmap_area_topdown;
4506 }
4507 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_32.c linux-2.6.32.42/arch/sparc/kernel/traps_32.c
4508 --- linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4509 +++ linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4510 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4511 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4512 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4513
4514 +extern void gr_handle_kernel_exploit(void);
4515 +
4516 void die_if_kernel(char *str, struct pt_regs *regs)
4517 {
4518 static int die_counter;
4519 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4520 count++ < 30 &&
4521 (((unsigned long) rw) >= PAGE_OFFSET) &&
4522 !(((unsigned long) rw) & 0x7)) {
4523 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4524 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4525 (void *) rw->ins[7]);
4526 rw = (struct reg_window32 *)rw->ins[6];
4527 }
4528 }
4529 printk("Instruction DUMP:");
4530 instruction_dump ((unsigned long *) regs->pc);
4531 - if(regs->psr & PSR_PS)
4532 + if(regs->psr & PSR_PS) {
4533 + gr_handle_kernel_exploit();
4534 do_exit(SIGKILL);
4535 + }
4536 do_exit(SIGSEGV);
4537 }
4538
4539 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_64.c linux-2.6.32.42/arch/sparc/kernel/traps_64.c
4540 --- linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4541 +++ linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4542 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4543 i + 1,
4544 p->trapstack[i].tstate, p->trapstack[i].tpc,
4545 p->trapstack[i].tnpc, p->trapstack[i].tt);
4546 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4547 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4548 }
4549 }
4550
4551 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4552
4553 lvl -= 0x100;
4554 if (regs->tstate & TSTATE_PRIV) {
4555 +
4556 +#ifdef CONFIG_PAX_REFCOUNT
4557 + if (lvl == 6)
4558 + pax_report_refcount_overflow(regs);
4559 +#endif
4560 +
4561 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4562 die_if_kernel(buffer, regs);
4563 }
4564 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4565 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4566 {
4567 char buffer[32];
4568 -
4569 +
4570 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4571 0, lvl, SIGTRAP) == NOTIFY_STOP)
4572 return;
4573
4574 +#ifdef CONFIG_PAX_REFCOUNT
4575 + if (lvl == 6)
4576 + pax_report_refcount_overflow(regs);
4577 +#endif
4578 +
4579 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4580
4581 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4582 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4583 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4584 printk("%s" "ERROR(%d): ",
4585 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4586 - printk("TPC<%pS>\n", (void *) regs->tpc);
4587 + printk("TPC<%pA>\n", (void *) regs->tpc);
4588 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4589 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4590 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4591 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4592 smp_processor_id(),
4593 (type & 0x1) ? 'I' : 'D',
4594 regs->tpc);
4595 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4596 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4597 panic("Irrecoverable Cheetah+ parity error.");
4598 }
4599
4600 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4601 smp_processor_id(),
4602 (type & 0x1) ? 'I' : 'D',
4603 regs->tpc);
4604 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4605 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4606 }
4607
4608 struct sun4v_error_entry {
4609 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4610
4611 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4612 regs->tpc, tl);
4613 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4614 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4615 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4616 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4617 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4618 (void *) regs->u_regs[UREG_I7]);
4619 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4620 "pte[%lx] error[%lx]\n",
4621 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4622
4623 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4624 regs->tpc, tl);
4625 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4626 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4627 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4628 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4629 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4630 (void *) regs->u_regs[UREG_I7]);
4631 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4632 "pte[%lx] error[%lx]\n",
4633 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4634 fp = (unsigned long)sf->fp + STACK_BIAS;
4635 }
4636
4637 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4638 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4639 } while (++count < 16);
4640 }
4641
4642 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4643 return (struct reg_window *) (fp + STACK_BIAS);
4644 }
4645
4646 +extern void gr_handle_kernel_exploit(void);
4647 +
4648 void die_if_kernel(char *str, struct pt_regs *regs)
4649 {
4650 static int die_counter;
4651 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4652 while (rw &&
4653 count++ < 30&&
4654 is_kernel_stack(current, rw)) {
4655 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4656 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4657 (void *) rw->ins[7]);
4658
4659 rw = kernel_stack_up(rw);
4660 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4661 }
4662 user_instruction_dump ((unsigned int __user *) regs->tpc);
4663 }
4664 - if (regs->tstate & TSTATE_PRIV)
4665 + if (regs->tstate & TSTATE_PRIV) {
4666 + gr_handle_kernel_exploit();
4667 do_exit(SIGKILL);
4668 + }
4669 +
4670 do_exit(SIGSEGV);
4671 }
4672 EXPORT_SYMBOL(die_if_kernel);
4673 diff -urNp linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c
4674 --- linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4675 +++ linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4676 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4677 if (count < 5) {
4678 last_time = jiffies;
4679 count++;
4680 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4681 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4682 regs->tpc, (void *) regs->tpc);
4683 }
4684 }
4685 diff -urNp linux-2.6.32.42/arch/sparc/lib/atomic_64.S linux-2.6.32.42/arch/sparc/lib/atomic_64.S
4686 --- linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4687 +++ linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4688 @@ -18,7 +18,12 @@
4689 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4690 BACKOFF_SETUP(%o2)
4691 1: lduw [%o1], %g1
4692 - add %g1, %o0, %g7
4693 + addcc %g1, %o0, %g7
4694 +
4695 +#ifdef CONFIG_PAX_REFCOUNT
4696 + tvs %icc, 6
4697 +#endif
4698 +
4699 cas [%o1], %g1, %g7
4700 cmp %g1, %g7
4701 bne,pn %icc, 2f
4702 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4703 2: BACKOFF_SPIN(%o2, %o3, 1b)
4704 .size atomic_add, .-atomic_add
4705
4706 + .globl atomic_add_unchecked
4707 + .type atomic_add_unchecked,#function
4708 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4709 + BACKOFF_SETUP(%o2)
4710 +1: lduw [%o1], %g1
4711 + add %g1, %o0, %g7
4712 + cas [%o1], %g1, %g7
4713 + cmp %g1, %g7
4714 + bne,pn %icc, 2f
4715 + nop
4716 + retl
4717 + nop
4718 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4719 + .size atomic_add_unchecked, .-atomic_add_unchecked
4720 +
4721 .globl atomic_sub
4722 .type atomic_sub,#function
4723 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4724 BACKOFF_SETUP(%o2)
4725 1: lduw [%o1], %g1
4726 - sub %g1, %o0, %g7
4727 + subcc %g1, %o0, %g7
4728 +
4729 +#ifdef CONFIG_PAX_REFCOUNT
4730 + tvs %icc, 6
4731 +#endif
4732 +
4733 cas [%o1], %g1, %g7
4734 cmp %g1, %g7
4735 bne,pn %icc, 2f
4736 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4737 2: BACKOFF_SPIN(%o2, %o3, 1b)
4738 .size atomic_sub, .-atomic_sub
4739
4740 + .globl atomic_sub_unchecked
4741 + .type atomic_sub_unchecked,#function
4742 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4743 + BACKOFF_SETUP(%o2)
4744 +1: lduw [%o1], %g1
4745 + sub %g1, %o0, %g7
4746 + cas [%o1], %g1, %g7
4747 + cmp %g1, %g7
4748 + bne,pn %icc, 2f
4749 + nop
4750 + retl
4751 + nop
4752 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4753 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4754 +
4755 .globl atomic_add_ret
4756 .type atomic_add_ret,#function
4757 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4758 BACKOFF_SETUP(%o2)
4759 1: lduw [%o1], %g1
4760 - add %g1, %o0, %g7
4761 + addcc %g1, %o0, %g7
4762 +
4763 +#ifdef CONFIG_PAX_REFCOUNT
4764 + tvs %icc, 6
4765 +#endif
4766 +
4767 cas [%o1], %g1, %g7
4768 cmp %g1, %g7
4769 bne,pn %icc, 2f
4770 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4771 2: BACKOFF_SPIN(%o2, %o3, 1b)
4772 .size atomic_add_ret, .-atomic_add_ret
4773
4774 + .globl atomic_add_ret_unchecked
4775 + .type atomic_add_ret_unchecked,#function
4776 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4777 + BACKOFF_SETUP(%o2)
4778 +1: lduw [%o1], %g1
4779 + addcc %g1, %o0, %g7
4780 + cas [%o1], %g1, %g7
4781 + cmp %g1, %g7
4782 + bne,pn %icc, 2f
4783 + add %g7, %o0, %g7
4784 + sra %g7, 0, %o0
4785 + retl
4786 + nop
4787 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4788 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4789 +
4790 .globl atomic_sub_ret
4791 .type atomic_sub_ret,#function
4792 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4793 BACKOFF_SETUP(%o2)
4794 1: lduw [%o1], %g1
4795 - sub %g1, %o0, %g7
4796 + subcc %g1, %o0, %g7
4797 +
4798 +#ifdef CONFIG_PAX_REFCOUNT
4799 + tvs %icc, 6
4800 +#endif
4801 +
4802 cas [%o1], %g1, %g7
4803 cmp %g1, %g7
4804 bne,pn %icc, 2f
4805 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4806 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4807 BACKOFF_SETUP(%o2)
4808 1: ldx [%o1], %g1
4809 - add %g1, %o0, %g7
4810 + addcc %g1, %o0, %g7
4811 +
4812 +#ifdef CONFIG_PAX_REFCOUNT
4813 + tvs %xcc, 6
4814 +#endif
4815 +
4816 casx [%o1], %g1, %g7
4817 cmp %g1, %g7
4818 bne,pn %xcc, 2f
4819 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4820 2: BACKOFF_SPIN(%o2, %o3, 1b)
4821 .size atomic64_add, .-atomic64_add
4822
4823 + .globl atomic64_add_unchecked
4824 + .type atomic64_add_unchecked,#function
4825 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4826 + BACKOFF_SETUP(%o2)
4827 +1: ldx [%o1], %g1
4828 + addcc %g1, %o0, %g7
4829 + casx [%o1], %g1, %g7
4830 + cmp %g1, %g7
4831 + bne,pn %xcc, 2f
4832 + nop
4833 + retl
4834 + nop
4835 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4836 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4837 +
4838 .globl atomic64_sub
4839 .type atomic64_sub,#function
4840 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4841 BACKOFF_SETUP(%o2)
4842 1: ldx [%o1], %g1
4843 - sub %g1, %o0, %g7
4844 + subcc %g1, %o0, %g7
4845 +
4846 +#ifdef CONFIG_PAX_REFCOUNT
4847 + tvs %xcc, 6
4848 +#endif
4849 +
4850 casx [%o1], %g1, %g7
4851 cmp %g1, %g7
4852 bne,pn %xcc, 2f
4853 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4854 2: BACKOFF_SPIN(%o2, %o3, 1b)
4855 .size atomic64_sub, .-atomic64_sub
4856
4857 + .globl atomic64_sub_unchecked
4858 + .type atomic64_sub_unchecked,#function
4859 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4860 + BACKOFF_SETUP(%o2)
4861 +1: ldx [%o1], %g1
4862 + subcc %g1, %o0, %g7
4863 + casx [%o1], %g1, %g7
4864 + cmp %g1, %g7
4865 + bne,pn %xcc, 2f
4866 + nop
4867 + retl
4868 + nop
4869 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4870 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4871 +
4872 .globl atomic64_add_ret
4873 .type atomic64_add_ret,#function
4874 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4875 BACKOFF_SETUP(%o2)
4876 1: ldx [%o1], %g1
4877 - add %g1, %o0, %g7
4878 + addcc %g1, %o0, %g7
4879 +
4880 +#ifdef CONFIG_PAX_REFCOUNT
4881 + tvs %xcc, 6
4882 +#endif
4883 +
4884 casx [%o1], %g1, %g7
4885 cmp %g1, %g7
4886 bne,pn %xcc, 2f
4887 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4888 2: BACKOFF_SPIN(%o2, %o3, 1b)
4889 .size atomic64_add_ret, .-atomic64_add_ret
4890
4891 + .globl atomic64_add_ret_unchecked
4892 + .type atomic64_add_ret_unchecked,#function
4893 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4894 + BACKOFF_SETUP(%o2)
4895 +1: ldx [%o1], %g1
4896 + addcc %g1, %o0, %g7
4897 + casx [%o1], %g1, %g7
4898 + cmp %g1, %g7
4899 + bne,pn %xcc, 2f
4900 + add %g7, %o0, %g7
4901 + mov %g7, %o0
4902 + retl
4903 + nop
4904 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4906 +
4907 .globl atomic64_sub_ret
4908 .type atomic64_sub_ret,#function
4909 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4910 BACKOFF_SETUP(%o2)
4911 1: ldx [%o1], %g1
4912 - sub %g1, %o0, %g7
4913 + subcc %g1, %o0, %g7
4914 +
4915 +#ifdef CONFIG_PAX_REFCOUNT
4916 + tvs %xcc, 6
4917 +#endif
4918 +
4919 casx [%o1], %g1, %g7
4920 cmp %g1, %g7
4921 bne,pn %xcc, 2f
4922 diff -urNp linux-2.6.32.42/arch/sparc/lib/ksyms.c linux-2.6.32.42/arch/sparc/lib/ksyms.c
4923 --- linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4924 +++ linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4925 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4926
4927 /* Atomic counter implementation. */
4928 EXPORT_SYMBOL(atomic_add);
4929 +EXPORT_SYMBOL(atomic_add_unchecked);
4930 EXPORT_SYMBOL(atomic_add_ret);
4931 EXPORT_SYMBOL(atomic_sub);
4932 +EXPORT_SYMBOL(atomic_sub_unchecked);
4933 EXPORT_SYMBOL(atomic_sub_ret);
4934 EXPORT_SYMBOL(atomic64_add);
4935 +EXPORT_SYMBOL(atomic64_add_unchecked);
4936 EXPORT_SYMBOL(atomic64_add_ret);
4937 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4938 EXPORT_SYMBOL(atomic64_sub);
4939 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4940 EXPORT_SYMBOL(atomic64_sub_ret);
4941
4942 /* Atomic bit operations. */
4943 diff -urNp linux-2.6.32.42/arch/sparc/lib/Makefile linux-2.6.32.42/arch/sparc/lib/Makefile
4944 --- linux-2.6.32.42/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4945 +++ linux-2.6.32.42/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4946 @@ -2,7 +2,7 @@
4947 #
4948
4949 asflags-y := -ansi -DST_DIV0=0x02
4950 -ccflags-y := -Werror
4951 +#ccflags-y := -Werror
4952
4953 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4954 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4955 diff -urNp linux-2.6.32.42/arch/sparc/lib/rwsem_64.S linux-2.6.32.42/arch/sparc/lib/rwsem_64.S
4956 --- linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4957 +++ linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4958 @@ -11,7 +11,12 @@
4959 .globl __down_read
4960 __down_read:
4961 1: lduw [%o0], %g1
4962 - add %g1, 1, %g7
4963 + addcc %g1, 1, %g7
4964 +
4965 +#ifdef CONFIG_PAX_REFCOUNT
4966 + tvs %icc, 6
4967 +#endif
4968 +
4969 cas [%o0], %g1, %g7
4970 cmp %g1, %g7
4971 bne,pn %icc, 1b
4972 @@ -33,7 +38,12 @@ __down_read:
4973 .globl __down_read_trylock
4974 __down_read_trylock:
4975 1: lduw [%o0], %g1
4976 - add %g1, 1, %g7
4977 + addcc %g1, 1, %g7
4978 +
4979 +#ifdef CONFIG_PAX_REFCOUNT
4980 + tvs %icc, 6
4981 +#endif
4982 +
4983 cmp %g7, 0
4984 bl,pn %icc, 2f
4985 mov 0, %o1
4986 @@ -51,7 +61,12 @@ __down_write:
4987 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4988 1:
4989 lduw [%o0], %g3
4990 - add %g3, %g1, %g7
4991 + addcc %g3, %g1, %g7
4992 +
4993 +#ifdef CONFIG_PAX_REFCOUNT
4994 + tvs %icc, 6
4995 +#endif
4996 +
4997 cas [%o0], %g3, %g7
4998 cmp %g3, %g7
4999 bne,pn %icc, 1b
5000 @@ -77,7 +92,12 @@ __down_write_trylock:
5001 cmp %g3, 0
5002 bne,pn %icc, 2f
5003 mov 0, %o1
5004 - add %g3, %g1, %g7
5005 + addcc %g3, %g1, %g7
5006 +
5007 +#ifdef CONFIG_PAX_REFCOUNT
5008 + tvs %icc, 6
5009 +#endif
5010 +
5011 cas [%o0], %g3, %g7
5012 cmp %g3, %g7
5013 bne,pn %icc, 1b
5014 @@ -90,7 +110,12 @@ __down_write_trylock:
5015 __up_read:
5016 1:
5017 lduw [%o0], %g1
5018 - sub %g1, 1, %g7
5019 + subcc %g1, 1, %g7
5020 +
5021 +#ifdef CONFIG_PAX_REFCOUNT
5022 + tvs %icc, 6
5023 +#endif
5024 +
5025 cas [%o0], %g1, %g7
5026 cmp %g1, %g7
5027 bne,pn %icc, 1b
5028 @@ -118,7 +143,12 @@ __up_write:
5029 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5030 1:
5031 lduw [%o0], %g3
5032 - sub %g3, %g1, %g7
5033 + subcc %g3, %g1, %g7
5034 +
5035 +#ifdef CONFIG_PAX_REFCOUNT
5036 + tvs %icc, 6
5037 +#endif
5038 +
5039 cas [%o0], %g3, %g7
5040 cmp %g3, %g7
5041 bne,pn %icc, 1b
5042 @@ -143,7 +173,12 @@ __downgrade_write:
5043 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5044 1:
5045 lduw [%o0], %g3
5046 - sub %g3, %g1, %g7
5047 + subcc %g3, %g1, %g7
5048 +
5049 +#ifdef CONFIG_PAX_REFCOUNT
5050 + tvs %icc, 6
5051 +#endif
5052 +
5053 cas [%o0], %g3, %g7
5054 cmp %g3, %g7
5055 bne,pn %icc, 1b
5056 diff -urNp linux-2.6.32.42/arch/sparc/Makefile linux-2.6.32.42/arch/sparc/Makefile
5057 --- linux-2.6.32.42/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5058 +++ linux-2.6.32.42/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5059 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5060 # Export what is needed by arch/sparc/boot/Makefile
5061 export VMLINUX_INIT VMLINUX_MAIN
5062 VMLINUX_INIT := $(head-y) $(init-y)
5063 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5064 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5065 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5066 VMLINUX_MAIN += $(drivers-y) $(net-y)
5067
5068 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_32.c linux-2.6.32.42/arch/sparc/mm/fault_32.c
5069 --- linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5070 +++ linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5071 @@ -21,6 +21,9 @@
5072 #include <linux/interrupt.h>
5073 #include <linux/module.h>
5074 #include <linux/kdebug.h>
5075 +#include <linux/slab.h>
5076 +#include <linux/pagemap.h>
5077 +#include <linux/compiler.h>
5078
5079 #include <asm/system.h>
5080 #include <asm/page.h>
5081 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5082 return safe_compute_effective_address(regs, insn);
5083 }
5084
5085 +#ifdef CONFIG_PAX_PAGEEXEC
5086 +#ifdef CONFIG_PAX_DLRESOLVE
5087 +static void pax_emuplt_close(struct vm_area_struct *vma)
5088 +{
5089 + vma->vm_mm->call_dl_resolve = 0UL;
5090 +}
5091 +
5092 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5093 +{
5094 + unsigned int *kaddr;
5095 +
5096 + vmf->page = alloc_page(GFP_HIGHUSER);
5097 + if (!vmf->page)
5098 + return VM_FAULT_OOM;
5099 +
5100 + kaddr = kmap(vmf->page);
5101 + memset(kaddr, 0, PAGE_SIZE);
5102 + kaddr[0] = 0x9DE3BFA8U; /* save */
5103 + flush_dcache_page(vmf->page);
5104 + kunmap(vmf->page);
5105 + return VM_FAULT_MAJOR;
5106 +}
5107 +
5108 +static const struct vm_operations_struct pax_vm_ops = {
5109 + .close = pax_emuplt_close,
5110 + .fault = pax_emuplt_fault
5111 +};
5112 +
5113 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5114 +{
5115 + int ret;
5116 +
5117 + vma->vm_mm = current->mm;
5118 + vma->vm_start = addr;
5119 + vma->vm_end = addr + PAGE_SIZE;
5120 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5121 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5122 + vma->vm_ops = &pax_vm_ops;
5123 +
5124 + ret = insert_vm_struct(current->mm, vma);
5125 + if (ret)
5126 + return ret;
5127 +
5128 + ++current->mm->total_vm;
5129 + return 0;
5130 +}
5131 +#endif
5132 +
5133 +/*
5134 + * PaX: decide what to do with offenders (regs->pc = fault address)
5135 + *
5136 + * returns 1 when task should be killed
5137 + * 2 when patched PLT trampoline was detected
5138 + * 3 when unpatched PLT trampoline was detected
5139 + */
5140 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5141 +{
5142 +
5143 +#ifdef CONFIG_PAX_EMUPLT
5144 + int err;
5145 +
5146 + do { /* PaX: patched PLT emulation #1 */
5147 + unsigned int sethi1, sethi2, jmpl;
5148 +
5149 + err = get_user(sethi1, (unsigned int *)regs->pc);
5150 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5151 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5152 +
5153 + if (err)
5154 + break;
5155 +
5156 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5157 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5158 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5159 + {
5160 + unsigned int addr;
5161 +
5162 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5163 + addr = regs->u_regs[UREG_G1];
5164 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5165 + regs->pc = addr;
5166 + regs->npc = addr+4;
5167 + return 2;
5168 + }
5169 + } while (0);
5170 +
5171 + { /* PaX: patched PLT emulation #2 */
5172 + unsigned int ba;
5173 +
5174 + err = get_user(ba, (unsigned int *)regs->pc);
5175 +
5176 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5177 + unsigned int addr;
5178 +
5179 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5180 + regs->pc = addr;
5181 + regs->npc = addr+4;
5182 + return 2;
5183 + }
5184 + }
5185 +
5186 + do { /* PaX: patched PLT emulation #3 */
5187 + unsigned int sethi, jmpl, nop;
5188 +
5189 + err = get_user(sethi, (unsigned int *)regs->pc);
5190 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5191 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5192 +
5193 + if (err)
5194 + break;
5195 +
5196 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5197 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5198 + nop == 0x01000000U)
5199 + {
5200 + unsigned int addr;
5201 +
5202 + addr = (sethi & 0x003FFFFFU) << 10;
5203 + regs->u_regs[UREG_G1] = addr;
5204 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5205 + regs->pc = addr;
5206 + regs->npc = addr+4;
5207 + return 2;
5208 + }
5209 + } while (0);
5210 +
5211 + do { /* PaX: unpatched PLT emulation step 1 */
5212 + unsigned int sethi, ba, nop;
5213 +
5214 + err = get_user(sethi, (unsigned int *)regs->pc);
5215 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5216 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5217 +
5218 + if (err)
5219 + break;
5220 +
5221 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5222 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5223 + nop == 0x01000000U)
5224 + {
5225 + unsigned int addr, save, call;
5226 +
5227 + if ((ba & 0xFFC00000U) == 0x30800000U)
5228 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5229 + else
5230 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5231 +
5232 + err = get_user(save, (unsigned int *)addr);
5233 + err |= get_user(call, (unsigned int *)(addr+4));
5234 + err |= get_user(nop, (unsigned int *)(addr+8));
5235 + if (err)
5236 + break;
5237 +
5238 +#ifdef CONFIG_PAX_DLRESOLVE
5239 + if (save == 0x9DE3BFA8U &&
5240 + (call & 0xC0000000U) == 0x40000000U &&
5241 + nop == 0x01000000U)
5242 + {
5243 + struct vm_area_struct *vma;
5244 + unsigned long call_dl_resolve;
5245 +
5246 + down_read(&current->mm->mmap_sem);
5247 + call_dl_resolve = current->mm->call_dl_resolve;
5248 + up_read(&current->mm->mmap_sem);
5249 + if (likely(call_dl_resolve))
5250 + goto emulate;
5251 +
5252 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5253 +
5254 + down_write(&current->mm->mmap_sem);
5255 + if (current->mm->call_dl_resolve) {
5256 + call_dl_resolve = current->mm->call_dl_resolve;
5257 + up_write(&current->mm->mmap_sem);
5258 + if (vma)
5259 + kmem_cache_free(vm_area_cachep, vma);
5260 + goto emulate;
5261 + }
5262 +
5263 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5264 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5265 + up_write(&current->mm->mmap_sem);
5266 + if (vma)
5267 + kmem_cache_free(vm_area_cachep, vma);
5268 + return 1;
5269 + }
5270 +
5271 + if (pax_insert_vma(vma, call_dl_resolve)) {
5272 + up_write(&current->mm->mmap_sem);
5273 + kmem_cache_free(vm_area_cachep, vma);
5274 + return 1;
5275 + }
5276 +
5277 + current->mm->call_dl_resolve = call_dl_resolve;
5278 + up_write(&current->mm->mmap_sem);
5279 +
5280 +emulate:
5281 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5282 + regs->pc = call_dl_resolve;
5283 + regs->npc = addr+4;
5284 + return 3;
5285 + }
5286 +#endif
5287 +
5288 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5289 + if ((save & 0xFFC00000U) == 0x05000000U &&
5290 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5291 + nop == 0x01000000U)
5292 + {
5293 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5294 + regs->u_regs[UREG_G2] = addr + 4;
5295 + addr = (save & 0x003FFFFFU) << 10;
5296 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5297 + regs->pc = addr;
5298 + regs->npc = addr+4;
5299 + return 3;
5300 + }
5301 + }
5302 + } while (0);
5303 +
5304 + do { /* PaX: unpatched PLT emulation step 2 */
5305 + unsigned int save, call, nop;
5306 +
5307 + err = get_user(save, (unsigned int *)(regs->pc-4));
5308 + err |= get_user(call, (unsigned int *)regs->pc);
5309 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5310 + if (err)
5311 + break;
5312 +
5313 + if (save == 0x9DE3BFA8U &&
5314 + (call & 0xC0000000U) == 0x40000000U &&
5315 + nop == 0x01000000U)
5316 + {
5317 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5318 +
5319 + regs->u_regs[UREG_RETPC] = regs->pc;
5320 + regs->pc = dl_resolve;
5321 + regs->npc = dl_resolve+4;
5322 + return 3;
5323 + }
5324 + } while (0);
5325 +#endif
5326 +
5327 + return 1;
5328 +}
5329 +
5330 +void pax_report_insns(void *pc, void *sp)
5331 +{
5332 + unsigned long i;
5333 +
5334 + printk(KERN_ERR "PAX: bytes at PC: ");
5335 + for (i = 0; i < 8; i++) {
5336 + unsigned int c;
5337 + if (get_user(c, (unsigned int *)pc+i))
5338 + printk(KERN_CONT "???????? ");
5339 + else
5340 + printk(KERN_CONT "%08x ", c);
5341 + }
5342 + printk("\n");
5343 +}
5344 +#endif
5345 +
5346 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5347 unsigned long address)
5348 {
5349 @@ -231,6 +495,24 @@ good_area:
5350 if(!(vma->vm_flags & VM_WRITE))
5351 goto bad_area;
5352 } else {
5353 +
5354 +#ifdef CONFIG_PAX_PAGEEXEC
5355 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5356 + up_read(&mm->mmap_sem);
5357 + switch (pax_handle_fetch_fault(regs)) {
5358 +
5359 +#ifdef CONFIG_PAX_EMUPLT
5360 + case 2:
5361 + case 3:
5362 + return;
5363 +#endif
5364 +
5365 + }
5366 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5367 + do_group_exit(SIGKILL);
5368 + }
5369 +#endif
5370 +
5371 /* Allow reads even for write-only mappings */
5372 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5373 goto bad_area;
5374 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_64.c linux-2.6.32.42/arch/sparc/mm/fault_64.c
5375 --- linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5376 +++ linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5377 @@ -20,6 +20,9 @@
5378 #include <linux/kprobes.h>
5379 #include <linux/kdebug.h>
5380 #include <linux/percpu.h>
5381 +#include <linux/slab.h>
5382 +#include <linux/pagemap.h>
5383 +#include <linux/compiler.h>
5384
5385 #include <asm/page.h>
5386 #include <asm/pgtable.h>
5387 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5388 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5389 regs->tpc);
5390 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5391 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5392 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5393 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5394 dump_stack();
5395 unhandled_fault(regs->tpc, current, regs);
5396 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5397 show_regs(regs);
5398 }
5399
5400 +#ifdef CONFIG_PAX_PAGEEXEC
5401 +#ifdef CONFIG_PAX_DLRESOLVE
5402 +static void pax_emuplt_close(struct vm_area_struct *vma)
5403 +{
5404 + vma->vm_mm->call_dl_resolve = 0UL;
5405 +}
5406 +
5407 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5408 +{
5409 + unsigned int *kaddr;
5410 +
5411 + vmf->page = alloc_page(GFP_HIGHUSER);
5412 + if (!vmf->page)
5413 + return VM_FAULT_OOM;
5414 +
5415 + kaddr = kmap(vmf->page);
5416 + memset(kaddr, 0, PAGE_SIZE);
5417 + kaddr[0] = 0x9DE3BFA8U; /* save */
5418 + flush_dcache_page(vmf->page);
5419 + kunmap(vmf->page);
5420 + return VM_FAULT_MAJOR;
5421 +}
5422 +
5423 +static const struct vm_operations_struct pax_vm_ops = {
5424 + .close = pax_emuplt_close,
5425 + .fault = pax_emuplt_fault
5426 +};
5427 +
5428 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5429 +{
5430 + int ret;
5431 +
5432 + vma->vm_mm = current->mm;
5433 + vma->vm_start = addr;
5434 + vma->vm_end = addr + PAGE_SIZE;
5435 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5436 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5437 + vma->vm_ops = &pax_vm_ops;
5438 +
5439 + ret = insert_vm_struct(current->mm, vma);
5440 + if (ret)
5441 + return ret;
5442 +
5443 + ++current->mm->total_vm;
5444 + return 0;
5445 +}
5446 +#endif
5447 +
5448 +/*
5449 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5450 + *
5451 + * returns 1 when task should be killed
5452 + * 2 when patched PLT trampoline was detected
5453 + * 3 when unpatched PLT trampoline was detected
5454 + */
5455 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5456 +{
5457 +
5458 +#ifdef CONFIG_PAX_EMUPLT
5459 + int err;
5460 +
5461 + do { /* PaX: patched PLT emulation #1 */
5462 + unsigned int sethi1, sethi2, jmpl;
5463 +
5464 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5465 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5466 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5467 +
5468 + if (err)
5469 + break;
5470 +
5471 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5472 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5473 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5474 + {
5475 + unsigned long addr;
5476 +
5477 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5478 + addr = regs->u_regs[UREG_G1];
5479 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5480 +
5481 + if (test_thread_flag(TIF_32BIT))
5482 + addr &= 0xFFFFFFFFUL;
5483 +
5484 + regs->tpc = addr;
5485 + regs->tnpc = addr+4;
5486 + return 2;
5487 + }
5488 + } while (0);
5489 +
5490 + { /* PaX: patched PLT emulation #2 */
5491 + unsigned int ba;
5492 +
5493 + err = get_user(ba, (unsigned int *)regs->tpc);
5494 +
5495 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5496 + unsigned long addr;
5497 +
5498 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5499 +
5500 + if (test_thread_flag(TIF_32BIT))
5501 + addr &= 0xFFFFFFFFUL;
5502 +
5503 + regs->tpc = addr;
5504 + regs->tnpc = addr+4;
5505 + return 2;
5506 + }
5507 + }
5508 +
5509 + do { /* PaX: patched PLT emulation #3 */
5510 + unsigned int sethi, jmpl, nop;
5511 +
5512 + err = get_user(sethi, (unsigned int *)regs->tpc);
5513 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5514 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5515 +
5516 + if (err)
5517 + break;
5518 +
5519 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5520 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5521 + nop == 0x01000000U)
5522 + {
5523 + unsigned long addr;
5524 +
5525 + addr = (sethi & 0x003FFFFFU) << 10;
5526 + regs->u_regs[UREG_G1] = addr;
5527 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5528 +
5529 + if (test_thread_flag(TIF_32BIT))
5530 + addr &= 0xFFFFFFFFUL;
5531 +
5532 + regs->tpc = addr;
5533 + regs->tnpc = addr+4;
5534 + return 2;
5535 + }
5536 + } while (0);
5537 +
5538 + do { /* PaX: patched PLT emulation #4 */
5539 + unsigned int sethi, mov1, call, mov2;
5540 +
5541 + err = get_user(sethi, (unsigned int *)regs->tpc);
5542 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5543 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5544 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5545 +
5546 + if (err)
5547 + break;
5548 +
5549 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5550 + mov1 == 0x8210000FU &&
5551 + (call & 0xC0000000U) == 0x40000000U &&
5552 + mov2 == 0x9E100001U)
5553 + {
5554 + unsigned long addr;
5555 +
5556 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5557 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5558 +
5559 + if (test_thread_flag(TIF_32BIT))
5560 + addr &= 0xFFFFFFFFUL;
5561 +
5562 + regs->tpc = addr;
5563 + regs->tnpc = addr+4;
5564 + return 2;
5565 + }
5566 + } while (0);
5567 +
5568 + do { /* PaX: patched PLT emulation #5 */
5569 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5570 +
5571 + err = get_user(sethi, (unsigned int *)regs->tpc);
5572 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5573 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5574 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5575 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5576 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5577 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5578 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5579 +
5580 + if (err)
5581 + break;
5582 +
5583 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5584 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5585 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5586 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5587 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5588 + sllx == 0x83287020U &&
5589 + jmpl == 0x81C04005U &&
5590 + nop == 0x01000000U)
5591 + {
5592 + unsigned long addr;
5593 +
5594 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5595 + regs->u_regs[UREG_G1] <<= 32;
5596 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5597 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5598 + regs->tpc = addr;
5599 + regs->tnpc = addr+4;
5600 + return 2;
5601 + }
5602 + } while (0);
5603 +
5604 + do { /* PaX: patched PLT emulation #6 */
5605 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5606 +
5607 + err = get_user(sethi, (unsigned int *)regs->tpc);
5608 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5609 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5610 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5611 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5612 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5613 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5614 +
5615 + if (err)
5616 + break;
5617 +
5618 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5619 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5620 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5621 + sllx == 0x83287020U &&
5622 + (or & 0xFFFFE000U) == 0x8A116000U &&
5623 + jmpl == 0x81C04005U &&
5624 + nop == 0x01000000U)
5625 + {
5626 + unsigned long addr;
5627 +
5628 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5629 + regs->u_regs[UREG_G1] <<= 32;
5630 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5631 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5632 + regs->tpc = addr;
5633 + regs->tnpc = addr+4;
5634 + return 2;
5635 + }
5636 + } while (0);
5637 +
5638 + do { /* PaX: unpatched PLT emulation step 1 */
5639 + unsigned int sethi, ba, nop;
5640 +
5641 + err = get_user(sethi, (unsigned int *)regs->tpc);
5642 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5643 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5644 +
5645 + if (err)
5646 + break;
5647 +
5648 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5649 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5650 + nop == 0x01000000U)
5651 + {
5652 + unsigned long addr;
5653 + unsigned int save, call;
5654 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5655 +
5656 + if ((ba & 0xFFC00000U) == 0x30800000U)
5657 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5658 + else
5659 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5660 +
5661 + if (test_thread_flag(TIF_32BIT))
5662 + addr &= 0xFFFFFFFFUL;
5663 +
5664 + err = get_user(save, (unsigned int *)addr);
5665 + err |= get_user(call, (unsigned int *)(addr+4));
5666 + err |= get_user(nop, (unsigned int *)(addr+8));
5667 + if (err)
5668 + break;
5669 +
5670 +#ifdef CONFIG_PAX_DLRESOLVE
5671 + if (save == 0x9DE3BFA8U &&
5672 + (call & 0xC0000000U) == 0x40000000U &&
5673 + nop == 0x01000000U)
5674 + {
5675 + struct vm_area_struct *vma;
5676 + unsigned long call_dl_resolve;
5677 +
5678 + down_read(&current->mm->mmap_sem);
5679 + call_dl_resolve = current->mm->call_dl_resolve;
5680 + up_read(&current->mm->mmap_sem);
5681 + if (likely(call_dl_resolve))
5682 + goto emulate;
5683 +
5684 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5685 +
5686 + down_write(&current->mm->mmap_sem);
5687 + if (current->mm->call_dl_resolve) {
5688 + call_dl_resolve = current->mm->call_dl_resolve;
5689 + up_write(&current->mm->mmap_sem);
5690 + if (vma)
5691 + kmem_cache_free(vm_area_cachep, vma);
5692 + goto emulate;
5693 + }
5694 +
5695 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5696 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5697 + up_write(&current->mm->mmap_sem);
5698 + if (vma)
5699 + kmem_cache_free(vm_area_cachep, vma);
5700 + return 1;
5701 + }
5702 +
5703 + if (pax_insert_vma(vma, call_dl_resolve)) {
5704 + up_write(&current->mm->mmap_sem);
5705 + kmem_cache_free(vm_area_cachep, vma);
5706 + return 1;
5707 + }
5708 +
5709 + current->mm->call_dl_resolve = call_dl_resolve;
5710 + up_write(&current->mm->mmap_sem);
5711 +
5712 +emulate:
5713 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5714 + regs->tpc = call_dl_resolve;
5715 + regs->tnpc = addr+4;
5716 + return 3;
5717 + }
5718 +#endif
5719 +
5720 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5721 + if ((save & 0xFFC00000U) == 0x05000000U &&
5722 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5723 + nop == 0x01000000U)
5724 + {
5725 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5726 + regs->u_regs[UREG_G2] = addr + 4;
5727 + addr = (save & 0x003FFFFFU) << 10;
5728 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5729 +
5730 + if (test_thread_flag(TIF_32BIT))
5731 + addr &= 0xFFFFFFFFUL;
5732 +
5733 + regs->tpc = addr;
5734 + regs->tnpc = addr+4;
5735 + return 3;
5736 + }
5737 +
5738 + /* PaX: 64-bit PLT stub */
5739 + err = get_user(sethi1, (unsigned int *)addr);
5740 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5741 + err |= get_user(or1, (unsigned int *)(addr+8));
5742 + err |= get_user(or2, (unsigned int *)(addr+12));
5743 + err |= get_user(sllx, (unsigned int *)(addr+16));
5744 + err |= get_user(add, (unsigned int *)(addr+20));
5745 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5746 + err |= get_user(nop, (unsigned int *)(addr+28));
5747 + if (err)
5748 + break;
5749 +
5750 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5751 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5752 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5753 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5754 + sllx == 0x89293020U &&
5755 + add == 0x8A010005U &&
5756 + jmpl == 0x89C14000U &&
5757 + nop == 0x01000000U)
5758 + {
5759 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5760 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5761 + regs->u_regs[UREG_G4] <<= 32;
5762 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5763 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5764 + regs->u_regs[UREG_G4] = addr + 24;
5765 + addr = regs->u_regs[UREG_G5];
5766 + regs->tpc = addr;
5767 + regs->tnpc = addr+4;
5768 + return 3;
5769 + }
5770 + }
5771 + } while (0);
5772 +
5773 +#ifdef CONFIG_PAX_DLRESOLVE
5774 + do { /* PaX: unpatched PLT emulation step 2 */
5775 + unsigned int save, call, nop;
5776 +
5777 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5778 + err |= get_user(call, (unsigned int *)regs->tpc);
5779 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5780 + if (err)
5781 + break;
5782 +
5783 + if (save == 0x9DE3BFA8U &&
5784 + (call & 0xC0000000U) == 0x40000000U &&
5785 + nop == 0x01000000U)
5786 + {
5787 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5788 +
5789 + if (test_thread_flag(TIF_32BIT))
5790 + dl_resolve &= 0xFFFFFFFFUL;
5791 +
5792 + regs->u_regs[UREG_RETPC] = regs->tpc;
5793 + regs->tpc = dl_resolve;
5794 + regs->tnpc = dl_resolve+4;
5795 + return 3;
5796 + }
5797 + } while (0);
5798 +#endif
5799 +
5800 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5801 + unsigned int sethi, ba, nop;
5802 +
5803 + err = get_user(sethi, (unsigned int *)regs->tpc);
5804 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5805 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5806 +
5807 + if (err)
5808 + break;
5809 +
5810 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5811 + (ba & 0xFFF00000U) == 0x30600000U &&
5812 + nop == 0x01000000U)
5813 + {
5814 + unsigned long addr;
5815 +
5816 + addr = (sethi & 0x003FFFFFU) << 10;
5817 + regs->u_regs[UREG_G1] = addr;
5818 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5819 +
5820 + if (test_thread_flag(TIF_32BIT))
5821 + addr &= 0xFFFFFFFFUL;
5822 +
5823 + regs->tpc = addr;
5824 + regs->tnpc = addr+4;
5825 + return 2;
5826 + }
5827 + } while (0);
5828 +
5829 +#endif
5830 +
5831 + return 1;
5832 +}
5833 +
5834 +void pax_report_insns(void *pc, void *sp)
5835 +{
5836 + unsigned long i;
5837 +
5838 + printk(KERN_ERR "PAX: bytes at PC: ");
5839 + for (i = 0; i < 8; i++) {
5840 + unsigned int c;
5841 + if (get_user(c, (unsigned int *)pc+i))
5842 + printk(KERN_CONT "???????? ");
5843 + else
5844 + printk(KERN_CONT "%08x ", c);
5845 + }
5846 + printk("\n");
5847 +}
5848 +#endif
5849 +
5850 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5851 {
5852 struct mm_struct *mm = current->mm;
5853 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5854 if (!vma)
5855 goto bad_area;
5856
5857 +#ifdef CONFIG_PAX_PAGEEXEC
5858 + /* PaX: detect ITLB misses on non-exec pages */
5859 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5860 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5861 + {
5862 + if (address != regs->tpc)
5863 + goto good_area;
5864 +
5865 + up_read(&mm->mmap_sem);
5866 + switch (pax_handle_fetch_fault(regs)) {
5867 +
5868 +#ifdef CONFIG_PAX_EMUPLT
5869 + case 2:
5870 + case 3:
5871 + return;
5872 +#endif
5873 +
5874 + }
5875 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5876 + do_group_exit(SIGKILL);
5877 + }
5878 +#endif
5879 +
5880 /* Pure DTLB misses do not tell us whether the fault causing
5881 * load/store/atomic was a write or not, it only says that there
5882 * was no match. So in such a case we (carefully) read the
5883 diff -urNp linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c
5884 --- linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5885 +++ linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5886 @@ -69,7 +69,7 @@ full_search:
5887 }
5888 return -ENOMEM;
5889 }
5890 - if (likely(!vma || addr + len <= vma->vm_start)) {
5891 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5892 /*
5893 * Remember the place where we stopped the search:
5894 */
5895 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5896 /* make sure it can fit in the remaining address space */
5897 if (likely(addr > len)) {
5898 vma = find_vma(mm, addr-len);
5899 - if (!vma || addr <= vma->vm_start) {
5900 + if (check_heap_stack_gap(vma, addr - len, len)) {
5901 /* remember the address as a hint for next time */
5902 return (mm->free_area_cache = addr-len);
5903 }
5904 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5905 if (unlikely(mm->mmap_base < len))
5906 goto bottomup;
5907
5908 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5909 + addr = mm->mmap_base - len;
5910
5911 do {
5912 + addr &= HPAGE_MASK;
5913 /*
5914 * Lookup failure means no vma is above this address,
5915 * else if new region fits below vma->vm_start,
5916 * return with success:
5917 */
5918 vma = find_vma(mm, addr);
5919 - if (likely(!vma || addr+len <= vma->vm_start)) {
5920 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5921 /* remember the address as a hint for next time */
5922 return (mm->free_area_cache = addr);
5923 }
5924 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5925 mm->cached_hole_size = vma->vm_start - addr;
5926
5927 /* try just below the current vma->vm_start */
5928 - addr = (vma->vm_start-len) & HPAGE_MASK;
5929 - } while (likely(len < vma->vm_start));
5930 + addr = skip_heap_stack_gap(vma, len);
5931 + } while (!IS_ERR_VALUE(addr));
5932
5933 bottomup:
5934 /*
5935 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5936 if (addr) {
5937 addr = ALIGN(addr, HPAGE_SIZE);
5938 vma = find_vma(mm, addr);
5939 - if (task_size - len >= addr &&
5940 - (!vma || addr + len <= vma->vm_start))
5941 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5942 return addr;
5943 }
5944 if (mm->get_unmapped_area == arch_get_unmapped_area)
5945 diff -urNp linux-2.6.32.42/arch/sparc/mm/init_32.c linux-2.6.32.42/arch/sparc/mm/init_32.c
5946 --- linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5947 +++ linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5948 @@ -317,6 +317,9 @@ extern void device_scan(void);
5949 pgprot_t PAGE_SHARED __read_mostly;
5950 EXPORT_SYMBOL(PAGE_SHARED);
5951
5952 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5953 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5954 +
5955 void __init paging_init(void)
5956 {
5957 switch(sparc_cpu_model) {
5958 @@ -345,17 +348,17 @@ void __init paging_init(void)
5959
5960 /* Initialize the protection map with non-constant, MMU dependent values. */
5961 protection_map[0] = PAGE_NONE;
5962 - protection_map[1] = PAGE_READONLY;
5963 - protection_map[2] = PAGE_COPY;
5964 - protection_map[3] = PAGE_COPY;
5965 + protection_map[1] = PAGE_READONLY_NOEXEC;
5966 + protection_map[2] = PAGE_COPY_NOEXEC;
5967 + protection_map[3] = PAGE_COPY_NOEXEC;
5968 protection_map[4] = PAGE_READONLY;
5969 protection_map[5] = PAGE_READONLY;
5970 protection_map[6] = PAGE_COPY;
5971 protection_map[7] = PAGE_COPY;
5972 protection_map[8] = PAGE_NONE;
5973 - protection_map[9] = PAGE_READONLY;
5974 - protection_map[10] = PAGE_SHARED;
5975 - protection_map[11] = PAGE_SHARED;
5976 + protection_map[9] = PAGE_READONLY_NOEXEC;
5977 + protection_map[10] = PAGE_SHARED_NOEXEC;
5978 + protection_map[11] = PAGE_SHARED_NOEXEC;
5979 protection_map[12] = PAGE_READONLY;
5980 protection_map[13] = PAGE_READONLY;
5981 protection_map[14] = PAGE_SHARED;
5982 diff -urNp linux-2.6.32.42/arch/sparc/mm/Makefile linux-2.6.32.42/arch/sparc/mm/Makefile
5983 --- linux-2.6.32.42/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5984 +++ linux-2.6.32.42/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5985 @@ -2,7 +2,7 @@
5986 #
5987
5988 asflags-y := -ansi
5989 -ccflags-y := -Werror
5990 +#ccflags-y := -Werror
5991
5992 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5993 obj-y += fault_$(BITS).o
5994 diff -urNp linux-2.6.32.42/arch/sparc/mm/srmmu.c linux-2.6.32.42/arch/sparc/mm/srmmu.c
5995 --- linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5996 +++ linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5997 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5998 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5999 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6000 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6001 +
6002 +#ifdef CONFIG_PAX_PAGEEXEC
6003 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6004 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6005 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6006 +#endif
6007 +
6008 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6009 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6010
6011 diff -urNp linux-2.6.32.42/arch/um/include/asm/kmap_types.h linux-2.6.32.42/arch/um/include/asm/kmap_types.h
6012 --- linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6013 +++ linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6014 @@ -23,6 +23,7 @@ enum km_type {
6015 KM_IRQ1,
6016 KM_SOFTIRQ0,
6017 KM_SOFTIRQ1,
6018 + KM_CLEARPAGE,
6019 KM_TYPE_NR
6020 };
6021
6022 diff -urNp linux-2.6.32.42/arch/um/include/asm/page.h linux-2.6.32.42/arch/um/include/asm/page.h
6023 --- linux-2.6.32.42/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6024 +++ linux-2.6.32.42/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6025 @@ -14,6 +14,9 @@
6026 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6027 #define PAGE_MASK (~(PAGE_SIZE-1))
6028
6029 +#define ktla_ktva(addr) (addr)
6030 +#define ktva_ktla(addr) (addr)
6031 +
6032 #ifndef __ASSEMBLY__
6033
6034 struct page;
6035 diff -urNp linux-2.6.32.42/arch/um/kernel/process.c linux-2.6.32.42/arch/um/kernel/process.c
6036 --- linux-2.6.32.42/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6037 +++ linux-2.6.32.42/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6038 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6039 return 2;
6040 }
6041
6042 -/*
6043 - * Only x86 and x86_64 have an arch_align_stack().
6044 - * All other arches have "#define arch_align_stack(x) (x)"
6045 - * in their asm/system.h
6046 - * As this is included in UML from asm-um/system-generic.h,
6047 - * we can use it to behave as the subarch does.
6048 - */
6049 -#ifndef arch_align_stack
6050 -unsigned long arch_align_stack(unsigned long sp)
6051 -{
6052 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6053 - sp -= get_random_int() % 8192;
6054 - return sp & ~0xf;
6055 -}
6056 -#endif
6057 -
6058 unsigned long get_wchan(struct task_struct *p)
6059 {
6060 unsigned long stack_page, sp, ip;
6061 diff -urNp linux-2.6.32.42/arch/um/sys-i386/syscalls.c linux-2.6.32.42/arch/um/sys-i386/syscalls.c
6062 --- linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6063 +++ linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6064 @@ -11,6 +11,21 @@
6065 #include "asm/uaccess.h"
6066 #include "asm/unistd.h"
6067
6068 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6069 +{
6070 + unsigned long pax_task_size = TASK_SIZE;
6071 +
6072 +#ifdef CONFIG_PAX_SEGMEXEC
6073 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6074 + pax_task_size = SEGMEXEC_TASK_SIZE;
6075 +#endif
6076 +
6077 + if (len > pax_task_size || addr > pax_task_size - len)
6078 + return -EINVAL;
6079 +
6080 + return 0;
6081 +}
6082 +
6083 /*
6084 * Perform the select(nd, in, out, ex, tv) and mmap() system
6085 * calls. Linux/i386 didn't use to be able to handle more than
6086 diff -urNp linux-2.6.32.42/arch/x86/boot/bitops.h linux-2.6.32.42/arch/x86/boot/bitops.h
6087 --- linux-2.6.32.42/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6088 +++ linux-2.6.32.42/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6089 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6090 u8 v;
6091 const u32 *p = (const u32 *)addr;
6092
6093 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6094 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6095 return v;
6096 }
6097
6098 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6099
6100 static inline void set_bit(int nr, void *addr)
6101 {
6102 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6103 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6104 }
6105
6106 #endif /* BOOT_BITOPS_H */
6107 diff -urNp linux-2.6.32.42/arch/x86/boot/boot.h linux-2.6.32.42/arch/x86/boot/boot.h
6108 --- linux-2.6.32.42/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6109 +++ linux-2.6.32.42/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6110 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6111 static inline u16 ds(void)
6112 {
6113 u16 seg;
6114 - asm("movw %%ds,%0" : "=rm" (seg));
6115 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6116 return seg;
6117 }
6118
6119 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6120 static inline int memcmp(const void *s1, const void *s2, size_t len)
6121 {
6122 u8 diff;
6123 - asm("repe; cmpsb; setnz %0"
6124 + asm volatile("repe; cmpsb; setnz %0"
6125 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6126 return diff;
6127 }
6128 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_32.S linux-2.6.32.42/arch/x86/boot/compressed/head_32.S
6129 --- linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6130 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6131 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6132 notl %eax
6133 andl %eax, %ebx
6134 #else
6135 - movl $LOAD_PHYSICAL_ADDR, %ebx
6136 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6137 #endif
6138
6139 /* Target address to relocate to for decompression */
6140 @@ -149,7 +149,7 @@ relocated:
6141 * and where it was actually loaded.
6142 */
6143 movl %ebp, %ebx
6144 - subl $LOAD_PHYSICAL_ADDR, %ebx
6145 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6146 jz 2f /* Nothing to be done if loaded at compiled addr. */
6147 /*
6148 * Process relocations.
6149 @@ -157,8 +157,7 @@ relocated:
6150
6151 1: subl $4, %edi
6152 movl (%edi), %ecx
6153 - testl %ecx, %ecx
6154 - jz 2f
6155 + jecxz 2f
6156 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6157 jmp 1b
6158 2:
6159 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_64.S linux-2.6.32.42/arch/x86/boot/compressed/head_64.S
6160 --- linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6161 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-04-17 15:56:46.000000000 -0400
6162 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6163 notl %eax
6164 andl %eax, %ebx
6165 #else
6166 - movl $LOAD_PHYSICAL_ADDR, %ebx
6167 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6168 #endif
6169
6170 /* Target address to relocate to for decompression */
6171 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6172 notq %rax
6173 andq %rax, %rbp
6174 #else
6175 - movq $LOAD_PHYSICAL_ADDR, %rbp
6176 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6177 #endif
6178
6179 /* Target address to relocate to for decompression */
6180 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/misc.c linux-2.6.32.42/arch/x86/boot/compressed/misc.c
6181 --- linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6182 +++ linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6183 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6184 case PT_LOAD:
6185 #ifdef CONFIG_RELOCATABLE
6186 dest = output;
6187 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6188 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6189 #else
6190 dest = (void *)(phdr->p_paddr);
6191 #endif
6192 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6193 error("Destination address too large");
6194 #endif
6195 #ifndef CONFIG_RELOCATABLE
6196 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6197 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6198 error("Wrong destination address");
6199 #endif
6200
6201 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c
6202 --- linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6203 +++ linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6204 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6205
6206 offs = (olen > ilen) ? olen - ilen : 0;
6207 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6208 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6209 + offs += 64*1024; /* Add 64K bytes slack */
6210 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6211
6212 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6213 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/relocs.c linux-2.6.32.42/arch/x86/boot/compressed/relocs.c
6214 --- linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6215 +++ linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6216 @@ -10,8 +10,11 @@
6217 #define USE_BSD
6218 #include <endian.h>
6219
6220 +#include "../../../../include/linux/autoconf.h"
6221 +
6222 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6223 static Elf32_Ehdr ehdr;
6224 +static Elf32_Phdr *phdr;
6225 static unsigned long reloc_count, reloc_idx;
6226 static unsigned long *relocs;
6227
6228 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6229
6230 static int is_safe_abs_reloc(const char* sym_name)
6231 {
6232 - int i;
6233 + unsigned int i;
6234
6235 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6236 if (!strcmp(sym_name, safe_abs_relocs[i]))
6237 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6238 }
6239 }
6240
6241 +static void read_phdrs(FILE *fp)
6242 +{
6243 + unsigned int i;
6244 +
6245 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6246 + if (!phdr) {
6247 + die("Unable to allocate %d program headers\n",
6248 + ehdr.e_phnum);
6249 + }
6250 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6251 + die("Seek to %d failed: %s\n",
6252 + ehdr.e_phoff, strerror(errno));
6253 + }
6254 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6255 + die("Cannot read ELF program headers: %s\n",
6256 + strerror(errno));
6257 + }
6258 + for(i = 0; i < ehdr.e_phnum; i++) {
6259 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6260 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6261 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6262 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6263 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6264 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6265 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6266 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6267 + }
6268 +
6269 +}
6270 +
6271 static void read_shdrs(FILE *fp)
6272 {
6273 - int i;
6274 + unsigned int i;
6275 Elf32_Shdr shdr;
6276
6277 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6278 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6279
6280 static void read_strtabs(FILE *fp)
6281 {
6282 - int i;
6283 + unsigned int i;
6284 for (i = 0; i < ehdr.e_shnum; i++) {
6285 struct section *sec = &secs[i];
6286 if (sec->shdr.sh_type != SHT_STRTAB) {
6287 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6288
6289 static void read_symtabs(FILE *fp)
6290 {
6291 - int i,j;
6292 + unsigned int i,j;
6293 for (i = 0; i < ehdr.e_shnum; i++) {
6294 struct section *sec = &secs[i];
6295 if (sec->shdr.sh_type != SHT_SYMTAB) {
6296 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6297
6298 static void read_relocs(FILE *fp)
6299 {
6300 - int i,j;
6301 + unsigned int i,j;
6302 + uint32_t base;
6303 +
6304 for (i = 0; i < ehdr.e_shnum; i++) {
6305 struct section *sec = &secs[i];
6306 if (sec->shdr.sh_type != SHT_REL) {
6307 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6308 die("Cannot read symbol table: %s\n",
6309 strerror(errno));
6310 }
6311 + base = 0;
6312 + for (j = 0; j < ehdr.e_phnum; j++) {
6313 + if (phdr[j].p_type != PT_LOAD )
6314 + continue;
6315 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6316 + continue;
6317 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6318 + break;
6319 + }
6320 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6321 Elf32_Rel *rel = &sec->reltab[j];
6322 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6323 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6324 rel->r_info = elf32_to_cpu(rel->r_info);
6325 }
6326 }
6327 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6328
6329 static void print_absolute_symbols(void)
6330 {
6331 - int i;
6332 + unsigned int i;
6333 printf("Absolute symbols\n");
6334 printf(" Num: Value Size Type Bind Visibility Name\n");
6335 for (i = 0; i < ehdr.e_shnum; i++) {
6336 struct section *sec = &secs[i];
6337 char *sym_strtab;
6338 Elf32_Sym *sh_symtab;
6339 - int j;
6340 + unsigned int j;
6341
6342 if (sec->shdr.sh_type != SHT_SYMTAB) {
6343 continue;
6344 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6345
6346 static void print_absolute_relocs(void)
6347 {
6348 - int i, printed = 0;
6349 + unsigned int i, printed = 0;
6350
6351 for (i = 0; i < ehdr.e_shnum; i++) {
6352 struct section *sec = &secs[i];
6353 struct section *sec_applies, *sec_symtab;
6354 char *sym_strtab;
6355 Elf32_Sym *sh_symtab;
6356 - int j;
6357 + unsigned int j;
6358 if (sec->shdr.sh_type != SHT_REL) {
6359 continue;
6360 }
6361 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6362
6363 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6364 {
6365 - int i;
6366 + unsigned int i;
6367 /* Walk through the relocations */
6368 for (i = 0; i < ehdr.e_shnum; i++) {
6369 char *sym_strtab;
6370 Elf32_Sym *sh_symtab;
6371 struct section *sec_applies, *sec_symtab;
6372 - int j;
6373 + unsigned int j;
6374 struct section *sec = &secs[i];
6375
6376 if (sec->shdr.sh_type != SHT_REL) {
6377 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6378 if (sym->st_shndx == SHN_ABS) {
6379 continue;
6380 }
6381 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6382 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6383 + continue;
6384 +
6385 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6386 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6387 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6388 + continue;
6389 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6390 + continue;
6391 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6392 + continue;
6393 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6394 + continue;
6395 +#endif
6396 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6397 /*
6398 * NONE can be ignored and and PC relative
6399 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6400
6401 static void emit_relocs(int as_text)
6402 {
6403 - int i;
6404 + unsigned int i;
6405 /* Count how many relocations I have and allocate space for them. */
6406 reloc_count = 0;
6407 walk_relocs(count_reloc);
6408 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6409 fname, strerror(errno));
6410 }
6411 read_ehdr(fp);
6412 + read_phdrs(fp);
6413 read_shdrs(fp);
6414 read_strtabs(fp);
6415 read_symtabs(fp);
6416 diff -urNp linux-2.6.32.42/arch/x86/boot/cpucheck.c linux-2.6.32.42/arch/x86/boot/cpucheck.c
6417 --- linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6418 +++ linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6419 @@ -74,7 +74,7 @@ static int has_fpu(void)
6420 u16 fcw = -1, fsw = -1;
6421 u32 cr0;
6422
6423 - asm("movl %%cr0,%0" : "=r" (cr0));
6424 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6425 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6426 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6427 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6428 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6429 {
6430 u32 f0, f1;
6431
6432 - asm("pushfl ; "
6433 + asm volatile("pushfl ; "
6434 "pushfl ; "
6435 "popl %0 ; "
6436 "movl %0,%1 ; "
6437 @@ -115,7 +115,7 @@ static void get_flags(void)
6438 set_bit(X86_FEATURE_FPU, cpu.flags);
6439
6440 if (has_eflag(X86_EFLAGS_ID)) {
6441 - asm("cpuid"
6442 + asm volatile("cpuid"
6443 : "=a" (max_intel_level),
6444 "=b" (cpu_vendor[0]),
6445 "=d" (cpu_vendor[1]),
6446 @@ -124,7 +124,7 @@ static void get_flags(void)
6447
6448 if (max_intel_level >= 0x00000001 &&
6449 max_intel_level <= 0x0000ffff) {
6450 - asm("cpuid"
6451 + asm volatile("cpuid"
6452 : "=a" (tfms),
6453 "=c" (cpu.flags[4]),
6454 "=d" (cpu.flags[0])
6455 @@ -136,7 +136,7 @@ static void get_flags(void)
6456 cpu.model += ((tfms >> 16) & 0xf) << 4;
6457 }
6458
6459 - asm("cpuid"
6460 + asm volatile("cpuid"
6461 : "=a" (max_amd_level)
6462 : "a" (0x80000000)
6463 : "ebx", "ecx", "edx");
6464 @@ -144,7 +144,7 @@ static void get_flags(void)
6465 if (max_amd_level >= 0x80000001 &&
6466 max_amd_level <= 0x8000ffff) {
6467 u32 eax = 0x80000001;
6468 - asm("cpuid"
6469 + asm volatile("cpuid"
6470 : "+a" (eax),
6471 "=c" (cpu.flags[6]),
6472 "=d" (cpu.flags[1])
6473 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6474 u32 ecx = MSR_K7_HWCR;
6475 u32 eax, edx;
6476
6477 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6478 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6479 eax &= ~(1 << 15);
6480 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6481 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6482
6483 get_flags(); /* Make sure it really did something */
6484 err = check_flags();
6485 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6486 u32 ecx = MSR_VIA_FCR;
6487 u32 eax, edx;
6488
6489 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6490 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6491 eax |= (1<<1)|(1<<7);
6492 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6493 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6494
6495 set_bit(X86_FEATURE_CX8, cpu.flags);
6496 err = check_flags();
6497 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6498 u32 eax, edx;
6499 u32 level = 1;
6500
6501 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6502 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6503 - asm("cpuid"
6504 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6505 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6506 + asm volatile("cpuid"
6507 : "+a" (level), "=d" (cpu.flags[0])
6508 : : "ecx", "ebx");
6509 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6510 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6511
6512 err = check_flags();
6513 }
6514 diff -urNp linux-2.6.32.42/arch/x86/boot/header.S linux-2.6.32.42/arch/x86/boot/header.S
6515 --- linux-2.6.32.42/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6516 +++ linux-2.6.32.42/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6517 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6518 # single linked list of
6519 # struct setup_data
6520
6521 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6522 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6523
6524 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6525 #define VO_INIT_SIZE (VO__end - VO__text)
6526 diff -urNp linux-2.6.32.42/arch/x86/boot/memory.c linux-2.6.32.42/arch/x86/boot/memory.c
6527 --- linux-2.6.32.42/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6528 +++ linux-2.6.32.42/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6529 @@ -19,7 +19,7 @@
6530
6531 static int detect_memory_e820(void)
6532 {
6533 - int count = 0;
6534 + unsigned int count = 0;
6535 struct biosregs ireg, oreg;
6536 struct e820entry *desc = boot_params.e820_map;
6537 static struct e820entry buf; /* static so it is zeroed */
6538 diff -urNp linux-2.6.32.42/arch/x86/boot/video.c linux-2.6.32.42/arch/x86/boot/video.c
6539 --- linux-2.6.32.42/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6540 +++ linux-2.6.32.42/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6541 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6542 static unsigned int get_entry(void)
6543 {
6544 char entry_buf[4];
6545 - int i, len = 0;
6546 + unsigned int i, len = 0;
6547 int key;
6548 unsigned int v;
6549
6550 diff -urNp linux-2.6.32.42/arch/x86/boot/video-vesa.c linux-2.6.32.42/arch/x86/boot/video-vesa.c
6551 --- linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6552 +++ linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6553 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6554
6555 boot_params.screen_info.vesapm_seg = oreg.es;
6556 boot_params.screen_info.vesapm_off = oreg.di;
6557 + boot_params.screen_info.vesapm_size = oreg.cx;
6558 }
6559
6560 /*
6561 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_aout.c linux-2.6.32.42/arch/x86/ia32/ia32_aout.c
6562 --- linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6563 +++ linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6564 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6565 unsigned long dump_start, dump_size;
6566 struct user32 dump;
6567
6568 + memset(&dump, 0, sizeof(dump));
6569 +
6570 fs = get_fs();
6571 set_fs(KERNEL_DS);
6572 has_dumped = 1;
6573 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6574 dump_size = dump.u_ssize << PAGE_SHIFT;
6575 DUMP_WRITE(dump_start, dump_size);
6576 }
6577 - /*
6578 - * Finally dump the task struct. Not be used by gdb, but
6579 - * could be useful
6580 - */
6581 - set_fs(KERNEL_DS);
6582 - DUMP_WRITE(current, sizeof(*current));
6583 end_coredump:
6584 set_fs(fs);
6585 return has_dumped;
6586 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32entry.S linux-2.6.32.42/arch/x86/ia32/ia32entry.S
6587 --- linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6588 +++ linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6589 @@ -13,6 +13,7 @@
6590 #include <asm/thread_info.h>
6591 #include <asm/segment.h>
6592 #include <asm/irqflags.h>
6593 +#include <asm/pgtable.h>
6594 #include <linux/linkage.h>
6595
6596 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6597 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6598 ENDPROC(native_irq_enable_sysexit)
6599 #endif
6600
6601 + .macro pax_enter_kernel_user
6602 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6603 + call pax_enter_kernel_user
6604 +#endif
6605 + .endm
6606 +
6607 + .macro pax_exit_kernel_user
6608 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6609 + call pax_exit_kernel_user
6610 +#endif
6611 +#ifdef CONFIG_PAX_RANDKSTACK
6612 + pushq %rax
6613 + call pax_randomize_kstack
6614 + popq %rax
6615 +#endif
6616 + pax_erase_kstack
6617 + .endm
6618 +
6619 +.macro pax_erase_kstack
6620 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6621 + call pax_erase_kstack
6622 +#endif
6623 +.endm
6624 +
6625 /*
6626 * 32bit SYSENTER instruction entry.
6627 *
6628 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6629 CFI_REGISTER rsp,rbp
6630 SWAPGS_UNSAFE_STACK
6631 movq PER_CPU_VAR(kernel_stack), %rsp
6632 - addq $(KERNEL_STACK_OFFSET),%rsp
6633 + pax_enter_kernel_user
6634 /*
6635 * No need to follow this irqs on/off section: the syscall
6636 * disabled irqs, here we enable it straight after entry:
6637 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6638 pushfq
6639 CFI_ADJUST_CFA_OFFSET 8
6640 /*CFI_REL_OFFSET rflags,0*/
6641 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6642 + GET_THREAD_INFO(%r10)
6643 + movl TI_sysenter_return(%r10), %r10d
6644 CFI_REGISTER rip,r10
6645 pushq $__USER32_CS
6646 CFI_ADJUST_CFA_OFFSET 8
6647 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6648 SAVE_ARGS 0,0,1
6649 /* no need to do an access_ok check here because rbp has been
6650 32bit zero extended */
6651 +
6652 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6653 + mov $PAX_USER_SHADOW_BASE,%r10
6654 + add %r10,%rbp
6655 +#endif
6656 +
6657 1: movl (%rbp),%ebp
6658 .section __ex_table,"a"
6659 .quad 1b,ia32_badarg
6660 @@ -172,6 +204,7 @@ sysenter_dispatch:
6661 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6662 jnz sysexit_audit
6663 sysexit_from_sys_call:
6664 + pax_exit_kernel_user
6665 andl $~TS_COMPAT,TI_status(%r10)
6666 /* clear IF, that popfq doesn't enable interrupts early */
6667 andl $~0x200,EFLAGS-R11(%rsp)
6668 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6669 movl %eax,%esi /* 2nd arg: syscall number */
6670 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6671 call audit_syscall_entry
6672 +
6673 + pax_erase_kstack
6674 +
6675 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6676 cmpq $(IA32_NR_syscalls-1),%rax
6677 ja ia32_badsys
6678 @@ -252,6 +288,9 @@ sysenter_tracesys:
6679 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6680 movq %rsp,%rdi /* &pt_regs -> arg1 */
6681 call syscall_trace_enter
6682 +
6683 + pax_erase_kstack
6684 +
6685 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6686 RESTORE_REST
6687 cmpq $(IA32_NR_syscalls-1),%rax
6688 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6689 ENTRY(ia32_cstar_target)
6690 CFI_STARTPROC32 simple
6691 CFI_SIGNAL_FRAME
6692 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6693 + CFI_DEF_CFA rsp,0
6694 CFI_REGISTER rip,rcx
6695 /*CFI_REGISTER rflags,r11*/
6696 SWAPGS_UNSAFE_STACK
6697 movl %esp,%r8d
6698 CFI_REGISTER rsp,r8
6699 movq PER_CPU_VAR(kernel_stack),%rsp
6700 +
6701 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6702 + pax_enter_kernel_user
6703 +#endif
6704 +
6705 /*
6706 * No need to follow this irqs on/off section: the syscall
6707 * disabled irqs and here we enable it straight after entry:
6708 */
6709 ENABLE_INTERRUPTS(CLBR_NONE)
6710 - SAVE_ARGS 8,1,1
6711 + SAVE_ARGS 8*6,1,1
6712 movl %eax,%eax /* zero extension */
6713 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6714 movq %rcx,RIP-ARGOFFSET(%rsp)
6715 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6716 /* no need to do an access_ok check here because r8 has been
6717 32bit zero extended */
6718 /* hardware stack frame is complete now */
6719 +
6720 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6721 + mov $PAX_USER_SHADOW_BASE,%r10
6722 + add %r10,%r8
6723 +#endif
6724 +
6725 1: movl (%r8),%r9d
6726 .section __ex_table,"a"
6727 .quad 1b,ia32_badarg
6728 @@ -333,6 +383,7 @@ cstar_dispatch:
6729 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6730 jnz sysretl_audit
6731 sysretl_from_sys_call:
6732 + pax_exit_kernel_user
6733 andl $~TS_COMPAT,TI_status(%r10)
6734 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6735 movl RIP-ARGOFFSET(%rsp),%ecx
6736 @@ -370,6 +421,9 @@ cstar_tracesys:
6737 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6738 movq %rsp,%rdi /* &pt_regs -> arg1 */
6739 call syscall_trace_enter
6740 +
6741 + pax_erase_kstack
6742 +
6743 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6744 RESTORE_REST
6745 xchgl %ebp,%r9d
6746 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6747 CFI_REL_OFFSET rip,RIP-RIP
6748 PARAVIRT_ADJUST_EXCEPTION_FRAME
6749 SWAPGS
6750 + pax_enter_kernel_user
6751 /*
6752 * No need to follow this irqs on/off section: the syscall
6753 * disabled irqs and here we enable it straight after entry:
6754 @@ -448,6 +503,9 @@ ia32_tracesys:
6755 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6756 movq %rsp,%rdi /* &pt_regs -> arg1 */
6757 call syscall_trace_enter
6758 +
6759 + pax_erase_kstack
6760 +
6761 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6762 RESTORE_REST
6763 cmpq $(IA32_NR_syscalls-1),%rax
6764 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_signal.c linux-2.6.32.42/arch/x86/ia32/ia32_signal.c
6765 --- linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6766 +++ linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6767 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6768 sp -= frame_size;
6769 /* Align the stack pointer according to the i386 ABI,
6770 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6771 - sp = ((sp + 4) & -16ul) - 4;
6772 + sp = ((sp - 12) & -16ul) - 4;
6773 return (void __user *) sp;
6774 }
6775
6776 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6777 * These are actually not used anymore, but left because some
6778 * gdb versions depend on them as a marker.
6779 */
6780 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6781 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6782 } put_user_catch(err);
6783
6784 if (err)
6785 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6786 0xb8,
6787 __NR_ia32_rt_sigreturn,
6788 0x80cd,
6789 - 0,
6790 + 0
6791 };
6792
6793 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6794 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6795
6796 if (ka->sa.sa_flags & SA_RESTORER)
6797 restorer = ka->sa.sa_restorer;
6798 + else if (current->mm->context.vdso)
6799 + /* Return stub is in 32bit vsyscall page */
6800 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6801 else
6802 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6803 - rt_sigreturn);
6804 + restorer = &frame->retcode;
6805 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6806
6807 /*
6808 * Not actually used anymore, but left because some gdb
6809 * versions need it.
6810 */
6811 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6812 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6813 } put_user_catch(err);
6814
6815 if (err)
6816 diff -urNp linux-2.6.32.42/arch/x86/include/asm/alternative.h linux-2.6.32.42/arch/x86/include/asm/alternative.h
6817 --- linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6818 +++ linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6819 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6820 " .byte 662b-661b\n" /* sourcelen */ \
6821 " .byte 664f-663f\n" /* replacementlen */ \
6822 ".previous\n" \
6823 - ".section .altinstr_replacement, \"ax\"\n" \
6824 + ".section .altinstr_replacement, \"a\"\n" \
6825 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6826 ".previous"
6827
6828 diff -urNp linux-2.6.32.42/arch/x86/include/asm/apm.h linux-2.6.32.42/arch/x86/include/asm/apm.h
6829 --- linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6830 +++ linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6831 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6832 __asm__ __volatile__(APM_DO_ZERO_SEGS
6833 "pushl %%edi\n\t"
6834 "pushl %%ebp\n\t"
6835 - "lcall *%%cs:apm_bios_entry\n\t"
6836 + "lcall *%%ss:apm_bios_entry\n\t"
6837 "setc %%al\n\t"
6838 "popl %%ebp\n\t"
6839 "popl %%edi\n\t"
6840 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6841 __asm__ __volatile__(APM_DO_ZERO_SEGS
6842 "pushl %%edi\n\t"
6843 "pushl %%ebp\n\t"
6844 - "lcall *%%cs:apm_bios_entry\n\t"
6845 + "lcall *%%ss:apm_bios_entry\n\t"
6846 "setc %%bl\n\t"
6847 "popl %%ebp\n\t"
6848 "popl %%edi\n\t"
6849 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_32.h linux-2.6.32.42/arch/x86/include/asm/atomic_32.h
6850 --- linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6851 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6852 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6853 }
6854
6855 /**
6856 + * atomic_read_unchecked - read atomic variable
6857 + * @v: pointer of type atomic_unchecked_t
6858 + *
6859 + * Atomically reads the value of @v.
6860 + */
6861 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6862 +{
6863 + return v->counter;
6864 +}
6865 +
6866 +/**
6867 * atomic_set - set atomic variable
6868 * @v: pointer of type atomic_t
6869 * @i: required value
6870 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6871 }
6872
6873 /**
6874 + * atomic_set_unchecked - set atomic variable
6875 + * @v: pointer of type atomic_unchecked_t
6876 + * @i: required value
6877 + *
6878 + * Atomically sets the value of @v to @i.
6879 + */
6880 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6881 +{
6882 + v->counter = i;
6883 +}
6884 +
6885 +/**
6886 * atomic_add - add integer to atomic variable
6887 * @i: integer value to add
6888 * @v: pointer of type atomic_t
6889 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6890 */
6891 static inline void atomic_add(int i, atomic_t *v)
6892 {
6893 - asm volatile(LOCK_PREFIX "addl %1,%0"
6894 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6895 +
6896 +#ifdef CONFIG_PAX_REFCOUNT
6897 + "jno 0f\n"
6898 + LOCK_PREFIX "subl %1,%0\n"
6899 + "int $4\n0:\n"
6900 + _ASM_EXTABLE(0b, 0b)
6901 +#endif
6902 +
6903 + : "+m" (v->counter)
6904 + : "ir" (i));
6905 +}
6906 +
6907 +/**
6908 + * atomic_add_unchecked - add integer to atomic variable
6909 + * @i: integer value to add
6910 + * @v: pointer of type atomic_unchecked_t
6911 + *
6912 + * Atomically adds @i to @v.
6913 + */
6914 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6915 +{
6916 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6917 : "+m" (v->counter)
6918 : "ir" (i));
6919 }
6920 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6921 */
6922 static inline void atomic_sub(int i, atomic_t *v)
6923 {
6924 - asm volatile(LOCK_PREFIX "subl %1,%0"
6925 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6926 +
6927 +#ifdef CONFIG_PAX_REFCOUNT
6928 + "jno 0f\n"
6929 + LOCK_PREFIX "addl %1,%0\n"
6930 + "int $4\n0:\n"
6931 + _ASM_EXTABLE(0b, 0b)
6932 +#endif
6933 +
6934 + : "+m" (v->counter)
6935 + : "ir" (i));
6936 +}
6937 +
6938 +/**
6939 + * atomic_sub_unchecked - subtract integer from atomic variable
6940 + * @i: integer value to subtract
6941 + * @v: pointer of type atomic_unchecked_t
6942 + *
6943 + * Atomically subtracts @i from @v.
6944 + */
6945 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6946 +{
6947 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6948 : "+m" (v->counter)
6949 : "ir" (i));
6950 }
6951 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6952 {
6953 unsigned char c;
6954
6955 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6956 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6957 +
6958 +#ifdef CONFIG_PAX_REFCOUNT
6959 + "jno 0f\n"
6960 + LOCK_PREFIX "addl %2,%0\n"
6961 + "int $4\n0:\n"
6962 + _ASM_EXTABLE(0b, 0b)
6963 +#endif
6964 +
6965 + "sete %1\n"
6966 : "+m" (v->counter), "=qm" (c)
6967 : "ir" (i) : "memory");
6968 return c;
6969 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6970 */
6971 static inline void atomic_inc(atomic_t *v)
6972 {
6973 - asm volatile(LOCK_PREFIX "incl %0"
6974 + asm volatile(LOCK_PREFIX "incl %0\n"
6975 +
6976 +#ifdef CONFIG_PAX_REFCOUNT
6977 + "jno 0f\n"
6978 + LOCK_PREFIX "decl %0\n"
6979 + "int $4\n0:\n"
6980 + _ASM_EXTABLE(0b, 0b)
6981 +#endif
6982 +
6983 + : "+m" (v->counter));
6984 +}
6985 +
6986 +/**
6987 + * atomic_inc_unchecked - increment atomic variable
6988 + * @v: pointer of type atomic_unchecked_t
6989 + *
6990 + * Atomically increments @v by 1.
6991 + */
6992 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6993 +{
6994 + asm volatile(LOCK_PREFIX "incl %0\n"
6995 : "+m" (v->counter));
6996 }
6997
6998 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
6999 */
7000 static inline void atomic_dec(atomic_t *v)
7001 {
7002 - asm volatile(LOCK_PREFIX "decl %0"
7003 + asm volatile(LOCK_PREFIX "decl %0\n"
7004 +
7005 +#ifdef CONFIG_PAX_REFCOUNT
7006 + "jno 0f\n"
7007 + LOCK_PREFIX "incl %0\n"
7008 + "int $4\n0:\n"
7009 + _ASM_EXTABLE(0b, 0b)
7010 +#endif
7011 +
7012 + : "+m" (v->counter));
7013 +}
7014 +
7015 +/**
7016 + * atomic_dec_unchecked - decrement atomic variable
7017 + * @v: pointer of type atomic_unchecked_t
7018 + *
7019 + * Atomically decrements @v by 1.
7020 + */
7021 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7022 +{
7023 + asm volatile(LOCK_PREFIX "decl %0\n"
7024 : "+m" (v->counter));
7025 }
7026
7027 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7028 {
7029 unsigned char c;
7030
7031 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7032 + asm volatile(LOCK_PREFIX "decl %0\n"
7033 +
7034 +#ifdef CONFIG_PAX_REFCOUNT
7035 + "jno 0f\n"
7036 + LOCK_PREFIX "incl %0\n"
7037 + "int $4\n0:\n"
7038 + _ASM_EXTABLE(0b, 0b)
7039 +#endif
7040 +
7041 + "sete %1\n"
7042 : "+m" (v->counter), "=qm" (c)
7043 : : "memory");
7044 return c != 0;
7045 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7046 {
7047 unsigned char c;
7048
7049 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7050 + asm volatile(LOCK_PREFIX "incl %0\n"
7051 +
7052 +#ifdef CONFIG_PAX_REFCOUNT
7053 + "jno 0f\n"
7054 + LOCK_PREFIX "decl %0\n"
7055 + "into\n0:\n"
7056 + _ASM_EXTABLE(0b, 0b)
7057 +#endif
7058 +
7059 + "sete %1\n"
7060 + : "+m" (v->counter), "=qm" (c)
7061 + : : "memory");
7062 + return c != 0;
7063 +}
7064 +
7065 +/**
7066 + * atomic_inc_and_test_unchecked - increment and test
7067 + * @v: pointer of type atomic_unchecked_t
7068 + *
7069 + * Atomically increments @v by 1
7070 + * and returns true if the result is zero, or false for all
7071 + * other cases.
7072 + */
7073 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7074 +{
7075 + unsigned char c;
7076 +
7077 + asm volatile(LOCK_PREFIX "incl %0\n"
7078 + "sete %1\n"
7079 : "+m" (v->counter), "=qm" (c)
7080 : : "memory");
7081 return c != 0;
7082 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7083 {
7084 unsigned char c;
7085
7086 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7087 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7088 +
7089 +#ifdef CONFIG_PAX_REFCOUNT
7090 + "jno 0f\n"
7091 + LOCK_PREFIX "subl %2,%0\n"
7092 + "int $4\n0:\n"
7093 + _ASM_EXTABLE(0b, 0b)
7094 +#endif
7095 +
7096 + "sets %1\n"
7097 : "+m" (v->counter), "=qm" (c)
7098 : "ir" (i) : "memory");
7099 return c;
7100 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7101 #endif
7102 /* Modern 486+ processor */
7103 __i = i;
7104 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7105 +
7106 +#ifdef CONFIG_PAX_REFCOUNT
7107 + "jno 0f\n"
7108 + "movl %0, %1\n"
7109 + "int $4\n0:\n"
7110 + _ASM_EXTABLE(0b, 0b)
7111 +#endif
7112 +
7113 + : "+r" (i), "+m" (v->counter)
7114 + : : "memory");
7115 + return i + __i;
7116 +
7117 +#ifdef CONFIG_M386
7118 +no_xadd: /* Legacy 386 processor */
7119 + local_irq_save(flags);
7120 + __i = atomic_read(v);
7121 + atomic_set(v, i + __i);
7122 + local_irq_restore(flags);
7123 + return i + __i;
7124 +#endif
7125 +}
7126 +
7127 +/**
7128 + * atomic_add_return_unchecked - add integer and return
7129 + * @v: pointer of type atomic_unchecked_t
7130 + * @i: integer value to add
7131 + *
7132 + * Atomically adds @i to @v and returns @i + @v
7133 + */
7134 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7135 +{
7136 + int __i;
7137 +#ifdef CONFIG_M386
7138 + unsigned long flags;
7139 + if (unlikely(boot_cpu_data.x86 <= 3))
7140 + goto no_xadd;
7141 +#endif
7142 + /* Modern 486+ processor */
7143 + __i = i;
7144 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7145 : "+r" (i), "+m" (v->counter)
7146 : : "memory");
7147 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7148 return cmpxchg(&v->counter, old, new);
7149 }
7150
7151 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7152 +{
7153 + return cmpxchg(&v->counter, old, new);
7154 +}
7155 +
7156 static inline int atomic_xchg(atomic_t *v, int new)
7157 {
7158 return xchg(&v->counter, new);
7159 }
7160
7161 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7162 +{
7163 + return xchg(&v->counter, new);
7164 +}
7165 +
7166 /**
7167 * atomic_add_unless - add unless the number is already a given value
7168 * @v: pointer of type atomic_t
7169 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7170 */
7171 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7172 {
7173 - int c, old;
7174 + int c, old, new;
7175 c = atomic_read(v);
7176 for (;;) {
7177 - if (unlikely(c == (u)))
7178 + if (unlikely(c == u))
7179 break;
7180 - old = atomic_cmpxchg((v), c, c + (a));
7181 +
7182 + asm volatile("addl %2,%0\n"
7183 +
7184 +#ifdef CONFIG_PAX_REFCOUNT
7185 + "jno 0f\n"
7186 + "subl %2,%0\n"
7187 + "int $4\n0:\n"
7188 + _ASM_EXTABLE(0b, 0b)
7189 +#endif
7190 +
7191 + : "=r" (new)
7192 + : "0" (c), "ir" (a));
7193 +
7194 + old = atomic_cmpxchg(v, c, new);
7195 if (likely(old == c))
7196 break;
7197 c = old;
7198 }
7199 - return c != (u);
7200 + return c != u;
7201 }
7202
7203 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7204
7205 #define atomic_inc_return(v) (atomic_add_return(1, v))
7206 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7207 +{
7208 + return atomic_add_return_unchecked(1, v);
7209 +}
7210 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7211
7212 /* These are x86-specific, used by some header files */
7213 @@ -266,9 +495,18 @@ typedef struct {
7214 u64 __aligned(8) counter;
7215 } atomic64_t;
7216
7217 +#ifdef CONFIG_PAX_REFCOUNT
7218 +typedef struct {
7219 + u64 __aligned(8) counter;
7220 +} atomic64_unchecked_t;
7221 +#else
7222 +typedef atomic64_t atomic64_unchecked_t;
7223 +#endif
7224 +
7225 #define ATOMIC64_INIT(val) { (val) }
7226
7227 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7228 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7229
7230 /**
7231 * atomic64_xchg - xchg atomic64 variable
7232 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7233 * the old value.
7234 */
7235 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7236 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7237
7238 /**
7239 * atomic64_set - set atomic64 variable
7240 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7241 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7242
7243 /**
7244 + * atomic64_unchecked_set - set atomic64 variable
7245 + * @ptr: pointer to type atomic64_unchecked_t
7246 + * @new_val: value to assign
7247 + *
7248 + * Atomically sets the value of @ptr to @new_val.
7249 + */
7250 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7251 +
7252 +/**
7253 * atomic64_read - read atomic64 variable
7254 * @ptr: pointer to type atomic64_t
7255 *
7256 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7257 return res;
7258 }
7259
7260 -extern u64 atomic64_read(atomic64_t *ptr);
7261 +/**
7262 + * atomic64_read_unchecked - read atomic64 variable
7263 + * @ptr: pointer to type atomic64_unchecked_t
7264 + *
7265 + * Atomically reads the value of @ptr and returns it.
7266 + */
7267 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7268 +{
7269 + u64 res;
7270 +
7271 + /*
7272 + * Note, we inline this atomic64_unchecked_t primitive because
7273 + * it only clobbers EAX/EDX and leaves the others
7274 + * untouched. We also (somewhat subtly) rely on the
7275 + * fact that cmpxchg8b returns the current 64-bit value
7276 + * of the memory location we are touching:
7277 + */
7278 + asm volatile(
7279 + "mov %%ebx, %%eax\n\t"
7280 + "mov %%ecx, %%edx\n\t"
7281 + LOCK_PREFIX "cmpxchg8b %1\n"
7282 + : "=&A" (res)
7283 + : "m" (*ptr)
7284 + );
7285 +
7286 + return res;
7287 +}
7288
7289 /**
7290 * atomic64_add_return - add and return
7291 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7292 * Other variants with different arithmetic operators:
7293 */
7294 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7295 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7296 extern u64 atomic64_inc_return(atomic64_t *ptr);
7297 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7298 extern u64 atomic64_dec_return(atomic64_t *ptr);
7299 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7300
7301 /**
7302 * atomic64_add - add integer to atomic64 variable
7303 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7304 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7305
7306 /**
7307 + * atomic64_add_unchecked - add integer to atomic64 variable
7308 + * @delta: integer value to add
7309 + * @ptr: pointer to type atomic64_unchecked_t
7310 + *
7311 + * Atomically adds @delta to @ptr.
7312 + */
7313 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7314 +
7315 +/**
7316 * atomic64_sub - subtract the atomic64 variable
7317 * @delta: integer value to subtract
7318 * @ptr: pointer to type atomic64_t
7319 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7320 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7321
7322 /**
7323 + * atomic64_sub_unchecked - subtract the atomic64 variable
7324 + * @delta: integer value to subtract
7325 + * @ptr: pointer to type atomic64_unchecked_t
7326 + *
7327 + * Atomically subtracts @delta from @ptr.
7328 + */
7329 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7330 +
7331 +/**
7332 * atomic64_sub_and_test - subtract value from variable and test result
7333 * @delta: integer value to subtract
7334 * @ptr: pointer to type atomic64_t
7335 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7336 extern void atomic64_inc(atomic64_t *ptr);
7337
7338 /**
7339 + * atomic64_inc_unchecked - increment atomic64 variable
7340 + * @ptr: pointer to type atomic64_unchecked_t
7341 + *
7342 + * Atomically increments @ptr by 1.
7343 + */
7344 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7345 +
7346 +/**
7347 * atomic64_dec - decrement atomic64 variable
7348 * @ptr: pointer to type atomic64_t
7349 *
7350 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7351 extern void atomic64_dec(atomic64_t *ptr);
7352
7353 /**
7354 + * atomic64_dec_unchecked - decrement atomic64 variable
7355 + * @ptr: pointer to type atomic64_unchecked_t
7356 + *
7357 + * Atomically decrements @ptr by 1.
7358 + */
7359 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7360 +
7361 +/**
7362 * atomic64_dec_and_test - decrement and test
7363 * @ptr: pointer to type atomic64_t
7364 *
7365 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_64.h linux-2.6.32.42/arch/x86/include/asm/atomic_64.h
7366 --- linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7367 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7368 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7369 }
7370
7371 /**
7372 + * atomic_read_unchecked - read atomic variable
7373 + * @v: pointer of type atomic_unchecked_t
7374 + *
7375 + * Atomically reads the value of @v.
7376 + */
7377 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7378 +{
7379 + return v->counter;
7380 +}
7381 +
7382 +/**
7383 * atomic_set - set atomic variable
7384 * @v: pointer of type atomic_t
7385 * @i: required value
7386 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7387 }
7388
7389 /**
7390 + * atomic_set_unchecked - set atomic variable
7391 + * @v: pointer of type atomic_unchecked_t
7392 + * @i: required value
7393 + *
7394 + * Atomically sets the value of @v to @i.
7395 + */
7396 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7397 +{
7398 + v->counter = i;
7399 +}
7400 +
7401 +/**
7402 * atomic_add - add integer to atomic variable
7403 * @i: integer value to add
7404 * @v: pointer of type atomic_t
7405 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7406 */
7407 static inline void atomic_add(int i, atomic_t *v)
7408 {
7409 - asm volatile(LOCK_PREFIX "addl %1,%0"
7410 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7411 +
7412 +#ifdef CONFIG_PAX_REFCOUNT
7413 + "jno 0f\n"
7414 + LOCK_PREFIX "subl %1,%0\n"
7415 + "int $4\n0:\n"
7416 + _ASM_EXTABLE(0b, 0b)
7417 +#endif
7418 +
7419 + : "=m" (v->counter)
7420 + : "ir" (i), "m" (v->counter));
7421 +}
7422 +
7423 +/**
7424 + * atomic_add_unchecked - add integer to atomic variable
7425 + * @i: integer value to add
7426 + * @v: pointer of type atomic_unchecked_t
7427 + *
7428 + * Atomically adds @i to @v.
7429 + */
7430 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7431 +{
7432 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7433 : "=m" (v->counter)
7434 : "ir" (i), "m" (v->counter));
7435 }
7436 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7437 */
7438 static inline void atomic_sub(int i, atomic_t *v)
7439 {
7440 - asm volatile(LOCK_PREFIX "subl %1,%0"
7441 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7442 +
7443 +#ifdef CONFIG_PAX_REFCOUNT
7444 + "jno 0f\n"
7445 + LOCK_PREFIX "addl %1,%0\n"
7446 + "int $4\n0:\n"
7447 + _ASM_EXTABLE(0b, 0b)
7448 +#endif
7449 +
7450 + : "=m" (v->counter)
7451 + : "ir" (i), "m" (v->counter));
7452 +}
7453 +
7454 +/**
7455 + * atomic_sub_unchecked - subtract the atomic variable
7456 + * @i: integer value to subtract
7457 + * @v: pointer of type atomic_unchecked_t
7458 + *
7459 + * Atomically subtracts @i from @v.
7460 + */
7461 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7462 +{
7463 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7464 : "=m" (v->counter)
7465 : "ir" (i), "m" (v->counter));
7466 }
7467 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7468 {
7469 unsigned char c;
7470
7471 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7472 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7473 +
7474 +#ifdef CONFIG_PAX_REFCOUNT
7475 + "jno 0f\n"
7476 + LOCK_PREFIX "addl %2,%0\n"
7477 + "int $4\n0:\n"
7478 + _ASM_EXTABLE(0b, 0b)
7479 +#endif
7480 +
7481 + "sete %1\n"
7482 : "=m" (v->counter), "=qm" (c)
7483 : "ir" (i), "m" (v->counter) : "memory");
7484 return c;
7485 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7486 */
7487 static inline void atomic_inc(atomic_t *v)
7488 {
7489 - asm volatile(LOCK_PREFIX "incl %0"
7490 + asm volatile(LOCK_PREFIX "incl %0\n"
7491 +
7492 +#ifdef CONFIG_PAX_REFCOUNT
7493 + "jno 0f\n"
7494 + LOCK_PREFIX "decl %0\n"
7495 + "int $4\n0:\n"
7496 + _ASM_EXTABLE(0b, 0b)
7497 +#endif
7498 +
7499 + : "=m" (v->counter)
7500 + : "m" (v->counter));
7501 +}
7502 +
7503 +/**
7504 + * atomic_inc_unchecked - increment atomic variable
7505 + * @v: pointer of type atomic_unchecked_t
7506 + *
7507 + * Atomically increments @v by 1.
7508 + */
7509 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7510 +{
7511 + asm volatile(LOCK_PREFIX "incl %0\n"
7512 : "=m" (v->counter)
7513 : "m" (v->counter));
7514 }
7515 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7516 */
7517 static inline void atomic_dec(atomic_t *v)
7518 {
7519 - asm volatile(LOCK_PREFIX "decl %0"
7520 + asm volatile(LOCK_PREFIX "decl %0\n"
7521 +
7522 +#ifdef CONFIG_PAX_REFCOUNT
7523 + "jno 0f\n"
7524 + LOCK_PREFIX "incl %0\n"
7525 + "int $4\n0:\n"
7526 + _ASM_EXTABLE(0b, 0b)
7527 +#endif
7528 +
7529 + : "=m" (v->counter)
7530 + : "m" (v->counter));
7531 +}
7532 +
7533 +/**
7534 + * atomic_dec_unchecked - decrement atomic variable
7535 + * @v: pointer of type atomic_unchecked_t
7536 + *
7537 + * Atomically decrements @v by 1.
7538 + */
7539 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7540 +{
7541 + asm volatile(LOCK_PREFIX "decl %0\n"
7542 : "=m" (v->counter)
7543 : "m" (v->counter));
7544 }
7545 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7546 {
7547 unsigned char c;
7548
7549 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7550 + asm volatile(LOCK_PREFIX "decl %0\n"
7551 +
7552 +#ifdef CONFIG_PAX_REFCOUNT
7553 + "jno 0f\n"
7554 + LOCK_PREFIX "incl %0\n"
7555 + "int $4\n0:\n"
7556 + _ASM_EXTABLE(0b, 0b)
7557 +#endif
7558 +
7559 + "sete %1\n"
7560 : "=m" (v->counter), "=qm" (c)
7561 : "m" (v->counter) : "memory");
7562 return c != 0;
7563 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7564 {
7565 unsigned char c;
7566
7567 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7568 + asm volatile(LOCK_PREFIX "incl %0\n"
7569 +
7570 +#ifdef CONFIG_PAX_REFCOUNT
7571 + "jno 0f\n"
7572 + LOCK_PREFIX "decl %0\n"
7573 + "int $4\n0:\n"
7574 + _ASM_EXTABLE(0b, 0b)
7575 +#endif
7576 +
7577 + "sete %1\n"
7578 + : "=m" (v->counter), "=qm" (c)
7579 + : "m" (v->counter) : "memory");
7580 + return c != 0;
7581 +}
7582 +
7583 +/**
7584 + * atomic_inc_and_test_unchecked - increment and test
7585 + * @v: pointer of type atomic_unchecked_t
7586 + *
7587 + * Atomically increments @v by 1
7588 + * and returns true if the result is zero, or false for all
7589 + * other cases.
7590 + */
7591 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7592 +{
7593 + unsigned char c;
7594 +
7595 + asm volatile(LOCK_PREFIX "incl %0\n"
7596 + "sete %1\n"
7597 : "=m" (v->counter), "=qm" (c)
7598 : "m" (v->counter) : "memory");
7599 return c != 0;
7600 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7601 {
7602 unsigned char c;
7603
7604 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7605 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7606 +
7607 +#ifdef CONFIG_PAX_REFCOUNT
7608 + "jno 0f\n"
7609 + LOCK_PREFIX "subl %2,%0\n"
7610 + "int $4\n0:\n"
7611 + _ASM_EXTABLE(0b, 0b)
7612 +#endif
7613 +
7614 + "sets %1\n"
7615 : "=m" (v->counter), "=qm" (c)
7616 : "ir" (i), "m" (v->counter) : "memory");
7617 return c;
7618 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7619 static inline int atomic_add_return(int i, atomic_t *v)
7620 {
7621 int __i = i;
7622 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7623 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7624 +
7625 +#ifdef CONFIG_PAX_REFCOUNT
7626 + "jno 0f\n"
7627 + "movl %0, %1\n"
7628 + "int $4\n0:\n"
7629 + _ASM_EXTABLE(0b, 0b)
7630 +#endif
7631 +
7632 + : "+r" (i), "+m" (v->counter)
7633 + : : "memory");
7634 + return i + __i;
7635 +}
7636 +
7637 +/**
7638 + * atomic_add_return_unchecked - add and return
7639 + * @i: integer value to add
7640 + * @v: pointer of type atomic_unchecked_t
7641 + *
7642 + * Atomically adds @i to @v and returns @i + @v
7643 + */
7644 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7645 +{
7646 + int __i = i;
7647 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7648 : "+r" (i), "+m" (v->counter)
7649 : : "memory");
7650 return i + __i;
7651 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7652 }
7653
7654 #define atomic_inc_return(v) (atomic_add_return(1, v))
7655 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7656 +{
7657 + return atomic_add_return_unchecked(1, v);
7658 +}
7659 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7660
7661 /* The 64-bit atomic type */
7662 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7663 }
7664
7665 /**
7666 + * atomic64_read_unchecked - read atomic64 variable
7667 + * @v: pointer of type atomic64_unchecked_t
7668 + *
7669 + * Atomically reads the value of @v.
7670 + * Doesn't imply a read memory barrier.
7671 + */
7672 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7673 +{
7674 + return v->counter;
7675 +}
7676 +
7677 +/**
7678 * atomic64_set - set atomic64 variable
7679 * @v: pointer to type atomic64_t
7680 * @i: required value
7681 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7682 }
7683
7684 /**
7685 + * atomic64_set_unchecked - set atomic64 variable
7686 + * @v: pointer to type atomic64_unchecked_t
7687 + * @i: required value
7688 + *
7689 + * Atomically sets the value of @v to @i.
7690 + */
7691 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7692 +{
7693 + v->counter = i;
7694 +}
7695 +
7696 +/**
7697 * atomic64_add - add integer to atomic64 variable
7698 * @i: integer value to add
7699 * @v: pointer to type atomic64_t
7700 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7701 */
7702 static inline void atomic64_add(long i, atomic64_t *v)
7703 {
7704 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7705 +
7706 +#ifdef CONFIG_PAX_REFCOUNT
7707 + "jno 0f\n"
7708 + LOCK_PREFIX "subq %1,%0\n"
7709 + "int $4\n0:\n"
7710 + _ASM_EXTABLE(0b, 0b)
7711 +#endif
7712 +
7713 + : "=m" (v->counter)
7714 + : "er" (i), "m" (v->counter));
7715 +}
7716 +
7717 +/**
7718 + * atomic64_add_unchecked - add integer to atomic64 variable
7719 + * @i: integer value to add
7720 + * @v: pointer to type atomic64_unchecked_t
7721 + *
7722 + * Atomically adds @i to @v.
7723 + */
7724 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7725 +{
7726 asm volatile(LOCK_PREFIX "addq %1,%0"
7727 : "=m" (v->counter)
7728 : "er" (i), "m" (v->counter));
7729 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7730 */
7731 static inline void atomic64_sub(long i, atomic64_t *v)
7732 {
7733 - asm volatile(LOCK_PREFIX "subq %1,%0"
7734 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7735 +
7736 +#ifdef CONFIG_PAX_REFCOUNT
7737 + "jno 0f\n"
7738 + LOCK_PREFIX "addq %1,%0\n"
7739 + "int $4\n0:\n"
7740 + _ASM_EXTABLE(0b, 0b)
7741 +#endif
7742 +
7743 : "=m" (v->counter)
7744 : "er" (i), "m" (v->counter));
7745 }
7746 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7747 {
7748 unsigned char c;
7749
7750 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7751 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7752 +
7753 +#ifdef CONFIG_PAX_REFCOUNT
7754 + "jno 0f\n"
7755 + LOCK_PREFIX "addq %2,%0\n"
7756 + "int $4\n0:\n"
7757 + _ASM_EXTABLE(0b, 0b)
7758 +#endif
7759 +
7760 + "sete %1\n"
7761 : "=m" (v->counter), "=qm" (c)
7762 : "er" (i), "m" (v->counter) : "memory");
7763 return c;
7764 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7765 */
7766 static inline void atomic64_inc(atomic64_t *v)
7767 {
7768 + asm volatile(LOCK_PREFIX "incq %0\n"
7769 +
7770 +#ifdef CONFIG_PAX_REFCOUNT
7771 + "jno 0f\n"
7772 + LOCK_PREFIX "decq %0\n"
7773 + "int $4\n0:\n"
7774 + _ASM_EXTABLE(0b, 0b)
7775 +#endif
7776 +
7777 + : "=m" (v->counter)
7778 + : "m" (v->counter));
7779 +}
7780 +
7781 +/**
7782 + * atomic64_inc_unchecked - increment atomic64 variable
7783 + * @v: pointer to type atomic64_unchecked_t
7784 + *
7785 + * Atomically increments @v by 1.
7786 + */
7787 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7788 +{
7789 asm volatile(LOCK_PREFIX "incq %0"
7790 : "=m" (v->counter)
7791 : "m" (v->counter));
7792 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7793 */
7794 static inline void atomic64_dec(atomic64_t *v)
7795 {
7796 - asm volatile(LOCK_PREFIX "decq %0"
7797 + asm volatile(LOCK_PREFIX "decq %0\n"
7798 +
7799 +#ifdef CONFIG_PAX_REFCOUNT
7800 + "jno 0f\n"
7801 + LOCK_PREFIX "incq %0\n"
7802 + "int $4\n0:\n"
7803 + _ASM_EXTABLE(0b, 0b)
7804 +#endif
7805 +
7806 + : "=m" (v->counter)
7807 + : "m" (v->counter));
7808 +}
7809 +
7810 +/**
7811 + * atomic64_dec_unchecked - decrement atomic64 variable
7812 + * @v: pointer to type atomic64_unchecked_t
7813 + *
7814 + * Atomically decrements @v by 1.
7815 + */
7816 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7817 +{
7818 + asm volatile(LOCK_PREFIX "decq %0\n"
7819 : "=m" (v->counter)
7820 : "m" (v->counter));
7821 }
7822 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7823 {
7824 unsigned char c;
7825
7826 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7827 + asm volatile(LOCK_PREFIX "decq %0\n"
7828 +
7829 +#ifdef CONFIG_PAX_REFCOUNT
7830 + "jno 0f\n"
7831 + LOCK_PREFIX "incq %0\n"
7832 + "int $4\n0:\n"
7833 + _ASM_EXTABLE(0b, 0b)
7834 +#endif
7835 +
7836 + "sete %1\n"
7837 : "=m" (v->counter), "=qm" (c)
7838 : "m" (v->counter) : "memory");
7839 return c != 0;
7840 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7841 {
7842 unsigned char c;
7843
7844 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7845 + asm volatile(LOCK_PREFIX "incq %0\n"
7846 +
7847 +#ifdef CONFIG_PAX_REFCOUNT
7848 + "jno 0f\n"
7849 + LOCK_PREFIX "decq %0\n"
7850 + "int $4\n0:\n"
7851 + _ASM_EXTABLE(0b, 0b)
7852 +#endif
7853 +
7854 + "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "m" (v->counter) : "memory");
7857 return c != 0;
7858 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7859 {
7860 unsigned char c;
7861
7862 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7863 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7864 +
7865 +#ifdef CONFIG_PAX_REFCOUNT
7866 + "jno 0f\n"
7867 + LOCK_PREFIX "subq %2,%0\n"
7868 + "int $4\n0:\n"
7869 + _ASM_EXTABLE(0b, 0b)
7870 +#endif
7871 +
7872 + "sets %1\n"
7873 : "=m" (v->counter), "=qm" (c)
7874 : "er" (i), "m" (v->counter) : "memory");
7875 return c;
7876 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7877 static inline long atomic64_add_return(long i, atomic64_t *v)
7878 {
7879 long __i = i;
7880 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7881 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7882 +
7883 +#ifdef CONFIG_PAX_REFCOUNT
7884 + "jno 0f\n"
7885 + "movq %0, %1\n"
7886 + "int $4\n0:\n"
7887 + _ASM_EXTABLE(0b, 0b)
7888 +#endif
7889 +
7890 + : "+r" (i), "+m" (v->counter)
7891 + : : "memory");
7892 + return i + __i;
7893 +}
7894 +
7895 +/**
7896 + * atomic64_add_return_unchecked - add and return
7897 + * @i: integer value to add
7898 + * @v: pointer to type atomic64_unchecked_t
7899 + *
7900 + * Atomically adds @i to @v and returns @i + @v
7901 + */
7902 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7903 +{
7904 + long __i = i;
7905 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7906 : "+r" (i), "+m" (v->counter)
7907 : : "memory");
7908 return i + __i;
7909 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7910 }
7911
7912 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7913 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7914 +{
7915 + return atomic64_add_return_unchecked(1, v);
7916 +}
7917 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7918
7919 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7920 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7921 return cmpxchg(&v->counter, old, new);
7922 }
7923
7924 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7925 +{
7926 + return cmpxchg(&v->counter, old, new);
7927 +}
7928 +
7929 static inline long atomic64_xchg(atomic64_t *v, long new)
7930 {
7931 return xchg(&v->counter, new);
7932 }
7933
7934 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7935 +{
7936 + return xchg(&v->counter, new);
7937 +}
7938 +
7939 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7940 {
7941 return cmpxchg(&v->counter, old, new);
7942 }
7943
7944 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7945 +{
7946 + return cmpxchg(&v->counter, old, new);
7947 +}
7948 +
7949 static inline long atomic_xchg(atomic_t *v, int new)
7950 {
7951 return xchg(&v->counter, new);
7952 }
7953
7954 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7955 +{
7956 + return xchg(&v->counter, new);
7957 +}
7958 +
7959 /**
7960 * atomic_add_unless - add unless the number is a given value
7961 * @v: pointer of type atomic_t
7962 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7963 */
7964 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7965 {
7966 - int c, old;
7967 + int c, old, new;
7968 c = atomic_read(v);
7969 for (;;) {
7970 - if (unlikely(c == (u)))
7971 + if (unlikely(c == u))
7972 break;
7973 - old = atomic_cmpxchg((v), c, c + (a));
7974 +
7975 + asm volatile("addl %2,%0\n"
7976 +
7977 +#ifdef CONFIG_PAX_REFCOUNT
7978 + "jno 0f\n"
7979 + "subl %2,%0\n"
7980 + "int $4\n0:\n"
7981 + _ASM_EXTABLE(0b, 0b)
7982 +#endif
7983 +
7984 + : "=r" (new)
7985 + : "0" (c), "ir" (a));
7986 +
7987 + old = atomic_cmpxchg(v, c, new);
7988 if (likely(old == c))
7989 break;
7990 c = old;
7991 }
7992 - return c != (u);
7993 + return c != u;
7994 }
7995
7996 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7997 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
7998 */
7999 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8000 {
8001 - long c, old;
8002 + long c, old, new;
8003 c = atomic64_read(v);
8004 for (;;) {
8005 - if (unlikely(c == (u)))
8006 + if (unlikely(c == u))
8007 break;
8008 - old = atomic64_cmpxchg((v), c, c + (a));
8009 +
8010 + asm volatile("addq %2,%0\n"
8011 +
8012 +#ifdef CONFIG_PAX_REFCOUNT
8013 + "jno 0f\n"
8014 + "subq %2,%0\n"
8015 + "int $4\n0:\n"
8016 + _ASM_EXTABLE(0b, 0b)
8017 +#endif
8018 +
8019 + : "=r" (new)
8020 + : "0" (c), "er" (a));
8021 +
8022 + old = atomic64_cmpxchg(v, c, new);
8023 if (likely(old == c))
8024 break;
8025 c = old;
8026 }
8027 - return c != (u);
8028 + return c != u;
8029 }
8030
8031 /**
8032 diff -urNp linux-2.6.32.42/arch/x86/include/asm/bitops.h linux-2.6.32.42/arch/x86/include/asm/bitops.h
8033 --- linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8034 +++ linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8035 @@ -38,7 +38,7 @@
8036 * a mask operation on a byte.
8037 */
8038 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8039 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8040 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8041 #define CONST_MASK(nr) (1 << ((nr) & 7))
8042
8043 /**
8044 diff -urNp linux-2.6.32.42/arch/x86/include/asm/boot.h linux-2.6.32.42/arch/x86/include/asm/boot.h
8045 --- linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8046 +++ linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8047 @@ -11,10 +11,15 @@
8048 #include <asm/pgtable_types.h>
8049
8050 /* Physical address where kernel should be loaded. */
8051 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8052 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8053 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8054 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8055
8056 +#ifndef __ASSEMBLY__
8057 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8058 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8059 +#endif
8060 +
8061 /* Minimum kernel alignment, as a power of two */
8062 #ifdef CONFIG_X86_64
8063 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8064 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cacheflush.h linux-2.6.32.42/arch/x86/include/asm/cacheflush.h
8065 --- linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8066 +++ linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8067 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8068 static inline unsigned long get_page_memtype(struct page *pg)
8069 {
8070 if (!PageUncached(pg) && !PageWC(pg))
8071 - return -1;
8072 + return ~0UL;
8073 else if (!PageUncached(pg) && PageWC(pg))
8074 return _PAGE_CACHE_WC;
8075 else if (PageUncached(pg) && !PageWC(pg))
8076 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8077 SetPageWC(pg);
8078 break;
8079 default:
8080 - case -1:
8081 + case ~0UL:
8082 ClearPageUncached(pg);
8083 ClearPageWC(pg);
8084 break;
8085 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cache.h linux-2.6.32.42/arch/x86/include/asm/cache.h
8086 --- linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8087 +++ linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
8088 @@ -5,9 +5,10 @@
8089
8090 /* L1 cache line size */
8091 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8092 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8093 +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
8094
8095 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8096 +#define __read_only __attribute__((__section__(".data.read_only")))
8097
8098 #ifdef CONFIG_X86_VSMP
8099 /* vSMP Internode cacheline shift */
8100 diff -urNp linux-2.6.32.42/arch/x86/include/asm/checksum_32.h linux-2.6.32.42/arch/x86/include/asm/checksum_32.h
8101 --- linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8102 +++ linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8103 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8104 int len, __wsum sum,
8105 int *src_err_ptr, int *dst_err_ptr);
8106
8107 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8108 + int len, __wsum sum,
8109 + int *src_err_ptr, int *dst_err_ptr);
8110 +
8111 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8112 + int len, __wsum sum,
8113 + int *src_err_ptr, int *dst_err_ptr);
8114 +
8115 /*
8116 * Note: when you get a NULL pointer exception here this means someone
8117 * passed in an incorrect kernel address to one of these functions.
8118 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8119 int *err_ptr)
8120 {
8121 might_sleep();
8122 - return csum_partial_copy_generic((__force void *)src, dst,
8123 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8124 len, sum, err_ptr, NULL);
8125 }
8126
8127 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8128 {
8129 might_sleep();
8130 if (access_ok(VERIFY_WRITE, dst, len))
8131 - return csum_partial_copy_generic(src, (__force void *)dst,
8132 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8133 len, sum, NULL, err_ptr);
8134
8135 if (len)
8136 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc_defs.h linux-2.6.32.42/arch/x86/include/asm/desc_defs.h
8137 --- linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8138 +++ linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8139 @@ -31,6 +31,12 @@ struct desc_struct {
8140 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8141 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8142 };
8143 + struct {
8144 + u16 offset_low;
8145 + u16 seg;
8146 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8147 + unsigned offset_high: 16;
8148 + } gate;
8149 };
8150 } __attribute__((packed));
8151
8152 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc.h linux-2.6.32.42/arch/x86/include/asm/desc.h
8153 --- linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8154 +++ linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8155 @@ -4,6 +4,7 @@
8156 #include <asm/desc_defs.h>
8157 #include <asm/ldt.h>
8158 #include <asm/mmu.h>
8159 +#include <asm/pgtable.h>
8160 #include <linux/smp.h>
8161
8162 static inline void fill_ldt(struct desc_struct *desc,
8163 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8164 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8165 desc->type = (info->read_exec_only ^ 1) << 1;
8166 desc->type |= info->contents << 2;
8167 + desc->type |= info->seg_not_present ^ 1;
8168 desc->s = 1;
8169 desc->dpl = 0x3;
8170 desc->p = info->seg_not_present ^ 1;
8171 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8172 }
8173
8174 extern struct desc_ptr idt_descr;
8175 -extern gate_desc idt_table[];
8176 -
8177 -struct gdt_page {
8178 - struct desc_struct gdt[GDT_ENTRIES];
8179 -} __attribute__((aligned(PAGE_SIZE)));
8180 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8181 +extern gate_desc idt_table[256];
8182
8183 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8184 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8185 {
8186 - return per_cpu(gdt_page, cpu).gdt;
8187 + return cpu_gdt_table[cpu];
8188 }
8189
8190 #ifdef CONFIG_X86_64
8191 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8192 unsigned long base, unsigned dpl, unsigned flags,
8193 unsigned short seg)
8194 {
8195 - gate->a = (seg << 16) | (base & 0xffff);
8196 - gate->b = (base & 0xffff0000) |
8197 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8198 + gate->gate.offset_low = base;
8199 + gate->gate.seg = seg;
8200 + gate->gate.reserved = 0;
8201 + gate->gate.type = type;
8202 + gate->gate.s = 0;
8203 + gate->gate.dpl = dpl;
8204 + gate->gate.p = 1;
8205 + gate->gate.offset_high = base >> 16;
8206 }
8207
8208 #endif
8209 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8210 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8211 const gate_desc *gate)
8212 {
8213 + pax_open_kernel();
8214 memcpy(&idt[entry], gate, sizeof(*gate));
8215 + pax_close_kernel();
8216 }
8217
8218 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8219 const void *desc)
8220 {
8221 + pax_open_kernel();
8222 memcpy(&ldt[entry], desc, 8);
8223 + pax_close_kernel();
8224 }
8225
8226 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8227 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8228 size = sizeof(struct desc_struct);
8229 break;
8230 }
8231 +
8232 + pax_open_kernel();
8233 memcpy(&gdt[entry], desc, size);
8234 + pax_close_kernel();
8235 }
8236
8237 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8238 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8239
8240 static inline void native_load_tr_desc(void)
8241 {
8242 + pax_open_kernel();
8243 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8244 + pax_close_kernel();
8245 }
8246
8247 static inline void native_load_gdt(const struct desc_ptr *dtr)
8248 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8249 unsigned int i;
8250 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8251
8252 + pax_open_kernel();
8253 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8254 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8255 + pax_close_kernel();
8256 }
8257
8258 #define _LDT_empty(info) \
8259 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8260 desc->limit = (limit >> 16) & 0xf;
8261 }
8262
8263 -static inline void _set_gate(int gate, unsigned type, void *addr,
8264 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8265 unsigned dpl, unsigned ist, unsigned seg)
8266 {
8267 gate_desc s;
8268 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8269 * Pentium F0 0F bugfix can have resulted in the mapped
8270 * IDT being write-protected.
8271 */
8272 -static inline void set_intr_gate(unsigned int n, void *addr)
8273 +static inline void set_intr_gate(unsigned int n, const void *addr)
8274 {
8275 BUG_ON((unsigned)n > 0xFF);
8276 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8277 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8278 /*
8279 * This routine sets up an interrupt gate at directory privilege level 3.
8280 */
8281 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8282 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8283 {
8284 BUG_ON((unsigned)n > 0xFF);
8285 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8286 }
8287
8288 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8289 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8290 {
8291 BUG_ON((unsigned)n > 0xFF);
8292 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8293 }
8294
8295 -static inline void set_trap_gate(unsigned int n, void *addr)
8296 +static inline void set_trap_gate(unsigned int n, const void *addr)
8297 {
8298 BUG_ON((unsigned)n > 0xFF);
8299 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8300 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8301 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8302 {
8303 BUG_ON((unsigned)n > 0xFF);
8304 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8305 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8306 }
8307
8308 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8309 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8310 {
8311 BUG_ON((unsigned)n > 0xFF);
8312 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8313 }
8314
8315 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8316 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8317 {
8318 BUG_ON((unsigned)n > 0xFF);
8319 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8320 }
8321
8322 +#ifdef CONFIG_X86_32
8323 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8324 +{
8325 + struct desc_struct d;
8326 +
8327 + if (likely(limit))
8328 + limit = (limit - 1UL) >> PAGE_SHIFT;
8329 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8330 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8331 +}
8332 +#endif
8333 +
8334 #endif /* _ASM_X86_DESC_H */
8335 diff -urNp linux-2.6.32.42/arch/x86/include/asm/device.h linux-2.6.32.42/arch/x86/include/asm/device.h
8336 --- linux-2.6.32.42/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8337 +++ linux-2.6.32.42/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8338 @@ -6,7 +6,7 @@ struct dev_archdata {
8339 void *acpi_handle;
8340 #endif
8341 #ifdef CONFIG_X86_64
8342 -struct dma_map_ops *dma_ops;
8343 + const struct dma_map_ops *dma_ops;
8344 #endif
8345 #ifdef CONFIG_DMAR
8346 void *iommu; /* hook for IOMMU specific extension */
8347 diff -urNp linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h
8348 --- linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8349 +++ linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8350 @@ -25,9 +25,9 @@ extern int iommu_merge;
8351 extern struct device x86_dma_fallback_dev;
8352 extern int panic_on_overflow;
8353
8354 -extern struct dma_map_ops *dma_ops;
8355 +extern const struct dma_map_ops *dma_ops;
8356
8357 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8358 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8359 {
8360 #ifdef CONFIG_X86_32
8361 return dma_ops;
8362 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8363 /* Make sure we keep the same behaviour */
8364 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8365 {
8366 - struct dma_map_ops *ops = get_dma_ops(dev);
8367 + const struct dma_map_ops *ops = get_dma_ops(dev);
8368 if (ops->mapping_error)
8369 return ops->mapping_error(dev, dma_addr);
8370
8371 @@ -122,7 +122,7 @@ static inline void *
8372 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8373 gfp_t gfp)
8374 {
8375 - struct dma_map_ops *ops = get_dma_ops(dev);
8376 + const struct dma_map_ops *ops = get_dma_ops(dev);
8377 void *memory;
8378
8379 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8380 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8381 static inline void dma_free_coherent(struct device *dev, size_t size,
8382 void *vaddr, dma_addr_t bus)
8383 {
8384 - struct dma_map_ops *ops = get_dma_ops(dev);
8385 + const struct dma_map_ops *ops = get_dma_ops(dev);
8386
8387 WARN_ON(irqs_disabled()); /* for portability */
8388
8389 diff -urNp linux-2.6.32.42/arch/x86/include/asm/e820.h linux-2.6.32.42/arch/x86/include/asm/e820.h
8390 --- linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8391 +++ linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8392 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8393 #define ISA_END_ADDRESS 0x100000
8394 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8395
8396 -#define BIOS_BEGIN 0x000a0000
8397 +#define BIOS_BEGIN 0x000c0000
8398 #define BIOS_END 0x00100000
8399
8400 #ifdef __KERNEL__
8401 diff -urNp linux-2.6.32.42/arch/x86/include/asm/elf.h linux-2.6.32.42/arch/x86/include/asm/elf.h
8402 --- linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8403 +++ linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8404 @@ -257,7 +257,25 @@ extern int force_personality32;
8405 the loader. We need to make sure that it is out of the way of the program
8406 that it will "exec", and that there is sufficient room for the brk. */
8407
8408 +#ifdef CONFIG_PAX_SEGMEXEC
8409 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8410 +#else
8411 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8412 +#endif
8413 +
8414 +#ifdef CONFIG_PAX_ASLR
8415 +#ifdef CONFIG_X86_32
8416 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8417 +
8418 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8419 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8420 +#else
8421 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8422 +
8423 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8424 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8425 +#endif
8426 +#endif
8427
8428 /* This yields a mask that user programs can use to figure out what
8429 instruction set this CPU supports. This could be done in user space,
8430 @@ -311,8 +329,7 @@ do { \
8431 #define ARCH_DLINFO \
8432 do { \
8433 if (vdso_enabled) \
8434 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8435 - (unsigned long)current->mm->context.vdso); \
8436 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8437 } while (0)
8438
8439 #define AT_SYSINFO 32
8440 @@ -323,7 +340,7 @@ do { \
8441
8442 #endif /* !CONFIG_X86_32 */
8443
8444 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8445 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8446
8447 #define VDSO_ENTRY \
8448 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8449 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8450 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8451 #define compat_arch_setup_additional_pages syscall32_setup_pages
8452
8453 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8454 -#define arch_randomize_brk arch_randomize_brk
8455 -
8456 #endif /* _ASM_X86_ELF_H */
8457 diff -urNp linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h
8458 --- linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8459 +++ linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8460 @@ -15,6 +15,6 @@ enum reboot_type {
8461
8462 extern enum reboot_type reboot_type;
8463
8464 -extern void machine_emergency_restart(void);
8465 +extern void machine_emergency_restart(void) __noreturn;
8466
8467 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8468 diff -urNp linux-2.6.32.42/arch/x86/include/asm/futex.h linux-2.6.32.42/arch/x86/include/asm/futex.h
8469 --- linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8470 +++ linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8471 @@ -12,16 +12,18 @@
8472 #include <asm/system.h>
8473
8474 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8475 + typecheck(u32 *, uaddr); \
8476 asm volatile("1:\t" insn "\n" \
8477 "2:\t.section .fixup,\"ax\"\n" \
8478 "3:\tmov\t%3, %1\n" \
8479 "\tjmp\t2b\n" \
8480 "\t.previous\n" \
8481 _ASM_EXTABLE(1b, 3b) \
8482 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8483 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8484 : "i" (-EFAULT), "0" (oparg), "1" (0))
8485
8486 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8487 + typecheck(u32 *, uaddr); \
8488 asm volatile("1:\tmovl %2, %0\n" \
8489 "\tmovl\t%0, %3\n" \
8490 "\t" insn "\n" \
8491 @@ -34,10 +36,10 @@
8492 _ASM_EXTABLE(1b, 4b) \
8493 _ASM_EXTABLE(2b, 4b) \
8494 : "=&a" (oldval), "=&r" (ret), \
8495 - "+m" (*uaddr), "=&r" (tem) \
8496 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8497 : "r" (oparg), "i" (-EFAULT), "1" (0))
8498
8499 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8500 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8501 {
8502 int op = (encoded_op >> 28) & 7;
8503 int cmp = (encoded_op >> 24) & 15;
8504 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8505
8506 switch (op) {
8507 case FUTEX_OP_SET:
8508 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8509 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8510 break;
8511 case FUTEX_OP_ADD:
8512 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8513 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8514 uaddr, oparg);
8515 break;
8516 case FUTEX_OP_OR:
8517 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8518 return ret;
8519 }
8520
8521 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8522 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8523 int newval)
8524 {
8525
8526 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8527 return -ENOSYS;
8528 #endif
8529
8530 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8531 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8532 return -EFAULT;
8533
8534 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8535 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8536 "2:\t.section .fixup, \"ax\"\n"
8537 "3:\tmov %2, %0\n"
8538 "\tjmp 2b\n"
8539 "\t.previous\n"
8540 _ASM_EXTABLE(1b, 3b)
8541 - : "=a" (oldval), "+m" (*uaddr)
8542 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8543 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8544 : "memory"
8545 );
8546 diff -urNp linux-2.6.32.42/arch/x86/include/asm/hw_irq.h linux-2.6.32.42/arch/x86/include/asm/hw_irq.h
8547 --- linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8548 +++ linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8549 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8550 extern void enable_IO_APIC(void);
8551
8552 /* Statistics */
8553 -extern atomic_t irq_err_count;
8554 -extern atomic_t irq_mis_count;
8555 +extern atomic_unchecked_t irq_err_count;
8556 +extern atomic_unchecked_t irq_mis_count;
8557
8558 /* EISA */
8559 extern void eisa_set_level_irq(unsigned int irq);
8560 diff -urNp linux-2.6.32.42/arch/x86/include/asm/i387.h linux-2.6.32.42/arch/x86/include/asm/i387.h
8561 --- linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8562 +++ linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8563 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8564 {
8565 int err;
8566
8567 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8568 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8569 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8570 +#endif
8571 +
8572 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8573 "2:\n"
8574 ".section .fixup,\"ax\"\n"
8575 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8576 {
8577 int err;
8578
8579 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8580 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8581 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8582 +#endif
8583 +
8584 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8585 "2:\n"
8586 ".section .fixup,\"ax\"\n"
8587 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8588 }
8589
8590 /* We need a safe address that is cheap to find and that is already
8591 - in L1 during context switch. The best choices are unfortunately
8592 - different for UP and SMP */
8593 -#ifdef CONFIG_SMP
8594 -#define safe_address (__per_cpu_offset[0])
8595 -#else
8596 -#define safe_address (kstat_cpu(0).cpustat.user)
8597 -#endif
8598 + in L1 during context switch. */
8599 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8600
8601 /*
8602 * These must be called with preempt disabled
8603 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8604 struct thread_info *me = current_thread_info();
8605 preempt_disable();
8606 if (me->status & TS_USEDFPU)
8607 - __save_init_fpu(me->task);
8608 + __save_init_fpu(current);
8609 else
8610 clts();
8611 }
8612 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_32.h linux-2.6.32.42/arch/x86/include/asm/io_32.h
8613 --- linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8614 +++ linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8615 @@ -3,6 +3,7 @@
8616
8617 #include <linux/string.h>
8618 #include <linux/compiler.h>
8619 +#include <asm/processor.h>
8620
8621 /*
8622 * This file contains the definitions for the x86 IO instructions
8623 @@ -42,6 +43,17 @@
8624
8625 #ifdef __KERNEL__
8626
8627 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8628 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8629 +{
8630 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8631 +}
8632 +
8633 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8634 +{
8635 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8636 +}
8637 +
8638 #include <asm-generic/iomap.h>
8639
8640 #include <linux/vmalloc.h>
8641 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_64.h linux-2.6.32.42/arch/x86/include/asm/io_64.h
8642 --- linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8643 +++ linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8644 @@ -140,6 +140,17 @@ __OUTS(l)
8645
8646 #include <linux/vmalloc.h>
8647
8648 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8649 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8650 +{
8651 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8652 +}
8653 +
8654 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8655 +{
8656 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8657 +}
8658 +
8659 #include <asm-generic/iomap.h>
8660
8661 void __memcpy_fromio(void *, unsigned long, unsigned);
8662 diff -urNp linux-2.6.32.42/arch/x86/include/asm/iommu.h linux-2.6.32.42/arch/x86/include/asm/iommu.h
8663 --- linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8664 +++ linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8665 @@ -3,7 +3,7 @@
8666
8667 extern void pci_iommu_shutdown(void);
8668 extern void no_iommu_init(void);
8669 -extern struct dma_map_ops nommu_dma_ops;
8670 +extern const struct dma_map_ops nommu_dma_ops;
8671 extern int force_iommu, no_iommu;
8672 extern int iommu_detected;
8673 extern int iommu_pass_through;
8674 diff -urNp linux-2.6.32.42/arch/x86/include/asm/irqflags.h linux-2.6.32.42/arch/x86/include/asm/irqflags.h
8675 --- linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8676 +++ linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8677 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8678 sti; \
8679 sysexit
8680
8681 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8682 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8683 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8684 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8685 +
8686 #else
8687 #define INTERRUPT_RETURN iret
8688 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8689 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kprobes.h linux-2.6.32.42/arch/x86/include/asm/kprobes.h
8690 --- linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8691 +++ linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8692 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8693 #define BREAKPOINT_INSTRUCTION 0xcc
8694 #define RELATIVEJUMP_INSTRUCTION 0xe9
8695 #define MAX_INSN_SIZE 16
8696 -#define MAX_STACK_SIZE 64
8697 -#define MIN_STACK_SIZE(ADDR) \
8698 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8699 - THREAD_SIZE - (unsigned long)(ADDR))) \
8700 - ? (MAX_STACK_SIZE) \
8701 - : (((unsigned long)current_thread_info()) + \
8702 - THREAD_SIZE - (unsigned long)(ADDR)))
8703 +#define MAX_STACK_SIZE 64UL
8704 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8705
8706 #define flush_insn_slot(p) do { } while (0)
8707
8708 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kvm_host.h linux-2.6.32.42/arch/x86/include/asm/kvm_host.h
8709 --- linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8710 +++ linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8711 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8712 const struct trace_print_flags *exit_reasons_str;
8713 };
8714
8715 -extern struct kvm_x86_ops *kvm_x86_ops;
8716 +extern const struct kvm_x86_ops *kvm_x86_ops;
8717
8718 int kvm_mmu_module_init(void);
8719 void kvm_mmu_module_exit(void);
8720 diff -urNp linux-2.6.32.42/arch/x86/include/asm/local.h linux-2.6.32.42/arch/x86/include/asm/local.h
8721 --- linux-2.6.32.42/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8722 +++ linux-2.6.32.42/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8723 @@ -18,26 +18,58 @@ typedef struct {
8724
8725 static inline void local_inc(local_t *l)
8726 {
8727 - asm volatile(_ASM_INC "%0"
8728 + asm volatile(_ASM_INC "%0\n"
8729 +
8730 +#ifdef CONFIG_PAX_REFCOUNT
8731 + "jno 0f\n"
8732 + _ASM_DEC "%0\n"
8733 + "int $4\n0:\n"
8734 + _ASM_EXTABLE(0b, 0b)
8735 +#endif
8736 +
8737 : "+m" (l->a.counter));
8738 }
8739
8740 static inline void local_dec(local_t *l)
8741 {
8742 - asm volatile(_ASM_DEC "%0"
8743 + asm volatile(_ASM_DEC "%0\n"
8744 +
8745 +#ifdef CONFIG_PAX_REFCOUNT
8746 + "jno 0f\n"
8747 + _ASM_INC "%0\n"
8748 + "int $4\n0:\n"
8749 + _ASM_EXTABLE(0b, 0b)
8750 +#endif
8751 +
8752 : "+m" (l->a.counter));
8753 }
8754
8755 static inline void local_add(long i, local_t *l)
8756 {
8757 - asm volatile(_ASM_ADD "%1,%0"
8758 + asm volatile(_ASM_ADD "%1,%0\n"
8759 +
8760 +#ifdef CONFIG_PAX_REFCOUNT
8761 + "jno 0f\n"
8762 + _ASM_SUB "%1,%0\n"
8763 + "int $4\n0:\n"
8764 + _ASM_EXTABLE(0b, 0b)
8765 +#endif
8766 +
8767 : "+m" (l->a.counter)
8768 : "ir" (i));
8769 }
8770
8771 static inline void local_sub(long i, local_t *l)
8772 {
8773 - asm volatile(_ASM_SUB "%1,%0"
8774 + asm volatile(_ASM_SUB "%1,%0\n"
8775 +
8776 +#ifdef CONFIG_PAX_REFCOUNT
8777 + "jno 0f\n"
8778 + _ASM_ADD "%1,%0\n"
8779 + "int $4\n0:\n"
8780 + _ASM_EXTABLE(0b, 0b)
8781 +#endif
8782 +
8783 : "+m" (l->a.counter)
8784 : "ir" (i));
8785 }
8786 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8787 {
8788 unsigned char c;
8789
8790 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8791 + asm volatile(_ASM_SUB "%2,%0\n"
8792 +
8793 +#ifdef CONFIG_PAX_REFCOUNT
8794 + "jno 0f\n"
8795 + _ASM_ADD "%2,%0\n"
8796 + "int $4\n0:\n"
8797 + _ASM_EXTABLE(0b, 0b)
8798 +#endif
8799 +
8800 + "sete %1\n"
8801 : "+m" (l->a.counter), "=qm" (c)
8802 : "ir" (i) : "memory");
8803 return c;
8804 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8805 {
8806 unsigned char c;
8807
8808 - asm volatile(_ASM_DEC "%0; sete %1"
8809 + asm volatile(_ASM_DEC "%0\n"
8810 +
8811 +#ifdef CONFIG_PAX_REFCOUNT
8812 + "jno 0f\n"
8813 + _ASM_INC "%0\n"
8814 + "int $4\n0:\n"
8815 + _ASM_EXTABLE(0b, 0b)
8816 +#endif
8817 +
8818 + "sete %1\n"
8819 : "+m" (l->a.counter), "=qm" (c)
8820 : : "memory");
8821 return c != 0;
8822 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8823 {
8824 unsigned char c;
8825
8826 - asm volatile(_ASM_INC "%0; sete %1"
8827 + asm volatile(_ASM_INC "%0\n"
8828 +
8829 +#ifdef CONFIG_PAX_REFCOUNT
8830 + "jno 0f\n"
8831 + _ASM_DEC "%0\n"
8832 + "int $4\n0:\n"
8833 + _ASM_EXTABLE(0b, 0b)
8834 +#endif
8835 +
8836 + "sete %1\n"
8837 : "+m" (l->a.counter), "=qm" (c)
8838 : : "memory");
8839 return c != 0;
8840 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8841 {
8842 unsigned char c;
8843
8844 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8845 + asm volatile(_ASM_ADD "%2,%0\n"
8846 +
8847 +#ifdef CONFIG_PAX_REFCOUNT
8848 + "jno 0f\n"
8849 + _ASM_SUB "%2,%0\n"
8850 + "int $4\n0:\n"
8851 + _ASM_EXTABLE(0b, 0b)
8852 +#endif
8853 +
8854 + "sets %1\n"
8855 : "+m" (l->a.counter), "=qm" (c)
8856 : "ir" (i) : "memory");
8857 return c;
8858 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8859 #endif
8860 /* Modern 486+ processor */
8861 __i = i;
8862 - asm volatile(_ASM_XADD "%0, %1;"
8863 + asm volatile(_ASM_XADD "%0, %1\n"
8864 +
8865 +#ifdef CONFIG_PAX_REFCOUNT
8866 + "jno 0f\n"
8867 + _ASM_MOV "%0,%1\n"
8868 + "int $4\n0:\n"
8869 + _ASM_EXTABLE(0b, 0b)
8870 +#endif
8871 +
8872 : "+r" (i), "+m" (l->a.counter)
8873 : : "memory");
8874 return i + __i;
8875 diff -urNp linux-2.6.32.42/arch/x86/include/asm/microcode.h linux-2.6.32.42/arch/x86/include/asm/microcode.h
8876 --- linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8877 +++ linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8878 @@ -12,13 +12,13 @@ struct device;
8879 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8880
8881 struct microcode_ops {
8882 - enum ucode_state (*request_microcode_user) (int cpu,
8883 + enum ucode_state (* const request_microcode_user) (int cpu,
8884 const void __user *buf, size_t size);
8885
8886 - enum ucode_state (*request_microcode_fw) (int cpu,
8887 + enum ucode_state (* const request_microcode_fw) (int cpu,
8888 struct device *device);
8889
8890 - void (*microcode_fini_cpu) (int cpu);
8891 + void (* const microcode_fini_cpu) (int cpu);
8892
8893 /*
8894 * The generic 'microcode_core' part guarantees that
8895 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8896 extern struct ucode_cpu_info ucode_cpu_info[];
8897
8898 #ifdef CONFIG_MICROCODE_INTEL
8899 -extern struct microcode_ops * __init init_intel_microcode(void);
8900 +extern const struct microcode_ops * __init init_intel_microcode(void);
8901 #else
8902 -static inline struct microcode_ops * __init init_intel_microcode(void)
8903 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8904 {
8905 return NULL;
8906 }
8907 #endif /* CONFIG_MICROCODE_INTEL */
8908
8909 #ifdef CONFIG_MICROCODE_AMD
8910 -extern struct microcode_ops * __init init_amd_microcode(void);
8911 +extern const struct microcode_ops * __init init_amd_microcode(void);
8912 #else
8913 -static inline struct microcode_ops * __init init_amd_microcode(void)
8914 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8915 {
8916 return NULL;
8917 }
8918 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mman.h linux-2.6.32.42/arch/x86/include/asm/mman.h
8919 --- linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8920 +++ linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8921 @@ -5,4 +5,14 @@
8922
8923 #include <asm-generic/mman.h>
8924
8925 +#ifdef __KERNEL__
8926 +#ifndef __ASSEMBLY__
8927 +#ifdef CONFIG_X86_32
8928 +#define arch_mmap_check i386_mmap_check
8929 +int i386_mmap_check(unsigned long addr, unsigned long len,
8930 + unsigned long flags);
8931 +#endif
8932 +#endif
8933 +#endif
8934 +
8935 #endif /* _ASM_X86_MMAN_H */
8936 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu_context.h linux-2.6.32.42/arch/x86/include/asm/mmu_context.h
8937 --- linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8938 +++ linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8939 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8940
8941 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8942 {
8943 +
8944 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8945 + unsigned int i;
8946 + pgd_t *pgd;
8947 +
8948 + pax_open_kernel();
8949 + pgd = get_cpu_pgd(smp_processor_id());
8950 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8951 + if (paravirt_enabled())
8952 + set_pgd(pgd+i, native_make_pgd(0));
8953 + else
8954 + pgd[i] = native_make_pgd(0);
8955 + pax_close_kernel();
8956 +#endif
8957 +
8958 #ifdef CONFIG_SMP
8959 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8960 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8961 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8962 struct task_struct *tsk)
8963 {
8964 unsigned cpu = smp_processor_id();
8965 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8966 + int tlbstate = TLBSTATE_OK;
8967 +#endif
8968
8969 if (likely(prev != next)) {
8970 #ifdef CONFIG_SMP
8971 +#ifdef CONFIG_X86_32
8972 + tlbstate = percpu_read(cpu_tlbstate.state);
8973 +#endif
8974 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8975 percpu_write(cpu_tlbstate.active_mm, next);
8976 #endif
8977 cpumask_set_cpu(cpu, mm_cpumask(next));
8978
8979 /* Re-load page tables */
8980 +#ifdef CONFIG_PAX_PER_CPU_PGD
8981 + pax_open_kernel();
8982 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8983 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8984 + pax_close_kernel();
8985 + load_cr3(get_cpu_pgd(cpu));
8986 +#else
8987 load_cr3(next->pgd);
8988 +#endif
8989
8990 /* stop flush ipis for the previous mm */
8991 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8992 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
8993 */
8994 if (unlikely(prev->context.ldt != next->context.ldt))
8995 load_LDT_nolock(&next->context);
8996 - }
8997 +
8998 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8999 + if (!nx_enabled) {
9000 + smp_mb__before_clear_bit();
9001 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9002 + smp_mb__after_clear_bit();
9003 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9004 + }
9005 +#endif
9006 +
9007 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9008 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9009 + prev->context.user_cs_limit != next->context.user_cs_limit))
9010 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9011 #ifdef CONFIG_SMP
9012 + else if (unlikely(tlbstate != TLBSTATE_OK))
9013 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9014 +#endif
9015 +#endif
9016 +
9017 + }
9018 else {
9019 +
9020 +#ifdef CONFIG_PAX_PER_CPU_PGD
9021 + pax_open_kernel();
9022 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9023 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9024 + pax_close_kernel();
9025 + load_cr3(get_cpu_pgd(cpu));
9026 +#endif
9027 +
9028 +#ifdef CONFIG_SMP
9029 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9030 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9031
9032 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9033 * tlb flush IPI delivery. We must reload CR3
9034 * to make sure to use no freed page tables.
9035 */
9036 +
9037 +#ifndef CONFIG_PAX_PER_CPU_PGD
9038 load_cr3(next->pgd);
9039 +#endif
9040 +
9041 load_LDT_nolock(&next->context);
9042 +
9043 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9044 + if (!nx_enabled)
9045 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9046 +#endif
9047 +
9048 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9049 +#ifdef CONFIG_PAX_PAGEEXEC
9050 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9051 +#endif
9052 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9053 +#endif
9054 +
9055 }
9056 - }
9057 #endif
9058 + }
9059 }
9060
9061 #define activate_mm(prev, next) \
9062 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu.h linux-2.6.32.42/arch/x86/include/asm/mmu.h
9063 --- linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9064 +++ linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9065 @@ -9,10 +9,23 @@
9066 * we put the segment information here.
9067 */
9068 typedef struct {
9069 - void *ldt;
9070 + struct desc_struct *ldt;
9071 int size;
9072 struct mutex lock;
9073 - void *vdso;
9074 + unsigned long vdso;
9075 +
9076 +#ifdef CONFIG_X86_32
9077 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9078 + unsigned long user_cs_base;
9079 + unsigned long user_cs_limit;
9080 +
9081 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9082 + cpumask_t cpu_user_cs_mask;
9083 +#endif
9084 +
9085 +#endif
9086 +#endif
9087 +
9088 } mm_context_t;
9089
9090 #ifdef CONFIG_SMP
9091 diff -urNp linux-2.6.32.42/arch/x86/include/asm/module.h linux-2.6.32.42/arch/x86/include/asm/module.h
9092 --- linux-2.6.32.42/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9093 +++ linux-2.6.32.42/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9094 @@ -5,6 +5,7 @@
9095
9096 #ifdef CONFIG_X86_64
9097 /* X86_64 does not define MODULE_PROC_FAMILY */
9098 +#define MODULE_PROC_FAMILY ""
9099 #elif defined CONFIG_M386
9100 #define MODULE_PROC_FAMILY "386 "
9101 #elif defined CONFIG_M486
9102 @@ -59,13 +60,36 @@
9103 #error unknown processor family
9104 #endif
9105
9106 -#ifdef CONFIG_X86_32
9107 -# ifdef CONFIG_4KSTACKS
9108 -# define MODULE_STACKSIZE "4KSTACKS "
9109 -# else
9110 -# define MODULE_STACKSIZE ""
9111 -# endif
9112 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9114 +#define MODULE_PAX_UDEREF "UDEREF "
9115 +#else
9116 +#define MODULE_PAX_UDEREF ""
9117 +#endif
9118 +
9119 +#ifdef CONFIG_PAX_KERNEXEC
9120 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9121 +#else
9122 +#define MODULE_PAX_KERNEXEC ""
9123 +#endif
9124 +
9125 +#ifdef CONFIG_PAX_REFCOUNT
9126 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9127 +#else
9128 +#define MODULE_PAX_REFCOUNT ""
9129 #endif
9130
9131 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9132 +#define MODULE_STACKSIZE "4KSTACKS "
9133 +#else
9134 +#define MODULE_STACKSIZE ""
9135 +#endif
9136 +
9137 +#ifdef CONFIG_GRKERNSEC
9138 +#define MODULE_GRSEC "GRSECURITY "
9139 +#else
9140 +#define MODULE_GRSEC ""
9141 +#endif
9142 +
9143 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9144 +
9145 #endif /* _ASM_X86_MODULE_H */
9146 diff -urNp linux-2.6.32.42/arch/x86/include/asm/page_64_types.h linux-2.6.32.42/arch/x86/include/asm/page_64_types.h
9147 --- linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9148 +++ linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9149 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9150
9151 /* duplicated to the one in bootmem.h */
9152 extern unsigned long max_pfn;
9153 -extern unsigned long phys_base;
9154 +extern const unsigned long phys_base;
9155
9156 extern unsigned long __phys_addr(unsigned long);
9157 #define __phys_reloc_hide(x) (x)
9158 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt.h linux-2.6.32.42/arch/x86/include/asm/paravirt.h
9159 --- linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9160 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9161 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9162 pv_mmu_ops.set_fixmap(idx, phys, flags);
9163 }
9164
9165 +#ifdef CONFIG_PAX_KERNEXEC
9166 +static inline unsigned long pax_open_kernel(void)
9167 +{
9168 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9169 +}
9170 +
9171 +static inline unsigned long pax_close_kernel(void)
9172 +{
9173 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9174 +}
9175 +#else
9176 +static inline unsigned long pax_open_kernel(void) { return 0; }
9177 +static inline unsigned long pax_close_kernel(void) { return 0; }
9178 +#endif
9179 +
9180 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9181
9182 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9183 @@ -945,7 +960,7 @@ extern void default_banner(void);
9184
9185 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9186 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9187 -#define PARA_INDIRECT(addr) *%cs:addr
9188 +#define PARA_INDIRECT(addr) *%ss:addr
9189 #endif
9190
9191 #define INTERRUPT_RETURN \
9192 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9193 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9194 CLBR_NONE, \
9195 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9196 +
9197 +#define GET_CR0_INTO_RDI \
9198 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9199 + mov %rax,%rdi
9200 +
9201 +#define SET_RDI_INTO_CR0 \
9202 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9203 +
9204 +#define GET_CR3_INTO_RDI \
9205 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9206 + mov %rax,%rdi
9207 +
9208 +#define SET_RDI_INTO_CR3 \
9209 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9210 +
9211 #endif /* CONFIG_X86_32 */
9212
9213 #endif /* __ASSEMBLY__ */
9214 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h
9215 --- linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9216 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
9217 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9218 an mfn. We can tell which is which from the index. */
9219 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9220 phys_addr_t phys, pgprot_t flags);
9221 +
9222 +#ifdef CONFIG_PAX_KERNEXEC
9223 + unsigned long (*pax_open_kernel)(void);
9224 + unsigned long (*pax_close_kernel)(void);
9225 +#endif
9226 +
9227 };
9228
9229 struct raw_spinlock;
9230 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pci_x86.h linux-2.6.32.42/arch/x86/include/asm/pci_x86.h
9231 --- linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9232 +++ linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9233 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9234 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9235
9236 struct pci_raw_ops {
9237 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9238 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9239 int reg, int len, u32 *val);
9240 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9241 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9242 int reg, int len, u32 val);
9243 };
9244
9245 -extern struct pci_raw_ops *raw_pci_ops;
9246 -extern struct pci_raw_ops *raw_pci_ext_ops;
9247 +extern const struct pci_raw_ops *raw_pci_ops;
9248 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9249
9250 -extern struct pci_raw_ops pci_direct_conf1;
9251 +extern const struct pci_raw_ops pci_direct_conf1;
9252 extern bool port_cf9_safe;
9253
9254 /* arch_initcall level */
9255 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgalloc.h linux-2.6.32.42/arch/x86/include/asm/pgalloc.h
9256 --- linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9257 +++ linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9258 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9259 pmd_t *pmd, pte_t *pte)
9260 {
9261 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9262 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9263 +}
9264 +
9265 +static inline void pmd_populate_user(struct mm_struct *mm,
9266 + pmd_t *pmd, pte_t *pte)
9267 +{
9268 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9269 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9270 }
9271
9272 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h
9273 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9274 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9275 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9276
9277 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9278 {
9279 + pax_open_kernel();
9280 *pmdp = pmd;
9281 + pax_close_kernel();
9282 }
9283
9284 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9285 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h
9286 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9287 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9288 @@ -26,9 +26,6 @@
9289 struct mm_struct;
9290 struct vm_area_struct;
9291
9292 -extern pgd_t swapper_pg_dir[1024];
9293 -extern pgd_t trampoline_pg_dir[1024];
9294 -
9295 static inline void pgtable_cache_init(void) { }
9296 static inline void check_pgt_cache(void) { }
9297 void paging_init(void);
9298 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9299 # include <asm/pgtable-2level.h>
9300 #endif
9301
9302 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9303 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9304 +#ifdef CONFIG_X86_PAE
9305 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9306 +#endif
9307 +
9308 #if defined(CONFIG_HIGHPTE)
9309 #define __KM_PTE \
9310 (in_nmi() ? KM_NMI_PTE : \
9311 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9312 /* Clear a kernel PTE and flush it from the TLB */
9313 #define kpte_clear_flush(ptep, vaddr) \
9314 do { \
9315 + pax_open_kernel(); \
9316 pte_clear(&init_mm, (vaddr), (ptep)); \
9317 + pax_close_kernel(); \
9318 __flush_tlb_one((vaddr)); \
9319 } while (0)
9320
9321 @@ -85,6 +90,9 @@ do { \
9322
9323 #endif /* !__ASSEMBLY__ */
9324
9325 +#define HAVE_ARCH_UNMAPPED_AREA
9326 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9327 +
9328 /*
9329 * kern_addr_valid() is (1) for FLATMEM and (0) for
9330 * SPARSEMEM and DISCONTIGMEM
9331 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h
9332 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9333 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9334 @@ -8,7 +8,7 @@
9335 */
9336 #ifdef CONFIG_X86_PAE
9337 # include <asm/pgtable-3level_types.h>
9338 -# define PMD_SIZE (1UL << PMD_SHIFT)
9339 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9340 # define PMD_MASK (~(PMD_SIZE - 1))
9341 #else
9342 # include <asm/pgtable-2level_types.h>
9343 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9344 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9345 #endif
9346
9347 +#ifdef CONFIG_PAX_KERNEXEC
9348 +#ifndef __ASSEMBLY__
9349 +extern unsigned char MODULES_EXEC_VADDR[];
9350 +extern unsigned char MODULES_EXEC_END[];
9351 +#endif
9352 +#include <asm/boot.h>
9353 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9354 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9355 +#else
9356 +#define ktla_ktva(addr) (addr)
9357 +#define ktva_ktla(addr) (addr)
9358 +#endif
9359 +
9360 #define MODULES_VADDR VMALLOC_START
9361 #define MODULES_END VMALLOC_END
9362 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
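
[Annotation] The ktla_ktva()/ktva_ktla() macros introduced above translate a kernel text address between two mappings that are offset from each other by LOAD_PHYSICAL_ADDR + PAGE_OFFSET when CONFIG_PAX_KERNEXEC is enabled (and collapse to the identity otherwise). A minimal userspace sketch of the arithmetic, assuming illustrative values for the two constants -- the real ones come from the kernel configuration:

#include <stdio.h>

/* Illustrative constants only; the real values are set by Kconfig. */
#define PAGE_OFFSET        0xc0000000UL
#define LOAD_PHYSICAL_ADDR 0x01000000UL

/* kernel text linear address -> kernel text virtual address */
#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
/* and the inverse mapping */
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long text  = 0x00100000UL;      /* some kernel text address */
    unsigned long alias = ktla_ktva(text);

    printf("ktla %#lx -> ktva %#lx\n", text, alias);
    printf("round trip: %#lx\n", ktva_ktla(alias));
    return 0;
}
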
9363 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h
9364 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9365 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9366 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9367
9368 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9369 {
9370 + pax_open_kernel();
9371 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9372 + pax_close_kernel();
9373 }
9374
9375 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9376 {
9377 + pax_open_kernel();
9378 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9379 + pax_close_kernel();
9380 }
9381
9382 /*
9383 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h
9384 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9385 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9386 @@ -16,10 +16,13 @@
9387
9388 extern pud_t level3_kernel_pgt[512];
9389 extern pud_t level3_ident_pgt[512];
9390 +extern pud_t level3_vmalloc_pgt[512];
9391 +extern pud_t level3_vmemmap_pgt[512];
9392 +extern pud_t level2_vmemmap_pgt[512];
9393 extern pmd_t level2_kernel_pgt[512];
9394 extern pmd_t level2_fixmap_pgt[512];
9395 -extern pmd_t level2_ident_pgt[512];
9396 -extern pgd_t init_level4_pgt[];
9397 +extern pmd_t level2_ident_pgt[512*2];
9398 +extern pgd_t init_level4_pgt[512];
9399
9400 #define swapper_pg_dir init_level4_pgt
9401
9402 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9403
9404 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9405 {
9406 + pax_open_kernel();
9407 *pmdp = pmd;
9408 + pax_close_kernel();
9409 }
9410
9411 static inline void native_pmd_clear(pmd_t *pmd)
9412 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9413
9414 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9415 {
9416 + pax_open_kernel();
9417 *pgdp = pgd;
9418 + pax_close_kernel();
9419 }
9420
9421 static inline void native_pgd_clear(pgd_t *pgd)
9422 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h
9423 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9424 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9425 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9426 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9427 #define MODULES_END _AC(0xffffffffff000000, UL)
9428 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9429 +#define MODULES_EXEC_VADDR MODULES_VADDR
9430 +#define MODULES_EXEC_END MODULES_END
9431 +
9432 +#define ktla_ktva(addr) (addr)
9433 +#define ktva_ktla(addr) (addr)
9434
9435 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9436 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable.h linux-2.6.32.42/arch/x86/include/asm/pgtable.h
9437 --- linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9438 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9439 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9440
9441 #define arch_end_context_switch(prev) do {} while(0)
9442
9443 +#define pax_open_kernel() native_pax_open_kernel()
9444 +#define pax_close_kernel() native_pax_close_kernel()
9445 #endif /* CONFIG_PARAVIRT */
9446
9447 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9448 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9449 +
9450 +#ifdef CONFIG_PAX_KERNEXEC
9451 +static inline unsigned long native_pax_open_kernel(void)
9452 +{
9453 + unsigned long cr0;
9454 +
9455 + preempt_disable();
9456 + barrier();
9457 + cr0 = read_cr0() ^ X86_CR0_WP;
9458 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9459 + write_cr0(cr0);
9460 + return cr0 ^ X86_CR0_WP;
9461 +}
9462 +
9463 +static inline unsigned long native_pax_close_kernel(void)
9464 +{
9465 + unsigned long cr0;
9466 +
9467 + cr0 = read_cr0() ^ X86_CR0_WP;
9468 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9469 + write_cr0(cr0);
9470 + barrier();
9471 + preempt_enable_no_resched();
9472 + return cr0 ^ X86_CR0_WP;
9473 +}
9474 +#else
9475 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9476 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9477 +#endif
9478 +
9479 /*
9480 * The following only work if pte_present() is true.
9481 * Undefined behaviour if not..
9482 */
9483 +static inline int pte_user(pte_t pte)
9484 +{
9485 + return pte_val(pte) & _PAGE_USER;
9486 +}
9487 +
9488 static inline int pte_dirty(pte_t pte)
9489 {
9490 return pte_flags(pte) & _PAGE_DIRTY;
9491 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9492 return pte_clear_flags(pte, _PAGE_RW);
9493 }
9494
9495 +static inline pte_t pte_mkread(pte_t pte)
9496 +{
9497 + return __pte(pte_val(pte) | _PAGE_USER);
9498 +}
9499 +
9500 static inline pte_t pte_mkexec(pte_t pte)
9501 {
9502 - return pte_clear_flags(pte, _PAGE_NX);
9503 +#ifdef CONFIG_X86_PAE
9504 + if (__supported_pte_mask & _PAGE_NX)
9505 + return pte_clear_flags(pte, _PAGE_NX);
9506 + else
9507 +#endif
9508 + return pte_set_flags(pte, _PAGE_USER);
9509 +}
9510 +
9511 +static inline pte_t pte_exprotect(pte_t pte)
9512 +{
9513 +#ifdef CONFIG_X86_PAE
9514 + if (__supported_pte_mask & _PAGE_NX)
9515 + return pte_set_flags(pte, _PAGE_NX);
9516 + else
9517 +#endif
9518 + return pte_clear_flags(pte, _PAGE_USER);
9519 }
9520
9521 static inline pte_t pte_mkdirty(pte_t pte)
9522 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9523 #endif
9524
9525 #ifndef __ASSEMBLY__
9526 +
9527 +#ifdef CONFIG_PAX_PER_CPU_PGD
9528 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9529 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9530 +{
9531 + return cpu_pgd[cpu];
9532 +}
9533 +#endif
9534 +
9535 #include <linux/mm_types.h>
9536
9537 static inline int pte_none(pte_t pte)
9538 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9539
9540 static inline int pgd_bad(pgd_t pgd)
9541 {
9542 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9543 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9544 }
9545
9546 static inline int pgd_none(pgd_t pgd)
9547 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9548 * pgd_offset() returns a (pgd_t *)
9549 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9550 */
9551 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9552 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9553 +
9554 +#ifdef CONFIG_PAX_PER_CPU_PGD
9555 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9556 +#endif
9557 +
9558 /*
9559 * a shortcut which implies the use of the kernel's pgd, instead
9560 * of a process's
9561 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9562 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9563 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9564
9565 +#ifdef CONFIG_X86_32
9566 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9567 +#else
9568 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9569 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9570 +
9571 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9572 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9573 +#else
9574 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9575 +#endif
9576 +
9577 +#endif
9578 +
9579 #ifndef __ASSEMBLY__
9580
9581 extern int direct_gbpages;
9582 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9583 * dst and src can be on the same page, but the range must not overlap,
9584 * and must not cross a page boundary.
9585 */
9586 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9587 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9588 {
9589 - memcpy(dst, src, count * sizeof(pgd_t));
9590 + pax_open_kernel();
9591 + while (count--)
9592 + *dst++ = *src++;
9593 + pax_close_kernel();
9594 }
9595
9596 +#ifdef CONFIG_PAX_PER_CPU_PGD
9597 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9598 +#endif
9599 +
9600 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9601 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9602 +#else
9603 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9604 +#endif
9605
9606 #include <asm-generic/pgtable.h>
9607 #endif /* __ASSEMBLY__ */
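
[Annotation] The native_pax_open_kernel()/native_pax_close_kernel() pair added in this pgtable.h hunk temporarily clears CR0.WP so the kernel can write to otherwise read-only page tables, keeping preemption disabled for the duration and BUG_ON-checking that the open/close window is balanced. Below is a rough userspace model of that state machine; read_cr0(), write_cr0() and the preemption calls are plain stand-ins (the barrier() calls from the patch are omitted), so this is a sketch of the logic, not the privileged implementation:

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

/* Stand-ins for the privileged CR0 accessors and preemption control. */
static unsigned long fake_cr0 = X86_CR0_WP;   /* WP normally set */
static int preempt_count;

static unsigned long read_cr0(void)          { return fake_cr0; }
static void write_cr0(unsigned long v)       { fake_cr0 = v; }
static void preempt_disable(void)            { preempt_count++; }
static void preempt_enable_no_resched(void)  { preempt_count--; }

static unsigned long pax_open_kernel(void)
{
    unsigned long cr0;

    preempt_disable();
    cr0 = read_cr0() ^ X86_CR0_WP;   /* flip WP off */
    assert(!(cr0 & X86_CR0_WP));     /* WP must have been set before */
    write_cr0(cr0);
    return cr0 ^ X86_CR0_WP;         /* report the pre-open value */
}

static unsigned long pax_close_kernel(void)
{
    unsigned long cr0;

    cr0 = read_cr0() ^ X86_CR0_WP;   /* flip WP back on */
    assert(cr0 & X86_CR0_WP);        /* WP must have been clear */
    write_cr0(cr0);
    preempt_enable_no_resched();
    return cr0 ^ X86_CR0_WP;
}

int main(void)
{
    pax_open_kernel();
    /* ... a write to normally read-only kernel data would go here ... */
    pax_close_kernel();
    printf("WP restored: %s, preempt balanced: %s\n",
           (fake_cr0 & X86_CR0_WP) ? "yes" : "no",
           preempt_count == 0 ? "yes" : "no");
    return 0;
}

This is also the window that the pax_open_kernel()/pax_close_kernel() calls sprinkled through the native_set_pmd/pud/pgd helpers and clone_pgd_range() rely on.
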
9608 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h
9609 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9610 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9611 @@ -16,12 +16,11 @@
9612 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9613 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9614 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9615 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9616 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9617 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9618 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9619 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9620 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9621 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9622 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9623 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9624
9625 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9626 @@ -39,7 +38,6 @@
9627 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9628 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9629 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9630 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9631 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9632 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9633 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9634 @@ -55,8 +53,10 @@
9635
9636 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9637 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9638 -#else
9639 +#elif defined(CONFIG_KMEMCHECK)
9640 #define _PAGE_NX (_AT(pteval_t, 0))
9641 +#else
9642 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9643 #endif
9644
9645 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9646 @@ -93,6 +93,9 @@
9647 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9648 _PAGE_ACCESSED)
9649
9650 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9651 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9652 +
9653 #define __PAGE_KERNEL_EXEC \
9654 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9655 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9656 @@ -103,8 +106,8 @@
9657 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9658 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9659 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9660 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9661 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9662 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9663 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9664 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9665 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9666 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9667 @@ -163,8 +166,8 @@
9668 * bits are combined, this will alow user to access the high address mapped
9669 * VDSO in the presence of CONFIG_COMPAT_VDSO
9670 */
9671 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9672 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9673 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9674 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9675 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9676 #endif
9677
9678 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9679 {
9680 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9681 }
9682 +#endif
9683
9684 +#if PAGETABLE_LEVELS == 3
9685 +#include <asm-generic/pgtable-nopud.h>
9686 +#endif
9687 +
9688 +#if PAGETABLE_LEVELS == 2
9689 +#include <asm-generic/pgtable-nopmd.h>
9690 +#endif
9691 +
9692 +#ifndef __ASSEMBLY__
9693 #if PAGETABLE_LEVELS > 3
9694 typedef struct { pudval_t pud; } pud_t;
9695
9696 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9697 return pud.pud;
9698 }
9699 #else
9700 -#include <asm-generic/pgtable-nopud.h>
9701 -
9702 static inline pudval_t native_pud_val(pud_t pud)
9703 {
9704 return native_pgd_val(pud.pgd);
9705 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9706 return pmd.pmd;
9707 }
9708 #else
9709 -#include <asm-generic/pgtable-nopmd.h>
9710 -
9711 static inline pmdval_t native_pmd_val(pmd_t pmd)
9712 {
9713 return native_pgd_val(pmd.pud.pgd);
9714 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9715
9716 extern pteval_t __supported_pte_mask;
9717 extern void set_nx(void);
9718 +
9719 +#ifdef CONFIG_X86_32
9720 +#ifdef CONFIG_X86_PAE
9721 extern int nx_enabled;
9722 +#else
9723 +#define nx_enabled (0)
9724 +#endif
9725 +#else
9726 +#define nx_enabled (1)
9727 +#endif
9728
9729 #define pgprot_writecombine pgprot_writecombine
9730 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9731 diff -urNp linux-2.6.32.42/arch/x86/include/asm/processor.h linux-2.6.32.42/arch/x86/include/asm/processor.h
9732 --- linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9733 +++ linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9734 @@ -272,7 +272,7 @@ struct tss_struct {
9735
9736 } ____cacheline_aligned;
9737
9738 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9739 +extern struct tss_struct init_tss[NR_CPUS];
9740
9741 /*
9742 * Save the original ist values for checking stack pointers during debugging
9743 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9744 */
9745 #define TASK_SIZE PAGE_OFFSET
9746 #define TASK_SIZE_MAX TASK_SIZE
9747 +
9748 +#ifdef CONFIG_PAX_SEGMEXEC
9749 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9750 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9751 +#else
9752 #define STACK_TOP TASK_SIZE
9753 -#define STACK_TOP_MAX STACK_TOP
9754 +#endif
9755 +
9756 +#define STACK_TOP_MAX TASK_SIZE
9757
9758 #define INIT_THREAD { \
9759 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9760 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9761 .vm86_info = NULL, \
9762 .sysenter_cs = __KERNEL_CS, \
9763 .io_bitmap_ptr = NULL, \
9764 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9765 */
9766 #define INIT_TSS { \
9767 .x86_tss = { \
9768 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9769 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9770 .ss0 = __KERNEL_DS, \
9771 .ss1 = __KERNEL_CS, \
9772 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9773 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9774 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9775
9776 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9777 -#define KSTK_TOP(info) \
9778 -({ \
9779 - unsigned long *__ptr = (unsigned long *)(info); \
9780 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9781 -})
9782 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9783
9784 /*
9785 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9786 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9787 #define task_pt_regs(task) \
9788 ({ \
9789 struct pt_regs *__regs__; \
9790 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9791 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9792 __regs__ - 1; \
9793 })
9794
9795 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9796 /*
9797 * User space process size. 47bits minus one guard page.
9798 */
9799 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9800 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9801
9802 /* This decides where the kernel will search for a free chunk of vm
9803 * space during mmap's.
9804 */
9805 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9806 - 0xc0000000 : 0xFFFFe000)
9807 + 0xc0000000 : 0xFFFFf000)
9808
9809 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9810 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9811 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9812 #define STACK_TOP_MAX TASK_SIZE_MAX
9813
9814 #define INIT_THREAD { \
9815 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9816 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9817 }
9818
9819 #define INIT_TSS { \
9820 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 /*
9825 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9826 */
9827 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9828
9829 +#ifdef CONFIG_PAX_SEGMEXEC
9830 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9831 +#endif
9832 +
9833 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9834
9835 /* Get/set a process' ability to use the timestamp counter instruction */
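
[Annotation] Under CONFIG_PAX_SEGMEXEC the 32-bit user address space is halved -- SEGMEXEC_TASK_SIZE is TASK_SIZE / 2 and STACK_TOP becomes per-process -- so that the upper half can hold the segmentation-based executable mirror used by SEGMEXEC. A tiny sketch of the resulting constants, assuming the usual 3 GB/1 GB split and 4 KiB page alignment (illustrative values; the real ones depend on the kernel configuration):

#include <stdio.h>

#define TASK_SIZE          0xc0000000UL
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)

/* Mirrors TASK_UNMAPPED_BASE / SEGMEXEC_TASK_UNMAPPED_BASE from the hunk. */
#define PAGE_ALIGN_4K(x)   (((x) + 0xfffUL) & ~0xfffUL)

int main(void)
{
    printf("TASK_SIZE                   = %#lx\n", TASK_SIZE);
    printf("SEGMEXEC_TASK_SIZE          = %#lx\n", SEGMEXEC_TASK_SIZE);
    printf("TASK_UNMAPPED_BASE          = %#lx\n", PAGE_ALIGN_4K(TASK_SIZE / 3));
    printf("SEGMEXEC_TASK_UNMAPPED_BASE = %#lx\n", PAGE_ALIGN_4K(SEGMEXEC_TASK_SIZE / 3));
    return 0;
}
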
9836 diff -urNp linux-2.6.32.42/arch/x86/include/asm/ptrace.h linux-2.6.32.42/arch/x86/include/asm/ptrace.h
9837 --- linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9838 +++ linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9839 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9840 }
9841
9842 /*
9843 - * user_mode_vm(regs) determines whether a register set came from user mode.
9844 + * user_mode(regs) determines whether a register set came from user mode.
9845 * This is true if V8086 mode was enabled OR if the register set was from
9846 * protected mode with RPL-3 CS value. This tricky test checks that with
9847 * one comparison. Many places in the kernel can bypass this full check
9848 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9849 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9850 + * be used.
9851 */
9852 -static inline int user_mode(struct pt_regs *regs)
9853 +static inline int user_mode_novm(struct pt_regs *regs)
9854 {
9855 #ifdef CONFIG_X86_32
9856 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9857 #else
9858 - return !!(regs->cs & 3);
9859 + return !!(regs->cs & SEGMENT_RPL_MASK);
9860 #endif
9861 }
9862
9863 -static inline int user_mode_vm(struct pt_regs *regs)
9864 +static inline int user_mode(struct pt_regs *regs)
9865 {
9866 #ifdef CONFIG_X86_32
9867 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9868 USER_RPL;
9869 #else
9870 - return user_mode(regs);
9871 + return user_mode_novm(regs);
9872 #endif
9873 }
9874
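
[Annotation] The ptrace.h hunk renames the x86-32 helpers so that the safe check (which also catches virtual-8086 mode) becomes user_mode() and the RPL-only check becomes user_mode_novm(). The trick in the combined test is that OR-ing the CS requested-privilege-level bits with the EFLAGS VM bit lets a single >= USER_RPL comparison cover both cases. A standalone sketch of that test with the constants written out (standard x86 values, shown here for illustration rather than quoted from a header):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      0x00020000UL   /* EFLAGS.VM, virtual-8086 mode */

/* One comparison covers both "RPL 3" and "V8086 mode" register sets. */
static int came_from_user_mode(unsigned long cs, unsigned long eflags)
{
    return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    printf("kernel cs=0x60, no VM  -> %d\n", came_from_user_mode(0x60, 0x202));
    printf("user   cs=0x73, no VM  -> %d\n", came_from_user_mode(0x73, 0x202));
    printf("vm86   cs=0x00, VM set -> %d\n",
           came_from_user_mode(0x00, 0x202 | X86_VM_MASK));
    return 0;
}
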
9875 diff -urNp linux-2.6.32.42/arch/x86/include/asm/reboot.h linux-2.6.32.42/arch/x86/include/asm/reboot.h
9876 --- linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9877 +++ linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9878 @@ -6,19 +6,19 @@
9879 struct pt_regs;
9880
9881 struct machine_ops {
9882 - void (*restart)(char *cmd);
9883 - void (*halt)(void);
9884 - void (*power_off)(void);
9885 + void (* __noreturn restart)(char *cmd);
9886 + void (* __noreturn halt)(void);
9887 + void (* __noreturn power_off)(void);
9888 void (*shutdown)(void);
9889 void (*crash_shutdown)(struct pt_regs *);
9890 - void (*emergency_restart)(void);
9891 + void (* __noreturn emergency_restart)(void);
9892 };
9893
9894 extern struct machine_ops machine_ops;
9895
9896 void native_machine_crash_shutdown(struct pt_regs *regs);
9897 void native_machine_shutdown(void);
9898 -void machine_real_restart(const unsigned char *code, int length);
9899 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9900
9901 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9902 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9903 diff -urNp linux-2.6.32.42/arch/x86/include/asm/rwsem.h linux-2.6.32.42/arch/x86/include/asm/rwsem.h
9904 --- linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9905 +++ linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9906 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9907 {
9908 asm volatile("# beginning down_read\n\t"
9909 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9910 +
9911 +#ifdef CONFIG_PAX_REFCOUNT
9912 + "jno 0f\n"
9913 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9914 + "int $4\n0:\n"
9915 + _ASM_EXTABLE(0b, 0b)
9916 +#endif
9917 +
9918 /* adds 0x00000001, returns the old value */
9919 " jns 1f\n"
9920 " call call_rwsem_down_read_failed\n"
9921 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9922 "1:\n\t"
9923 " mov %1,%2\n\t"
9924 " add %3,%2\n\t"
9925 +
9926 +#ifdef CONFIG_PAX_REFCOUNT
9927 + "jno 0f\n"
9928 + "sub %3,%2\n"
9929 + "int $4\n0:\n"
9930 + _ASM_EXTABLE(0b, 0b)
9931 +#endif
9932 +
9933 " jle 2f\n\t"
9934 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9935 " jnz 1b\n\t"
9936 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9937 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9938 asm volatile("# beginning down_write\n\t"
9939 LOCK_PREFIX " xadd %1,(%2)\n\t"
9940 +
9941 +#ifdef CONFIG_PAX_REFCOUNT
9942 + "jno 0f\n"
9943 + "mov %1,(%2)\n"
9944 + "int $4\n0:\n"
9945 + _ASM_EXTABLE(0b, 0b)
9946 +#endif
9947 +
9948 /* subtract 0x0000ffff, returns the old value */
9949 " test %1,%1\n\t"
9950 /* was the count 0 before? */
9951 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9952 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9953 asm volatile("# beginning __up_read\n\t"
9954 LOCK_PREFIX " xadd %1,(%2)\n\t"
9955 +
9956 +#ifdef CONFIG_PAX_REFCOUNT
9957 + "jno 0f\n"
9958 + "mov %1,(%2)\n"
9959 + "int $4\n0:\n"
9960 + _ASM_EXTABLE(0b, 0b)
9961 +#endif
9962 +
9963 /* subtracts 1, returns the old value */
9964 " jns 1f\n\t"
9965 " call call_rwsem_wake\n"
9966 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9967 rwsem_count_t tmp;
9968 asm volatile("# beginning __up_write\n\t"
9969 LOCK_PREFIX " xadd %1,(%2)\n\t"
9970 +
9971 +#ifdef CONFIG_PAX_REFCOUNT
9972 + "jno 0f\n"
9973 + "mov %1,(%2)\n"
9974 + "int $4\n0:\n"
9975 + _ASM_EXTABLE(0b, 0b)
9976 +#endif
9977 +
9978 /* tries to transition
9979 0xffff0001 -> 0x00000000 */
9980 " jz 1f\n"
9981 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9982 {
9983 asm volatile("# beginning __downgrade_write\n\t"
9984 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9985 +
9986 +#ifdef CONFIG_PAX_REFCOUNT
9987 + "jno 0f\n"
9988 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9989 + "int $4\n0:\n"
9990 + _ASM_EXTABLE(0b, 0b)
9991 +#endif
9992 +
9993 /*
9994 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9995 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9996 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
9997 static inline void rwsem_atomic_add(rwsem_count_t delta,
9998 struct rw_semaphore *sem)
9999 {
10000 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10001 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10002 +
10003 +#ifdef CONFIG_PAX_REFCOUNT
10004 + "jno 0f\n"
10005 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10006 + "int $4\n0:\n"
10007 + _ASM_EXTABLE(0b, 0b)
10008 +#endif
10009 +
10010 : "+m" (sem->count)
10011 : "er" (delta));
10012 }
10013 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10014 {
10015 rwsem_count_t tmp = delta;
10016
10017 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10018 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10019 +
10020 +#ifdef CONFIG_PAX_REFCOUNT
10021 + "jno 0f\n"
10022 + "mov %0,%1\n"
10023 + "int $4\n0:\n"
10024 + _ASM_EXTABLE(0b, 0b)
10025 +#endif
10026 +
10027 : "+r" (tmp), "+m" (sem->count)
10028 : : "memory");
10029
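
[Annotation] Every locked arithmetic instruction in the rwsem fast paths gains the same CONFIG_PAX_REFCOUNT guard: if the signed operation overflows, the jno branch is not taken, the operation is undone, and int $4 raises the overflow trap so the kernel can report the event (the _ASM_EXTABLE entry supplies the fixup point). Below is a minimal userspace rendition of the pattern using GCC/Clang x86 inline asm; the trap is replaced by a flag so the demo can run unprivileged, which is an obvious departure from the patch:

#include <stdio.h>

/* Add 'delta' to '*count'; on signed overflow undo the add and set '*ovf'.
 * Mirrors the "jno 0f; <undo>; int $4; 0:" pattern from the patch, with the
 * trap replaced by a flag for this userspace demo. */
static void add_checked(int delta, int *count, int *ovf)
{
    asm volatile("lock addl %2, %0\n\t"
                 "jno 1f\n\t"
                 "lock subl %2, %0\n\t"   /* undo on signed overflow */
                 "movl $1, %1\n"
                 "1:\n"
                 : "+m" (*count), "+m" (*ovf)
                 : "ir" (delta)
                 : "cc", "memory");
}

int main(void)
{
    int count = 0x7fffffff, ovf = 0;

    add_checked(1, &count, &ovf);
    printf("count=%#x overflowed=%d (value was restored)\n", count, ovf);
    return 0;
}

The spinlock.h hunks further down apply the identical guard to the rwlock subl/addl/incl/decl fast paths.
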
10030 diff -urNp linux-2.6.32.42/arch/x86/include/asm/segment.h linux-2.6.32.42/arch/x86/include/asm/segment.h
10031 --- linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10032 +++ linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10033 @@ -62,8 +62,8 @@
10034 * 26 - ESPFIX small SS
10035 * 27 - per-cpu [ offset to per-cpu data area ]
10036 * 28 - stack_canary-20 [ for stack protector ]
10037 - * 29 - unused
10038 - * 30 - unused
10039 + * 29 - PCI BIOS CS
10040 + * 30 - PCI BIOS DS
10041 * 31 - TSS for double fault handler
10042 */
10043 #define GDT_ENTRY_TLS_MIN 6
10044 @@ -77,6 +77,8 @@
10045
10046 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10047
10048 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10049 +
10050 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10051
10052 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10053 @@ -88,7 +90,7 @@
10054 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10055 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10056
10057 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10058 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10059 #ifdef CONFIG_SMP
10060 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10061 #else
10062 @@ -102,6 +104,12 @@
10063 #define __KERNEL_STACK_CANARY 0
10064 #endif
10065
10066 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10067 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10068 +
10069 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10070 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10071 +
10072 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10073
10074 /*
10075 @@ -139,7 +147,7 @@
10076 */
10077
10078 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10079 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10080 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10081
10082
10083 #else
10084 @@ -163,6 +171,8 @@
10085 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10086 #define __USER32_DS __USER_DS
10087
10088 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10089 +
10090 #define GDT_ENTRY_TSS 8 /* needs two entries */
10091 #define GDT_ENTRY_LDT 10 /* needs two entries */
10092 #define GDT_ENTRY_TLS_MIN 12
10093 @@ -183,6 +193,7 @@
10094 #endif
10095
10096 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10097 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10098 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10099 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10100 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10101 diff -urNp linux-2.6.32.42/arch/x86/include/asm/smp.h linux-2.6.32.42/arch/x86/include/asm/smp.h
10102 --- linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10103 +++ linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-04-17 15:56:46.000000000 -0400
10104 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10105 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10106 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10107 DECLARE_PER_CPU(u16, cpu_llc_id);
10108 -DECLARE_PER_CPU(int, cpu_number);
10109 +DECLARE_PER_CPU(unsigned int, cpu_number);
10110
10111 static inline struct cpumask *cpu_sibling_mask(int cpu)
10112 {
10113 @@ -175,14 +175,8 @@ extern unsigned disabled_cpus __cpuinitd
10114 extern int safe_smp_processor_id(void);
10115
10116 #elif defined(CONFIG_X86_64_SMP)
10117 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10118 -
10119 -#define stack_smp_processor_id() \
10120 -({ \
10121 - struct thread_info *ti; \
10122 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10123 - ti->cpu; \
10124 -})
10125 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10126 +#define stack_smp_processor_id() raw_smp_processor_id()
10127 #define safe_smp_processor_id() smp_processor_id()
10128
10129 #endif
10130 diff -urNp linux-2.6.32.42/arch/x86/include/asm/spinlock.h linux-2.6.32.42/arch/x86/include/asm/spinlock.h
10131 --- linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10132 +++ linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10133 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10134 static inline void __raw_read_lock(raw_rwlock_t *rw)
10135 {
10136 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10137 +
10138 +#ifdef CONFIG_PAX_REFCOUNT
10139 + "jno 0f\n"
10140 + LOCK_PREFIX " addl $1,(%0)\n"
10141 + "int $4\n0:\n"
10142 + _ASM_EXTABLE(0b, 0b)
10143 +#endif
10144 +
10145 "jns 1f\n"
10146 "call __read_lock_failed\n\t"
10147 "1:\n"
10148 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10149 static inline void __raw_write_lock(raw_rwlock_t *rw)
10150 {
10151 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10152 +
10153 +#ifdef CONFIG_PAX_REFCOUNT
10154 + "jno 0f\n"
10155 + LOCK_PREFIX " addl %1,(%0)\n"
10156 + "int $4\n0:\n"
10157 + _ASM_EXTABLE(0b, 0b)
10158 +#endif
10159 +
10160 "jz 1f\n"
10161 "call __write_lock_failed\n\t"
10162 "1:\n"
10163 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10164
10165 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10166 {
10167 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10168 + asm volatile(LOCK_PREFIX "incl %0\n"
10169 +
10170 +#ifdef CONFIG_PAX_REFCOUNT
10171 + "jno 0f\n"
10172 + LOCK_PREFIX "decl %0\n"
10173 + "int $4\n0:\n"
10174 + _ASM_EXTABLE(0b, 0b)
10175 +#endif
10176 +
10177 + :"+m" (rw->lock) : : "memory");
10178 }
10179
10180 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10181 {
10182 - asm volatile(LOCK_PREFIX "addl %1, %0"
10183 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10184 +
10185 +#ifdef CONFIG_PAX_REFCOUNT
10186 + "jno 0f\n"
10187 + LOCK_PREFIX "subl %1, %0\n"
10188 + "int $4\n0:\n"
10189 + _ASM_EXTABLE(0b, 0b)
10190 +#endif
10191 +
10192 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10193 }
10194
10195 diff -urNp linux-2.6.32.42/arch/x86/include/asm/stackprotector.h linux-2.6.32.42/arch/x86/include/asm/stackprotector.h
10196 --- linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10197 +++ linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
10198 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10199
10200 static inline void load_stack_canary_segment(void)
10201 {
10202 -#ifdef CONFIG_X86_32
10203 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10204 asm volatile ("mov %0, %%gs" : : "r" (0));
10205 #endif
10206 }
10207 diff -urNp linux-2.6.32.42/arch/x86/include/asm/system.h linux-2.6.32.42/arch/x86/include/asm/system.h
10208 --- linux-2.6.32.42/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10209 +++ linux-2.6.32.42/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10210 @@ -132,7 +132,7 @@ do { \
10211 "thread_return:\n\t" \
10212 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10213 __switch_canary \
10214 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10215 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10216 "movq %%rax,%%rdi\n\t" \
10217 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10218 "jnz ret_from_fork\n\t" \
10219 @@ -143,7 +143,7 @@ do { \
10220 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10221 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10222 [_tif_fork] "i" (_TIF_FORK), \
10223 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10224 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10225 [current_task] "m" (per_cpu_var(current_task)) \
10226 __switch_canary_iparam \
10227 : "memory", "cc" __EXTRA_CLOBBER)
10228 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10229 {
10230 unsigned long __limit;
10231 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10232 - return __limit + 1;
10233 + return __limit;
10234 }
10235
10236 static inline void native_clts(void)
10237 @@ -340,12 +340,12 @@ void enable_hlt(void);
10238
10239 void cpu_idle_wait(void);
10240
10241 -extern unsigned long arch_align_stack(unsigned long sp);
10242 +#define arch_align_stack(x) ((x) & ~0xfUL)
10243 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10244
10245 void default_idle(void);
10246
10247 -void stop_this_cpu(void *dummy);
10248 +void stop_this_cpu(void *dummy) __noreturn;
10249
10250 /*
10251 * Force strict CPU ordering.
10252 diff -urNp linux-2.6.32.42/arch/x86/include/asm/thread_info.h linux-2.6.32.42/arch/x86/include/asm/thread_info.h
10253 --- linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10254 +++ linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10255 @@ -10,6 +10,7 @@
10256 #include <linux/compiler.h>
10257 #include <asm/page.h>
10258 #include <asm/types.h>
10259 +#include <asm/percpu.h>
10260
10261 /*
10262 * low level task data that entry.S needs immediate access to
10263 @@ -24,7 +25,6 @@ struct exec_domain;
10264 #include <asm/atomic.h>
10265
10266 struct thread_info {
10267 - struct task_struct *task; /* main task structure */
10268 struct exec_domain *exec_domain; /* execution domain */
10269 __u32 flags; /* low level flags */
10270 __u32 status; /* thread synchronous flags */
10271 @@ -34,18 +34,12 @@ struct thread_info {
10272 mm_segment_t addr_limit;
10273 struct restart_block restart_block;
10274 void __user *sysenter_return;
10275 -#ifdef CONFIG_X86_32
10276 - unsigned long previous_esp; /* ESP of the previous stack in
10277 - case of nested (IRQ) stacks
10278 - */
10279 - __u8 supervisor_stack[0];
10280 -#endif
10281 + unsigned long lowest_stack;
10282 int uaccess_err;
10283 };
10284
10285 -#define INIT_THREAD_INFO(tsk) \
10286 +#define INIT_THREAD_INFO \
10287 { \
10288 - .task = &tsk, \
10289 .exec_domain = &default_exec_domain, \
10290 .flags = 0, \
10291 .cpu = 0, \
10292 @@ -56,7 +50,7 @@ struct thread_info {
10293 }, \
10294 }
10295
10296 -#define init_thread_info (init_thread_union.thread_info)
10297 +#define init_thread_info (init_thread_union.stack)
10298 #define init_stack (init_thread_union.stack)
10299
10300 #else /* !__ASSEMBLY__ */
10301 @@ -163,6 +157,23 @@ struct thread_info {
10302 #define alloc_thread_info(tsk) \
10303 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10304
10305 +#ifdef __ASSEMBLY__
10306 +/* how to get the thread information struct from ASM */
10307 +#define GET_THREAD_INFO(reg) \
10308 + mov PER_CPU_VAR(current_tinfo), reg
10309 +
10310 +/* use this one if reg already contains %esp */
10311 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10312 +#else
10313 +/* how to get the thread information struct from C */
10314 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10315 +
10316 +static __always_inline struct thread_info *current_thread_info(void)
10317 +{
10318 + return percpu_read_stable(current_tinfo);
10319 +}
10320 +#endif
10321 +
10322 #ifdef CONFIG_X86_32
10323
10324 #define STACK_WARN (THREAD_SIZE/8)
10325 @@ -173,35 +184,13 @@ struct thread_info {
10326 */
10327 #ifndef __ASSEMBLY__
10328
10329 -
10330 /* how to get the current stack pointer from C */
10331 register unsigned long current_stack_pointer asm("esp") __used;
10332
10333 -/* how to get the thread information struct from C */
10334 -static inline struct thread_info *current_thread_info(void)
10335 -{
10336 - return (struct thread_info *)
10337 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10338 -}
10339 -
10340 -#else /* !__ASSEMBLY__ */
10341 -
10342 -/* how to get the thread information struct from ASM */
10343 -#define GET_THREAD_INFO(reg) \
10344 - movl $-THREAD_SIZE, reg; \
10345 - andl %esp, reg
10346 -
10347 -/* use this one if reg already contains %esp */
10348 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10349 - andl $-THREAD_SIZE, reg
10350 -
10351 #endif
10352
10353 #else /* X86_32 */
10354
10355 -#include <asm/percpu.h>
10356 -#define KERNEL_STACK_OFFSET (5*8)
10357 -
10358 /*
10359 * macros/functions for gaining access to the thread information structure
10360 * preempt_count needs to be 1 initially, until the scheduler is functional.
10361 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10362 #ifndef __ASSEMBLY__
10363 DECLARE_PER_CPU(unsigned long, kernel_stack);
10364
10365 -static inline struct thread_info *current_thread_info(void)
10366 -{
10367 - struct thread_info *ti;
10368 - ti = (void *)(percpu_read_stable(kernel_stack) +
10369 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10370 - return ti;
10371 -}
10372 -
10373 -#else /* !__ASSEMBLY__ */
10374 -
10375 -/* how to get the thread information struct from ASM */
10376 -#define GET_THREAD_INFO(reg) \
10377 - movq PER_CPU_VAR(kernel_stack),reg ; \
10378 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10379 -
10380 +/* how to get the current stack pointer from C */
10381 +register unsigned long current_stack_pointer asm("rsp") __used;
10382 #endif
10383
10384 #endif /* !X86_32 */
10385 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10386 extern void free_thread_info(struct thread_info *ti);
10387 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10388 #define arch_task_cache_init arch_task_cache_init
10389 +
10390 +#define __HAVE_THREAD_FUNCTIONS
10391 +#define task_thread_info(task) (&(task)->tinfo)
10392 +#define task_stack_page(task) ((task)->stack)
10393 +#define setup_thread_stack(p, org) do {} while (0)
10394 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10395 +
10396 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10397 +extern struct task_struct *alloc_task_struct(void);
10398 +extern void free_task_struct(struct task_struct *);
10399 +
10400 #endif
10401 #endif /* _ASM_X86_THREAD_INFO_H */
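
[Annotation] The thread_info.h rework drops the classic trick of deriving the thread_info pointer by masking the stack pointer with ~(THREAD_SIZE - 1) and instead reads a per-cpu current_tinfo pointer, so thread_info no longer has to sit at the bottom of the kernel stack. A small userspace contrast of the two lookups, with the per-cpu read modelled by a plain global (percpu_read_stable() and the real task layout are not reproduced here):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL   /* typical x86-32 value, for illustration */

struct thread_info { int cpu; };

/* Old scheme: thread_info sits at the base of the kernel stack, so it can
 * be recovered by masking any in-stack address down to THREAD_SIZE. */
static struct thread_info *ti_from_stack(uintptr_t sp)
{
    return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

/* New scheme (modelled): the pointer is simply kept in per-cpu storage. */
static struct thread_info *current_tinfo;

int main(void)
{
    static unsigned char stack[THREAD_SIZE] __attribute__((aligned(8192)));
    struct thread_info *ti = (struct thread_info *)stack;

    current_tinfo = ti;   /* what the patched GET_THREAD_INFO reads directly */
    uintptr_t sp = (uintptr_t)&stack[THREAD_SIZE - 64];

    printf("mask trick  -> %p\n", (void *)ti_from_stack(sp));
    printf("per-cpu ptr -> %p\n", (void *)current_tinfo);
    return 0;
}
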
10402 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h
10403 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10404 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10405 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10406 static __always_inline unsigned long __must_check
10407 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10408 {
10409 + pax_track_stack();
10410 +
10411 + if ((long)n < 0)
10412 + return n;
10413 +
10414 if (__builtin_constant_p(n)) {
10415 unsigned long ret;
10416
10417 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10418 return ret;
10419 }
10420 }
10421 + if (!__builtin_constant_p(n))
10422 + check_object_size(from, n, true);
10423 return __copy_to_user_ll(to, from, n);
10424 }
10425
10426 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10427 __copy_to_user(void __user *to, const void *from, unsigned long n)
10428 {
10429 might_fault();
10430 +
10431 return __copy_to_user_inatomic(to, from, n);
10432 }
10433
10434 static __always_inline unsigned long
10435 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10436 {
10437 + if ((long)n < 0)
10438 + return n;
10439 +
10440 /* Avoid zeroing the tail if the copy fails..
10441 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10442 * but as the zeroing behaviour is only significant when n is not
10443 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10444 __copy_from_user(void *to, const void __user *from, unsigned long n)
10445 {
10446 might_fault();
10447 +
10448 + pax_track_stack();
10449 +
10450 + if ((long)n < 0)
10451 + return n;
10452 +
10453 if (__builtin_constant_p(n)) {
10454 unsigned long ret;
10455
10456 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10457 return ret;
10458 }
10459 }
10460 + if (!__builtin_constant_p(n))
10461 + check_object_size(to, n, false);
10462 return __copy_from_user_ll(to, from, n);
10463 }
10464
10465 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10466 const void __user *from, unsigned long n)
10467 {
10468 might_fault();
10469 +
10470 + if ((long)n < 0)
10471 + return n;
10472 +
10473 if (__builtin_constant_p(n)) {
10474 unsigned long ret;
10475
10476 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10477 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10478 unsigned long n)
10479 {
10480 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10481 + if ((long)n < 0)
10482 + return n;
10483 +
10484 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10485 +}
10486 +
10487 +/**
10488 + * copy_to_user: - Copy a block of data into user space.
10489 + * @to: Destination address, in user space.
10490 + * @from: Source address, in kernel space.
10491 + * @n: Number of bytes to copy.
10492 + *
10493 + * Context: User context only. This function may sleep.
10494 + *
10495 + * Copy data from kernel space to user space.
10496 + *
10497 + * Returns number of bytes that could not be copied.
10498 + * On success, this will be zero.
10499 + */
10500 +static __always_inline unsigned long __must_check
10501 +copy_to_user(void __user *to, const void *from, unsigned long n)
10502 +{
10503 + if (access_ok(VERIFY_WRITE, to, n))
10504 + n = __copy_to_user(to, from, n);
10505 + return n;
10506 +}
10507 +
10508 +/**
10509 + * copy_from_user: - Copy a block of data from user space.
10510 + * @to: Destination address, in kernel space.
10511 + * @from: Source address, in user space.
10512 + * @n: Number of bytes to copy.
10513 + *
10514 + * Context: User context only. This function may sleep.
10515 + *
10516 + * Copy data from user space to kernel space.
10517 + *
10518 + * Returns number of bytes that could not be copied.
10519 + * On success, this will be zero.
10520 + *
10521 + * If some data could not be copied, this function will pad the copied
10522 + * data to the requested size using zero bytes.
10523 + */
10524 +static __always_inline unsigned long __must_check
10525 +copy_from_user(void *to, const void __user *from, unsigned long n)
10526 +{
10527 + if (access_ok(VERIFY_READ, from, n))
10528 + n = __copy_from_user(to, from, n);
10529 + else if ((long)n > 0) {
10530 + if (!__builtin_constant_p(n))
10531 + check_object_size(to, n, false);
10532 + memset(to, 0, n);
10533 + }
10534 + return n;
10535 }
10536
10537 -unsigned long __must_check copy_to_user(void __user *to,
10538 - const void *from, unsigned long n);
10539 -unsigned long __must_check copy_from_user(void *to,
10540 - const void __user *from,
10541 - unsigned long n);
10542 long __must_check strncpy_from_user(char *dst, const char __user *src,
10543 long count);
10544 long __must_check __strncpy_from_user(char *dst,
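
[Annotation] With this hunk copy_to_user()/copy_from_user() become inline on 32-bit and gain three defensive steps: a size that is negative when viewed as a signed long is returned unchanged (treated as a wholly failed copy), access_ok() gates the actual copy, and a rejected copy_from_user() zero-fills the destination so stale kernel data cannot leak to the caller; the check_object_size() calls feed the usercopy object-size validation declared later in uaccess.h. A userspace model of that control flow, with access_ok() and the low-level copy replaced by hypothetical stubs:

#include <stdio.h>
#include <string.h>

/* Stubs standing in for access_ok() and __copy_from_user(). */
static int fake_access_ok(const void *p, unsigned long n)
{
    (void)n;
    return p != NULL;            /* the real test validates the user range */
}

static unsigned long fake_raw_copy(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);         /* pretend the copy always succeeds */
    return 0;
}

static unsigned long copy_from_user_model(void *to, const void *from,
                                           unsigned long n)
{
    if ((long)n < 0)             /* huge/negative size: refuse outright */
        return n;
    if (fake_access_ok(from, n))
        return fake_raw_copy(to, from, n);
    memset(to, 0, n);            /* failed copy must not leak old contents */
    return n;
}

int main(void)
{
    char src[] = "hello", dst[8] = "XXXXXXX";

    printf("ok copy -> left %lu, dst=%s\n",
           copy_from_user_model(dst, src, sizeof(src)), dst);
    printf("bad ptr -> left %lu, dst zeroed=%d\n",
           copy_from_user_model(dst, NULL, sizeof(dst)), dst[0] == 0);
    return 0;
}
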
10545 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h
10546 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10547 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10548 @@ -9,6 +9,9 @@
10549 #include <linux/prefetch.h>
10550 #include <linux/lockdep.h>
10551 #include <asm/page.h>
10552 +#include <asm/pgtable.h>
10553 +
10554 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10555
10556 /*
10557 * Copy To/From Userspace
10558 @@ -19,113 +22,203 @@ __must_check unsigned long
10559 copy_user_generic(void *to, const void *from, unsigned len);
10560
10561 __must_check unsigned long
10562 -copy_to_user(void __user *to, const void *from, unsigned len);
10563 -__must_check unsigned long
10564 -copy_from_user(void *to, const void __user *from, unsigned len);
10565 -__must_check unsigned long
10566 copy_in_user(void __user *to, const void __user *from, unsigned len);
10567
10568 static __always_inline __must_check
10569 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10570 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10571 {
10572 - int ret = 0;
10573 + unsigned ret = 0;
10574
10575 might_fault();
10576 - if (!__builtin_constant_p(size))
10577 - return copy_user_generic(dst, (__force void *)src, size);
10578 +
10579 + if ((int)size < 0)
10580 + return size;
10581 +
10582 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10583 + if (!__access_ok(VERIFY_READ, src, size))
10584 + return size;
10585 +#endif
10586 +
10587 + if (!__builtin_constant_p(size)) {
10588 + check_object_size(dst, size, false);
10589 +
10590 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10591 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10592 + src += PAX_USER_SHADOW_BASE;
10593 +#endif
10594 +
10595 + return copy_user_generic(dst, (__force const void *)src, size);
10596 + }
10597 switch (size) {
10598 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10599 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10600 ret, "b", "b", "=q", 1);
10601 return ret;
10602 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10603 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10604 ret, "w", "w", "=r", 2);
10605 return ret;
10606 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10607 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10608 ret, "l", "k", "=r", 4);
10609 return ret;
10610 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10611 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10612 ret, "q", "", "=r", 8);
10613 return ret;
10614 case 10:
10615 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10616 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10617 ret, "q", "", "=r", 10);
10618 if (unlikely(ret))
10619 return ret;
10620 __get_user_asm(*(u16 *)(8 + (char *)dst),
10621 - (u16 __user *)(8 + (char __user *)src),
10622 + (const u16 __user *)(8 + (const char __user *)src),
10623 ret, "w", "w", "=r", 2);
10624 return ret;
10625 case 16:
10626 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10627 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10628 ret, "q", "", "=r", 16);
10629 if (unlikely(ret))
10630 return ret;
10631 __get_user_asm(*(u64 *)(8 + (char *)dst),
10632 - (u64 __user *)(8 + (char __user *)src),
10633 + (const u64 __user *)(8 + (const char __user *)src),
10634 ret, "q", "", "=r", 8);
10635 return ret;
10636 default:
10637 - return copy_user_generic(dst, (__force void *)src, size);
10638 +
10639 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10640 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10641 + src += PAX_USER_SHADOW_BASE;
10642 +#endif
10643 +
10644 + return copy_user_generic(dst, (__force const void *)src, size);
10645 }
10646 }
10647
10648 static __always_inline __must_check
10649 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10650 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10651 {
10652 - int ret = 0;
10653 + unsigned ret = 0;
10654
10655 might_fault();
10656 - if (!__builtin_constant_p(size))
10657 +
10658 + pax_track_stack();
10659 +
10660 + if ((int)size < 0)
10661 + return size;
10662 +
10663 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10664 + if (!__access_ok(VERIFY_WRITE, dst, size))
10665 + return size;
10666 +#endif
10667 +
10668 + if (!__builtin_constant_p(size)) {
10669 + check_object_size(src, size, true);
10670 +
10671 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10672 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10673 + dst += PAX_USER_SHADOW_BASE;
10674 +#endif
10675 +
10676 return copy_user_generic((__force void *)dst, src, size);
10677 + }
10678 switch (size) {
10679 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10680 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10681 ret, "b", "b", "iq", 1);
10682 return ret;
10683 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10684 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10685 ret, "w", "w", "ir", 2);
10686 return ret;
10687 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10688 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10689 ret, "l", "k", "ir", 4);
10690 return ret;
10691 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10692 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10693 ret, "q", "", "er", 8);
10694 return ret;
10695 case 10:
10696 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10697 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10698 ret, "q", "", "er", 10);
10699 if (unlikely(ret))
10700 return ret;
10701 asm("":::"memory");
10702 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10703 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10704 ret, "w", "w", "ir", 2);
10705 return ret;
10706 case 16:
10707 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10708 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10709 ret, "q", "", "er", 16);
10710 if (unlikely(ret))
10711 return ret;
10712 asm("":::"memory");
10713 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10714 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10715 ret, "q", "", "er", 8);
10716 return ret;
10717 default:
10718 +
10719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10720 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10721 + dst += PAX_USER_SHADOW_BASE;
10722 +#endif
10723 +
10724 return copy_user_generic((__force void *)dst, src, size);
10725 }
10726 }
10727
10728 static __always_inline __must_check
10729 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10730 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10731 +{
10732 + if (access_ok(VERIFY_WRITE, to, len))
10733 + len = __copy_to_user(to, from, len);
10734 + return len;
10735 +}
10736 +
10737 +static __always_inline __must_check
10738 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10739 +{
10740 + if ((int)len < 0)
10741 + return len;
10742 +
10743 + if (access_ok(VERIFY_READ, from, len))
10744 + len = __copy_from_user(to, from, len);
10745 + else if ((int)len > 0) {
10746 + if (!__builtin_constant_p(len))
10747 + check_object_size(to, len, false);
10748 + memset(to, 0, len);
10749 + }
10750 + return len;
10751 +}
10752 +
10753 +static __always_inline __must_check
10754 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10755 {
10756 - int ret = 0;
10757 + unsigned ret = 0;
10758
10759 might_fault();
10760 - if (!__builtin_constant_p(size))
10761 +
10762 + pax_track_stack();
10763 +
10764 + if ((int)size < 0)
10765 + return size;
10766 +
10767 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10768 + if (!__access_ok(VERIFY_READ, src, size))
10769 + return size;
10770 + if (!__access_ok(VERIFY_WRITE, dst, size))
10771 + return size;
10772 +#endif
10773 +
10774 + if (!__builtin_constant_p(size)) {
10775 +
10776 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10777 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10778 + src += PAX_USER_SHADOW_BASE;
10779 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10780 + dst += PAX_USER_SHADOW_BASE;
10781 +#endif
10782 +
10783 return copy_user_generic((__force void *)dst,
10784 - (__force void *)src, size);
10785 + (__force const void *)src, size);
10786 + }
10787 switch (size) {
10788 case 1: {
10789 u8 tmp;
10790 - __get_user_asm(tmp, (u8 __user *)src,
10791 + __get_user_asm(tmp, (const u8 __user *)src,
10792 ret, "b", "b", "=q", 1);
10793 if (likely(!ret))
10794 __put_user_asm(tmp, (u8 __user *)dst,
10795 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10796 }
10797 case 2: {
10798 u16 tmp;
10799 - __get_user_asm(tmp, (u16 __user *)src,
10800 + __get_user_asm(tmp, (const u16 __user *)src,
10801 ret, "w", "w", "=r", 2);
10802 if (likely(!ret))
10803 __put_user_asm(tmp, (u16 __user *)dst,
10804 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10805
10806 case 4: {
10807 u32 tmp;
10808 - __get_user_asm(tmp, (u32 __user *)src,
10809 + __get_user_asm(tmp, (const u32 __user *)src,
10810 ret, "l", "k", "=r", 4);
10811 if (likely(!ret))
10812 __put_user_asm(tmp, (u32 __user *)dst,
10813 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10814 }
10815 case 8: {
10816 u64 tmp;
10817 - __get_user_asm(tmp, (u64 __user *)src,
10818 + __get_user_asm(tmp, (const u64 __user *)src,
10819 ret, "q", "", "=r", 8);
10820 if (likely(!ret))
10821 __put_user_asm(tmp, (u64 __user *)dst,
10822 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10823 return ret;
10824 }
10825 default:
10826 +
10827 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10828 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10829 + src += PAX_USER_SHADOW_BASE;
10830 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10831 + dst += PAX_USER_SHADOW_BASE;
10832 +#endif
10833 +
10834 return copy_user_generic((__force void *)dst,
10835 - (__force void *)src, size);
10836 + (__force const void *)src, size);
10837 }
10838 }
10839
10840 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10841 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10842 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10843
10844 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10845 - unsigned size);
10846 +static __must_check __always_inline unsigned long
10847 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10848 +{
10849 + pax_track_stack();
10850 +
10851 + if ((int)size < 0)
10852 + return size;
10853
10854 -static __must_check __always_inline int
10855 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10856 + if (!__access_ok(VERIFY_READ, src, size))
10857 + return size;
10858 +
10859 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10860 + src += PAX_USER_SHADOW_BASE;
10861 +#endif
10862 +
10863 + return copy_user_generic(dst, (__force const void *)src, size);
10864 +}
10865 +
10866 +static __must_check __always_inline unsigned long
10867 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10868 {
10869 + if ((int)size < 0)
10870 + return size;
10871 +
10872 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10873 + if (!__access_ok(VERIFY_WRITE, dst, size))
10874 + return size;
10875 +
10876 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10877 + dst += PAX_USER_SHADOW_BASE;
10878 +#endif
10879 +
10880 return copy_user_generic((__force void *)dst, src, size);
10881 }
10882
10883 -extern long __copy_user_nocache(void *dst, const void __user *src,
10884 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10885 unsigned size, int zerorest);
10886
10887 -static inline int
10888 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10889 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10890 {
10891 might_sleep();
10892 +
10893 + if ((int)size < 0)
10894 + return size;
10895 +
10896 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10897 + if (!__access_ok(VERIFY_READ, src, size))
10898 + return size;
10899 +#endif
10900 +
10901 return __copy_user_nocache(dst, src, size, 1);
10902 }
10903
10904 -static inline int
10905 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10906 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10907 unsigned size)
10908 {
10909 + if ((int)size < 0)
10910 + return size;
10911 +
10912 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10913 + if (!__access_ok(VERIFY_READ, src, size))
10914 + return size;
10915 +#endif
10916 +
10917 return __copy_user_nocache(dst, src, size, 0);
10918 }
10919
10920 -unsigned long
10921 +extern unsigned long
10922 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10923
10924 #endif /* _ASM_X86_UACCESS_64_H */
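
The copy routines reworked above all follow one convention: validate the user range, return the number of bytes left uncopied (0 on success), and zero-fill the kernel-side buffer when a copy_from_user() source is rejected so callers never see stale data. Below is a minimal userspace sketch of that convention only; USER_LIMIT, range_ok() and demo_copy_from_user() are made-up stand-ins, not names from the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Made-up stand-in for the real user-space address limit check. */
#define USER_LIMIT ((uintptr_t)1 << 47)

static int range_ok(const void *p, size_t len)
{
    uintptr_t a = (uintptr_t)p;
    return len <= USER_LIMIT && a <= USER_LIMIT - len;
}

/* Mirrors the copy_from_user() convention above: return the number of
 * bytes NOT copied (0 on success) and zero-fill the destination when
 * the source range is rejected. */
static size_t demo_copy_from_user(void *dst, const void *src, size_t len)
{
    if (!range_ok(src, len)) {
        memset(dst, 0, len);
        return len;
    }
    memcpy(dst, src, len);
    return 0;
}

int main(void)
{
    char buf[8];
    size_t left = demo_copy_from_user(buf, "hello", 6);
    printf("bytes left: %zu, buf: %s\n", left, buf);
    return 0;
}
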
10925 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess.h linux-2.6.32.42/arch/x86/include/asm/uaccess.h
10926 --- linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
10927 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
10928 @@ -8,12 +8,15 @@
10929 #include <linux/thread_info.h>
10930 #include <linux/prefetch.h>
10931 #include <linux/string.h>
10932 +#include <linux/sched.h>
10933 #include <asm/asm.h>
10934 #include <asm/page.h>
10935
10936 #define VERIFY_READ 0
10937 #define VERIFY_WRITE 1
10938
10939 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10940 +
10941 /*
10942 * The fs value determines whether argument validity checking should be
10943 * performed or not. If get_fs() == USER_DS, checking is performed, with
10944 @@ -29,7 +32,12 @@
10945
10946 #define get_ds() (KERNEL_DS)
10947 #define get_fs() (current_thread_info()->addr_limit)
10948 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10949 +void __set_fs(mm_segment_t x);
10950 +void set_fs(mm_segment_t x);
10951 +#else
10952 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10953 +#endif
10954
10955 #define segment_eq(a, b) ((a).seg == (b).seg)
10956
10957 @@ -77,7 +85,33 @@
10958 * checks that the pointer is in the user space range - after calling
10959 * this function, memory access functions may still return -EFAULT.
10960 */
10961 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10962 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10963 +#define access_ok(type, addr, size) \
10964 +({ \
10965 + long __size = size; \
10966 + unsigned long __addr = (unsigned long)addr; \
10967 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10968 + unsigned long __end_ao = __addr + __size - 1; \
10969 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10970 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10971 + while(__addr_ao <= __end_ao) { \
10972 + char __c_ao; \
10973 + __addr_ao += PAGE_SIZE; \
10974 + if (__size > PAGE_SIZE) \
10975 + cond_resched(); \
10976 + if (__get_user(__c_ao, (char __user *)__addr)) \
10977 + break; \
10978 + if (type != VERIFY_WRITE) { \
10979 + __addr = __addr_ao; \
10980 + continue; \
10981 + } \
10982 + if (__put_user(__c_ao, (char __user *)__addr)) \
10983 + break; \
10984 + __addr = __addr_ao; \
10985 + } \
10986 + } \
10987 + __ret_ao; \
10988 +})
10989
10990 /*
10991 * The exception table consists of pairs of addresses: the first is the
10992 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
10993 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10994 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10995
10996 -
10997 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10998 +#define __copyuser_seg "gs;"
10999 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11000 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11001 +#else
11002 +#define __copyuser_seg
11003 +#define __COPYUSER_SET_ES
11004 +#define __COPYUSER_RESTORE_ES
11005 +#endif
11006
11007 #ifdef CONFIG_X86_32
11008 #define __put_user_asm_u64(x, addr, err, errret) \
11009 - asm volatile("1: movl %%eax,0(%2)\n" \
11010 - "2: movl %%edx,4(%2)\n" \
11011 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11012 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11013 "3:\n" \
11014 ".section .fixup,\"ax\"\n" \
11015 "4: movl %3,%0\n" \
11016 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11017 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11018
11019 #define __put_user_asm_ex_u64(x, addr) \
11020 - asm volatile("1: movl %%eax,0(%1)\n" \
11021 - "2: movl %%edx,4(%1)\n" \
11022 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11023 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11024 "3:\n" \
11025 _ASM_EXTABLE(1b, 2b - 1b) \
11026 _ASM_EXTABLE(2b, 3b - 2b) \
11027 @@ -374,7 +416,7 @@ do { \
11028 } while (0)
11029
11030 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11031 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11032 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11033 "2:\n" \
11034 ".section .fixup,\"ax\"\n" \
11035 "3: mov %3,%0\n" \
11036 @@ -382,7 +424,7 @@ do { \
11037 " jmp 2b\n" \
11038 ".previous\n" \
11039 _ASM_EXTABLE(1b, 3b) \
11040 - : "=r" (err), ltype(x) \
11041 + : "=r" (err), ltype (x) \
11042 : "m" (__m(addr)), "i" (errret), "0" (err))
11043
11044 #define __get_user_size_ex(x, ptr, size) \
11045 @@ -407,7 +449,7 @@ do { \
11046 } while (0)
11047
11048 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11049 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11050 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11051 "2:\n" \
11052 _ASM_EXTABLE(1b, 2b - 1b) \
11053 : ltype(x) : "m" (__m(addr)))
11054 @@ -424,13 +466,24 @@ do { \
11055 int __gu_err; \
11056 unsigned long __gu_val; \
11057 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11058 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11059 + (x) = (__typeof__(*(ptr)))__gu_val; \
11060 __gu_err; \
11061 })
11062
11063 /* FIXME: this hack is definitely wrong -AK */
11064 struct __large_struct { unsigned long buf[100]; };
11065 -#define __m(x) (*(struct __large_struct __user *)(x))
11066 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11067 +#define ____m(x) \
11068 +({ \
11069 + unsigned long ____x = (unsigned long)(x); \
11070 + if (____x < PAX_USER_SHADOW_BASE) \
11071 + ____x += PAX_USER_SHADOW_BASE; \
11072 + (void __user *)____x; \
11073 +})
11074 +#else
11075 +#define ____m(x) (x)
11076 +#endif
11077 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11078
11079 /*
11080 * Tell gcc we read from memory instead of writing: this is because
11081 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11082 * aliasing issues.
11083 */
11084 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11085 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11086 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11087 "2:\n" \
11088 ".section .fixup,\"ax\"\n" \
11089 "3: mov %3,%0\n" \
11090 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11091 ".previous\n" \
11092 _ASM_EXTABLE(1b, 3b) \
11093 : "=r"(err) \
11094 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11095 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11096
11097 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11098 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11099 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11100 "2:\n" \
11101 _ASM_EXTABLE(1b, 2b - 1b) \
11102 : : ltype(x), "m" (__m(addr)))
11103 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11104 * On error, the variable @x is set to zero.
11105 */
11106
11107 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11108 +#define __get_user(x, ptr) get_user((x), (ptr))
11109 +#else
11110 #define __get_user(x, ptr) \
11111 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11112 +#endif
11113
11114 /**
11115 * __put_user: - Write a simple value into user space, with less checking.
11116 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11117 * Returns zero on success, or -EFAULT on error.
11118 */
11119
11120 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11121 +#define __put_user(x, ptr) put_user((x), (ptr))
11122 +#else
11123 #define __put_user(x, ptr) \
11124 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11125 +#endif
11126
11127 #define __get_user_unaligned __get_user
11128 #define __put_user_unaligned __put_user
11129 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11130 #define get_user_ex(x, ptr) do { \
11131 unsigned long __gue_val; \
11132 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11133 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11134 + (x) = (__typeof__(*(ptr)))__gue_val; \
11135 } while (0)
11136
11137 #ifdef CONFIG_X86_WP_WORKS_OK
11138 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11139
11140 #define ARCH_HAS_NOCACHE_UACCESS 1
11141
11142 +#define ARCH_HAS_SORT_EXTABLE
11143 #ifdef CONFIG_X86_32
11144 # include "uaccess_32.h"
11145 #else
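
The reworked access_ok() above no longer performs a bare range check: when a range crosses a page boundary it walks the whole span, reading one byte per page (and writing it back for VERIFY_WRITE) so any fault is taken up front. A rough userspace sketch of that walk follows, assuming a 4 KiB page size; prefault_range() and PAGE_SIZE_DEMO are invented names for illustration.

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE_DEMO 4096UL
#define PAGE_MASK_DEMO (~(PAGE_SIZE_DEMO - 1))

/* Touch one byte on every page covered by [addr, addr + len): read it,
 * and for writable checks write the same value back, roughly what the
 * access_ok() loop above does with __get_user()/__put_user(). */
static void prefault_range(volatile char *addr, size_t len, int writable)
{
    uintptr_t start = (uintptr_t)addr;
    uintptr_t end = start + len - 1;
    uintptr_t page;

    if (len == 0)
        return;

    for (page = start & PAGE_MASK_DEMO; page <= end; page += PAGE_SIZE_DEMO) {
        volatile char *byte = (volatile char *)(page > start ? page : start);
        char c = *byte;        /* a read faults the page in */
        if (writable)
            *byte = c;         /* write-back mirrors VERIFY_WRITE */
    }
}

int main(void)
{
    static char buf[3 * 4096];
    prefault_range(buf, sizeof(buf), 1);
    return 0;
}
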
11146 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vgtod.h linux-2.6.32.42/arch/x86/include/asm/vgtod.h
11147 --- linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11148 +++ linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11149 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11150 int sysctl_enabled;
11151 struct timezone sys_tz;
11152 struct { /* extract of a clocksource struct */
11153 + char name[8];
11154 cycle_t (*vread)(void);
11155 cycle_t cycle_last;
11156 cycle_t mask;
11157 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vmi.h linux-2.6.32.42/arch/x86/include/asm/vmi.h
11158 --- linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11159 +++ linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11160 @@ -191,6 +191,7 @@ struct vrom_header {
11161 u8 reserved[96]; /* Reserved for headers */
11162 char vmi_init[8]; /* VMI_Init jump point */
11163 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11164 + char rom_data[8048]; /* rest of the option ROM */
11165 } __attribute__((packed));
11166
11167 struct pnp_header {
11168 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vsyscall.h linux-2.6.32.42/arch/x86/include/asm/vsyscall.h
11169 --- linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11170 +++ linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11171 @@ -15,9 +15,10 @@ enum vsyscall_num {
11172
11173 #ifdef __KERNEL__
11174 #include <linux/seqlock.h>
11175 +#include <linux/getcpu.h>
11176 +#include <linux/time.h>
11177
11178 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11179 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11180
11181 /* Definitions for CONFIG_GENERIC_TIME definitions */
11182 #define __section_vsyscall_gtod_data __attribute__ \
11183 @@ -31,7 +32,6 @@ enum vsyscall_num {
11184 #define VGETCPU_LSL 2
11185
11186 extern int __vgetcpu_mode;
11187 -extern volatile unsigned long __jiffies;
11188
11189 /* kernel space (writeable) */
11190 extern int vgetcpu_mode;
11191 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11192
11193 extern void map_vsyscall(void);
11194
11195 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11196 +extern time_t vtime(time_t *t);
11197 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11198 #endif /* __KERNEL__ */
11199
11200 #endif /* _ASM_X86_VSYSCALL_H */
11201 diff -urNp linux-2.6.32.42/arch/x86/include/asm/xsave.h linux-2.6.32.42/arch/x86/include/asm/xsave.h
11202 --- linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11203 +++ linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11204 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11205 static inline int xsave_user(struct xsave_struct __user *buf)
11206 {
11207 int err;
11208 +
11209 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11210 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11211 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11212 +#endif
11213 +
11214 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11215 "2:\n"
11216 ".section .fixup,\"ax\"\n"
11217 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11218 u32 lmask = mask;
11219 u32 hmask = mask >> 32;
11220
11221 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11222 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11223 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11224 +#endif
11225 +
11226 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11227 "2:\n"
11228 ".section .fixup,\"ax\"\n"
11229 diff -urNp linux-2.6.32.42/arch/x86/Kconfig linux-2.6.32.42/arch/x86/Kconfig
11230 --- linux-2.6.32.42/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11231 +++ linux-2.6.32.42/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11232 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11233
11234 config X86_32_LAZY_GS
11235 def_bool y
11236 - depends on X86_32 && !CC_STACKPROTECTOR
11237 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11238
11239 config KTIME_SCALAR
11240 def_bool X86_32
11241 @@ -1008,7 +1008,7 @@ choice
11242
11243 config NOHIGHMEM
11244 bool "off"
11245 - depends on !X86_NUMAQ
11246 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11247 ---help---
11248 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11249 However, the address space of 32-bit x86 processors is only 4
11250 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11251
11252 config HIGHMEM4G
11253 bool "4GB"
11254 - depends on !X86_NUMAQ
11255 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11256 ---help---
11257 Select this if you have a 32-bit processor and between 1 and 4
11258 gigabytes of physical RAM.
11259 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11260 hex
11261 default 0xB0000000 if VMSPLIT_3G_OPT
11262 default 0x80000000 if VMSPLIT_2G
11263 - default 0x78000000 if VMSPLIT_2G_OPT
11264 + default 0x70000000 if VMSPLIT_2G_OPT
11265 default 0x40000000 if VMSPLIT_1G
11266 default 0xC0000000
11267 depends on X86_32
11268 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11269
11270 config EFI
11271 bool "EFI runtime service support"
11272 - depends on ACPI
11273 + depends on ACPI && !PAX_KERNEXEC
11274 ---help---
11275 This enables the kernel to use EFI runtime services that are
11276 available (such as the EFI variable services).
11277 @@ -1460,6 +1460,7 @@ config SECCOMP
11278
11279 config CC_STACKPROTECTOR
11280 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11281 + depends on X86_64 || !PAX_MEMORY_UDEREF
11282 ---help---
11283 This option turns on the -fstack-protector GCC feature. This
11284 feature puts, at the beginning of functions, a canary value on
11285 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11286 config PHYSICAL_START
11287 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11288 default "0x1000000"
11289 + range 0x400000 0x40000000
11290 ---help---
11291 This gives the physical address where the kernel is loaded.
11292
11293 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11294 hex
11295 prompt "Alignment value to which kernel should be aligned" if X86_32
11296 default "0x1000000"
11297 + range 0x400000 0x1000000 if PAX_KERNEXEC
11298 range 0x2000 0x1000000
11299 ---help---
11300 This value puts the alignment restrictions on physical address
11301 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11302 Say N if you want to disable CPU hotplug.
11303
11304 config COMPAT_VDSO
11305 - def_bool y
11306 + def_bool n
11307 prompt "Compat VDSO support"
11308 depends on X86_32 || IA32_EMULATION
11309 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11310 ---help---
11311 Map the 32-bit VDSO to the predictable old-style address too.
11312
11313 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.cpu linux-2.6.32.42/arch/x86/Kconfig.cpu
11314 --- linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11315 +++ linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11316 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11317
11318 config X86_F00F_BUG
11319 def_bool y
11320 - depends on M586MMX || M586TSC || M586 || M486 || M386
11321 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11322
11323 config X86_WP_WORKS_OK
11324 def_bool y
11325 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11326
11327 config X86_ALIGNMENT_16
11328 def_bool y
11329 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11330 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11331
11332 config X86_INTEL_USERCOPY
11333 def_bool y
11334 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11335 # generates cmov.
11336 config X86_CMOV
11337 def_bool y
11338 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11339 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11340
11341 config X86_MINIMUM_CPU_FAMILY
11342 int
11343 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.debug linux-2.6.32.42/arch/x86/Kconfig.debug
11344 --- linux-2.6.32.42/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11345 +++ linux-2.6.32.42/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11346 @@ -99,7 +99,7 @@ config X86_PTDUMP
11347 config DEBUG_RODATA
11348 bool "Write protect kernel read-only data structures"
11349 default y
11350 - depends on DEBUG_KERNEL
11351 + depends on DEBUG_KERNEL && BROKEN
11352 ---help---
11353 Mark the kernel read-only data as write-protected in the pagetables,
11354 in order to catch accidental (and incorrect) writes to such const
11355 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S
11356 --- linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11357 +++ linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-04-17 15:56:46.000000000 -0400
11358 @@ -104,7 +104,7 @@ _start:
11359 movl %eax, %ecx
11360 orl %edx, %ecx
11361 jz 1f
11362 - movl $0xc0000080, %ecx
11363 + mov $MSR_EFER, %ecx
11364 wrmsr
11365 1:
11366
11367 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c
11368 --- linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11369 +++ linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
11370 @@ -11,11 +11,12 @@
11371 #include <linux/cpumask.h>
11372 #include <asm/segment.h>
11373 #include <asm/desc.h>
11374 +#include <asm/e820.h>
11375
11376 #include "realmode/wakeup.h"
11377 #include "sleep.h"
11378
11379 -unsigned long acpi_wakeup_address;
11380 +unsigned long acpi_wakeup_address = 0x2000;
11381 unsigned long acpi_realmode_flags;
11382
11383 /* address in low memory of the wakeup routine. */
11384 @@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
11385 header->trampoline_segment = setup_trampoline() >> 4;
11386 #ifdef CONFIG_SMP
11387 stack_start.sp = temp_stack + sizeof(temp_stack);
11388 +
11389 + pax_open_kernel();
11390 early_gdt_descr.address =
11391 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11392 + pax_close_kernel();
11393 +
11394 initial_gs = per_cpu_offset(smp_processor_id());
11395 #endif
11396 initial_code = (unsigned long)wakeup_long64;
11397 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11398 return;
11399 }
11400
11401 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11402 -
11403 - if (!acpi_realmode) {
11404 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11405 - return;
11406 - }
11407 -
11408 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11409 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11410 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11411 }
11412
11413
11414 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S
11415 --- linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11416 +++ linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11417 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11418 # and restore the stack ... but you need gdt for this to work
11419 movl saved_context_esp, %esp
11420
11421 - movl %cs:saved_magic, %eax
11422 - cmpl $0x12345678, %eax
11423 + cmpl $0x12345678, saved_magic
11424 jne bogus_magic
11425
11426 # jump to place where we left off
11427 - movl saved_eip, %eax
11428 - jmp *%eax
11429 + jmp *(saved_eip)
11430
11431 bogus_magic:
11432 jmp bogus_magic
11433 diff -urNp linux-2.6.32.42/arch/x86/kernel/alternative.c linux-2.6.32.42/arch/x86/kernel/alternative.c
11434 --- linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11435 +++ linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11436 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11437
11438 BUG_ON(p->len > MAX_PATCH_LEN);
11439 /* prep the buffer with the original instructions */
11440 - memcpy(insnbuf, p->instr, p->len);
11441 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11442 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11443 (unsigned long)p->instr, p->len);
11444
11445 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11446 if (smp_alt_once)
11447 free_init_pages("SMP alternatives",
11448 (unsigned long)__smp_locks,
11449 - (unsigned long)__smp_locks_end);
11450 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11451
11452 restart_nmi();
11453 }
11454 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11455 * instructions. And on the local CPU you need to be protected again NMI or MCE
11456 * handlers seeing an inconsistent instruction while you patch.
11457 */
11458 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11459 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11460 size_t len)
11461 {
11462 unsigned long flags;
11463 local_irq_save(flags);
11464 - memcpy(addr, opcode, len);
11465 +
11466 + pax_open_kernel();
11467 + memcpy(ktla_ktva(addr), opcode, len);
11468 sync_core();
11469 + pax_close_kernel();
11470 +
11471 local_irq_restore(flags);
11472 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11473 that causes hangs on some VIA CPUs. */
11474 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11475 */
11476 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11477 {
11478 - unsigned long flags;
11479 - char *vaddr;
11480 + unsigned char *vaddr = ktla_ktva(addr);
11481 struct page *pages[2];
11482 - int i;
11483 + size_t i;
11484
11485 if (!core_kernel_text((unsigned long)addr)) {
11486 - pages[0] = vmalloc_to_page(addr);
11487 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11488 + pages[0] = vmalloc_to_page(vaddr);
11489 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11490 } else {
11491 - pages[0] = virt_to_page(addr);
11492 + pages[0] = virt_to_page(vaddr);
11493 WARN_ON(!PageReserved(pages[0]));
11494 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11495 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11496 }
11497 BUG_ON(!pages[0]);
11498 - local_irq_save(flags);
11499 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11500 - if (pages[1])
11501 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11502 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11503 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11504 - clear_fixmap(FIX_TEXT_POKE0);
11505 - if (pages[1])
11506 - clear_fixmap(FIX_TEXT_POKE1);
11507 - local_flush_tlb();
11508 - sync_core();
11509 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11510 - that causes hangs on some VIA CPUs. */
11511 + text_poke_early(addr, opcode, len);
11512 for (i = 0; i < len; i++)
11513 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11514 - local_irq_restore(flags);
11515 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11516 return addr;
11517 }
11518 diff -urNp linux-2.6.32.42/arch/x86/kernel/amd_iommu.c linux-2.6.32.42/arch/x86/kernel/amd_iommu.c
11519 --- linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11520 +++ linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11521 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11522 }
11523 }
11524
11525 -static struct dma_map_ops amd_iommu_dma_ops = {
11526 +static const struct dma_map_ops amd_iommu_dma_ops = {
11527 .alloc_coherent = alloc_coherent,
11528 .free_coherent = free_coherent,
11529 .map_page = map_page,
11530 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/apic.c linux-2.6.32.42/arch/x86/kernel/apic/apic.c
11531 --- linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11532 +++ linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11533 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11534 apic_write(APIC_ESR, 0);
11535 v1 = apic_read(APIC_ESR);
11536 ack_APIC_irq();
11537 - atomic_inc(&irq_err_count);
11538 + atomic_inc_unchecked(&irq_err_count);
11539
11540 /*
11541 * Here is what the APIC error bits mean:
11542 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11543 u16 *bios_cpu_apicid;
11544 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11545
11546 + pax_track_stack();
11547 +
11548 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11549 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11550
11551 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c
11552 --- linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11553 +++ linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11554 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11555 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11556 GFP_ATOMIC);
11557 if (!ioapic_entries)
11558 - return 0;
11559 + return NULL;
11560
11561 for (apic = 0; apic < nr_ioapics; apic++) {
11562 ioapic_entries[apic] =
11563 @@ -733,7 +733,7 @@ nomem:
11564 kfree(ioapic_entries[apic]);
11565 kfree(ioapic_entries);
11566
11567 - return 0;
11568 + return NULL;
11569 }
11570
11571 /*
11572 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11573 }
11574 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11575
11576 -void lock_vector_lock(void)
11577 +void lock_vector_lock(void) __acquires(vector_lock)
11578 {
11579 /* Used to the online set of cpus does not change
11580 * during assign_irq_vector.
11581 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11582 spin_lock(&vector_lock);
11583 }
11584
11585 -void unlock_vector_lock(void)
11586 +void unlock_vector_lock(void) __releases(vector_lock)
11587 {
11588 spin_unlock(&vector_lock);
11589 }
11590 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11591 ack_APIC_irq();
11592 }
11593
11594 -atomic_t irq_mis_count;
11595 +atomic_unchecked_t irq_mis_count;
11596
11597 static void ack_apic_level(unsigned int irq)
11598 {
11599 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11600
11601 /* Tail end of version 0x11 I/O APIC bug workaround */
11602 if (!(v & (1 << (i & 0x1f)))) {
11603 - atomic_inc(&irq_mis_count);
11604 + atomic_inc_unchecked(&irq_mis_count);
11605 spin_lock(&ioapic_lock);
11606 __mask_and_edge_IO_APIC_irq(cfg);
11607 __unmask_and_level_IO_APIC_irq(cfg);
11608 diff -urNp linux-2.6.32.42/arch/x86/kernel/apm_32.c linux-2.6.32.42/arch/x86/kernel/apm_32.c
11609 --- linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11610 +++ linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11611 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11612 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11613 * even though they are called in protected mode.
11614 */
11615 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11616 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11617 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11618
11619 static const char driver_version[] = "1.16ac"; /* no spaces */
11620 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11621 BUG_ON(cpu != 0);
11622 gdt = get_cpu_gdt_table(cpu);
11623 save_desc_40 = gdt[0x40 / 8];
11624 +
11625 + pax_open_kernel();
11626 gdt[0x40 / 8] = bad_bios_desc;
11627 + pax_close_kernel();
11628
11629 apm_irq_save(flags);
11630 APM_DO_SAVE_SEGS;
11631 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11632 &call->esi);
11633 APM_DO_RESTORE_SEGS;
11634 apm_irq_restore(flags);
11635 +
11636 + pax_open_kernel();
11637 gdt[0x40 / 8] = save_desc_40;
11638 + pax_close_kernel();
11639 +
11640 put_cpu();
11641
11642 return call->eax & 0xff;
11643 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11644 BUG_ON(cpu != 0);
11645 gdt = get_cpu_gdt_table(cpu);
11646 save_desc_40 = gdt[0x40 / 8];
11647 +
11648 + pax_open_kernel();
11649 gdt[0x40 / 8] = bad_bios_desc;
11650 + pax_close_kernel();
11651
11652 apm_irq_save(flags);
11653 APM_DO_SAVE_SEGS;
11654 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11655 &call->eax);
11656 APM_DO_RESTORE_SEGS;
11657 apm_irq_restore(flags);
11658 +
11659 + pax_open_kernel();
11660 gdt[0x40 / 8] = save_desc_40;
11661 + pax_close_kernel();
11662 +
11663 put_cpu();
11664 return error;
11665 }
11666 @@ -975,7 +989,7 @@ recalc:
11667
11668 static void apm_power_off(void)
11669 {
11670 - unsigned char po_bios_call[] = {
11671 + const unsigned char po_bios_call[] = {
11672 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11673 0x8e, 0xd0, /* movw ax,ss */
11674 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11675 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11676 * code to that CPU.
11677 */
11678 gdt = get_cpu_gdt_table(0);
11679 +
11680 + pax_open_kernel();
11681 set_desc_base(&gdt[APM_CS >> 3],
11682 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11683 set_desc_base(&gdt[APM_CS_16 >> 3],
11684 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11685 set_desc_base(&gdt[APM_DS >> 3],
11686 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11687 + pax_close_kernel();
11688
11689 proc_create("apm", 0, NULL, &apm_file_ops);
11690
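
The pax_open_kernel()/pax_close_kernel() pairs added around the GDT updates above all follow one pattern: the descriptor table normally sits in read-only memory, and write access is lifted only for the few instructions that patch it. The snippet below is a loose userspace analogy using mprotect() on an anonymous mapping; it is not the kernel mechanism, just the shape of the open/patch/close sequence.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED)
        return 1;

    strcpy(table, "initial descriptor");
    mprotect(table, pagesz, PROT_READ);                /* normally read-only */

    mprotect(table, pagesz, PROT_READ | PROT_WRITE);   /* "open"  */
    strcpy(table, "patched descriptor");
    mprotect(table, pagesz, PROT_READ);                /* "close" */

    printf("%s\n", table);
    munmap(table, pagesz);
    return 0;
}
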
11691 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c
11692 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11693 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11694 @@ -51,7 +51,6 @@ void foo(void)
11695 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11696 BLANK();
11697
11698 - OFFSET(TI_task, thread_info, task);
11699 OFFSET(TI_exec_domain, thread_info, exec_domain);
11700 OFFSET(TI_flags, thread_info, flags);
11701 OFFSET(TI_status, thread_info, status);
11702 @@ -60,6 +59,8 @@ void foo(void)
11703 OFFSET(TI_restart_block, thread_info, restart_block);
11704 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11705 OFFSET(TI_cpu, thread_info, cpu);
11706 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11707 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11708 BLANK();
11709
11710 OFFSET(GDS_size, desc_ptr, size);
11711 @@ -99,6 +100,7 @@ void foo(void)
11712
11713 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11714 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11715 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11716 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11717 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11718 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11719 @@ -115,6 +117,11 @@ void foo(void)
11720 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11721 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11722 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11723 +
11724 +#ifdef CONFIG_PAX_KERNEXEC
11725 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11726 +#endif
11727 +
11728 #endif
11729
11730 #ifdef CONFIG_XEN
11731 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c
11732 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11733 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11734 @@ -44,6 +44,8 @@ int main(void)
11735 ENTRY(addr_limit);
11736 ENTRY(preempt_count);
11737 ENTRY(status);
11738 + ENTRY(lowest_stack);
11739 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11740 #ifdef CONFIG_IA32_EMULATION
11741 ENTRY(sysenter_return);
11742 #endif
11743 @@ -63,6 +65,18 @@ int main(void)
11744 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11745 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11746 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11747 +
11748 +#ifdef CONFIG_PAX_KERNEXEC
11749 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11750 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11751 +#endif
11752 +
11753 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11754 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11755 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11756 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11757 +#endif
11758 +
11759 #endif
11760
11761
11762 @@ -115,6 +129,7 @@ int main(void)
11763 ENTRY(cr8);
11764 BLANK();
11765 #undef ENTRY
11766 + DEFINE(TSS_size, sizeof(struct tss_struct));
11767 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11768 BLANK();
11769 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11770 @@ -130,6 +145,7 @@ int main(void)
11771
11772 BLANK();
11773 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11774 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11775 #ifdef CONFIG_XEN
11776 BLANK();
11777 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11778 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/amd.c
11779 --- linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
11780 +++ linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
11781 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
11782 unsigned int size)
11783 {
11784 /* AMD errata T13 (order #21922) */
11785 - if ((c->x86 == 6)) {
11786 + if (c->x86 == 6) {
11787 /* Duron Rev A0 */
11788 if (c->x86_model == 3 && c->x86_mask == 0)
11789 size = 64;
11790 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/common.c linux-2.6.32.42/arch/x86/kernel/cpu/common.c
11791 --- linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11792 +++ linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11793 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11794
11795 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11796
11797 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11798 -#ifdef CONFIG_X86_64
11799 - /*
11800 - * We need valid kernel segments for data and code in long mode too
11801 - * IRET will check the segment types kkeil 2000/10/28
11802 - * Also sysret mandates a special GDT layout
11803 - *
11804 - * TLS descriptors are currently at a different place compared to i386.
11805 - * Hopefully nobody expects them at a fixed place (Wine?)
11806 - */
11807 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11808 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11809 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11810 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11811 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11812 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11813 -#else
11814 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11815 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11816 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11817 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11818 - /*
11819 - * Segments used for calling PnP BIOS have byte granularity.
11820 - * They code segments and data segments have fixed 64k limits,
11821 - * the transfer segment sizes are set at run time.
11822 - */
11823 - /* 32-bit code */
11824 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11825 - /* 16-bit code */
11826 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11827 - /* 16-bit data */
11828 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11829 - /* 16-bit data */
11830 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11831 - /* 16-bit data */
11832 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11833 - /*
11834 - * The APM segments have byte granularity and their bases
11835 - * are set at run time. All have 64k limits.
11836 - */
11837 - /* 32-bit code */
11838 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11839 - /* 16-bit code */
11840 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11841 - /* data */
11842 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11843 -
11844 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11845 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11846 - GDT_STACK_CANARY_INIT
11847 -#endif
11848 -} };
11849 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11850 -
11851 static int __init x86_xsave_setup(char *s)
11852 {
11853 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11854 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11855 {
11856 struct desc_ptr gdt_descr;
11857
11858 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11859 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11860 gdt_descr.size = GDT_SIZE - 1;
11861 load_gdt(&gdt_descr);
11862 /* Reload the per-cpu base */
11863 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11864 /* Filter out anything that depends on CPUID levels we don't have */
11865 filter_cpuid_features(c, true);
11866
11867 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11868 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11869 +#endif
11870 +
11871 /* If the model name is still unset, do table lookup. */
11872 if (!c->x86_model_id[0]) {
11873 const char *p;
11874 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11875 }
11876 __setup("clearcpuid=", setup_disablecpuid);
11877
11878 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11879 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11880 +
11881 #ifdef CONFIG_X86_64
11882 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11883
11884 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11885 EXPORT_PER_CPU_SYMBOL(current_task);
11886
11887 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11888 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11889 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11890 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11891
11892 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11893 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11894 {
11895 memset(regs, 0, sizeof(struct pt_regs));
11896 regs->fs = __KERNEL_PERCPU;
11897 - regs->gs = __KERNEL_STACK_CANARY;
11898 + savesegment(gs, regs->gs);
11899
11900 return regs;
11901 }
11902 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11903 int i;
11904
11905 cpu = stack_smp_processor_id();
11906 - t = &per_cpu(init_tss, cpu);
11907 + t = init_tss + cpu;
11908 orig_ist = &per_cpu(orig_ist, cpu);
11909
11910 #ifdef CONFIG_NUMA
11911 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11912 switch_to_new_gdt(cpu);
11913 loadsegment(fs, 0);
11914
11915 - load_idt((const struct desc_ptr *)&idt_descr);
11916 + load_idt(&idt_descr);
11917
11918 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11919 syscall_init();
11920 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11921 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11922 barrier();
11923
11924 - check_efer();
11925 if (cpu != 0)
11926 enable_x2apic();
11927
11928 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11929 {
11930 int cpu = smp_processor_id();
11931 struct task_struct *curr = current;
11932 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11933 + struct tss_struct *t = init_tss + cpu;
11934 struct thread_struct *thread = &curr->thread;
11935
11936 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11937 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel.c linux-2.6.32.42/arch/x86/kernel/cpu/intel.c
11938 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11939 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11940 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11941 * Update the IDT descriptor and reload the IDT so that
11942 * it uses the read-only mapped virtual address.
11943 */
11944 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11945 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11946 load_idt(&idt_descr);
11947 }
11948 #endif
11949 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c
11950 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11951 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11952 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11953 return ret;
11954 }
11955
11956 -static struct sysfs_ops sysfs_ops = {
11957 +static const struct sysfs_ops sysfs_ops = {
11958 .show = show,
11959 .store = store,
11960 };
11961 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/Makefile linux-2.6.32.42/arch/x86/kernel/cpu/Makefile
11962 --- linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
11963 +++ linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
11964 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
11965 CFLAGS_REMOVE_common.o = -pg
11966 endif
11967
11968 -# Make sure load_percpu_segment has no stackprotector
11969 -nostackp := $(call cc-option, -fno-stack-protector)
11970 -CFLAGS_common.o := $(nostackp)
11971 -
11972 obj-y := intel_cacheinfo.o addon_cpuid_features.o
11973 obj-y += proc.o capflags.o powerflags.o common.o
11974 obj-y += vmware.o hypervisor.o sched.o
11975 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c
11976 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
11977 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
11978 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
11979 return ret;
11980 }
11981
11982 -static struct sysfs_ops threshold_ops = {
11983 +static const struct sysfs_ops threshold_ops = {
11984 .show = show,
11985 .store = store,
11986 };
11987 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c
11988 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
11989 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
11990 @@ -43,6 +43,7 @@
11991 #include <asm/ipi.h>
11992 #include <asm/mce.h>
11993 #include <asm/msr.h>
11994 +#include <asm/local.h>
11995
11996 #include "mce-internal.h"
11997
11998 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
11999 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12000 m->cs, m->ip);
12001
12002 - if (m->cs == __KERNEL_CS)
12003 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12004 print_symbol("{%s}", m->ip);
12005 pr_cont("\n");
12006 }
12007 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12008
12009 #define PANIC_TIMEOUT 5 /* 5 seconds */
12010
12011 -static atomic_t mce_paniced;
12012 +static atomic_unchecked_t mce_paniced;
12013
12014 static int fake_panic;
12015 -static atomic_t mce_fake_paniced;
12016 +static atomic_unchecked_t mce_fake_paniced;
12017
12018 /* Panic in progress. Enable interrupts and wait for final IPI */
12019 static void wait_for_panic(void)
12020 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12021 /*
12022 * Make sure only one CPU runs in machine check panic
12023 */
12024 - if (atomic_inc_return(&mce_paniced) > 1)
12025 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12026 wait_for_panic();
12027 barrier();
12028
12029 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12030 console_verbose();
12031 } else {
12032 /* Don't log too much for fake panic */
12033 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12034 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12035 return;
12036 }
12037 print_mce_head();
12038 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12039 * might have been modified by someone else.
12040 */
12041 rmb();
12042 - if (atomic_read(&mce_paniced))
12043 + if (atomic_read_unchecked(&mce_paniced))
12044 wait_for_panic();
12045 if (!monarch_timeout)
12046 goto out;
12047 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12048 */
12049
12050 static DEFINE_SPINLOCK(mce_state_lock);
12051 -static int open_count; /* #times opened */
12052 +static local_t open_count; /* #times opened */
12053 static int open_exclu; /* already open exclusive? */
12054
12055 static int mce_open(struct inode *inode, struct file *file)
12056 {
12057 spin_lock(&mce_state_lock);
12058
12059 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12060 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12061 spin_unlock(&mce_state_lock);
12062
12063 return -EBUSY;
12064 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12065
12066 if (file->f_flags & O_EXCL)
12067 open_exclu = 1;
12068 - open_count++;
12069 + local_inc(&open_count);
12070
12071 spin_unlock(&mce_state_lock);
12072
12073 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12074 {
12075 spin_lock(&mce_state_lock);
12076
12077 - open_count--;
12078 + local_dec(&open_count);
12079 open_exclu = 0;
12080
12081 spin_unlock(&mce_state_lock);
12082 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12083 static void mce_reset(void)
12084 {
12085 cpu_missing = 0;
12086 - atomic_set(&mce_fake_paniced, 0);
12087 + atomic_set_unchecked(&mce_fake_paniced, 0);
12088 atomic_set(&mce_executing, 0);
12089 atomic_set(&mce_callin, 0);
12090 atomic_set(&global_nwo, 0);
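
Several counters above (irq_err_count, irq_mis_count, mce_paniced, open_count) are moved to the *_unchecked or local_t variants: they are statistics rather than reference counts, so they are deliberately opted out of overflow checking. The sketch below only illustrates that distinction; it is non-atomic for brevity, uses the GCC/Clang __builtin_add_overflow() builtin, and the function names are made up.

#include <limits.h>
#include <stdio.h>

/* A protected reference count refuses to wrap... */
static int inc_checked(int *v)
{
    int next;

    if (__builtin_add_overflow(*v, 1, &next))
        return -1;              /* would overflow: reject instead of wrapping */
    *v = next;
    return 0;
}

/* ...while a plain statistics counter is allowed to. */
static void inc_unchecked(unsigned int *v)
{
    (*v)++;                     /* wraparound is acceptable for statistics */
}

int main(void)
{
    int refcount = INT_MAX;
    unsigned int stat = UINT_MAX;

    printf("checked inc at INT_MAX -> %d\n", inc_checked(&refcount));
    inc_unchecked(&stat);
    printf("unchecked counter wrapped to %u\n", stat);
    return 0;
}
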
12091 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c
12092 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12093 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12094 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12095 return 0;
12096 }
12097
12098 -static struct mtrr_ops amd_mtrr_ops = {
12099 +static const struct mtrr_ops amd_mtrr_ops = {
12100 .vendor = X86_VENDOR_AMD,
12101 .set = amd_set_mtrr,
12102 .get = amd_get_mtrr,
12103 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c
12104 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12105 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12106 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12107 return 0;
12108 }
12109
12110 -static struct mtrr_ops centaur_mtrr_ops = {
12111 +static const struct mtrr_ops centaur_mtrr_ops = {
12112 .vendor = X86_VENDOR_CENTAUR,
12113 .set = centaur_set_mcr,
12114 .get = centaur_get_mcr,
12115 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c
12116 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12117 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12118 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12119 post_set();
12120 }
12121
12122 -static struct mtrr_ops cyrix_mtrr_ops = {
12123 +static const struct mtrr_ops cyrix_mtrr_ops = {
12124 .vendor = X86_VENDOR_CYRIX,
12125 .set_all = cyrix_set_all,
12126 .set = cyrix_set_arr,
12127 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c
12128 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12129 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12130 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12131 /*
12132 * Generic structure...
12133 */
12134 -struct mtrr_ops generic_mtrr_ops = {
12135 +const struct mtrr_ops generic_mtrr_ops = {
12136 .use_intel_if = 1,
12137 .set_all = generic_set_all,
12138 .get = generic_get_mtrr,
12139 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c
12140 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12141 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12142 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12143 u64 size_or_mask, size_and_mask;
12144 static bool mtrr_aps_delayed_init;
12145
12146 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12147 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12148
12149 -struct mtrr_ops *mtrr_if;
12150 +const struct mtrr_ops *mtrr_if;
12151
12152 static void set_mtrr(unsigned int reg, unsigned long base,
12153 unsigned long size, mtrr_type type);
12154
12155 -void set_mtrr_ops(struct mtrr_ops *ops)
12156 +void set_mtrr_ops(const struct mtrr_ops *ops)
12157 {
12158 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12159 mtrr_ops[ops->vendor] = ops;
12160 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h
12161 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12162 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12163 @@ -12,19 +12,19 @@
12164 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12165
12166 struct mtrr_ops {
12167 - u32 vendor;
12168 - u32 use_intel_if;
12169 - void (*set)(unsigned int reg, unsigned long base,
12170 + const u32 vendor;
12171 + const u32 use_intel_if;
12172 + void (* const set)(unsigned int reg, unsigned long base,
12173 unsigned long size, mtrr_type type);
12174 - void (*set_all)(void);
12175 + void (* const set_all)(void);
12176
12177 - void (*get)(unsigned int reg, unsigned long *base,
12178 + void (* const get)(unsigned int reg, unsigned long *base,
12179 unsigned long *size, mtrr_type *type);
12180 - int (*get_free_region)(unsigned long base, unsigned long size,
12181 + int (* const get_free_region)(unsigned long base, unsigned long size,
12182 int replace_reg);
12183 - int (*validate_add_page)(unsigned long base, unsigned long size,
12184 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12185 unsigned int type);
12186 - int (*have_wrcomb)(void);
12187 + int (* const have_wrcomb)(void);
12188 };
12189
12190 extern int generic_get_free_region(unsigned long base, unsigned long size,
12191 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12192 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12193 unsigned int type);
12194
12195 -extern struct mtrr_ops generic_mtrr_ops;
12196 +extern const struct mtrr_ops generic_mtrr_ops;
12197
12198 extern int positive_have_wrcomb(void);
12199
12200 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12201 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12202 void get_mtrr_state(void);
12203
12204 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12205 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12206
12207 extern u64 size_or_mask, size_and_mask;
12208 -extern struct mtrr_ops *mtrr_if;
12209 +extern const struct mtrr_ops *mtrr_if;
12210
12211 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12212 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
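
The constification above (mtrr_ops here, and the sysfs_ops/dma_map_ops instances earlier in this patch) is about getting function-pointer tables into read-only data so the pointers cannot be retargeted at run time. A minimal sketch of the same pattern, with the invented name demo_ops standing in for the real structures:

#include <stdio.h>

/* Both the members and the instance are const, so the whole table of
 * function pointers lands in read-only data. */
struct demo_ops {
    int  (* const get)(void);
    void (* const set)(int);
};

static int  demo_get(void)  { return 42; }
static void demo_set(int v) { printf("set %d\n", v); }

static const struct demo_ops demo_ops = {
    .get = demo_get,
    .set = demo_set,
};

int main(void)
{
    demo_ops.set(demo_ops.get());
    return 0;
}
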
12213 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c
12214 --- linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12215 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12216 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12217
12218 /* Interface defining a CPU specific perfctr watchdog */
12219 struct wd_ops {
12220 - int (*reserve)(void);
12221 - void (*unreserve)(void);
12222 - int (*setup)(unsigned nmi_hz);
12223 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12224 - void (*stop)(void);
12225 + int (* const reserve)(void);
12226 + void (* const unreserve)(void);
12227 + int (* const setup)(unsigned nmi_hz);
12228 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12229 + void (* const stop)(void);
12230 unsigned perfctr;
12231 unsigned evntsel;
12232 u64 checkbit;
12233 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12234 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12235 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12236
12237 +/* cannot be const */
12238 static struct wd_ops intel_arch_wd_ops;
12239
12240 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12241 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12242 return 1;
12243 }
12244
12245 +/* cannot be const */
12246 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12247 .reserve = single_msr_reserve,
12248 .unreserve = single_msr_unreserve,
12249 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c
12250 --- linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12251 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12252 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12253 * count to the generic event atomically:
12254 */
12255 again:
12256 - prev_raw_count = atomic64_read(&hwc->prev_count);
12257 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12258 rdmsrl(hwc->event_base + idx, new_raw_count);
12259
12260 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12261 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12262 new_raw_count) != prev_raw_count)
12263 goto again;
12264
12265 @@ -741,7 +741,7 @@ again:
12266 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12267 delta >>= shift;
12268
12269 - atomic64_add(delta, &event->count);
12270 + atomic64_add_unchecked(delta, &event->count);
12271 atomic64_sub(delta, &hwc->period_left);
12272
12273 return new_raw_count;
12274 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12275 * The hw event starts counting from this event offset,
12276 * mark it to be able to extra future deltas:
12277 */
12278 - atomic64_set(&hwc->prev_count, (u64)-left);
12279 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12280
12281 err = checking_wrmsrl(hwc->event_base + idx,
12282 (u64)(-left) & x86_pmu.event_mask);
12283 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12284 break;
12285
12286 callchain_store(entry, frame.return_address);
12287 - fp = frame.next_frame;
12288 + fp = (__force const void __user *)frame.next_frame;
12289 }
12290 }
12291
12292 diff -urNp linux-2.6.32.42/arch/x86/kernel/crash.c linux-2.6.32.42/arch/x86/kernel/crash.c
12293 --- linux-2.6.32.42/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12294 +++ linux-2.6.32.42/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12295 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12296 regs = args->regs;
12297
12298 #ifdef CONFIG_X86_32
12299 - if (!user_mode_vm(regs)) {
12300 + if (!user_mode(regs)) {
12301 crash_fixup_ss_esp(&fixed_regs, regs);
12302 regs = &fixed_regs;
12303 }
12304 diff -urNp linux-2.6.32.42/arch/x86/kernel/doublefault_32.c linux-2.6.32.42/arch/x86/kernel/doublefault_32.c
12305 --- linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12306 +++ linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12307 @@ -11,7 +11,7 @@
12308
12309 #define DOUBLEFAULT_STACKSIZE (1024)
12310 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12311 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12312 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12313
12314 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12315
12316 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12317 unsigned long gdt, tss;
12318
12319 store_gdt(&gdt_desc);
12320 - gdt = gdt_desc.address;
12321 + gdt = (unsigned long)gdt_desc.address;
12322
12323 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12324
12325 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12326 /* 0x2 bit is always set */
12327 .flags = X86_EFLAGS_SF | 0x2,
12328 .sp = STACK_START,
12329 - .es = __USER_DS,
12330 + .es = __KERNEL_DS,
12331 .cs = __KERNEL_CS,
12332 .ss = __KERNEL_DS,
12333 - .ds = __USER_DS,
12334 + .ds = __KERNEL_DS,
12335 .fs = __KERNEL_PERCPU,
12336
12337 .__cr3 = __pa_nodebug(swapper_pg_dir),
12338 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c
12339 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12340 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12341 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12342 #endif
12343
12344 for (;;) {
12345 - struct thread_info *context;
12346 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12347 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12348
12349 - context = (struct thread_info *)
12350 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12351 - bp = print_context_stack(context, stack, bp, ops,
12352 - data, NULL, &graph);
12353 -
12354 - stack = (unsigned long *)context->previous_esp;
12355 - if (!stack)
12356 + if (stack_start == task_stack_page(task))
12357 break;
12358 + stack = *(unsigned long **)stack_start;
12359 if (ops->stack(data, "IRQ") < 0)
12360 break;
12361 touch_nmi_watchdog();
12362 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12363 * When in-kernel, we also print out the stack and code at the
12364 * time of the fault..
12365 */
12366 - if (!user_mode_vm(regs)) {
12367 + if (!user_mode(regs)) {
12368 unsigned int code_prologue = code_bytes * 43 / 64;
12369 unsigned int code_len = code_bytes;
12370 unsigned char c;
12371 u8 *ip;
12372 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12373
12374 printk(KERN_EMERG "Stack:\n");
12375 show_stack_log_lvl(NULL, regs, &regs->sp,
12376 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12377
12378 printk(KERN_EMERG "Code: ");
12379
12380 - ip = (u8 *)regs->ip - code_prologue;
12381 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12382 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12383 /* try starting at IP */
12384 - ip = (u8 *)regs->ip;
12385 + ip = (u8 *)regs->ip + cs_base;
12386 code_len = code_len - code_prologue + 1;
12387 }
12388 for (i = 0; i < code_len; i++, ip++) {
12389 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12390 printk(" Bad EIP value.");
12391 break;
12392 }
12393 - if (ip == (u8 *)regs->ip)
12394 + if (ip == (u8 *)regs->ip + cs_base)
12395 printk("<%02x> ", c);
12396 else
12397 printk("%02x ", c);
12398 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12399 {
12400 unsigned short ud2;
12401
12402 + ip = ktla_ktva(ip);
12403 if (ip < PAGE_OFFSET)
12404 return 0;
12405 if (probe_kernel_address((unsigned short *)ip, ud2))
12406 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c
12407 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12408 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12409 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12410 unsigned long *irq_stack_end =
12411 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12412 unsigned used = 0;
12413 - struct thread_info *tinfo;
12414 int graph = 0;
12415 + void *stack_start;
12416
12417 if (!task)
12418 task = current;
12419 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12420 * current stack address. If the stacks consist of nested
12421 * exceptions
12422 */
12423 - tinfo = task_thread_info(task);
12424 for (;;) {
12425 char *id;
12426 unsigned long *estack_end;
12427 +
12428 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12429 &used, &id);
12430
12431 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12432 if (ops->stack(data, id) < 0)
12433 break;
12434
12435 - bp = print_context_stack(tinfo, stack, bp, ops,
12436 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12437 data, estack_end, &graph);
12438 ops->stack(data, "<EOE>");
12439 /*
12440 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12441 if (stack >= irq_stack && stack < irq_stack_end) {
12442 if (ops->stack(data, "IRQ") < 0)
12443 break;
12444 - bp = print_context_stack(tinfo, stack, bp,
12445 + bp = print_context_stack(task, irq_stack, stack, bp,
12446 ops, data, irq_stack_end, &graph);
12447 /*
12448 * We link to the next stack (which would be
12449 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12450 /*
12451 * This handles the process stack:
12452 */
12453 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12454 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12455 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12456 put_cpu();
12457 }
12458 EXPORT_SYMBOL(dump_trace);
12459 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.c linux-2.6.32.42/arch/x86/kernel/dumpstack.c
12460 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12461 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12462 @@ -2,6 +2,9 @@
12463 * Copyright (C) 1991, 1992 Linus Torvalds
12464 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12465 */
12466 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12467 +#define __INCLUDED_BY_HIDESYM 1
12468 +#endif
12469 #include <linux/kallsyms.h>
12470 #include <linux/kprobes.h>
12471 #include <linux/uaccess.h>
12472 @@ -28,7 +31,7 @@ static int die_counter;
12473
12474 void printk_address(unsigned long address, int reliable)
12475 {
12476 - printk(" [<%p>] %s%pS\n", (void *) address,
12477 + printk(" [<%p>] %s%pA\n", (void *) address,
12478 reliable ? "" : "? ", (void *) address);
12479 }
12480
12481 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12482 static void
12483 print_ftrace_graph_addr(unsigned long addr, void *data,
12484 const struct stacktrace_ops *ops,
12485 - struct thread_info *tinfo, int *graph)
12486 + struct task_struct *task, int *graph)
12487 {
12488 - struct task_struct *task = tinfo->task;
12489 unsigned long ret_addr;
12490 int index = task->curr_ret_stack;
12491
12492 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12493 static inline void
12494 print_ftrace_graph_addr(unsigned long addr, void *data,
12495 const struct stacktrace_ops *ops,
12496 - struct thread_info *tinfo, int *graph)
12497 + struct task_struct *task, int *graph)
12498 { }
12499 #endif
12500
12501 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12502 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12503 */
12504
12505 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12506 - void *p, unsigned int size, void *end)
12507 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12508 {
12509 - void *t = tinfo;
12510 if (end) {
12511 if (p < end && p >= (end-THREAD_SIZE))
12512 return 1;
12513 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12514 }
12515
12516 unsigned long
12517 -print_context_stack(struct thread_info *tinfo,
12518 +print_context_stack(struct task_struct *task, void *stack_start,
12519 unsigned long *stack, unsigned long bp,
12520 const struct stacktrace_ops *ops, void *data,
12521 unsigned long *end, int *graph)
12522 {
12523 struct stack_frame *frame = (struct stack_frame *)bp;
12524
12525 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12526 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12527 unsigned long addr;
12528
12529 addr = *stack;
12530 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12531 } else {
12532 ops->address(data, addr, 0);
12533 }
12534 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12535 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12536 }
12537 stack++;
12538 }
12539 @@ -180,7 +180,7 @@ void dump_stack(void)
12540 #endif
12541
12542 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12543 - current->pid, current->comm, print_tainted(),
12544 + task_pid_nr(current), current->comm, print_tainted(),
12545 init_utsname()->release,
12546 (int)strcspn(init_utsname()->version, " "),
12547 init_utsname()->version);
12548 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12549 return flags;
12550 }
12551
12552 +extern void gr_handle_kernel_exploit(void);
12553 +
12554 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12555 {
12556 if (regs && kexec_should_crash(current))
12557 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12558 panic("Fatal exception in interrupt");
12559 if (panic_on_oops)
12560 panic("Fatal exception");
12561 - do_exit(signr);
12562 +
12563 + gr_handle_kernel_exploit();
12564 +
12565 + do_group_exit(signr);
12566 }
12567
12568 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12569 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12570 unsigned long flags = oops_begin();
12571 int sig = SIGSEGV;
12572
12573 - if (!user_mode_vm(regs))
12574 + if (!user_mode(regs))
12575 report_bug(regs->ip, regs);
12576
12577 if (__die(str, regs, err))
12578 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.h linux-2.6.32.42/arch/x86/kernel/dumpstack.h
12579 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12580 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12581 @@ -15,7 +15,7 @@
12582 #endif
12583
12584 extern unsigned long
12585 -print_context_stack(struct thread_info *tinfo,
12586 +print_context_stack(struct task_struct *task, void *stack_start,
12587 unsigned long *stack, unsigned long bp,
12588 const struct stacktrace_ops *ops, void *data,
12589 unsigned long *end, int *graph);
12590 diff -urNp linux-2.6.32.42/arch/x86/kernel/e820.c linux-2.6.32.42/arch/x86/kernel/e820.c
12591 --- linux-2.6.32.42/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12592 +++ linux-2.6.32.42/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12593 @@ -733,7 +733,7 @@ struct early_res {
12594 };
12595 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12596 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12597 - {}
12598 + { 0, 0, {0}, 0 }
12599 };
12600
12601 static int __init find_overlapped_early(u64 start, u64 end)
12602 diff -urNp linux-2.6.32.42/arch/x86/kernel/early_printk.c linux-2.6.32.42/arch/x86/kernel/early_printk.c
12603 --- linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12604 +++ linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12605 @@ -7,6 +7,7 @@
12606 #include <linux/pci_regs.h>
12607 #include <linux/pci_ids.h>
12608 #include <linux/errno.h>
12609 +#include <linux/sched.h>
12610 #include <asm/io.h>
12611 #include <asm/processor.h>
12612 #include <asm/fcntl.h>
12613 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12614 int n;
12615 va_list ap;
12616
12617 + pax_track_stack();
12618 +
12619 va_start(ap, fmt);
12620 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12621 early_console->write(early_console, buf, n);
12622 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_32.c linux-2.6.32.42/arch/x86/kernel/efi_32.c
12623 --- linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12624 +++ linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12625 @@ -38,70 +38,38 @@
12626 */
12627
12628 static unsigned long efi_rt_eflags;
12629 -static pgd_t efi_bak_pg_dir_pointer[2];
12630 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12631
12632 -void efi_call_phys_prelog(void)
12633 +void __init efi_call_phys_prelog(void)
12634 {
12635 - unsigned long cr4;
12636 - unsigned long temp;
12637 struct desc_ptr gdt_descr;
12638
12639 local_irq_save(efi_rt_eflags);
12640
12641 - /*
12642 - * If I don't have PAE, I should just duplicate two entries in page
12643 - * directory. If I have PAE, I just need to duplicate one entry in
12644 - * page directory.
12645 - */
12646 - cr4 = read_cr4_safe();
12647
12648 - if (cr4 & X86_CR4_PAE) {
12649 - efi_bak_pg_dir_pointer[0].pgd =
12650 - swapper_pg_dir[pgd_index(0)].pgd;
12651 - swapper_pg_dir[0].pgd =
12652 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12653 - } else {
12654 - efi_bak_pg_dir_pointer[0].pgd =
12655 - swapper_pg_dir[pgd_index(0)].pgd;
12656 - efi_bak_pg_dir_pointer[1].pgd =
12657 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12658 - swapper_pg_dir[pgd_index(0)].pgd =
12659 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12660 - temp = PAGE_OFFSET + 0x400000;
12661 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12662 - swapper_pg_dir[pgd_index(temp)].pgd;
12663 - }
12664 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12665 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12666 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12667
12668 /*
12669 * After the lock is released, the original page table is restored.
12670 */
12671 __flush_tlb_all();
12672
12673 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12674 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12675 gdt_descr.size = GDT_SIZE - 1;
12676 load_gdt(&gdt_descr);
12677 }
12678
12679 -void efi_call_phys_epilog(void)
12680 +void __init efi_call_phys_epilog(void)
12681 {
12682 - unsigned long cr4;
12683 struct desc_ptr gdt_descr;
12684
12685 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12686 + gdt_descr.address = get_cpu_gdt_table(0);
12687 gdt_descr.size = GDT_SIZE - 1;
12688 load_gdt(&gdt_descr);
12689
12690 - cr4 = read_cr4_safe();
12691 -
12692 - if (cr4 & X86_CR4_PAE) {
12693 - swapper_pg_dir[pgd_index(0)].pgd =
12694 - efi_bak_pg_dir_pointer[0].pgd;
12695 - } else {
12696 - swapper_pg_dir[pgd_index(0)].pgd =
12697 - efi_bak_pg_dir_pointer[0].pgd;
12698 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12699 - efi_bak_pg_dir_pointer[1].pgd;
12700 - }
12701 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12702
12703 /*
12704 * After the lock is released, the original page table is restored.
12705 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S
12706 --- linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12707 +++ linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12708 @@ -6,6 +6,7 @@
12709 */
12710
12711 #include <linux/linkage.h>
12712 +#include <linux/init.h>
12713 #include <asm/page_types.h>
12714
12715 /*
12716 @@ -20,7 +21,7 @@
12717 * service functions will comply with gcc calling convention, too.
12718 */
12719
12720 -.text
12721 +__INIT
12722 ENTRY(efi_call_phys)
12723 /*
12724 * 0. The function can only be called in Linux kernel. So CS has been
12725 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12726 * The mapping of lower virtual memory has been created in prelog and
12727 * epilog.
12728 */
12729 - movl $1f, %edx
12730 - subl $__PAGE_OFFSET, %edx
12731 - jmp *%edx
12732 + jmp 1f-__PAGE_OFFSET
12733 1:
12734
12735 /*
12736 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12737 * parameter 2, ..., param n. To make things easy, we save the return
12738 * address of efi_call_phys in a global variable.
12739 */
12740 - popl %edx
12741 - movl %edx, saved_return_addr
12742 - /* get the function pointer into ECX*/
12743 - popl %ecx
12744 - movl %ecx, efi_rt_function_ptr
12745 - movl $2f, %edx
12746 - subl $__PAGE_OFFSET, %edx
12747 - pushl %edx
12748 + popl (saved_return_addr)
12749 + popl (efi_rt_function_ptr)
12750
12751 /*
12752 * 3. Clear PG bit in %CR0.
12753 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12754 /*
12755 * 5. Call the physical function.
12756 */
12757 - jmp *%ecx
12758 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12759
12760 -2:
12761 /*
12762 * 6. After EFI runtime service returns, control will return to
12763 * following instruction. We'd better readjust stack pointer first.
12764 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12765 movl %cr0, %edx
12766 orl $0x80000000, %edx
12767 movl %edx, %cr0
12768 - jmp 1f
12769 -1:
12770 +
12771 /*
12772 * 8. Now restore the virtual mode from flat mode by
12773 * adding EIP with PAGE_OFFSET.
12774 */
12775 - movl $1f, %edx
12776 - jmp *%edx
12777 + jmp 1f+__PAGE_OFFSET
12778 1:
12779
12780 /*
12781 * 9. Balance the stack. And because EAX contain the return value,
12782 * we'd better not clobber it.
12783 */
12784 - leal efi_rt_function_ptr, %edx
12785 - movl (%edx), %ecx
12786 - pushl %ecx
12787 + pushl (efi_rt_function_ptr)
12788
12789 /*
12790 - * 10. Push the saved return address onto the stack and return.
12791 + * 10. Return to the saved return address.
12792 */
12793 - leal saved_return_addr, %edx
12794 - movl (%edx), %ecx
12795 - pushl %ecx
12796 - ret
12797 + jmpl *(saved_return_addr)
12798 ENDPROC(efi_call_phys)
12799 .previous
12800
12801 -.data
12802 +__INITDATA
12803 saved_return_addr:
12804 .long 0
12805 efi_rt_function_ptr:
12806 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_32.S linux-2.6.32.42/arch/x86/kernel/entry_32.S
12807 --- linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12808 +++ linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12809 @@ -185,13 +185,146 @@
12810 /*CFI_REL_OFFSET gs, PT_GS*/
12811 .endm
12812 .macro SET_KERNEL_GS reg
12813 +
12814 +#ifdef CONFIG_CC_STACKPROTECTOR
12815 movl $(__KERNEL_STACK_CANARY), \reg
12816 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12817 + movl $(__USER_DS), \reg
12818 +#else
12819 + xorl \reg, \reg
12820 +#endif
12821 +
12822 movl \reg, %gs
12823 .endm
12824
12825 #endif /* CONFIG_X86_32_LAZY_GS */
12826
12827 -.macro SAVE_ALL
12828 +.macro pax_enter_kernel
12829 +#ifdef CONFIG_PAX_KERNEXEC
12830 + call pax_enter_kernel
12831 +#endif
12832 +.endm
12833 +
12834 +.macro pax_exit_kernel
12835 +#ifdef CONFIG_PAX_KERNEXEC
12836 + call pax_exit_kernel
12837 +#endif
12838 +.endm
12839 +
12840 +#ifdef CONFIG_PAX_KERNEXEC
12841 +ENTRY(pax_enter_kernel)
12842 +#ifdef CONFIG_PARAVIRT
12843 + pushl %eax
12844 + pushl %ecx
12845 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12846 + mov %eax, %esi
12847 +#else
12848 + mov %cr0, %esi
12849 +#endif
12850 + bts $16, %esi
12851 + jnc 1f
12852 + mov %cs, %esi
12853 + cmp $__KERNEL_CS, %esi
12854 + jz 3f
12855 + ljmp $__KERNEL_CS, $3f
12856 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12857 +2:
12858 +#ifdef CONFIG_PARAVIRT
12859 + mov %esi, %eax
12860 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12861 +#else
12862 + mov %esi, %cr0
12863 +#endif
12864 +3:
12865 +#ifdef CONFIG_PARAVIRT
12866 + popl %ecx
12867 + popl %eax
12868 +#endif
12869 + ret
12870 +ENDPROC(pax_enter_kernel)
12871 +
12872 +ENTRY(pax_exit_kernel)
12873 +#ifdef CONFIG_PARAVIRT
12874 + pushl %eax
12875 + pushl %ecx
12876 +#endif
12877 + mov %cs, %esi
12878 + cmp $__KERNEXEC_KERNEL_CS, %esi
12879 + jnz 2f
12880 +#ifdef CONFIG_PARAVIRT
12881 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12882 + mov %eax, %esi
12883 +#else
12884 + mov %cr0, %esi
12885 +#endif
12886 + btr $16, %esi
12887 + ljmp $__KERNEL_CS, $1f
12888 +1:
12889 +#ifdef CONFIG_PARAVIRT
12890 + mov %esi, %eax
12891 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12892 +#else
12893 + mov %esi, %cr0
12894 +#endif
12895 +2:
12896 +#ifdef CONFIG_PARAVIRT
12897 + popl %ecx
12898 + popl %eax
12899 +#endif
12900 + ret
12901 +ENDPROC(pax_exit_kernel)
12902 +#endif
12903 +
12904 +.macro pax_erase_kstack
12905 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12906 + call pax_erase_kstack
12907 +#endif
12908 +.endm
12909 +
12910 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12911 +/*
12912 + * ebp: thread_info
12913 + * ecx, edx: can be clobbered
12914 + */
12915 +ENTRY(pax_erase_kstack)
12916 + pushl %edi
12917 + pushl %eax
12918 +
12919 + mov TI_lowest_stack(%ebp), %edi
12920 + mov $-0xBEEF, %eax
12921 + std
12922 +
12923 +1: mov %edi, %ecx
12924 + and $THREAD_SIZE_asm - 1, %ecx
12925 + shr $2, %ecx
12926 + repne scasl
12927 + jecxz 2f
12928 +
12929 + cmp $2*16, %ecx
12930 + jc 2f
12931 +
12932 + mov $2*16, %ecx
12933 + repe scasl
12934 + jecxz 2f
12935 + jne 1b
12936 +
12937 +2: cld
12938 + mov %esp, %ecx
12939 + sub %edi, %ecx
12940 + shr $2, %ecx
12941 + rep stosl
12942 +
12943 + mov TI_task_thread_sp0(%ebp), %edi
12944 + sub $128, %edi
12945 + mov %edi, TI_lowest_stack(%ebp)
12946 +
12947 + popl %eax
12948 + popl %edi
12949 + ret
12950 +ENDPROC(pax_erase_kstack)
12951 +#endif
12952 +
12953 +.macro __SAVE_ALL _DS
12954 cld
12955 PUSH_GS
12956 pushl %fs
12957 @@ -224,7 +357,7 @@
12958 pushl %ebx
12959 CFI_ADJUST_CFA_OFFSET 4
12960 CFI_REL_OFFSET ebx, 0
12961 - movl $(__USER_DS), %edx
12962 + movl $\_DS, %edx
12963 movl %edx, %ds
12964 movl %edx, %es
12965 movl $(__KERNEL_PERCPU), %edx
12966 @@ -232,6 +365,15 @@
12967 SET_KERNEL_GS %edx
12968 .endm
12969
12970 +.macro SAVE_ALL
12971 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12972 + __SAVE_ALL __KERNEL_DS
12973 + pax_enter_kernel
12974 +#else
12975 + __SAVE_ALL __USER_DS
12976 +#endif
12977 +.endm
12978 +
12979 .macro RESTORE_INT_REGS
12980 popl %ebx
12981 CFI_ADJUST_CFA_OFFSET -4
12982 @@ -352,7 +494,15 @@ check_userspace:
12983 movb PT_CS(%esp), %al
12984 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12985 cmpl $USER_RPL, %eax
12986 +
12987 +#ifdef CONFIG_PAX_KERNEXEC
12988 + jae resume_userspace
12989 +
12990 + PAX_EXIT_KERNEL
12991 + jmp resume_kernel
12992 +#else
12993 jb resume_kernel # not returning to v8086 or userspace
12994 +#endif
12995
12996 ENTRY(resume_userspace)
12997 LOCKDEP_SYS_EXIT
12998 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
12999 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13000 # int/exception return?
13001 jne work_pending
13002 - jmp restore_all
13003 + jmp restore_all_pax
13004 END(ret_from_exception)
13005
13006 #ifdef CONFIG_PREEMPT
13007 @@ -414,25 +564,36 @@ sysenter_past_esp:
13008 /*CFI_REL_OFFSET cs, 0*/
13009 /*
13010 * Push current_thread_info()->sysenter_return to the stack.
13011 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13012 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13013 */
13014 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13015 + pushl $0
13016 CFI_ADJUST_CFA_OFFSET 4
13017 CFI_REL_OFFSET eip, 0
13018
13019 pushl %eax
13020 CFI_ADJUST_CFA_OFFSET 4
13021 SAVE_ALL
13022 + GET_THREAD_INFO(%ebp)
13023 + movl TI_sysenter_return(%ebp),%ebp
13024 + movl %ebp,PT_EIP(%esp)
13025 ENABLE_INTERRUPTS(CLBR_NONE)
13026
13027 /*
13028 * Load the potential sixth argument from user stack.
13029 * Careful about security.
13030 */
13031 + movl PT_OLDESP(%esp),%ebp
13032 +
13033 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13034 + mov PT_OLDSS(%esp),%ds
13035 +1: movl %ds:(%ebp),%ebp
13036 + push %ss
13037 + pop %ds
13038 +#else
13039 cmpl $__PAGE_OFFSET-3,%ebp
13040 jae syscall_fault
13041 1: movl (%ebp),%ebp
13042 +#endif
13043 +
13044 movl %ebp,PT_EBP(%esp)
13045 .section __ex_table,"a"
13046 .align 4
13047 @@ -455,12 +616,23 @@ sysenter_do_call:
13048 testl $_TIF_ALLWORK_MASK, %ecx
13049 jne sysexit_audit
13050 sysenter_exit:
13051 +
13052 +#ifdef CONFIG_PAX_RANDKSTACK
13053 + pushl_cfi %eax
13054 + call pax_randomize_kstack
13055 + popl_cfi %eax
13056 +#endif
13057 +
13058 + pax_erase_kstack
13059 +
13060 /* if something modifies registers it must also disable sysexit */
13061 movl PT_EIP(%esp), %edx
13062 movl PT_OLDESP(%esp), %ecx
13063 xorl %ebp,%ebp
13064 TRACE_IRQS_ON
13065 1: mov PT_FS(%esp), %fs
13066 +2: mov PT_DS(%esp), %ds
13067 +3: mov PT_ES(%esp), %es
13068 PTGS_TO_GS
13069 ENABLE_INTERRUPTS_SYSEXIT
13070
13071 @@ -477,6 +649,9 @@ sysenter_audit:
13072 movl %eax,%edx /* 2nd arg: syscall number */
13073 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13074 call audit_syscall_entry
13075 +
13076 + pax_erase_kstack
13077 +
13078 pushl %ebx
13079 CFI_ADJUST_CFA_OFFSET 4
13080 movl PT_EAX(%esp),%eax /* reload syscall number */
13081 @@ -504,11 +679,17 @@ sysexit_audit:
13082
13083 CFI_ENDPROC
13084 .pushsection .fixup,"ax"
13085 -2: movl $0,PT_FS(%esp)
13086 +4: movl $0,PT_FS(%esp)
13087 + jmp 1b
13088 +5: movl $0,PT_DS(%esp)
13089 + jmp 1b
13090 +6: movl $0,PT_ES(%esp)
13091 jmp 1b
13092 .section __ex_table,"a"
13093 .align 4
13094 - .long 1b,2b
13095 + .long 1b,4b
13096 + .long 2b,5b
13097 + .long 3b,6b
13098 .popsection
13099 PTGS_TO_GS_EX
13100 ENDPROC(ia32_sysenter_target)
13101 @@ -538,6 +719,14 @@ syscall_exit:
13102 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13103 jne syscall_exit_work
13104
13105 +restore_all_pax:
13106 +
13107 +#ifdef CONFIG_PAX_RANDKSTACK
13108 + call pax_randomize_kstack
13109 +#endif
13110 +
13111 + pax_erase_kstack
13112 +
13113 restore_all:
13114 TRACE_IRQS_IRET
13115 restore_all_notrace:
13116 @@ -602,7 +791,13 @@ ldt_ss:
13117 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13118 mov %dx, %ax /* eax: new kernel esp */
13119 sub %eax, %edx /* offset (low word is 0) */
13120 - PER_CPU(gdt_page, %ebx)
13121 +#ifdef CONFIG_SMP
13122 + movl PER_CPU_VAR(cpu_number), %ebx
13123 + shll $PAGE_SHIFT_asm, %ebx
13124 + addl $cpu_gdt_table, %ebx
13125 +#else
13126 + movl $cpu_gdt_table, %ebx
13127 +#endif
13128 shr $16, %edx
13129 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13130 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13131 @@ -636,31 +831,25 @@ work_resched:
13132 movl TI_flags(%ebp), %ecx
13133 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13134 # than syscall tracing?
13135 - jz restore_all
13136 + jz restore_all_pax
13137 testb $_TIF_NEED_RESCHED, %cl
13138 jnz work_resched
13139
13140 work_notifysig: # deal with pending signals and
13141 # notify-resume requests
13142 + movl %esp, %eax
13143 #ifdef CONFIG_VM86
13144 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13145 - movl %esp, %eax
13146 - jne work_notifysig_v86 # returning to kernel-space or
13147 + jz 1f # returning to kernel-space or
13148 # vm86-space
13149 - xorl %edx, %edx
13150 - call do_notify_resume
13151 - jmp resume_userspace_sig
13152
13153 - ALIGN
13154 -work_notifysig_v86:
13155 pushl %ecx # save ti_flags for do_notify_resume
13156 CFI_ADJUST_CFA_OFFSET 4
13157 call save_v86_state # %eax contains pt_regs pointer
13158 popl %ecx
13159 CFI_ADJUST_CFA_OFFSET -4
13160 movl %eax, %esp
13161 -#else
13162 - movl %esp, %eax
13163 +1:
13164 #endif
13165 xorl %edx, %edx
13166 call do_notify_resume
13167 @@ -673,6 +862,9 @@ syscall_trace_entry:
13168 movl $-ENOSYS,PT_EAX(%esp)
13169 movl %esp, %eax
13170 call syscall_trace_enter
13171 +
13172 + pax_erase_kstack
13173 +
13174 /* What it returned is what we'll actually use. */
13175 cmpl $(nr_syscalls), %eax
13176 jnae syscall_call
13177 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13178
13179 RING0_INT_FRAME # can't unwind into user space anyway
13180 syscall_fault:
13181 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13182 + push %ss
13183 + pop %ds
13184 +#endif
13185 GET_THREAD_INFO(%ebp)
13186 movl $-EFAULT,PT_EAX(%esp)
13187 jmp resume_userspace
13188 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13189 PTREGSCALL(vm86)
13190 PTREGSCALL(vm86old)
13191
13192 + ALIGN;
13193 +ENTRY(kernel_execve)
13194 + push %ebp
13195 + sub $PT_OLDSS+4,%esp
13196 + push %edi
13197 + push %ecx
13198 + push %eax
13199 + lea 3*4(%esp),%edi
13200 + mov $PT_OLDSS/4+1,%ecx
13201 + xorl %eax,%eax
13202 + rep stosl
13203 + pop %eax
13204 + pop %ecx
13205 + pop %edi
13206 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13207 + mov %eax,PT_EBX(%esp)
13208 + mov %edx,PT_ECX(%esp)
13209 + mov %ecx,PT_EDX(%esp)
13210 + mov %esp,%eax
13211 + call sys_execve
13212 + GET_THREAD_INFO(%ebp)
13213 + test %eax,%eax
13214 + jz syscall_exit
13215 + add $PT_OLDSS+4,%esp
13216 + pop %ebp
13217 + ret
13218 +
13219 .macro FIXUP_ESPFIX_STACK
13220 /*
13221 * Switch back for ESPFIX stack to the normal zerobased stack
13222 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13223 * normal stack and adjusts ESP with the matching offset.
13224 */
13225 /* fixup the stack */
13226 - PER_CPU(gdt_page, %ebx)
13227 +#ifdef CONFIG_SMP
13228 + movl PER_CPU_VAR(cpu_number), %ebx
13229 + shll $PAGE_SHIFT_asm, %ebx
13230 + addl $cpu_gdt_table, %ebx
13231 +#else
13232 + movl $cpu_gdt_table, %ebx
13233 +#endif
13234 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13235 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13236 shl $16, %eax
13237 @@ -1198,7 +1427,6 @@ return_to_handler:
13238 ret
13239 #endif
13240
13241 -.section .rodata,"a"
13242 #include "syscall_table_32.S"
13243
13244 syscall_table_size=(.-sys_call_table)
13245 @@ -1255,9 +1483,12 @@ error_code:
13246 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13247 REG_TO_PTGS %ecx
13248 SET_KERNEL_GS %ecx
13249 - movl $(__USER_DS), %ecx
13250 + movl $(__KERNEL_DS), %ecx
13251 movl %ecx, %ds
13252 movl %ecx, %es
13253 +
13254 + pax_enter_kernel
13255 +
13256 TRACE_IRQS_OFF
13257 movl %esp,%eax # pt_regs pointer
13258 call *%edi
13259 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13260 xorl %edx,%edx # zero error code
13261 movl %esp,%eax # pt_regs pointer
13262 call do_nmi
13263 +
13264 + pax_exit_kernel
13265 +
13266 jmp restore_all_notrace
13267 CFI_ENDPROC
13268
13269 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13270 FIXUP_ESPFIX_STACK # %eax == %esp
13271 xorl %edx,%edx # zero error code
13272 call do_nmi
13273 +
13274 + pax_exit_kernel
13275 +
13276 RESTORE_REGS
13277 lss 12+4(%esp), %esp # back to espfix stack
13278 CFI_ADJUST_CFA_OFFSET -24
13279 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_64.S linux-2.6.32.42/arch/x86/kernel/entry_64.S
13280 --- linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13281 +++ linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13282 @@ -53,6 +53,7 @@
13283 #include <asm/paravirt.h>
13284 #include <asm/ftrace.h>
13285 #include <asm/percpu.h>
13286 +#include <asm/pgtable.h>
13287
13288 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13289 #include <linux/elf-em.h>
13290 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13291 ENDPROC(native_usergs_sysret64)
13292 #endif /* CONFIG_PARAVIRT */
13293
13294 + .macro ljmpq sel, off
13295 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13296 + .byte 0x48; ljmp *1234f(%rip)
13297 + .pushsection .rodata
13298 + .align 16
13299 + 1234: .quad \off; .word \sel
13300 + .popsection
13301 +#else
13302 + pushq $\sel
13303 + pushq $\off
13304 + lretq
13305 +#endif
13306 + .endm
13307 +
13308 + .macro pax_enter_kernel
13309 +#ifdef CONFIG_PAX_KERNEXEC
13310 + call pax_enter_kernel
13311 +#endif
13312 + .endm
13313 +
13314 + .macro pax_exit_kernel
13315 +#ifdef CONFIG_PAX_KERNEXEC
13316 + call pax_exit_kernel
13317 +#endif
13318 + .endm
13319 +
13320 +#ifdef CONFIG_PAX_KERNEXEC
13321 +ENTRY(pax_enter_kernel)
13322 + pushq %rdi
13323 +
13324 +#ifdef CONFIG_PARAVIRT
13325 + PV_SAVE_REGS(CLBR_RDI)
13326 +#endif
13327 +
13328 + GET_CR0_INTO_RDI
13329 + bts $16,%rdi
13330 + jnc 1f
13331 + mov %cs,%edi
13332 + cmp $__KERNEL_CS,%edi
13333 + jz 3f
13334 + ljmpq __KERNEL_CS,3f
13335 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13336 +2: SET_RDI_INTO_CR0
13337 +3:
13338 +
13339 +#ifdef CONFIG_PARAVIRT
13340 + PV_RESTORE_REGS(CLBR_RDI)
13341 +#endif
13342 +
13343 + popq %rdi
13344 + retq
13345 +ENDPROC(pax_enter_kernel)
13346 +
13347 +ENTRY(pax_exit_kernel)
13348 + pushq %rdi
13349 +
13350 +#ifdef CONFIG_PARAVIRT
13351 + PV_SAVE_REGS(CLBR_RDI)
13352 +#endif
13353 +
13354 + mov %cs,%rdi
13355 + cmp $__KERNEXEC_KERNEL_CS,%edi
13356 + jnz 2f
13357 + GET_CR0_INTO_RDI
13358 + btr $16,%rdi
13359 + ljmpq __KERNEL_CS,1f
13360 +1: SET_RDI_INTO_CR0
13361 +2:
13362 +
13363 +#ifdef CONFIG_PARAVIRT
13364 + PV_RESTORE_REGS(CLBR_RDI);
13365 +#endif
13366 +
13367 + popq %rdi
13368 + retq
13369 +ENDPROC(pax_exit_kernel)
13370 +#endif
13371 +
13372 + .macro pax_enter_kernel_user
13373 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13374 + call pax_enter_kernel_user
13375 +#endif
13376 + .endm
13377 +
13378 + .macro pax_exit_kernel_user
13379 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13380 + call pax_exit_kernel_user
13381 +#endif
13382 +#ifdef CONFIG_PAX_RANDKSTACK
13383 + push %rax
13384 + call pax_randomize_kstack
13385 + pop %rax
13386 +#endif
13387 + pax_erase_kstack
13388 + .endm
13389 +
13390 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13391 +ENTRY(pax_enter_kernel_user)
13392 + pushq %rdi
13393 + pushq %rbx
13394 +
13395 +#ifdef CONFIG_PARAVIRT
13396 + PV_SAVE_REGS(CLBR_RDI)
13397 +#endif
13398 +
13399 + GET_CR3_INTO_RDI
13400 + mov %rdi,%rbx
13401 + add $__START_KERNEL_map,%rbx
13402 + sub phys_base(%rip),%rbx
13403 +
13404 +#ifdef CONFIG_PARAVIRT
13405 + pushq %rdi
13406 + cmpl $0, pv_info+PARAVIRT_enabled
13407 + jz 1f
13408 + i = 0
13409 + .rept USER_PGD_PTRS
13410 + mov i*8(%rbx),%rsi
13411 + mov $0,%sil
13412 + lea i*8(%rbx),%rdi
13413 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13414 + i = i + 1
13415 + .endr
13416 + jmp 2f
13417 +1:
13418 +#endif
13419 +
13420 + i = 0
13421 + .rept USER_PGD_PTRS
13422 + movb $0,i*8(%rbx)
13423 + i = i + 1
13424 + .endr
13425 +
13426 +#ifdef CONFIG_PARAVIRT
13427 +2: popq %rdi
13428 +#endif
13429 + SET_RDI_INTO_CR3
13430 +
13431 +#ifdef CONFIG_PAX_KERNEXEC
13432 + GET_CR0_INTO_RDI
13433 + bts $16,%rdi
13434 + SET_RDI_INTO_CR0
13435 +#endif
13436 +
13437 +#ifdef CONFIG_PARAVIRT
13438 + PV_RESTORE_REGS(CLBR_RDI)
13439 +#endif
13440 +
13441 + popq %rbx
13442 + popq %rdi
13443 + retq
13444 +ENDPROC(pax_enter_kernel_user)
13445 +
13446 +ENTRY(pax_exit_kernel_user)
13447 + push %rdi
13448 +
13449 +#ifdef CONFIG_PARAVIRT
13450 + pushq %rbx
13451 + PV_SAVE_REGS(CLBR_RDI)
13452 +#endif
13453 +
13454 +#ifdef CONFIG_PAX_KERNEXEC
13455 + GET_CR0_INTO_RDI
13456 + btr $16,%rdi
13457 + SET_RDI_INTO_CR0
13458 +#endif
13459 +
13460 + GET_CR3_INTO_RDI
13461 + add $__START_KERNEL_map,%rdi
13462 + sub phys_base(%rip),%rdi
13463 +
13464 +#ifdef CONFIG_PARAVIRT
13465 + cmpl $0, pv_info+PARAVIRT_enabled
13466 + jz 1f
13467 + mov %rdi,%rbx
13468 + i = 0
13469 + .rept USER_PGD_PTRS
13470 + mov i*8(%rbx),%rsi
13471 + mov $0x67,%sil
13472 + lea i*8(%rbx),%rdi
13473 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13474 + i = i + 1
13475 + .endr
13476 + jmp 2f
13477 +1:
13478 +#endif
13479 +
13480 + i = 0
13481 + .rept USER_PGD_PTRS
13482 + movb $0x67,i*8(%rdi)
13483 + i = i + 1
13484 + .endr
13485 +
13486 +#ifdef CONFIG_PARAVIRT
13487 +2: PV_RESTORE_REGS(CLBR_RDI)
13488 + popq %rbx
13489 +#endif
13490 +
13491 + popq %rdi
13492 + retq
13493 +ENDPROC(pax_exit_kernel_user)
13494 +#endif
13495 +
13496 +.macro pax_erase_kstack
13497 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13498 + call pax_erase_kstack
13499 +#endif
13500 +.endm
13501 +
13502 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13503 +/*
13504 + * r10: thread_info
13505 + * rcx, rdx: can be clobbered
13506 + */
13507 +ENTRY(pax_erase_kstack)
13508 + pushq %rdi
13509 + pushq %rax
13510 +
13511 + GET_THREAD_INFO(%r10)
13512 + mov TI_lowest_stack(%r10), %rdi
13513 + mov $-0xBEEF, %rax
13514 + std
13515 +
13516 +1: mov %edi, %ecx
13517 + and $THREAD_SIZE_asm - 1, %ecx
13518 + shr $3, %ecx
13519 + repne scasq
13520 + jecxz 2f
13521 +
13522 + cmp $2*8, %ecx
13523 + jc 2f
13524 +
13525 + mov $2*8, %ecx
13526 + repe scasq
13527 + jecxz 2f
13528 + jne 1b
13529 +
13530 +2: cld
13531 + mov %esp, %ecx
13532 + sub %edi, %ecx
13533 + shr $3, %ecx
13534 + rep stosq
13535 +
13536 + mov TI_task_thread_sp0(%r10), %rdi
13537 + sub $256, %rdi
13538 + mov %rdi, TI_lowest_stack(%r10)
13539 +
13540 + popq %rax
13541 + popq %rdi
13542 + ret
13543 +ENDPROC(pax_erase_kstack)
13544 +#endif
13545
13546 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13547 #ifdef CONFIG_TRACE_IRQFLAGS
13548 @@ -317,7 +569,7 @@ ENTRY(save_args)
13549 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13550 movq_cfi rbp, 8 /* push %rbp */
13551 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13552 - testl $3, CS(%rdi)
13553 + testb $3, CS(%rdi)
13554 je 1f
13555 SWAPGS
13556 /*
13557 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13558
13559 RESTORE_REST
13560
13561 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13562 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13563 je int_ret_from_sys_call
13564
13565 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13566 @@ -455,7 +707,7 @@ END(ret_from_fork)
13567 ENTRY(system_call)
13568 CFI_STARTPROC simple
13569 CFI_SIGNAL_FRAME
13570 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13571 + CFI_DEF_CFA rsp,0
13572 CFI_REGISTER rip,rcx
13573 /*CFI_REGISTER rflags,r11*/
13574 SWAPGS_UNSAFE_STACK
13575 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13576
13577 movq %rsp,PER_CPU_VAR(old_rsp)
13578 movq PER_CPU_VAR(kernel_stack),%rsp
13579 + pax_enter_kernel_user
13580 /*
13581 * No need to follow this irqs off/on section - it's straight
13582 * and short:
13583 */
13584 ENABLE_INTERRUPTS(CLBR_NONE)
13585 - SAVE_ARGS 8,1
13586 + SAVE_ARGS 8*6,1
13587 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13588 movq %rcx,RIP-ARGOFFSET(%rsp)
13589 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13590 @@ -502,6 +755,7 @@ sysret_check:
13591 andl %edi,%edx
13592 jnz sysret_careful
13593 CFI_REMEMBER_STATE
13594 + pax_exit_kernel_user
13595 /*
13596 * sysretq will re-enable interrupts:
13597 */
13598 @@ -562,6 +816,9 @@ auditsys:
13599 movq %rax,%rsi /* 2nd arg: syscall number */
13600 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13601 call audit_syscall_entry
13602 +
13603 + pax_erase_kstack
13604 +
13605 LOAD_ARGS 0 /* reload call-clobbered registers */
13606 jmp system_call_fastpath
13607
13608 @@ -592,6 +849,9 @@ tracesys:
13609 FIXUP_TOP_OF_STACK %rdi
13610 movq %rsp,%rdi
13611 call syscall_trace_enter
13612 +
13613 + pax_erase_kstack
13614 +
13615 /*
13616 * Reload arg registers from stack in case ptrace changed them.
13617 * We don't reload %rax because syscall_trace_enter() returned
13618 @@ -613,7 +873,7 @@ tracesys:
13619 GLOBAL(int_ret_from_sys_call)
13620 DISABLE_INTERRUPTS(CLBR_NONE)
13621 TRACE_IRQS_OFF
13622 - testl $3,CS-ARGOFFSET(%rsp)
13623 + testb $3,CS-ARGOFFSET(%rsp)
13624 je retint_restore_args
13625 movl $_TIF_ALLWORK_MASK,%edi
13626 /* edi: mask to check */
13627 @@ -800,6 +1060,16 @@ END(interrupt)
13628 CFI_ADJUST_CFA_OFFSET 10*8
13629 call save_args
13630 PARTIAL_FRAME 0
13631 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13632 + testb $3, CS(%rdi)
13633 + jnz 1f
13634 + pax_enter_kernel
13635 + jmp 2f
13636 +1: pax_enter_kernel_user
13637 +2:
13638 +#else
13639 + pax_enter_kernel
13640 +#endif
13641 call \func
13642 .endm
13643
13644 @@ -822,7 +1092,7 @@ ret_from_intr:
13645 CFI_ADJUST_CFA_OFFSET -8
13646 exit_intr:
13647 GET_THREAD_INFO(%rcx)
13648 - testl $3,CS-ARGOFFSET(%rsp)
13649 + testb $3,CS-ARGOFFSET(%rsp)
13650 je retint_kernel
13651
13652 /* Interrupt came from user space */
13653 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13654 * The iretq could re-enable interrupts:
13655 */
13656 DISABLE_INTERRUPTS(CLBR_ANY)
13657 + pax_exit_kernel_user
13658 TRACE_IRQS_IRETQ
13659 SWAPGS
13660 jmp restore_args
13661
13662 retint_restore_args: /* return to kernel space */
13663 DISABLE_INTERRUPTS(CLBR_ANY)
13664 + pax_exit_kernel
13665 /*
13666 * The iretq could re-enable interrupts:
13667 */
13668 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13669 CFI_ADJUST_CFA_OFFSET 15*8
13670 call error_entry
13671 DEFAULT_FRAME 0
13672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13673 + testb $3, CS(%rsp)
13674 + jnz 1f
13675 + pax_enter_kernel
13676 + jmp 2f
13677 +1: pax_enter_kernel_user
13678 +2:
13679 +#else
13680 + pax_enter_kernel
13681 +#endif
13682 movq %rsp,%rdi /* pt_regs pointer */
13683 xorl %esi,%esi /* no error code */
13684 call \do_sym
13685 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13686 subq $15*8, %rsp
13687 call save_paranoid
13688 TRACE_IRQS_OFF
13689 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13690 + testb $3, CS(%rsp)
13691 + jnz 1f
13692 + pax_enter_kernel
13693 + jmp 2f
13694 +1: pax_enter_kernel_user
13695 +2:
13696 +#else
13697 + pax_enter_kernel
13698 +#endif
13699 movq %rsp,%rdi /* pt_regs pointer */
13700 xorl %esi,%esi /* no error code */
13701 call \do_sym
13702 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13703 subq $15*8, %rsp
13704 call save_paranoid
13705 TRACE_IRQS_OFF
13706 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13707 + testb $3, CS(%rsp)
13708 + jnz 1f
13709 + pax_enter_kernel
13710 + jmp 2f
13711 +1: pax_enter_kernel_user
13712 +2:
13713 +#else
13714 + pax_enter_kernel
13715 +#endif
13716 movq %rsp,%rdi /* pt_regs pointer */
13717 xorl %esi,%esi /* no error code */
13718 - PER_CPU(init_tss, %rbp)
13719 +#ifdef CONFIG_SMP
13720 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13721 + lea init_tss(%rbp), %rbp
13722 +#else
13723 + lea init_tss(%rip), %rbp
13724 +#endif
13725 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13726 call \do_sym
13727 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13728 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13729 CFI_ADJUST_CFA_OFFSET 15*8
13730 call error_entry
13731 DEFAULT_FRAME 0
13732 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13733 + testb $3, CS(%rsp)
13734 + jnz 1f
13735 + pax_enter_kernel
13736 + jmp 2f
13737 +1: pax_enter_kernel_user
13738 +2:
13739 +#else
13740 + pax_enter_kernel
13741 +#endif
13742 movq %rsp,%rdi /* pt_regs pointer */
13743 movq ORIG_RAX(%rsp),%rsi /* get error code */
13744 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13745 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13746 call save_paranoid
13747 DEFAULT_FRAME 0
13748 TRACE_IRQS_OFF
13749 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13750 + testb $3, CS(%rsp)
13751 + jnz 1f
13752 + pax_enter_kernel
13753 + jmp 2f
13754 +1: pax_enter_kernel_user
13755 +2:
13756 +#else
13757 + pax_enter_kernel
13758 +#endif
13759 movq %rsp,%rdi /* pt_regs pointer */
13760 movq ORIG_RAX(%rsp),%rsi /* get error code */
13761 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13762 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13763 TRACE_IRQS_OFF
13764 testl %ebx,%ebx /* swapgs needed? */
13765 jnz paranoid_restore
13766 - testl $3,CS(%rsp)
13767 + testb $3,CS(%rsp)
13768 jnz paranoid_userspace
13769 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13770 + pax_exit_kernel
13771 + TRACE_IRQS_IRETQ 0
13772 + SWAPGS_UNSAFE_STACK
13773 + RESTORE_ALL 8
13774 + jmp irq_return
13775 +#endif
13776 paranoid_swapgs:
13777 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13778 + pax_exit_kernel_user
13779 +#else
13780 + pax_exit_kernel
13781 +#endif
13782 TRACE_IRQS_IRETQ 0
13783 SWAPGS_UNSAFE_STACK
13784 RESTORE_ALL 8
13785 jmp irq_return
13786 paranoid_restore:
13787 + pax_exit_kernel
13788 TRACE_IRQS_IRETQ 0
13789 RESTORE_ALL 8
13790 jmp irq_return
13791 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13792 movq_cfi r14, R14+8
13793 movq_cfi r15, R15+8
13794 xorl %ebx,%ebx
13795 - testl $3,CS+8(%rsp)
13796 + testb $3,CS+8(%rsp)
13797 je error_kernelspace
13798 error_swapgs:
13799 SWAPGS
13800 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13801 CFI_ADJUST_CFA_OFFSET 15*8
13802 call save_paranoid
13803 DEFAULT_FRAME 0
13804 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13805 + testb $3, CS(%rsp)
13806 + jnz 1f
13807 + pax_enter_kernel
13808 + jmp 2f
13809 +1: pax_enter_kernel_user
13810 +2:
13811 +#else
13812 + pax_enter_kernel
13813 +#endif
13814 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13815 movq %rsp,%rdi
13816 movq $-1,%rsi
13817 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13818 DISABLE_INTERRUPTS(CLBR_NONE)
13819 testl %ebx,%ebx /* swapgs needed? */
13820 jnz nmi_restore
13821 - testl $3,CS(%rsp)
13822 + testb $3,CS(%rsp)
13823 jnz nmi_userspace
13824 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13825 + pax_exit_kernel
13826 + SWAPGS_UNSAFE_STACK
13827 + RESTORE_ALL 8
13828 + jmp irq_return
13829 +#endif
13830 nmi_swapgs:
13831 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13832 + pax_exit_kernel_user
13833 +#else
13834 + pax_exit_kernel
13835 +#endif
13836 SWAPGS_UNSAFE_STACK
13837 + RESTORE_ALL 8
13838 + jmp irq_return
13839 nmi_restore:
13840 + pax_exit_kernel
13841 RESTORE_ALL 8
13842 jmp irq_return
13843 nmi_userspace:
13844 diff -urNp linux-2.6.32.42/arch/x86/kernel/ftrace.c linux-2.6.32.42/arch/x86/kernel/ftrace.c
13845 --- linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13846 +++ linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13847 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13848 static void *mod_code_newcode; /* holds the text to write to the IP */
13849
13850 static unsigned nmi_wait_count;
13851 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13852 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13853
13854 int ftrace_arch_read_dyn_info(char *buf, int size)
13855 {
13856 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13857
13858 r = snprintf(buf, size, "%u %u",
13859 nmi_wait_count,
13860 - atomic_read(&nmi_update_count));
13861 + atomic_read_unchecked(&nmi_update_count));
13862 return r;
13863 }
13864
13865 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13866 {
13867 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13868 smp_rmb();
13869 + pax_open_kernel();
13870 ftrace_mod_code();
13871 - atomic_inc(&nmi_update_count);
13872 + pax_close_kernel();
13873 + atomic_inc_unchecked(&nmi_update_count);
13874 }
13875 /* Must have previous changes seen before executions */
13876 smp_mb();
13877 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13878
13879
13880
13881 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13882 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13883
13884 static unsigned char *ftrace_nop_replace(void)
13885 {
13886 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13887 {
13888 unsigned char replaced[MCOUNT_INSN_SIZE];
13889
13890 + ip = ktla_ktva(ip);
13891 +
13892 /*
13893 * Note: Due to modules and __init, code can
13894 * disappear and change, we need to protect against faulting
13895 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13896 unsigned char old[MCOUNT_INSN_SIZE], *new;
13897 int ret;
13898
13899 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13900 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13901 new = ftrace_call_replace(ip, (unsigned long)func);
13902 ret = ftrace_modify_code(ip, old, new);
13903
13904 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13905 switch (faulted) {
13906 case 0:
13907 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13908 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13909 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13910 break;
13911 case 1:
13912 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13913 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13914 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13915 break;
13916 case 2:
13917 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13918 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13919 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13920 break;
13921 }
13922
13923 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13924 {
13925 unsigned char code[MCOUNT_INSN_SIZE];
13926
13927 + ip = ktla_ktva(ip);
13928 +
13929 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13930 return -EFAULT;
13931
13932 diff -urNp linux-2.6.32.42/arch/x86/kernel/head32.c linux-2.6.32.42/arch/x86/kernel/head32.c
13933 --- linux-2.6.32.42/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13934 +++ linux-2.6.32.42/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13935 @@ -16,6 +16,7 @@
13936 #include <asm/apic.h>
13937 #include <asm/io_apic.h>
13938 #include <asm/bios_ebda.h>
13939 +#include <asm/boot.h>
13940
13941 static void __init i386_default_early_setup(void)
13942 {
13943 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13944 {
13945 reserve_trampoline_memory();
13946
13947 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13948 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13949
13950 #ifdef CONFIG_BLK_DEV_INITRD
13951 /* Reserve INITRD */
13952 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_32.S linux-2.6.32.42/arch/x86/kernel/head_32.S
13953 --- linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13954 +++ linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-04-17 15:56:46.000000000 -0400
13955 @@ -19,10 +19,17 @@
13956 #include <asm/setup.h>
13957 #include <asm/processor-flags.h>
13958 #include <asm/percpu.h>
13959 +#include <asm/msr-index.h>
13960
13961 /* Physical address */
13962 #define pa(X) ((X) - __PAGE_OFFSET)
13963
13964 +#ifdef CONFIG_PAX_KERNEXEC
13965 +#define ta(X) (X)
13966 +#else
13967 +#define ta(X) ((X) - __PAGE_OFFSET)
13968 +#endif
13969 +
13970 /*
13971 * References to members of the new_cpu_data structure.
13972 */
13973 @@ -52,11 +59,7 @@
13974 * and small than max_low_pfn, otherwise will waste some page table entries
13975 */
13976
13977 -#if PTRS_PER_PMD > 1
13978 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13979 -#else
13980 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13981 -#endif
13982 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13983
13984 /* Enough space to fit pagetables for the low memory linear map */
13985 MAPPING_BEYOND_END = \
13986 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13987 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13988
13989 /*
13990 + * Real beginning of normal "text" segment
13991 + */
13992 +ENTRY(stext)
13993 +ENTRY(_stext)
13994 +
13995 +/*
13996 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13997 * %esi points to the real-mode code as a 32-bit pointer.
13998 * CS and DS must be 4 GB flat segments, but we don't depend on
13999 @@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14000 * can.
14001 */
14002 __HEAD
14003 +
14004 +#ifdef CONFIG_PAX_KERNEXEC
14005 + jmp startup_32
14006 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14007 +.fill PAGE_SIZE-5,1,0xcc
14008 +#endif
14009 +
14010 ENTRY(startup_32)
14011 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14012 us to not reload segments */
14013 @@ -97,6 +113,57 @@ ENTRY(startup_32)
14014 movl %eax,%gs
14015 2:
14016
14017 +#ifdef CONFIG_SMP
14018 + movl $pa(cpu_gdt_table),%edi
14019 + movl $__per_cpu_load,%eax
14020 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14021 + rorl $16,%eax
14022 + movb %al,__KERNEL_PERCPU + 4(%edi)
14023 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14024 + movl $__per_cpu_end - 1,%eax
14025 + subl $__per_cpu_start,%eax
14026 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14027 +#endif
14028 +
14029 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14030 + movl $NR_CPUS,%ecx
14031 + movl $pa(cpu_gdt_table),%edi
14032 +1:
14033 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14034 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14035 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14036 + addl $PAGE_SIZE_asm,%edi
14037 + loop 1b
14038 +#endif
14039 +
14040 +#ifdef CONFIG_PAX_KERNEXEC
14041 + movl $pa(boot_gdt),%edi
14042 + movl $__LOAD_PHYSICAL_ADDR,%eax
14043 + movw %ax,__BOOT_CS + 2(%edi)
14044 + rorl $16,%eax
14045 + movb %al,__BOOT_CS + 4(%edi)
14046 + movb %ah,__BOOT_CS + 7(%edi)
14047 + rorl $16,%eax
14048 +
14049 + ljmp $(__BOOT_CS),$1f
14050 +1:
14051 +
14052 + movl $NR_CPUS,%ecx
14053 + movl $pa(cpu_gdt_table),%edi
14054 + addl $__PAGE_OFFSET,%eax
14055 +1:
14056 + movw %ax,__KERNEL_CS + 2(%edi)
14057 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14058 + rorl $16,%eax
14059 + movb %al,__KERNEL_CS + 4(%edi)
14060 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14061 + movb %ah,__KERNEL_CS + 7(%edi)
14062 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14063 + rorl $16,%eax
14064 + addl $PAGE_SIZE_asm,%edi
14065 + loop 1b
14066 +#endif
14067 +
14068 /*
14069 * Clear BSS first so that there are no surprises...
14070 */
14071 @@ -140,9 +207,7 @@ ENTRY(startup_32)
14072 cmpl $num_subarch_entries, %eax
14073 jae bad_subarch
14074
14075 - movl pa(subarch_entries)(,%eax,4), %eax
14076 - subl $__PAGE_OFFSET, %eax
14077 - jmp *%eax
14078 + jmp *pa(subarch_entries)(,%eax,4)
14079
14080 bad_subarch:
14081 WEAK(lguest_entry)
14082 @@ -154,10 +219,10 @@ WEAK(xen_entry)
14083 __INITDATA
14084
14085 subarch_entries:
14086 - .long default_entry /* normal x86/PC */
14087 - .long lguest_entry /* lguest hypervisor */
14088 - .long xen_entry /* Xen hypervisor */
14089 - .long default_entry /* Moorestown MID */
14090 + .long ta(default_entry) /* normal x86/PC */
14091 + .long ta(lguest_entry) /* lguest hypervisor */
14092 + .long ta(xen_entry) /* Xen hypervisor */
14093 + .long ta(default_entry) /* Moorestown MID */
14094 num_subarch_entries = (. - subarch_entries) / 4
14095 .previous
14096 #endif /* CONFIG_PARAVIRT */
14097 @@ -218,8 +283,11 @@ default_entry:
14098 movl %eax, pa(max_pfn_mapped)
14099
14100 /* Do early initialization of the fixmap area */
14101 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14102 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14103 +#ifdef CONFIG_COMPAT_VDSO
14104 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14105 +#else
14106 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14107 +#endif
14108 #else /* Not PAE */
14109
14110 page_pde_offset = (__PAGE_OFFSET >> 20);
14111 @@ -249,8 +317,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14112 movl %eax, pa(max_pfn_mapped)
14113
14114 /* Do early initialization of the fixmap area */
14115 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14116 - movl %eax,pa(swapper_pg_dir+0xffc)
14117 +#ifdef CONFIG_COMPAT_VDSO
14118 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14119 +#else
14120 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14121 +#endif
14122 #endif
14123 jmp 3f
14124 /*
14125 @@ -297,6 +368,7 @@ ENTRY(startup_32_smp)
14126 orl %edx,%eax
14127 movl %eax,%cr4
14128
14129 +#ifdef CONFIG_X86_PAE
14130 btl $5, %eax # check if PAE is enabled
14131 jnc 6f
14132
14133 @@ -312,13 +384,17 @@ ENTRY(startup_32_smp)
14134 jnc 6f
14135
14136 /* Setup EFER (Extended Feature Enable Register) */
14137 - movl $0xc0000080, %ecx
14138 + movl $MSR_EFER, %ecx
14139 rdmsr
14140
14141 btsl $11, %eax
14142 /* Make changes effective */
14143 wrmsr
14144
14145 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14146 + movl $1,pa(nx_enabled)
14147 +#endif
14148 +
14149 6:
14150
14151 /*
14152 @@ -344,9 +420,7 @@ ENTRY(startup_32_smp)
14153
14154 #ifdef CONFIG_SMP
14155 cmpb $0, ready
14156 - jz 1f /* Initial CPU cleans BSS */
14157 - jmp checkCPUtype
14158 -1:
14159 + jnz checkCPUtype /* Initial CPU cleans BSS */
14160 #endif /* CONFIG_SMP */
14161
14162 /*
14163 @@ -424,7 +498,7 @@ is386: movl $2,%ecx # set MP
14164 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14165 movl %eax,%ss # after changing gdt.
14166
14167 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14168 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14169 movl %eax,%ds
14170 movl %eax,%es
14171
14172 @@ -438,15 +512,22 @@ is386: movl $2,%ecx # set MP
14173 */
14174 cmpb $0,ready
14175 jne 1f
14176 - movl $per_cpu__gdt_page,%eax
14177 + movl $cpu_gdt_table,%eax
14178 movl $per_cpu__stack_canary,%ecx
14179 +#ifdef CONFIG_SMP
14180 + addl $__per_cpu_load,%ecx
14181 +#endif
14182 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14183 shrl $16, %ecx
14184 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14185 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14186 1:
14187 -#endif
14188 movl $(__KERNEL_STACK_CANARY),%eax
14189 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14190 + movl $(__USER_DS),%eax
14191 +#else
14192 + xorl %eax,%eax
14193 +#endif
14194 movl %eax,%gs
14195
14196 xorl %eax,%eax # Clear LDT
14197 @@ -457,10 +538,6 @@ is386: movl $2,%ecx # set MP
14198 #ifdef CONFIG_SMP
14199 movb ready, %cl
14200 movb $1, ready
14201 - cmpb $0,%cl # the first CPU calls start_kernel
14202 - je 1f
14203 - movl (stack_start), %esp
14204 -1:
14205 #endif /* CONFIG_SMP */
14206 jmp *(initial_code)
14207
14208 @@ -546,22 +623,22 @@ early_page_fault:
14209 jmp early_fault
14210
14211 early_fault:
14212 - cld
14213 #ifdef CONFIG_PRINTK
14214 + cmpl $1,%ss:early_recursion_flag
14215 + je hlt_loop
14216 + incl %ss:early_recursion_flag
14217 + cld
14218 pusha
14219 movl $(__KERNEL_DS),%eax
14220 movl %eax,%ds
14221 movl %eax,%es
14222 - cmpl $2,early_recursion_flag
14223 - je hlt_loop
14224 - incl early_recursion_flag
14225 movl %cr2,%eax
14226 pushl %eax
14227 pushl %edx /* trapno */
14228 pushl $fault_msg
14229 call printk
14230 +; call dump_stack
14231 #endif
14232 - call dump_stack
14233 hlt_loop:
14234 hlt
14235 jmp hlt_loop
14236 @@ -569,8 +646,11 @@ hlt_loop:
14237 /* This is the default interrupt "handler" :-) */
14238 ALIGN
14239 ignore_int:
14240 - cld
14241 #ifdef CONFIG_PRINTK
14242 + cmpl $2,%ss:early_recursion_flag
14243 + je hlt_loop
14244 + incl %ss:early_recursion_flag
14245 + cld
14246 pushl %eax
14247 pushl %ecx
14248 pushl %edx
14249 @@ -579,9 +659,6 @@ ignore_int:
14250 movl $(__KERNEL_DS),%eax
14251 movl %eax,%ds
14252 movl %eax,%es
14253 - cmpl $2,early_recursion_flag
14254 - je hlt_loop
14255 - incl early_recursion_flag
14256 pushl 16(%esp)
14257 pushl 24(%esp)
14258 pushl 32(%esp)
14259 @@ -610,31 +687,47 @@ ENTRY(initial_page_table)
14260 /*
14261 * BSS section
14262 */
14263 -__PAGE_ALIGNED_BSS
14264 - .align PAGE_SIZE_asm
14265 #ifdef CONFIG_X86_PAE
14266 +.section .swapper_pg_pmd,"a",@progbits
14267 swapper_pg_pmd:
14268 .fill 1024*KPMDS,4,0
14269 #else
14270 +.section .swapper_pg_dir,"a",@progbits
14271 ENTRY(swapper_pg_dir)
14272 .fill 1024,4,0
14273 #endif
14274 +.section .swapper_pg_fixmap,"a",@progbits
14275 swapper_pg_fixmap:
14276 .fill 1024,4,0
14277 #ifdef CONFIG_X86_TRAMPOLINE
14278 +.section .trampoline_pg_dir,"a",@progbits
14279 ENTRY(trampoline_pg_dir)
14280 +#ifdef CONFIG_X86_PAE
14281 + .fill 4,8,0
14282 +#else
14283 .fill 1024,4,0
14284 #endif
14285 +#endif
14286 +
14287 +.section .empty_zero_page,"a",@progbits
14288 ENTRY(empty_zero_page)
14289 .fill 4096,1,0
14290
14291 /*
14292 + * The IDT has to be page-aligned to simplify the Pentium
14293 + * F0 0F bug workaround.. We have a special link segment
14294 + * for this.
14295 + */
14296 +.section .idt,"a",@progbits
14297 +ENTRY(idt_table)
14298 + .fill 256,8,0
14299 +
14300 +/*
14301 * This starts the data section.
14302 */
14303 #ifdef CONFIG_X86_PAE
14304 -__PAGE_ALIGNED_DATA
14305 - /* Page-aligned for the benefit of paravirt? */
14306 - .align PAGE_SIZE_asm
14307 +.section .swapper_pg_dir,"a",@progbits
14308 +
14309 ENTRY(swapper_pg_dir)
14310 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14311 # if KPMDS == 3
14312 @@ -653,15 +746,24 @@ ENTRY(swapper_pg_dir)
14313 # error "Kernel PMDs should be 1, 2 or 3"
14314 # endif
14315 .align PAGE_SIZE_asm /* needs to be page-sized too */
14316 +
14317 +#ifdef CONFIG_PAX_PER_CPU_PGD
14318 +ENTRY(cpu_pgd)
14319 + .rept NR_CPUS
14320 + .fill 4,8,0
14321 + .endr
14322 +#endif
14323 +
14324 #endif
14325
14326 .data
14327 ENTRY(stack_start)
14328 - .long init_thread_union+THREAD_SIZE
14329 + .long init_thread_union+THREAD_SIZE-8
14330 .long __BOOT_DS
14331
14332 ready: .byte 0
14333
14334 +.section .rodata,"a",@progbits
14335 early_recursion_flag:
14336 .long 0
14337
14338 @@ -697,7 +799,7 @@ fault_msg:
14339 .word 0 # 32 bit align gdt_desc.address
14340 boot_gdt_descr:
14341 .word __BOOT_DS+7
14342 - .long boot_gdt - __PAGE_OFFSET
14343 + .long pa(boot_gdt)
14344
14345 .word 0 # 32-bit align idt_desc.address
14346 idt_descr:
14347 @@ -708,7 +810,7 @@ idt_descr:
14348 .word 0 # 32 bit align gdt_desc.address
14349 ENTRY(early_gdt_descr)
14350 .word GDT_ENTRIES*8-1
14351 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14352 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14353
14354 /*
14355 * The boot_gdt must mirror the equivalent in setup.S and is
14356 @@ -717,5 +819,65 @@ ENTRY(early_gdt_descr)
14357 .align L1_CACHE_BYTES
14358 ENTRY(boot_gdt)
14359 .fill GDT_ENTRY_BOOT_CS,8,0
14360 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14361 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14362 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14363 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14364 +
14365 + .align PAGE_SIZE_asm
14366 +ENTRY(cpu_gdt_table)
14367 + .rept NR_CPUS
14368 + .quad 0x0000000000000000 /* NULL descriptor */
14369 + .quad 0x0000000000000000 /* 0x0b reserved */
14370 + .quad 0x0000000000000000 /* 0x13 reserved */
14371 + .quad 0x0000000000000000 /* 0x1b reserved */
14372 +
14373 +#ifdef CONFIG_PAX_KERNEXEC
14374 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14375 +#else
14376 + .quad 0x0000000000000000 /* 0x20 unused */
14377 +#endif
14378 +
14379 + .quad 0x0000000000000000 /* 0x28 unused */
14380 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14381 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14382 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14383 + .quad 0x0000000000000000 /* 0x4b reserved */
14384 + .quad 0x0000000000000000 /* 0x53 reserved */
14385 + .quad 0x0000000000000000 /* 0x5b reserved */
14386 +
14387 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14388 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14389 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14390 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14391 +
14392 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14393 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14394 +
14395 + /*
14396 + * Segments used for calling PnP BIOS have byte granularity.
14397 + * The code segments and data segments have fixed 64k limits,
14398 + * the transfer segment sizes are set at run time.
14399 + */
14400 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14401 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14402 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14403 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14404 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14405 +
14406 + /*
14407 + * The APM segments have byte granularity and their bases
14408 + * are set at run time. All have 64k limits.
14409 + */
14410 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14411 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14412 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14413 +
14414 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14415 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14416 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14417 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14418 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14419 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14420 +
14421 + /* Be sure this is zeroed to avoid false validations in Xen */
14422 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14423 + .endr
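
Annotation (not part of the patch): the head_32.S hunk above replaces the per-cpu gdt_page with a page-sized, per-CPU cpu_gdt_table and fixes up segment descriptors at boot by poking individual bytes: the base goes into descriptor bytes 2-4 and 7, the limit into bytes 0-1 and the low nibble of byte 6. A minimal user-space sketch of that encoding follows, assuming only the standard x86 descriptor layout; the names and the example values are illustrative, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: pack a 32-bit base and 20-bit limit into an
 * 8-byte x86 segment descriptor, the same layout the boot code above
 * patches byte-by-byte (base at offsets 2-4 and 7, limit at 0-1 and the
 * low nibble of byte 6).  Not taken from the patch itself.
 */
static uint64_t pack_descriptor(uint32_t base, uint32_t limit,
				uint8_t access, uint8_t flags)
{
	uint64_t d = 0;

	d |= limit & 0xffffULL;                     /* limit 15:0   -> bytes 0-1 */
	d |= (uint64_t)(base & 0xffffff) << 16;     /* base 23:0    -> bytes 2-4 */
	d |= (uint64_t)access << 40;                /* access byte  -> byte 5    */
	d |= (uint64_t)((limit >> 16) & 0xf) << 48; /* limit 19:16  -> byte 6 lo */
	d |= (uint64_t)(flags & 0xf) << 52;         /* G/D/L/AVL    -> byte 6 hi */
	d |= (uint64_t)(base >> 24) << 56;          /* base 31:24   -> byte 7    */
	return d;
}

int main(void)
{
	/* kernel 4GB code segment: base 0, limit 0xfffff, 4K granularity */
	printf("0x%016llx\n",
	       (unsigned long long)pack_descriptor(0, 0xfffff, 0x9b, 0xc));
	return 0;
}

Running the sketch prints 0x00cf9b000000ffff, i.e. the same descriptor value the GDT entries above spell out as .quad constants.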
14424 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_64.S linux-2.6.32.42/arch/x86/kernel/head_64.S
14425 --- linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14426 +++ linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14427 @@ -19,6 +19,7 @@
14428 #include <asm/cache.h>
14429 #include <asm/processor-flags.h>
14430 #include <asm/percpu.h>
14431 +#include <asm/cpufeature.h>
14432
14433 #ifdef CONFIG_PARAVIRT
14434 #include <asm/asm-offsets.h>
14435 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14436 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14437 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14438 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14439 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14440 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14441 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14442 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14443
14444 .text
14445 __HEAD
14446 @@ -85,35 +90,22 @@ startup_64:
14447 */
14448 addq %rbp, init_level4_pgt + 0(%rip)
14449 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14450 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14451 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14452 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14453
14454 addq %rbp, level3_ident_pgt + 0(%rip)
14455 +#ifndef CONFIG_XEN
14456 + addq %rbp, level3_ident_pgt + 8(%rip)
14457 +#endif
14458
14459 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14460 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14461 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14462
14463 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14464 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14465 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14466
14467 - /* Add an Identity mapping if I am above 1G */
14468 - leaq _text(%rip), %rdi
14469 - andq $PMD_PAGE_MASK, %rdi
14470 -
14471 - movq %rdi, %rax
14472 - shrq $PUD_SHIFT, %rax
14473 - andq $(PTRS_PER_PUD - 1), %rax
14474 - jz ident_complete
14475 -
14476 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14477 - leaq level3_ident_pgt(%rip), %rbx
14478 - movq %rdx, 0(%rbx, %rax, 8)
14479 -
14480 - movq %rdi, %rax
14481 - shrq $PMD_SHIFT, %rax
14482 - andq $(PTRS_PER_PMD - 1), %rax
14483 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14484 - leaq level2_spare_pgt(%rip), %rbx
14485 - movq %rdx, 0(%rbx, %rax, 8)
14486 -ident_complete:
14487 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14488 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14489
14490 /*
14491 * Fixup the kernel text+data virtual addresses. Note that
14492 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14493 * after the boot processor executes this code.
14494 */
14495
14496 - /* Enable PAE mode and PGE */
14497 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14498 + /* Enable PAE mode and PSE/PGE */
14499 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14500 movq %rax, %cr4
14501
14502 /* Setup early boot stage 4 level pagetables. */
14503 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14504 movl $MSR_EFER, %ecx
14505 rdmsr
14506 btsl $_EFER_SCE, %eax /* Enable System Call */
14507 - btl $20,%edi /* No Execute supported? */
14508 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14509 jnc 1f
14510 btsl $_EFER_NX, %eax
14511 + leaq init_level4_pgt(%rip), %rdi
14512 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14513 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14514 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14515 1: wrmsr /* Make changes effective */
14516
14517 /* Setup cr0 */
14518 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14519 .quad x86_64_start_kernel
14520 ENTRY(initial_gs)
14521 .quad INIT_PER_CPU_VAR(irq_stack_union)
14522 - __FINITDATA
14523
14524 ENTRY(stack_start)
14525 .quad init_thread_union+THREAD_SIZE-8
14526 .word 0
14527 + __FINITDATA
14528
14529 bad_address:
14530 jmp bad_address
14531
14532 - .section ".init.text","ax"
14533 + __INIT
14534 #ifdef CONFIG_EARLY_PRINTK
14535 .globl early_idt_handlers
14536 early_idt_handlers:
14537 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14538 #endif /* EARLY_PRINTK */
14539 1: hlt
14540 jmp 1b
14541 + .previous
14542
14543 #ifdef CONFIG_EARLY_PRINTK
14544 + __INITDATA
14545 early_recursion_flag:
14546 .long 0
14547 + .previous
14548
14549 + .section .rodata,"a",@progbits
14550 early_idt_msg:
14551 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14552 early_idt_ripmsg:
14553 .asciz "RIP %s\n"
14554 -#endif /* CONFIG_EARLY_PRINTK */
14555 .previous
14556 +#endif /* CONFIG_EARLY_PRINTK */
14557
14558 + .section .rodata,"a",@progbits
14559 #define NEXT_PAGE(name) \
14560 .balign PAGE_SIZE; \
14561 ENTRY(name)
14562 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14563 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14564 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14565 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14566 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14567 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14568 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14569 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14570 .org init_level4_pgt + L4_START_KERNEL*8, 0
14571 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14572 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14573
14574 +#ifdef CONFIG_PAX_PER_CPU_PGD
14575 +NEXT_PAGE(cpu_pgd)
14576 + .rept NR_CPUS
14577 + .fill 512,8,0
14578 + .endr
14579 +#endif
14580 +
14581 NEXT_PAGE(level3_ident_pgt)
14582 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14583 +#ifdef CONFIG_XEN
14584 .fill 511,8,0
14585 +#else
14586 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14587 + .fill 510,8,0
14588 +#endif
14589 +
14590 +NEXT_PAGE(level3_vmalloc_pgt)
14591 + .fill 512,8,0
14592 +
14593 +NEXT_PAGE(level3_vmemmap_pgt)
14594 + .fill L3_VMEMMAP_START,8,0
14595 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14596
14597 NEXT_PAGE(level3_kernel_pgt)
14598 .fill L3_START_KERNEL,8,0
14599 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14600 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14601 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14602
14603 +NEXT_PAGE(level2_vmemmap_pgt)
14604 + .fill 512,8,0
14605 +
14606 NEXT_PAGE(level2_fixmap_pgt)
14607 - .fill 506,8,0
14608 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14609 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14610 - .fill 5,8,0
14611 + .fill 507,8,0
14612 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14613 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14614 + .fill 4,8,0
14615
14616 -NEXT_PAGE(level1_fixmap_pgt)
14617 +NEXT_PAGE(level1_vsyscall_pgt)
14618 .fill 512,8,0
14619
14620 -NEXT_PAGE(level2_ident_pgt)
14621 - /* Since I easily can, map the first 1G.
14622 + /* Since I easily can, map the first 2G.
14623 * Don't set NX because code runs from these pages.
14624 */
14625 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14626 +NEXT_PAGE(level2_ident_pgt)
14627 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14628
14629 NEXT_PAGE(level2_kernel_pgt)
14630 /*
14631 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14632 * If you want to increase this then increase MODULES_VADDR
14633 * too.)
14634 */
14635 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14636 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14637 -
14638 -NEXT_PAGE(level2_spare_pgt)
14639 - .fill 512, 8, 0
14640 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14641
14642 #undef PMDS
14643 #undef NEXT_PAGE
14644
14645 - .data
14646 + .align PAGE_SIZE
14647 +ENTRY(cpu_gdt_table)
14648 + .rept NR_CPUS
14649 + .quad 0x0000000000000000 /* NULL descriptor */
14650 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14651 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14652 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14653 + .quad 0x00cffb000000ffff /* __USER32_CS */
14654 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14655 + .quad 0x00affb000000ffff /* __USER_CS */
14656 +
14657 +#ifdef CONFIG_PAX_KERNEXEC
14658 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14659 +#else
14660 + .quad 0x0 /* unused */
14661 +#endif
14662 +
14663 + .quad 0,0 /* TSS */
14664 + .quad 0,0 /* LDT */
14665 + .quad 0,0,0 /* three TLS descriptors */
14666 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14667 + /* asm/segment.h:GDT_ENTRIES must match this */
14668 +
14669 + /* zero the remaining page */
14670 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14671 + .endr
14672 +
14673 .align 16
14674 .globl early_gdt_descr
14675 early_gdt_descr:
14676 .word GDT_ENTRIES*8-1
14677 early_gdt_descr_base:
14678 - .quad INIT_PER_CPU_VAR(gdt_page)
14679 + .quad cpu_gdt_table
14680
14681 ENTRY(phys_base)
14682 /* This must match the first entry in level2_kernel_pgt */
14683 .quad 0x0000000000000000
14684
14685 #include "../../x86/xen/xen-head.S"
14686 -
14687 - .section .bss, "aw", @nobits
14688 +
14689 + .section .rodata,"a",@progbits
14690 .align L1_CACHE_BYTES
14691 ENTRY(idt_table)
14692 - .skip IDT_ENTRIES * 16
14693 + .fill 512,8,0
14694
14695 __PAGE_ALIGNED_BSS
14696 .align PAGE_SIZE
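
Annotation (not part of the patch): the head_64.S hunk adds init_level4_pgt slots for the vmalloc and vmemmap regions and sets NX on the non-text top-level entries. Which slot a region occupies comes from pgd_index()/pud_index(); a minimal sketch of that index math follows, assuming standard 4-level x86-64 paging and the usual 2.6.32-era virtual layout (the addresses plugged in are stated as assumptions, not read out of this hunk).

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the index math behind L4_VMALLOC_START and friends, assuming
 * 4-level x86-64 paging: 9 bits per level, 512 entries per table.  The
 * example addresses are the usual 2.6.32 layout and are an assumption
 * here, not taken from the patch.
 */
#define PGDIR_SHIFT	39
#define PUD_SHIFT	30
#define PTRS_PER_PGD	512
#define PTRS_PER_PUD	512

static unsigned int pgd_index(uint64_t addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

static unsigned int pud_index(uint64_t addr)
{
	return (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

int main(void)
{
	uint64_t page_offset   = 0xffff880000000000ULL; /* direct mapping    */
	uint64_t vmalloc_start = 0xffffc90000000000ULL; /* vmalloc area      */
	uint64_t vmemmap_start = 0xffffea0000000000ULL; /* struct page array */
	uint64_t start_kernel  = 0xffffffff80000000ULL; /* kernel text map   */

	printf("L4_PAGE_OFFSET   = %u\n", pgd_index(page_offset));
	printf("L4_VMALLOC_START = %u\n", pgd_index(vmalloc_start));
	printf("L4_VMEMMAP_START = %u\n", pgd_index(vmemmap_start));
	printf("L4_START_KERNEL  = %u\n", pgd_index(start_kernel));
	printf("L3_START_KERNEL  = %u\n", pud_index(start_kernel));
	return 0;
}

The last two values (511 and 510) match the existing comment in the hunk and the original 510/511 offsets into level3_kernel_pgt that the symbolic L3_START_KERNEL form replaces.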
14697 diff -urNp linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c
14698 --- linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14699 +++ linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14700 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14701 EXPORT_SYMBOL(cmpxchg8b_emu);
14702 #endif
14703
14704 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14705 +
14706 /* Networking helper routines. */
14707 EXPORT_SYMBOL(csum_partial_copy_generic);
14708 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14709 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14710
14711 EXPORT_SYMBOL(__get_user_1);
14712 EXPORT_SYMBOL(__get_user_2);
14713 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14714
14715 EXPORT_SYMBOL(csum_partial);
14716 EXPORT_SYMBOL(empty_zero_page);
14717 +
14718 +#ifdef CONFIG_PAX_KERNEXEC
14719 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14720 +#endif
14721 diff -urNp linux-2.6.32.42/arch/x86/kernel/i8259.c linux-2.6.32.42/arch/x86/kernel/i8259.c
14722 --- linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14723 +++ linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14724 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14725 "spurious 8259A interrupt: IRQ%d.\n", irq);
14726 spurious_irq_mask |= irqmask;
14727 }
14728 - atomic_inc(&irq_err_count);
14729 + atomic_inc_unchecked(&irq_err_count);
14730 /*
14731 * Theoretically we do not have to handle this IRQ,
14732 * but in Linux this does not cause problems and is
14733 diff -urNp linux-2.6.32.42/arch/x86/kernel/init_task.c linux-2.6.32.42/arch/x86/kernel/init_task.c
14734 --- linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14735 +++ linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14736 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14737 * way process stacks are handled. This is done by having a special
14738 * "init_task" linker map entry..
14739 */
14740 -union thread_union init_thread_union __init_task_data =
14741 - { INIT_THREAD_INFO(init_task) };
14742 +union thread_union init_thread_union __init_task_data;
14743
14744 /*
14745 * Initial task structure.
14746 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14747 * section. Since TSS's are completely CPU-local, we want them
14748 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14749 */
14750 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14751 -
14752 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14753 +EXPORT_SYMBOL(init_tss);
14754 diff -urNp linux-2.6.32.42/arch/x86/kernel/ioport.c linux-2.6.32.42/arch/x86/kernel/ioport.c
14755 --- linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14756 +++ linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14757 @@ -6,6 +6,7 @@
14758 #include <linux/sched.h>
14759 #include <linux/kernel.h>
14760 #include <linux/capability.h>
14761 +#include <linux/security.h>
14762 #include <linux/errno.h>
14763 #include <linux/types.h>
14764 #include <linux/ioport.h>
14765 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14766
14767 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14768 return -EINVAL;
14769 +#ifdef CONFIG_GRKERNSEC_IO
14770 + if (turn_on && grsec_disable_privio) {
14771 + gr_handle_ioperm();
14772 + return -EPERM;
14773 + }
14774 +#endif
14775 if (turn_on && !capable(CAP_SYS_RAWIO))
14776 return -EPERM;
14777
14778 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14779 * because the ->io_bitmap_max value must match the bitmap
14780 * contents:
14781 */
14782 - tss = &per_cpu(init_tss, get_cpu());
14783 + tss = init_tss + get_cpu();
14784
14785 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14786
14787 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14788 return -EINVAL;
14789 /* Trying to gain more privileges? */
14790 if (level > old) {
14791 +#ifdef CONFIG_GRKERNSEC_IO
14792 + if (grsec_disable_privio) {
14793 + gr_handle_iopl();
14794 + return -EPERM;
14795 + }
14796 +#endif
14797 if (!capable(CAP_SYS_RAWIO))
14798 return -EPERM;
14799 }
14800 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq_32.c linux-2.6.32.42/arch/x86/kernel/irq_32.c
14801 --- linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14802 +++ linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14803 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14804 __asm__ __volatile__("andl %%esp,%0" :
14805 "=r" (sp) : "0" (THREAD_SIZE - 1));
14806
14807 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14808 + return sp < STACK_WARN;
14809 }
14810
14811 static void print_stack_overflow(void)
14812 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14813 * per-CPU IRQ handling contexts (thread information and stack)
14814 */
14815 union irq_ctx {
14816 - struct thread_info tinfo;
14817 - u32 stack[THREAD_SIZE/sizeof(u32)];
14818 -} __attribute__((aligned(PAGE_SIZE)));
14819 + unsigned long previous_esp;
14820 + u32 stack[THREAD_SIZE/sizeof(u32)];
14821 +} __attribute__((aligned(THREAD_SIZE)));
14822
14823 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14824 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14825 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14826 static inline int
14827 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14828 {
14829 - union irq_ctx *curctx, *irqctx;
14830 + union irq_ctx *irqctx;
14831 u32 *isp, arg1, arg2;
14832
14833 - curctx = (union irq_ctx *) current_thread_info();
14834 irqctx = __get_cpu_var(hardirq_ctx);
14835
14836 /*
14837 @@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14838 * handler) we can't do that and just have to keep using the
14839 * current stack (which is the irq stack already after all)
14840 */
14841 - if (unlikely(curctx == irqctx))
14842 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14843 return 0;
14844
14845 /* build the stack frame on the IRQ stack */
14846 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14847 - irqctx->tinfo.task = curctx->tinfo.task;
14848 - irqctx->tinfo.previous_esp = current_stack_pointer;
14849 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14850 + irqctx->previous_esp = current_stack_pointer;
14851 + add_preempt_count(HARDIRQ_OFFSET);
14852
14853 - /*
14854 - * Copy the softirq bits in preempt_count so that the
14855 - * softirq checks work in the hardirq context.
14856 - */
14857 - irqctx->tinfo.preempt_count =
14858 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14859 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14860 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14861 + __set_fs(MAKE_MM_SEG(0));
14862 +#endif
14863
14864 if (unlikely(overflow))
14865 call_on_stack(print_stack_overflow, isp);
14866 @@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14867 : "0" (irq), "1" (desc), "2" (isp),
14868 "D" (desc->handle_irq)
14869 : "memory", "cc", "ecx");
14870 +
14871 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14872 + __set_fs(current_thread_info()->addr_limit);
14873 +#endif
14874 +
14875 + sub_preempt_count(HARDIRQ_OFFSET);
14876 return 1;
14877 }
14878
14879 @@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14880 */
14881 void __cpuinit irq_ctx_init(int cpu)
14882 {
14883 - union irq_ctx *irqctx;
14884 -
14885 if (per_cpu(hardirq_ctx, cpu))
14886 return;
14887
14888 - irqctx = &per_cpu(hardirq_stack, cpu);
14889 - irqctx->tinfo.task = NULL;
14890 - irqctx->tinfo.exec_domain = NULL;
14891 - irqctx->tinfo.cpu = cpu;
14892 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14893 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14894 -
14895 - per_cpu(hardirq_ctx, cpu) = irqctx;
14896 -
14897 - irqctx = &per_cpu(softirq_stack, cpu);
14898 - irqctx->tinfo.task = NULL;
14899 - irqctx->tinfo.exec_domain = NULL;
14900 - irqctx->tinfo.cpu = cpu;
14901 - irqctx->tinfo.preempt_count = 0;
14902 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14903 -
14904 - per_cpu(softirq_ctx, cpu) = irqctx;
14905 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14906 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
14907
14908 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14909 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14910 @@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
14911 asmlinkage void do_softirq(void)
14912 {
14913 unsigned long flags;
14914 - struct thread_info *curctx;
14915 union irq_ctx *irqctx;
14916 u32 *isp;
14917
14918 @@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
14919 local_irq_save(flags);
14920
14921 if (local_softirq_pending()) {
14922 - curctx = current_thread_info();
14923 irqctx = __get_cpu_var(softirq_ctx);
14924 - irqctx->tinfo.task = curctx->task;
14925 - irqctx->tinfo.previous_esp = current_stack_pointer;
14926 + irqctx->previous_esp = current_stack_pointer;
14927
14928 /* build the stack frame on the softirq stack */
14929 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14930 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14931 +
14932 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14933 + __set_fs(MAKE_MM_SEG(0));
14934 +#endif
14935
14936 call_on_stack(__do_softirq, isp);
14937 +
14938 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14939 + __set_fs(current_thread_info()->addr_limit);
14940 +#endif
14941 +
14942 /*
14943 * Shouldnt happen, we returned above if in_interrupt():
14944 */
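
Annotation (not part of the patch): the irq_32.c hunk above stops keeping a thread_info copy at the bottom of each IRQ stack; the context now stores only previous_esp, and "already running on the IRQ stack" is detected by pointer distance instead of comparing thread_info pointers. A minimal sketch of that test, assuming THREAD_SIZE-sized, THREAD_SIZE-aligned stacks; the constants and addresses are illustrative.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumption: 8K stacks, as on i386 */

/*
 * Sketch of the "are we already on the IRQ stack?" check used above: if
 * the current stack pointer lies within THREAD_SIZE bytes above the base
 * of the IRQ context, we are already on that stack and must not switch
 * to it again.  Unsigned wrap-around makes pointers below the base fail
 * the test, mirroring the signed-to-unsigned comparison in the hunk.
 */
static int on_irq_stack(uintptr_t sp, uintptr_t irqctx_base)
{
	return sp - irqctx_base < THREAD_SIZE;
}

int main(void)
{
	uintptr_t irqctx = 0xc1000000;	/* hypothetical IRQ stack base */

	printf("%d\n", on_irq_stack(0xc1001f80, irqctx));	/* 1: inside  */
	printf("%d\n", on_irq_stack(0xc0fff000, irqctx));	/* 0: outside */
	return 0;
}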
14945 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq.c linux-2.6.32.42/arch/x86/kernel/irq.c
14946 --- linux-2.6.32.42/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
14947 +++ linux-2.6.32.42/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
14948 @@ -15,7 +15,7 @@
14949 #include <asm/mce.h>
14950 #include <asm/hw_irq.h>
14951
14952 -atomic_t irq_err_count;
14953 +atomic_unchecked_t irq_err_count;
14954
14955 /* Function pointer for generic interrupt vector handling */
14956 void (*generic_interrupt_extension)(void) = NULL;
14957 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
14958 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14959 seq_printf(p, " Machine check polls\n");
14960 #endif
14961 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14962 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14963 #if defined(CONFIG_X86_IO_APIC)
14964 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14965 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14966 #endif
14967 return 0;
14968 }
14969 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14970
14971 u64 arch_irq_stat(void)
14972 {
14973 - u64 sum = atomic_read(&irq_err_count);
14974 + u64 sum = atomic_read_unchecked(&irq_err_count);
14975
14976 #ifdef CONFIG_X86_IO_APIC
14977 - sum += atomic_read(&irq_mis_count);
14978 + sum += atomic_read_unchecked(&irq_mis_count);
14979 #endif
14980 return sum;
14981 }
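
Annotation (not part of the patch): irq_err_count is switched from atomic_t to atomic_unchecked_t, with matching _unchecked accessors at every use. Presumably, under the PAX_REFCOUNT scheme this patch series belongs to, atomic_t gains overflow protection and atomic_unchecked_t is the opt-out type for counters where wrap-around is harmless, such as error statistics; that reading is an assumption, not something this hunk shows. A simplified user-space model of the split:

#include <limits.h>
#include <stdio.h>

/*
 * Simplified model of the checked/unchecked split assumed above; this is
 * an illustration of the idea, not the kernel implementation.  Unsigned
 * counters are used so wrap-around stays well-defined in standard C.
 */
typedef struct { unsigned int counter; } atomic_t;
typedef struct { unsigned int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	if (v->counter == UINT_MAX) {		/* would overflow: refuse */
		fprintf(stderr, "refcount overflow detected, saturating\n");
		return;
	}
	v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;				/* statistics: wrapping is harmless */
}

int main(void)
{
	atomic_t ref = { UINT_MAX };
	atomic_unchecked_t err = { UINT_MAX };

	atomic_inc(&ref);			/* caught and saturated */
	atomic_inc_unchecked(&err);		/* wraps to 0, by design */
	printf("ref=%u err=%u\n", ref.counter, err.counter);
	return 0;
}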
14982 diff -urNp linux-2.6.32.42/arch/x86/kernel/kgdb.c linux-2.6.32.42/arch/x86/kernel/kgdb.c
14983 --- linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
14984 +++ linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
14985 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
14986
14987 /* clear the trace bit */
14988 linux_regs->flags &= ~X86_EFLAGS_TF;
14989 - atomic_set(&kgdb_cpu_doing_single_step, -1);
14990 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14991
14992 /* set the trace bit if we're stepping */
14993 if (remcomInBuffer[0] == 's') {
14994 linux_regs->flags |= X86_EFLAGS_TF;
14995 kgdb_single_step = 1;
14996 - atomic_set(&kgdb_cpu_doing_single_step,
14997 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14998 raw_smp_processor_id());
14999 }
15000
15001 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15002 break;
15003
15004 case DIE_DEBUG:
15005 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15006 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15007 raw_smp_processor_id()) {
15008 if (user_mode(regs))
15009 return single_step_cont(regs, args);
15010 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15011 return instruction_pointer(regs);
15012 }
15013
15014 -struct kgdb_arch arch_kgdb_ops = {
15015 +const struct kgdb_arch arch_kgdb_ops = {
15016 /* Breakpoint instruction: */
15017 .gdb_bpt_instr = { 0xcc },
15018 .flags = KGDB_HW_BREAKPOINT,
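
Annotation (not part of the patch): arch_kgdb_ops is constified here, the same pattern the series applies below to microcode_ops, dma_map_ops and the paravirt tables: an ops structure that is never modified after build time is declared const so the compiler may place it in a read-only section, where it cannot be retargeted by a stray or malicious write. A minimal generic sketch of the effect (not the kernel structs):

#include <stdio.h>

/* Generic illustration: a const, statically initialized ops table may be
 * placed in a read-only section (typically .rodata, or .data.rel.ro when
 * built position-independent), unlike its writable counterpart. */
struct ops {
	int (*handler)(int);
};

static int do_handle(int x)
{
	return x + 1;
}

/* writable: lives in .data, a function-pointer overwrite target */
static struct ops writable_ops = { .handler = do_handle };

/* read-only: any write attempt at runtime faults */
static const struct ops readonly_ops = { .handler = do_handle };

int main(void)
{
	printf("%d %d\n", writable_ops.handler(1), readonly_ops.handler(1));
	return 0;
}

Inspecting the object file with a symbol dump shows the two objects landing in different sections, which is the whole point of the constification sweep.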
15019 diff -urNp linux-2.6.32.42/arch/x86/kernel/kprobes.c linux-2.6.32.42/arch/x86/kernel/kprobes.c
15020 --- linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15021 +++ linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15022 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15023 char op;
15024 s32 raddr;
15025 } __attribute__((packed)) * jop;
15026 - jop = (struct __arch_jmp_op *)from;
15027 +
15028 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15029 +
15030 + pax_open_kernel();
15031 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15032 jop->op = RELATIVEJUMP_INSTRUCTION;
15033 + pax_close_kernel();
15034 }
15035
15036 /*
15037 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15038 kprobe_opcode_t opcode;
15039 kprobe_opcode_t *orig_opcodes = opcodes;
15040
15041 - if (search_exception_tables((unsigned long)opcodes))
15042 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15043 return 0; /* Page fault may occur on this address. */
15044
15045 retry:
15046 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15047 disp = (u8 *) p->addr + *((s32 *) insn) -
15048 (u8 *) p->ainsn.insn;
15049 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15050 + pax_open_kernel();
15051 *(s32 *)insn = (s32) disp;
15052 + pax_close_kernel();
15053 }
15054 }
15055 #endif
15056 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15057
15058 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15059 {
15060 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15061 + pax_open_kernel();
15062 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15063 + pax_close_kernel();
15064
15065 fix_riprel(p);
15066
15067 - if (can_boost(p->addr))
15068 + if (can_boost(ktla_ktva(p->addr)))
15069 p->ainsn.boostable = 0;
15070 else
15071 p->ainsn.boostable = -1;
15072
15073 - p->opcode = *p->addr;
15074 + p->opcode = *(ktla_ktva(p->addr));
15075 }
15076
15077 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15078 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15079 if (p->opcode == BREAKPOINT_INSTRUCTION)
15080 regs->ip = (unsigned long)p->addr;
15081 else
15082 - regs->ip = (unsigned long)p->ainsn.insn;
15083 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15084 }
15085
15086 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15087 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15088 if (p->ainsn.boostable == 1 && !p->post_handler) {
15089 /* Boost up -- we can execute copied instructions directly */
15090 reset_current_kprobe();
15091 - regs->ip = (unsigned long)p->ainsn.insn;
15092 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15093 preempt_enable_no_resched();
15094 return;
15095 }
15096 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15097 struct kprobe_ctlblk *kcb;
15098
15099 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15100 - if (*addr != BREAKPOINT_INSTRUCTION) {
15101 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15102 /*
15103 * The breakpoint instruction was removed right
15104 * after we hit it. Another cpu has removed
15105 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15106 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15107 {
15108 unsigned long *tos = stack_addr(regs);
15109 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15110 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15111 unsigned long orig_ip = (unsigned long)p->addr;
15112 kprobe_opcode_t *insn = p->ainsn.insn;
15113
15114 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15115 struct die_args *args = data;
15116 int ret = NOTIFY_DONE;
15117
15118 - if (args->regs && user_mode_vm(args->regs))
15119 + if (args->regs && user_mode(args->regs))
15120 return ret;
15121
15122 switch (val) {
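
Annotation (not part of the patch): the kprobes.c hunk wraps every write into kernel text with pax_open_kernel()/pax_close_kernel() and translates between the executable mapping and its writable alias with ktla_ktva()/ktva_ktla(). The sketch below is a user-space analogue of that pattern, stated as an assumption about how KERNEXEC-style patching works rather than a description of its implementation: code is reached through a read-only executable view, and the rare legitimate writes (kprobe insertion, relocations) go through a separate writable alias of the same memory, here built with memfd_create() and a second mmap().

#define _GNU_SOURCE
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("text", 0);	/* backing object for both views */

	if (fd < 0 || ftruncate(fd, psz) < 0)
		return 1;

	/* "kernel text": read + execute only */
	uint8_t *rx = mmap(NULL, psz, PROT_READ | PROT_EXEC,
			   MAP_SHARED, fd, 0);
	/* writable alias of the same physical page */
	uint8_t *rw = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
	if (rx == MAP_FAILED || rw == MAP_FAILED)
		return 1;

	/* analogue of ktla_ktva(): hop from the RX view to the RW view */
	ptrdiff_t delta = rw - rx;

	rw[0] = 0xcc;		/* patch a breakpoint through the writable alias */
	printf("rx[0] = %#x (patched via alias at offset %+td)\n", rx[0], delta);

	munmap(rx, psz);
	munmap(rw, psz);
	close(fd);
	return 0;
}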
15123 diff -urNp linux-2.6.32.42/arch/x86/kernel/ldt.c linux-2.6.32.42/arch/x86/kernel/ldt.c
15124 --- linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15125 +++ linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15126 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15127 if (reload) {
15128 #ifdef CONFIG_SMP
15129 preempt_disable();
15130 - load_LDT(pc);
15131 + load_LDT_nolock(pc);
15132 if (!cpumask_equal(mm_cpumask(current->mm),
15133 cpumask_of(smp_processor_id())))
15134 smp_call_function(flush_ldt, current->mm, 1);
15135 preempt_enable();
15136 #else
15137 - load_LDT(pc);
15138 + load_LDT_nolock(pc);
15139 #endif
15140 }
15141 if (oldsize) {
15142 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15143 return err;
15144
15145 for (i = 0; i < old->size; i++)
15146 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15147 + write_ldt_entry(new->ldt, i, old->ldt + i);
15148 return 0;
15149 }
15150
15151 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15152 retval = copy_ldt(&mm->context, &old_mm->context);
15153 mutex_unlock(&old_mm->context.lock);
15154 }
15155 +
15156 + if (tsk == current) {
15157 + mm->context.vdso = 0;
15158 +
15159 +#ifdef CONFIG_X86_32
15160 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15161 + mm->context.user_cs_base = 0UL;
15162 + mm->context.user_cs_limit = ~0UL;
15163 +
15164 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15165 + cpus_clear(mm->context.cpu_user_cs_mask);
15166 +#endif
15167 +
15168 +#endif
15169 +#endif
15170 +
15171 + }
15172 +
15173 return retval;
15174 }
15175
15176 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15177 }
15178 }
15179
15180 +#ifdef CONFIG_PAX_SEGMEXEC
15181 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15182 + error = -EINVAL;
15183 + goto out_unlock;
15184 + }
15185 +#endif
15186 +
15187 fill_ldt(&ldt, &ldt_info);
15188 if (oldmode)
15189 ldt.avl = 0;
15190 diff -urNp linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c
15191 --- linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15192 +++ linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15193 @@ -26,7 +26,7 @@
15194 #include <asm/system.h>
15195 #include <asm/cacheflush.h>
15196
15197 -static void set_idt(void *newidt, __u16 limit)
15198 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15199 {
15200 struct desc_ptr curidt;
15201
15202 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15203 }
15204
15205
15206 -static void set_gdt(void *newgdt, __u16 limit)
15207 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15208 {
15209 struct desc_ptr curgdt;
15210
15211 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15212 }
15213
15214 control_page = page_address(image->control_code_page);
15215 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15216 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15217
15218 relocate_kernel_ptr = control_page;
15219 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15220 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_amd.c linux-2.6.32.42/arch/x86/kernel/microcode_amd.c
15221 --- linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15222 +++ linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15223 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15224 uci->mc = NULL;
15225 }
15226
15227 -static struct microcode_ops microcode_amd_ops = {
15228 +static const struct microcode_ops microcode_amd_ops = {
15229 .request_microcode_user = request_microcode_user,
15230 .request_microcode_fw = request_microcode_fw,
15231 .collect_cpu_info = collect_cpu_info_amd,
15232 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15233 .microcode_fini_cpu = microcode_fini_cpu_amd,
15234 };
15235
15236 -struct microcode_ops * __init init_amd_microcode(void)
15237 +const struct microcode_ops * __init init_amd_microcode(void)
15238 {
15239 return &microcode_amd_ops;
15240 }
15241 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_core.c linux-2.6.32.42/arch/x86/kernel/microcode_core.c
15242 --- linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15243 +++ linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15244 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15245
15246 #define MICROCODE_VERSION "2.00"
15247
15248 -static struct microcode_ops *microcode_ops;
15249 +static const struct microcode_ops *microcode_ops;
15250
15251 /*
15252 * Synchronization.
15253 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_intel.c linux-2.6.32.42/arch/x86/kernel/microcode_intel.c
15254 --- linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15255 +++ linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15256 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15257
15258 static int get_ucode_user(void *to, const void *from, size_t n)
15259 {
15260 - return copy_from_user(to, from, n);
15261 + return copy_from_user(to, (__force const void __user *)from, n);
15262 }
15263
15264 static enum ucode_state
15265 request_microcode_user(int cpu, const void __user *buf, size_t size)
15266 {
15267 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15268 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15269 }
15270
15271 static void microcode_fini_cpu(int cpu)
15272 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15273 uci->mc = NULL;
15274 }
15275
15276 -static struct microcode_ops microcode_intel_ops = {
15277 +static const struct microcode_ops microcode_intel_ops = {
15278 .request_microcode_user = request_microcode_user,
15279 .request_microcode_fw = request_microcode_fw,
15280 .collect_cpu_info = collect_cpu_info,
15281 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15282 .microcode_fini_cpu = microcode_fini_cpu,
15283 };
15284
15285 -struct microcode_ops * __init init_intel_microcode(void)
15286 +const struct microcode_ops * __init init_intel_microcode(void)
15287 {
15288 return &microcode_intel_ops;
15289 }
15290 diff -urNp linux-2.6.32.42/arch/x86/kernel/module.c linux-2.6.32.42/arch/x86/kernel/module.c
15291 --- linux-2.6.32.42/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15292 +++ linux-2.6.32.42/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15293 @@ -34,7 +34,7 @@
15294 #define DEBUGP(fmt...)
15295 #endif
15296
15297 -void *module_alloc(unsigned long size)
15298 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15299 {
15300 struct vm_struct *area;
15301
15302 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15303 if (!area)
15304 return NULL;
15305
15306 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15307 - PAGE_KERNEL_EXEC);
15308 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15309 +}
15310 +
15311 +void *module_alloc(unsigned long size)
15312 +{
15313 +
15314 +#ifdef CONFIG_PAX_KERNEXEC
15315 + return __module_alloc(size, PAGE_KERNEL);
15316 +#else
15317 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15318 +#endif
15319 +
15320 }
15321
15322 /* Free memory returned from module_alloc */
15323 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15324 vfree(module_region);
15325 }
15326
15327 +#ifdef CONFIG_PAX_KERNEXEC
15328 +#ifdef CONFIG_X86_32
15329 +void *module_alloc_exec(unsigned long size)
15330 +{
15331 + struct vm_struct *area;
15332 +
15333 + if (size == 0)
15334 + return NULL;
15335 +
15336 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15337 + return area ? area->addr : NULL;
15338 +}
15339 +EXPORT_SYMBOL(module_alloc_exec);
15340 +
15341 +void module_free_exec(struct module *mod, void *module_region)
15342 +{
15343 + vunmap(module_region);
15344 +}
15345 +EXPORT_SYMBOL(module_free_exec);
15346 +#else
15347 +void module_free_exec(struct module *mod, void *module_region)
15348 +{
15349 + module_free(mod, module_region);
15350 +}
15351 +EXPORT_SYMBOL(module_free_exec);
15352 +
15353 +void *module_alloc_exec(unsigned long size)
15354 +{
15355 + return __module_alloc(size, PAGE_KERNEL_RX);
15356 +}
15357 +EXPORT_SYMBOL(module_alloc_exec);
15358 +#endif
15359 +#endif
15360 +
15361 /* We don't need anything special. */
15362 int module_frob_arch_sections(Elf_Ehdr *hdr,
15363 Elf_Shdr *sechdrs,
15364 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15365 unsigned int i;
15366 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15367 Elf32_Sym *sym;
15368 - uint32_t *location;
15369 + uint32_t *plocation, location;
15370
15371 DEBUGP("Applying relocate section %u to %u\n", relsec,
15372 sechdrs[relsec].sh_info);
15373 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15374 /* This is where to make the change */
15375 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15376 - + rel[i].r_offset;
15377 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15378 + location = (uint32_t)plocation;
15379 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15380 + plocation = ktla_ktva((void *)plocation);
15381 /* This is the symbol it is referring to. Note that all
15382 undefined symbols have been resolved. */
15383 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15384 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15385 switch (ELF32_R_TYPE(rel[i].r_info)) {
15386 case R_386_32:
15387 /* We add the value into the location given */
15388 - *location += sym->st_value;
15389 + pax_open_kernel();
15390 + *plocation += sym->st_value;
15391 + pax_close_kernel();
15392 break;
15393 case R_386_PC32:
15394 /* Add the value, subtract its postition */
15395 - *location += sym->st_value - (uint32_t)location;
15396 + pax_open_kernel();
15397 + *plocation += sym->st_value - location;
15398 + pax_close_kernel();
15399 break;
15400 default:
15401 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15402 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15403 case R_X86_64_NONE:
15404 break;
15405 case R_X86_64_64:
15406 + pax_open_kernel();
15407 *(u64 *)loc = val;
15408 + pax_close_kernel();
15409 break;
15410 case R_X86_64_32:
15411 + pax_open_kernel();
15412 *(u32 *)loc = val;
15413 + pax_close_kernel();
15414 if (val != *(u32 *)loc)
15415 goto overflow;
15416 break;
15417 case R_X86_64_32S:
15418 + pax_open_kernel();
15419 *(s32 *)loc = val;
15420 + pax_close_kernel();
15421 if ((s64)val != *(s32 *)loc)
15422 goto overflow;
15423 break;
15424 case R_X86_64_PC32:
15425 val -= (u64)loc;
15426 + pax_open_kernel();
15427 *(u32 *)loc = val;
15428 + pax_close_kernel();
15429 +
15430 #if 0
15431 if ((s64)val != *(s32 *)loc)
15432 goto overflow;
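
Annotation (not part of the patch): with this module.c hunk, module_alloc() hands out non-executable memory when KERNEXEC is enabled and a separate module_alloc_exec() provides read-only executable memory (a dedicated address window on i386, PAGE_KERNEL_RX on x86_64), so module code and module data never share a writable-and-executable mapping. The sketch below is a user-space W^X analogue of that policy using mmap()/mprotect(); it illustrates the split, it is not the kernel's vmalloc-based allocator.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* data allocations: RW, never executable */
static void *alloc_module_data(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

/* code allocations: writable only while being filled, then RX */
static void *alloc_module_code(const void *code, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	memcpy(p, code, len);			/* fill while writable */
	if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0) {
		munmap(p, len);
		return NULL;
	}
	return p;				/* now read-only + executable */
}

int main(void)
{
	static const unsigned char ret_insn[] = { 0xc3 };	/* x86 'ret' */
	void *data = alloc_module_data(4096);
	void *code = alloc_module_code(ret_insn, sizeof(ret_insn));

	printf("data=%p (rw), code=%p (rx)\n", data, code);
	return 0;
}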
15433 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt.c linux-2.6.32.42/arch/x86/kernel/paravirt.c
15434 --- linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15435 +++ linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15436 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15437 * corresponding structure. */
15438 static void *get_call_destination(u8 type)
15439 {
15440 - struct paravirt_patch_template tmpl = {
15441 + const struct paravirt_patch_template tmpl = {
15442 .pv_init_ops = pv_init_ops,
15443 .pv_time_ops = pv_time_ops,
15444 .pv_cpu_ops = pv_cpu_ops,
15445 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15446 .pv_lock_ops = pv_lock_ops,
15447 #endif
15448 };
15449 +
15450 + pax_track_stack();
15451 +
15452 return *((void **)&tmpl + type);
15453 }
15454
15455 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15456 if (opfunc == NULL)
15457 /* If there's no function, patch it with a ud2a (BUG) */
15458 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15459 - else if (opfunc == _paravirt_nop)
15460 + else if (opfunc == (void *)_paravirt_nop)
15461 /* If the operation is a nop, then nop the callsite */
15462 ret = paravirt_patch_nop();
15463
15464 /* identity functions just return their single argument */
15465 - else if (opfunc == _paravirt_ident_32)
15466 + else if (opfunc == (void *)_paravirt_ident_32)
15467 ret = paravirt_patch_ident_32(insnbuf, len);
15468 - else if (opfunc == _paravirt_ident_64)
15469 + else if (opfunc == (void *)_paravirt_ident_64)
15470 ret = paravirt_patch_ident_64(insnbuf, len);
15471
15472 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15473 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15474 if (insn_len > len || start == NULL)
15475 insn_len = len;
15476 else
15477 - memcpy(insnbuf, start, insn_len);
15478 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15479
15480 return insn_len;
15481 }
15482 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15483 preempt_enable();
15484 }
15485
15486 -struct pv_info pv_info = {
15487 +struct pv_info pv_info __read_only = {
15488 .name = "bare hardware",
15489 .paravirt_enabled = 0,
15490 .kernel_rpl = 0,
15491 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15492 };
15493
15494 -struct pv_init_ops pv_init_ops = {
15495 +struct pv_init_ops pv_init_ops __read_only = {
15496 .patch = native_patch,
15497 };
15498
15499 -struct pv_time_ops pv_time_ops = {
15500 +struct pv_time_ops pv_time_ops __read_only = {
15501 .sched_clock = native_sched_clock,
15502 };
15503
15504 -struct pv_irq_ops pv_irq_ops = {
15505 +struct pv_irq_ops pv_irq_ops __read_only = {
15506 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15507 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15508 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15509 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15510 #endif
15511 };
15512
15513 -struct pv_cpu_ops pv_cpu_ops = {
15514 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15515 .cpuid = native_cpuid,
15516 .get_debugreg = native_get_debugreg,
15517 .set_debugreg = native_set_debugreg,
15518 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15519 .end_context_switch = paravirt_nop,
15520 };
15521
15522 -struct pv_apic_ops pv_apic_ops = {
15523 +struct pv_apic_ops pv_apic_ops __read_only = {
15524 #ifdef CONFIG_X86_LOCAL_APIC
15525 .startup_ipi_hook = paravirt_nop,
15526 #endif
15527 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15528 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15529 #endif
15530
15531 -struct pv_mmu_ops pv_mmu_ops = {
15532 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15533
15534 .read_cr2 = native_read_cr2,
15535 .write_cr2 = native_write_cr2,
15536 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15537 },
15538
15539 .set_fixmap = native_set_fixmap,
15540 +
15541 +#ifdef CONFIG_PAX_KERNEXEC
15542 + .pax_open_kernel = native_pax_open_kernel,
15543 + .pax_close_kernel = native_pax_close_kernel,
15544 +#endif
15545 +
15546 };
15547
15548 EXPORT_SYMBOL_GPL(pv_time_ops);
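
Annotation (not part of the patch): the pv_*_ops tables gain a __read_only annotation so that, once boot-time paravirt patching is finished, these heavily used function tables sit on pages the kernel can keep non-writable. The definition of __read_only is not shown in this hunk; the sketch below assumes it is a section attribute, which is stated here as an assumption.

#include <stdio.h>

/*
 * Sketch of what a __read_only annotation might look like; the actual
 * PaX definition and the section name are assumptions, not taken from
 * this hunk.  The named section can later be mapped read-only once the
 * one-time initialization writes are done.
 */
#define __read_only __attribute__((__section__(".data..read_only")))

struct pv_ops_like {
	void (*op)(void);
};

static void native_op(void) { }

/* lives in its own section rather than ordinary .data */
static struct pv_ops_like demo_ops __read_only = { .op = native_op };

int main(void)
{
	demo_ops.op();
	printf("ops table placed in a dedicated section\n");
	return 0;
}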
15549 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c
15550 --- linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15551 +++ linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15552 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15553 __raw_spin_lock(lock);
15554 }
15555
15556 -struct pv_lock_ops pv_lock_ops = {
15557 +struct pv_lock_ops pv_lock_ops __read_only = {
15558 #ifdef CONFIG_SMP
15559 .spin_is_locked = __ticket_spin_is_locked,
15560 .spin_is_contended = __ticket_spin_is_contended,
15561 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c
15562 --- linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15563 +++ linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15564 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15565 free_pages((unsigned long)vaddr, get_order(size));
15566 }
15567
15568 -static struct dma_map_ops calgary_dma_ops = {
15569 +static const struct dma_map_ops calgary_dma_ops = {
15570 .alloc_coherent = calgary_alloc_coherent,
15571 .free_coherent = calgary_free_coherent,
15572 .map_sg = calgary_map_sg,
15573 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-dma.c linux-2.6.32.42/arch/x86/kernel/pci-dma.c
15574 --- linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15575 +++ linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15576 @@ -14,7 +14,7 @@
15577
15578 static int forbid_dac __read_mostly;
15579
15580 -struct dma_map_ops *dma_ops;
15581 +const struct dma_map_ops *dma_ops;
15582 EXPORT_SYMBOL(dma_ops);
15583
15584 static int iommu_sac_force __read_mostly;
15585 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15586
15587 int dma_supported(struct device *dev, u64 mask)
15588 {
15589 - struct dma_map_ops *ops = get_dma_ops(dev);
15590 + const struct dma_map_ops *ops = get_dma_ops(dev);
15591
15592 #ifdef CONFIG_PCI
15593 if (mask > 0xffffffff && forbid_dac > 0) {
15594 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c
15595 --- linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15596 +++ linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15597 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15598 return -1;
15599 }
15600
15601 -static struct dma_map_ops gart_dma_ops = {
15602 +static const struct dma_map_ops gart_dma_ops = {
15603 .map_sg = gart_map_sg,
15604 .unmap_sg = gart_unmap_sg,
15605 .map_page = gart_map_page,
15606 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-nommu.c linux-2.6.32.42/arch/x86/kernel/pci-nommu.c
15607 --- linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15608 +++ linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15609 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15610 flush_write_buffers();
15611 }
15612
15613 -struct dma_map_ops nommu_dma_ops = {
15614 +const struct dma_map_ops nommu_dma_ops = {
15615 .alloc_coherent = dma_generic_alloc_coherent,
15616 .free_coherent = nommu_free_coherent,
15617 .map_sg = nommu_map_sg,
15618 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c
15619 --- linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15620 +++ linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15621 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15622 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15623 }
15624
15625 -static struct dma_map_ops swiotlb_dma_ops = {
15626 +static const struct dma_map_ops swiotlb_dma_ops = {
15627 .mapping_error = swiotlb_dma_mapping_error,
15628 .alloc_coherent = x86_swiotlb_alloc_coherent,
15629 .free_coherent = swiotlb_free_coherent,
15630 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_32.c linux-2.6.32.42/arch/x86/kernel/process_32.c
15631 --- linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15632 +++ linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15633 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15634 unsigned long thread_saved_pc(struct task_struct *tsk)
15635 {
15636 return ((unsigned long *)tsk->thread.sp)[3];
15637 +//XXX return tsk->thread.eip;
15638 }
15639
15640 #ifndef CONFIG_SMP
15641 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15642 unsigned short ss, gs;
15643 const char *board;
15644
15645 - if (user_mode_vm(regs)) {
15646 + if (user_mode(regs)) {
15647 sp = regs->sp;
15648 ss = regs->ss & 0xffff;
15649 - gs = get_user_gs(regs);
15650 } else {
15651 sp = (unsigned long) (&regs->sp);
15652 savesegment(ss, ss);
15653 - savesegment(gs, gs);
15654 }
15655 + gs = get_user_gs(regs);
15656
15657 printk("\n");
15658
15659 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15660 regs.bx = (unsigned long) fn;
15661 regs.dx = (unsigned long) arg;
15662
15663 - regs.ds = __USER_DS;
15664 - regs.es = __USER_DS;
15665 + regs.ds = __KERNEL_DS;
15666 + regs.es = __KERNEL_DS;
15667 regs.fs = __KERNEL_PERCPU;
15668 - regs.gs = __KERNEL_STACK_CANARY;
15669 + savesegment(gs, regs.gs);
15670 regs.orig_ax = -1;
15671 regs.ip = (unsigned long) kernel_thread_helper;
15672 regs.cs = __KERNEL_CS | get_kernel_rpl();
15673 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15674 struct task_struct *tsk;
15675 int err;
15676
15677 - childregs = task_pt_regs(p);
15678 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15679 *childregs = *regs;
15680 childregs->ax = 0;
15681 childregs->sp = sp;
15682
15683 p->thread.sp = (unsigned long) childregs;
15684 p->thread.sp0 = (unsigned long) (childregs+1);
15685 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15686
15687 p->thread.ip = (unsigned long) ret_from_fork;
15688
15689 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
15690 struct thread_struct *prev = &prev_p->thread,
15691 *next = &next_p->thread;
15692 int cpu = smp_processor_id();
15693 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15694 + struct tss_struct *tss = init_tss + cpu;
15695 bool preload_fpu;
15696
15697 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15698 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
15699 */
15700 lazy_save_gs(prev->gs);
15701
15702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15703 + __set_fs(task_thread_info(next_p)->addr_limit);
15704 +#endif
15705 +
15706 /*
15707 * Load the per-thread Thread-Local Storage descriptor.
15708 */
15709 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
15710 */
15711 arch_end_context_switch(next_p);
15712
15713 + percpu_write(current_task, next_p);
15714 + percpu_write(current_tinfo, &next_p->tinfo);
15715 +
15716 if (preload_fpu)
15717 __math_state_restore();
15718
15719 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
15720 if (prev->gs | next->gs)
15721 lazy_load_gs(next->gs);
15722
15723 - percpu_write(current_task, next_p);
15724 -
15725 return prev_p;
15726 }
15727
15728 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
15729 } while (count++ < 16);
15730 return 0;
15731 }
15732 -
15733 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_64.c linux-2.6.32.42/arch/x86/kernel/process_64.c
15734 --- linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
15735 +++ linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
15736 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15737 void exit_idle(void)
15738 {
15739 /* idle loop has pid 0 */
15740 - if (current->pid)
15741 + if (task_pid_nr(current))
15742 return;
15743 __exit_idle();
15744 }
15745 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15746 if (!board)
15747 board = "";
15748 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15749 - current->pid, current->comm, print_tainted(),
15750 + task_pid_nr(current), current->comm, print_tainted(),
15751 init_utsname()->release,
15752 (int)strcspn(init_utsname()->version, " "),
15753 init_utsname()->version, board);
15754 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15755 struct pt_regs *childregs;
15756 struct task_struct *me = current;
15757
15758 - childregs = ((struct pt_regs *)
15759 - (THREAD_SIZE + task_stack_page(p))) - 1;
15760 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15761 *childregs = *regs;
15762
15763 childregs->ax = 0;
15764 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15765 p->thread.sp = (unsigned long) childregs;
15766 p->thread.sp0 = (unsigned long) (childregs+1);
15767 p->thread.usersp = me->thread.usersp;
15768 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15769
15770 set_tsk_thread_flag(p, TIF_FORK);
15771
15772 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
15773 struct thread_struct *prev = &prev_p->thread;
15774 struct thread_struct *next = &next_p->thread;
15775 int cpu = smp_processor_id();
15776 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15777 + struct tss_struct *tss = init_tss + cpu;
15778 unsigned fsindex, gsindex;
15779 bool preload_fpu;
15780
15781 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
15782 prev->usersp = percpu_read(old_rsp);
15783 percpu_write(old_rsp, next->usersp);
15784 percpu_write(current_task, next_p);
15785 + percpu_write(current_tinfo, &next_p->tinfo);
15786
15787 - percpu_write(kernel_stack,
15788 - (unsigned long)task_stack_page(next_p) +
15789 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15790 + percpu_write(kernel_stack, next->sp0);
15791
15792 /*
15793 * Now maybe reload the debug registers and handle I/O bitmaps
15794 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
15795 if (!p || p == current || p->state == TASK_RUNNING)
15796 return 0;
15797 stack = (unsigned long)task_stack_page(p);
15798 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15799 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15800 return 0;
15801 fp = *(u64 *)(p->thread.sp);
15802 do {
15803 - if (fp < (unsigned long)stack ||
15804 - fp >= (unsigned long)stack+THREAD_SIZE)
15805 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15806 return 0;
15807 ip = *(u64 *)(fp+8);
15808 if (!in_sched_functions(ip))
15809 diff -urNp linux-2.6.32.42/arch/x86/kernel/process.c linux-2.6.32.42/arch/x86/kernel/process.c
15810 --- linux-2.6.32.42/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15811 +++ linux-2.6.32.42/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15812 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15813
15814 void free_thread_info(struct thread_info *ti)
15815 {
15816 - free_thread_xstate(ti->task);
15817 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15818 }
15819
15820 +static struct kmem_cache *task_struct_cachep;
15821 +
15822 void arch_task_cache_init(void)
15823 {
15824 - task_xstate_cachep =
15825 - kmem_cache_create("task_xstate", xstate_size,
15826 + /* create a slab on which task_structs can be allocated */
15827 + task_struct_cachep =
15828 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15829 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15830 +
15831 + task_xstate_cachep =
15832 + kmem_cache_create("task_xstate", xstate_size,
15833 __alignof__(union thread_xstate),
15834 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15835 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15836 +}
15837 +
15838 +struct task_struct *alloc_task_struct(void)
15839 +{
15840 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15841 +}
15842 +
15843 +void free_task_struct(struct task_struct *task)
15844 +{
15845 + free_thread_xstate(task);
15846 + kmem_cache_free(task_struct_cachep, task);
15847 }
15848
15849 /*
15850 @@ -73,7 +90,7 @@ void exit_thread(void)
15851 unsigned long *bp = t->io_bitmap_ptr;
15852
15853 if (bp) {
15854 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15855 + struct tss_struct *tss = init_tss + get_cpu();
15856
15857 t->io_bitmap_ptr = NULL;
15858 clear_thread_flag(TIF_IO_BITMAP);
15859 @@ -93,6 +110,9 @@ void flush_thread(void)
15860
15861 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15862
15863 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15864 + loadsegment(gs, 0);
15865 +#endif
15866 tsk->thread.debugreg0 = 0;
15867 tsk->thread.debugreg1 = 0;
15868 tsk->thread.debugreg2 = 0;
15869 @@ -307,7 +327,7 @@ void default_idle(void)
15870 EXPORT_SYMBOL(default_idle);
15871 #endif
15872
15873 -void stop_this_cpu(void *dummy)
15874 +__noreturn void stop_this_cpu(void *dummy)
15875 {
15876 local_irq_disable();
15877 /*
15878 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15879 }
15880 early_param("idle", idle_setup);
15881
15882 -unsigned long arch_align_stack(unsigned long sp)
15883 +#ifdef CONFIG_PAX_RANDKSTACK
15884 +asmlinkage void pax_randomize_kstack(void)
15885 {
15886 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15887 - sp -= get_random_int() % 8192;
15888 - return sp & ~0xf;
15889 -}
15890 + struct thread_struct *thread = &current->thread;
15891 + unsigned long time;
15892
15893 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15894 -{
15895 - unsigned long range_end = mm->brk + 0x02000000;
15896 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15897 + if (!randomize_va_space)
15898 + return;
15899 +
15900 + rdtscl(time);
15901 +
15902 + /* P4 seems to return a 0 LSB, ignore it */
15903 +#ifdef CONFIG_MPENTIUM4
15904 + time &= 0x3EUL;
15905 + time <<= 2;
15906 +#elif defined(CONFIG_X86_64)
15907 + time &= 0xFUL;
15908 + time <<= 4;
15909 +#else
15910 + time &= 0x1FUL;
15911 + time <<= 3;
15912 +#endif
15913 +
15914 + thread->sp0 ^= time;
15915 + load_sp0(init_tss + smp_processor_id(), thread);
15916 +
15917 +#ifdef CONFIG_X86_64
15918 + percpu_write(kernel_stack, thread->sp0);
15919 +#endif
15920 }
15921 +#endif
15922
15923 diff -urNp linux-2.6.32.42/arch/x86/kernel/ptrace.c linux-2.6.32.42/arch/x86/kernel/ptrace.c
15924 --- linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
15925 +++ linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
15926 @@ -925,7 +925,7 @@ static const struct user_regset_view use
15927 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
15928 {
15929 int ret;
15930 - unsigned long __user *datap = (unsigned long __user *)data;
15931 + unsigned long __user *datap = (__force unsigned long __user *)data;
15932
15933 switch (request) {
15934 /* read the word at location addr in the USER area. */
15935 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
15936 if (addr < 0)
15937 return -EIO;
15938 ret = do_get_thread_area(child, addr,
15939 - (struct user_desc __user *) data);
15940 + (__force struct user_desc __user *) data);
15941 break;
15942
15943 case PTRACE_SET_THREAD_AREA:
15944 if (addr < 0)
15945 return -EIO;
15946 ret = do_set_thread_area(child, addr,
15947 - (struct user_desc __user *) data, 0);
15948 + (__force struct user_desc __user *) data, 0);
15949 break;
15950 #endif
15951
15952 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
15953 #ifdef CONFIG_X86_PTRACE_BTS
15954 case PTRACE_BTS_CONFIG:
15955 ret = ptrace_bts_config
15956 - (child, data, (struct ptrace_bts_config __user *)addr);
15957 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15958 break;
15959
15960 case PTRACE_BTS_STATUS:
15961 ret = ptrace_bts_status
15962 - (child, data, (struct ptrace_bts_config __user *)addr);
15963 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15964 break;
15965
15966 case PTRACE_BTS_SIZE:
15967 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
15968
15969 case PTRACE_BTS_GET:
15970 ret = ptrace_bts_read_record
15971 - (child, data, (struct bts_struct __user *) addr);
15972 + (child, data, (__force struct bts_struct __user *) addr);
15973 break;
15974
15975 case PTRACE_BTS_CLEAR:
15976 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
15977
15978 case PTRACE_BTS_DRAIN:
15979 ret = ptrace_bts_drain
15980 - (child, data, (struct bts_struct __user *) addr);
15981 + (child, data, (__force struct bts_struct __user *) addr);
15982 break;
15983 #endif /* CONFIG_X86_PTRACE_BTS */
15984
15985 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
15986 info.si_code = si_code;
15987
15988 /* User-mode ip? */
15989 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
15990 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
15991
15992 /* Send us the fake SIGTRAP */
15993 force_sig_info(SIGTRAP, &info, tsk);
15994 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
15995 * We must return the syscall number to actually look up in the table.
15996 * This can be -1L to skip running any syscall at all.
15997 */
15998 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
15999 +long syscall_trace_enter(struct pt_regs *regs)
16000 {
16001 long ret = 0;
16002
16003 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16004 return ret ?: regs->orig_ax;
16005 }
16006
16007 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16008 +void syscall_trace_leave(struct pt_regs *regs)
16009 {
16010 if (unlikely(current->audit_context))
16011 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16012 diff -urNp linux-2.6.32.42/arch/x86/kernel/reboot.c linux-2.6.32.42/arch/x86/kernel/reboot.c
16013 --- linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
16014 +++ linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
16015 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16016 EXPORT_SYMBOL(pm_power_off);
16017
16018 static const struct desc_ptr no_idt = {};
16019 -static int reboot_mode;
16020 +static unsigned short reboot_mode;
16021 enum reboot_type reboot_type = BOOT_KBD;
16022 int reboot_force;
16023
16024 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16025 controller to pulse the CPU reset line, which is more thorough, but
16026 doesn't work with at least one type of 486 motherboard. It is easy
16027 to stop this code working; hence the copious comments. */
16028 -static const unsigned long long
16029 -real_mode_gdt_entries [3] =
16030 +static struct desc_struct
16031 +real_mode_gdt_entries [3] __read_only =
16032 {
16033 - 0x0000000000000000ULL, /* Null descriptor */
16034 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16035 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16036 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16037 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16038 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16039 };
16040
16041 static const struct desc_ptr
16042 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16043 * specified by the code and length parameters.
16044 * We assume that length will aways be less that 100!
16045 */
16046 -void machine_real_restart(const unsigned char *code, int length)
16047 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16048 {
16049 local_irq_disable();
16050
16051 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16052 /* Remap the kernel at virtual address zero, as well as offset zero
16053 from the kernel segment. This assumes the kernel segment starts at
16054 virtual address PAGE_OFFSET. */
16055 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16056 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16057 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16058 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16059
16060 /*
16061 * Use `swapper_pg_dir' as our page directory.
16062 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16063 boot)". This seems like a fairly standard thing that gets set by
16064 REBOOT.COM programs, and the previous reset routine did this
16065 too. */
16066 - *((unsigned short *)0x472) = reboot_mode;
16067 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16068
16069 /* For the switch to real mode, copy some code to low memory. It has
16070 to be in the first 64k because it is running in 16-bit mode, and it
16071 has to have the same physical and virtual address, because it turns
16072 off paging. Copy it near the end of the first page, out of the way
16073 of BIOS variables. */
16074 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16075 - real_mode_switch, sizeof (real_mode_switch));
16076 - memcpy((void *)(0x1000 - 100), code, length);
16077 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16078 + memcpy(__va(0x1000 - 100), code, length);
16079
16080 /* Set up the IDT for real mode. */
16081 load_idt(&real_mode_idt);
16082 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16083 __asm__ __volatile__ ("ljmp $0x0008,%0"
16084 :
16085 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16086 + do { } while (1);
16087 }
16088 #ifdef CONFIG_APM_MODULE
16089 EXPORT_SYMBOL(machine_real_restart);
16090 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
16091 {
16092 }
16093
16094 -static void native_machine_emergency_restart(void)
16095 +__noreturn static void native_machine_emergency_restart(void)
16096 {
16097 int i;
16098
16099 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
16100 #endif
16101 }
16102
16103 -static void __machine_emergency_restart(int emergency)
16104 +static __noreturn void __machine_emergency_restart(int emergency)
16105 {
16106 reboot_emergency = emergency;
16107 machine_ops.emergency_restart();
16108 }
16109
16110 -static void native_machine_restart(char *__unused)
16111 +static __noreturn void native_machine_restart(char *__unused)
16112 {
16113 printk("machine restart\n");
16114
16115 @@ -666,7 +666,7 @@ static void native_machine_restart(char
16116 __machine_emergency_restart(0);
16117 }
16118
16119 -static void native_machine_halt(void)
16120 +static __noreturn void native_machine_halt(void)
16121 {
16122 /* stop other cpus and apics */
16123 machine_shutdown();
16124 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
16125 stop_this_cpu(NULL);
16126 }
16127
16128 -static void native_machine_power_off(void)
16129 +__noreturn static void native_machine_power_off(void)
16130 {
16131 if (pm_power_off) {
16132 if (!reboot_force)
16133 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
16134 }
16135 /* a fallback in case there is no PM info available */
16136 tboot_shutdown(TB_SHUTDOWN_HALT);
16137 + do { } while (1);
16138 }
16139
16140 struct machine_ops machine_ops = {
16141 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup.c linux-2.6.32.42/arch/x86/kernel/setup.c
16142 --- linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16143 +++ linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16144 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16145
16146 if (!boot_params.hdr.root_flags)
16147 root_mountflags &= ~MS_RDONLY;
16148 - init_mm.start_code = (unsigned long) _text;
16149 - init_mm.end_code = (unsigned long) _etext;
16150 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16151 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16152 init_mm.end_data = (unsigned long) _edata;
16153 init_mm.brk = _brk_end;
16154
16155 - code_resource.start = virt_to_phys(_text);
16156 - code_resource.end = virt_to_phys(_etext)-1;
16157 - data_resource.start = virt_to_phys(_etext);
16158 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16159 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16160 + data_resource.start = virt_to_phys(_sdata);
16161 data_resource.end = virt_to_phys(_edata)-1;
16162 bss_resource.start = virt_to_phys(&__bss_start);
16163 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16164 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup_percpu.c linux-2.6.32.42/arch/x86/kernel/setup_percpu.c
16165 --- linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16166 +++ linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16167 @@ -25,19 +25,17 @@
16168 # define DBG(x...)
16169 #endif
16170
16171 -DEFINE_PER_CPU(int, cpu_number);
16172 +#ifdef CONFIG_SMP
16173 +DEFINE_PER_CPU(unsigned int, cpu_number);
16174 EXPORT_PER_CPU_SYMBOL(cpu_number);
16175 +#endif
16176
16177 -#ifdef CONFIG_X86_64
16178 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16179 -#else
16180 -#define BOOT_PERCPU_OFFSET 0
16181 -#endif
16182
16183 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16184 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16185
16186 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16187 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16188 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16189 };
16190 EXPORT_SYMBOL(__per_cpu_offset);
16191 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16192 {
16193 #ifdef CONFIG_X86_32
16194 struct desc_struct gdt;
16195 + unsigned long base = per_cpu_offset(cpu);
16196
16197 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16198 - 0x2 | DESCTYPE_S, 0x8);
16199 - gdt.s = 1;
16200 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16201 + 0x83 | DESCTYPE_S, 0xC);
16202 write_gdt_entry(get_cpu_gdt_table(cpu),
16203 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16204 #endif
16205 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16206 /* alrighty, percpu areas up and running */
16207 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16208 for_each_possible_cpu(cpu) {
16209 +#ifdef CONFIG_CC_STACKPROTECTOR
16210 +#ifdef CONFIG_X86_32
16211 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16212 +#endif
16213 +#endif
16214 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16215 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16216 per_cpu(cpu_number, cpu) = cpu;
16217 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16218 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16219 #endif
16220 #endif
16221 +#ifdef CONFIG_CC_STACKPROTECTOR
16222 +#ifdef CONFIG_X86_32
16223 + if (!cpu)
16224 + per_cpu(stack_canary.canary, cpu) = canary;
16225 +#endif
16226 +#endif
16227 /*
16228 * Up to this point, the boot CPU has been using .data.init
16229 * area. Reload any changed state for the boot CPU.
16230 diff -urNp linux-2.6.32.42/arch/x86/kernel/signal.c linux-2.6.32.42/arch/x86/kernel/signal.c
16231 --- linux-2.6.32.42/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16232 +++ linux-2.6.32.42/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16233 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16234 * Align the stack pointer according to the i386 ABI,
16235 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16236 */
16237 - sp = ((sp + 4) & -16ul) - 4;
16238 + sp = ((sp - 12) & -16ul) - 4;
16239 #else /* !CONFIG_X86_32 */
16240 sp = round_down(sp, 16) - 8;
16241 #endif
16242 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16243 * Return an always-bogus address instead so we will die with SIGSEGV.
16244 */
16245 if (onsigstack && !likely(on_sig_stack(sp)))
16246 - return (void __user *)-1L;
16247 + return (__force void __user *)-1L;
16248
16249 /* save i387 state */
16250 if (used_math() && save_i387_xstate(*fpstate) < 0)
16251 - return (void __user *)-1L;
16252 + return (__force void __user *)-1L;
16253
16254 return (void __user *)sp;
16255 }
16256 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16257 }
16258
16259 if (current->mm->context.vdso)
16260 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16261 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16262 else
16263 - restorer = &frame->retcode;
16264 + restorer = (void __user *)&frame->retcode;
16265 if (ka->sa.sa_flags & SA_RESTORER)
16266 restorer = ka->sa.sa_restorer;
16267
16268 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16269 * reasons and because gdb uses it as a signature to notice
16270 * signal handler stack frames.
16271 */
16272 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16273 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16274
16275 if (err)
16276 return -EFAULT;
16277 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16278 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16279
16280 /* Set up to return from userspace. */
16281 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16282 + if (current->mm->context.vdso)
16283 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16284 + else
16285 + restorer = (void __user *)&frame->retcode;
16286 if (ka->sa.sa_flags & SA_RESTORER)
16287 restorer = ka->sa.sa_restorer;
16288 put_user_ex(restorer, &frame->pretcode);
16289 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16290 * reasons and because gdb uses it as a signature to notice
16291 * signal handler stack frames.
16292 */
16293 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16294 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16295 } put_user_catch(err);
16296
16297 if (err)
16298 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16299 int signr;
16300 sigset_t *oldset;
16301
16302 + pax_track_stack();
16303 +
16304 /*
16305 * We want the common case to go fast, which is why we may in certain
16306 * cases get here from kernel mode. Just return without doing anything
16307 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16308 * X86_32: vm86 regs switched out by assembly code before reaching
16309 * here, so testing against kernel CS suffices.
16310 */
16311 - if (!user_mode(regs))
16312 + if (!user_mode_novm(regs))
16313 return;
16314
16315 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16316 diff -urNp linux-2.6.32.42/arch/x86/kernel/smpboot.c linux-2.6.32.42/arch/x86/kernel/smpboot.c
16317 --- linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16318 +++ linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-05-11 18:25:15.000000000 -0400
16319 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16320 */
16321 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16322
16323 -void cpu_hotplug_driver_lock()
16324 +void cpu_hotplug_driver_lock(void)
16325 {
16326 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16327 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16328 }
16329
16330 -void cpu_hotplug_driver_unlock()
16331 +void cpu_hotplug_driver_unlock(void)
16332 {
16333 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16334 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16335 }
16336
16337 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16338 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16339 set_idle_for_cpu(cpu, c_idle.idle);
16340 do_rest:
16341 per_cpu(current_task, cpu) = c_idle.idle;
16342 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16343 #ifdef CONFIG_X86_32
16344 /* Stack for startup_32 can be just as for start_secondary onwards */
16345 irq_ctx_init(cpu);
16346 @@ -750,11 +751,13 @@ do_rest:
16347 #else
16348 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16349 initial_gs = per_cpu_offset(cpu);
16350 - per_cpu(kernel_stack, cpu) =
16351 - (unsigned long)task_stack_page(c_idle.idle) -
16352 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16353 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16354 #endif
16355 +
16356 + pax_open_kernel();
16357 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16358 + pax_close_kernel();
16359 +
16360 initial_code = (unsigned long)start_secondary;
16361 stack_start.sp = (void *) c_idle.idle->thread.sp;
16362
16363 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16364
16365 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16366
16367 +#ifdef CONFIG_PAX_PER_CPU_PGD
16368 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16369 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16370 + KERNEL_PGD_PTRS);
16371 +#endif
16372 +
16373 err = do_boot_cpu(apicid, cpu);
16374
16375 if (err) {
16376 diff -urNp linux-2.6.32.42/arch/x86/kernel/step.c linux-2.6.32.42/arch/x86/kernel/step.c
16377 --- linux-2.6.32.42/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16378 +++ linux-2.6.32.42/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16379 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16380 struct desc_struct *desc;
16381 unsigned long base;
16382
16383 - seg &= ~7UL;
16384 + seg >>= 3;
16385
16386 mutex_lock(&child->mm->context.lock);
16387 - if (unlikely((seg >> 3) >= child->mm->context.size))
16388 + if (unlikely(seg >= child->mm->context.size))
16389 addr = -1L; /* bogus selector, access would fault */
16390 else {
16391 desc = child->mm->context.ldt + seg;
16392 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16393 addr += base;
16394 }
16395 mutex_unlock(&child->mm->context.lock);
16396 - }
16397 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16398 + addr = ktla_ktva(addr);
16399
16400 return addr;
16401 }
16402 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16403 unsigned char opcode[15];
16404 unsigned long addr = convert_ip_to_linear(child, regs);
16405
16406 + if (addr == -EINVAL)
16407 + return 0;
16408 +
16409 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16410 for (i = 0; i < copied; i++) {
16411 switch (opcode[i]) {
16412 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16413
16414 #ifdef CONFIG_X86_64
16415 case 0x40 ... 0x4f:
16416 - if (regs->cs != __USER_CS)
16417 + if ((regs->cs & 0xffff) != __USER_CS)
16418 /* 32-bit mode: register increment */
16419 return 0;
16420 /* 64-bit mode: REX prefix */
16421 diff -urNp linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S
16422 --- linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16423 +++ linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16424 @@ -1,3 +1,4 @@
16425 +.section .rodata,"a",@progbits
16426 ENTRY(sys_call_table)
16427 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16428 .long sys_exit
16429 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c
16430 --- linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16431 +++ linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16432 @@ -24,6 +24,21 @@
16433
16434 #include <asm/syscalls.h>
16435
16436 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16437 +{
16438 + unsigned long pax_task_size = TASK_SIZE;
16439 +
16440 +#ifdef CONFIG_PAX_SEGMEXEC
16441 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16442 + pax_task_size = SEGMEXEC_TASK_SIZE;
16443 +#endif
16444 +
16445 + if (len > pax_task_size || addr > pax_task_size - len)
16446 + return -EINVAL;
16447 +
16448 + return 0;
16449 +}
16450 +
16451 /*
16452 * Perform the select(nd, in, out, ex, tv) and mmap() system
16453 * calls. Linux/i386 didn't use to be able to handle more than
16454 @@ -58,6 +73,212 @@ out:
16455 return err;
16456 }
16457
16458 +unsigned long
16459 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16460 + unsigned long len, unsigned long pgoff, unsigned long flags)
16461 +{
16462 + struct mm_struct *mm = current->mm;
16463 + struct vm_area_struct *vma;
16464 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16465 +
16466 +#ifdef CONFIG_PAX_SEGMEXEC
16467 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16468 + pax_task_size = SEGMEXEC_TASK_SIZE;
16469 +#endif
16470 +
16471 + pax_task_size -= PAGE_SIZE;
16472 +
16473 + if (len > pax_task_size)
16474 + return -ENOMEM;
16475 +
16476 + if (flags & MAP_FIXED)
16477 + return addr;
16478 +
16479 +#ifdef CONFIG_PAX_RANDMMAP
16480 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16481 +#endif
16482 +
16483 + if (addr) {
16484 + addr = PAGE_ALIGN(addr);
16485 + if (pax_task_size - len >= addr) {
16486 + vma = find_vma(mm, addr);
16487 + if (check_heap_stack_gap(vma, addr, len))
16488 + return addr;
16489 + }
16490 + }
16491 + if (len > mm->cached_hole_size) {
16492 + start_addr = addr = mm->free_area_cache;
16493 + } else {
16494 + start_addr = addr = mm->mmap_base;
16495 + mm->cached_hole_size = 0;
16496 + }
16497 +
16498 +#ifdef CONFIG_PAX_PAGEEXEC
16499 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16500 + start_addr = 0x00110000UL;
16501 +
16502 +#ifdef CONFIG_PAX_RANDMMAP
16503 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16504 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16505 +#endif
16506 +
16507 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16508 + start_addr = addr = mm->mmap_base;
16509 + else
16510 + addr = start_addr;
16511 + }
16512 +#endif
16513 +
16514 +full_search:
16515 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16516 + /* At this point: (!vma || addr < vma->vm_end). */
16517 + if (pax_task_size - len < addr) {
16518 + /*
16519 + * Start a new search - just in case we missed
16520 + * some holes.
16521 + */
16522 + if (start_addr != mm->mmap_base) {
16523 + start_addr = addr = mm->mmap_base;
16524 + mm->cached_hole_size = 0;
16525 + goto full_search;
16526 + }
16527 + return -ENOMEM;
16528 + }
16529 + if (check_heap_stack_gap(vma, addr, len))
16530 + break;
16531 + if (addr + mm->cached_hole_size < vma->vm_start)
16532 + mm->cached_hole_size = vma->vm_start - addr;
16533 + addr = vma->vm_end;
16534 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16535 + start_addr = addr = mm->mmap_base;
16536 + mm->cached_hole_size = 0;
16537 + goto full_search;
16538 + }
16539 + }
16540 +
16541 + /*
16542 + * Remember the place where we stopped the search:
16543 + */
16544 + mm->free_area_cache = addr + len;
16545 + return addr;
16546 +}
16547 +
16548 +unsigned long
16549 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16550 + const unsigned long len, const unsigned long pgoff,
16551 + const unsigned long flags)
16552 +{
16553 + struct vm_area_struct *vma;
16554 + struct mm_struct *mm = current->mm;
16555 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16556 +
16557 +#ifdef CONFIG_PAX_SEGMEXEC
16558 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16559 + pax_task_size = SEGMEXEC_TASK_SIZE;
16560 +#endif
16561 +
16562 + pax_task_size -= PAGE_SIZE;
16563 +
16564 + /* requested length too big for entire address space */
16565 + if (len > pax_task_size)
16566 + return -ENOMEM;
16567 +
16568 + if (flags & MAP_FIXED)
16569 + return addr;
16570 +
16571 +#ifdef CONFIG_PAX_PAGEEXEC
16572 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16573 + goto bottomup;
16574 +#endif
16575 +
16576 +#ifdef CONFIG_PAX_RANDMMAP
16577 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16578 +#endif
16579 +
16580 + /* requesting a specific address */
16581 + if (addr) {
16582 + addr = PAGE_ALIGN(addr);
16583 + if (pax_task_size - len >= addr) {
16584 + vma = find_vma(mm, addr);
16585 + if (check_heap_stack_gap(vma, addr, len))
16586 + return addr;
16587 + }
16588 + }
16589 +
16590 + /* check if free_area_cache is useful for us */
16591 + if (len <= mm->cached_hole_size) {
16592 + mm->cached_hole_size = 0;
16593 + mm->free_area_cache = mm->mmap_base;
16594 + }
16595 +
16596 + /* either no address requested or can't fit in requested address hole */
16597 + addr = mm->free_area_cache;
16598 +
16599 + /* make sure it can fit in the remaining address space */
16600 + if (addr > len) {
16601 + vma = find_vma(mm, addr-len);
16602 + if (check_heap_stack_gap(vma, addr - len, len))
16603 + /* remember the address as a hint for next time */
16604 + return (mm->free_area_cache = addr-len);
16605 + }
16606 +
16607 + if (mm->mmap_base < len)
16608 + goto bottomup;
16609 +
16610 + addr = mm->mmap_base-len;
16611 +
16612 + do {
16613 + /*
16614 + * Lookup failure means no vma is above this address,
16615 + * else if new region fits below vma->vm_start,
16616 + * return with success:
16617 + */
16618 + vma = find_vma(mm, addr);
16619 + if (check_heap_stack_gap(vma, addr, len))
16620 + /* remember the address as a hint for next time */
16621 + return (mm->free_area_cache = addr);
16622 +
16623 + /* remember the largest hole we saw so far */
16624 + if (addr + mm->cached_hole_size < vma->vm_start)
16625 + mm->cached_hole_size = vma->vm_start - addr;
16626 +
16627 + /* try just below the current vma->vm_start */
16628 + addr = skip_heap_stack_gap(vma, len);
16629 + } while (!IS_ERR_VALUE(addr));
16630 +
16631 +bottomup:
16632 + /*
16633 + * A failed mmap() very likely causes application failure,
16634 + * so fall back to the bottom-up function here. This scenario
16635 + * can happen with large stack limits and large mmap()
16636 + * allocations.
16637 + */
16638 +
16639 +#ifdef CONFIG_PAX_SEGMEXEC
16640 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16641 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16642 + else
16643 +#endif
16644 +
16645 + mm->mmap_base = TASK_UNMAPPED_BASE;
16646 +
16647 +#ifdef CONFIG_PAX_RANDMMAP
16648 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16649 + mm->mmap_base += mm->delta_mmap;
16650 +#endif
16651 +
16652 + mm->free_area_cache = mm->mmap_base;
16653 + mm->cached_hole_size = ~0UL;
16654 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16655 + /*
16656 + * Restore the topdown base:
16657 + */
16658 + mm->mmap_base = base;
16659 + mm->free_area_cache = base;
16660 + mm->cached_hole_size = ~0UL;
16661 +
16662 + return addr;
16663 +}
16664
16665 struct sel_arg_struct {
16666 unsigned long n;
16667 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16668 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16669 case SEMTIMEDOP:
16670 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16671 - (const struct timespec __user *)fifth);
16672 + (__force const struct timespec __user *)fifth);
16673
16674 case SEMGET:
16675 return sys_semget(first, second, third);
16676 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16677 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16678 if (ret)
16679 return ret;
16680 - return put_user(raddr, (ulong __user *) third);
16681 + return put_user(raddr, (__force ulong __user *) third);
16682 }
16683 case 1: /* iBCS2 emulator entry point */
16684 if (!segment_eq(get_fs(), get_ds()))
16685 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16686
16687 return error;
16688 }
16689 -
16690 -
16691 -/*
16692 - * Do a system call from kernel instead of calling sys_execve so we
16693 - * end up with proper pt_regs.
16694 - */
16695 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16696 -{
16697 - long __res;
16698 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16699 - : "=a" (__res)
16700 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16701 - return __res;
16702 -}
16703 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c
16704 --- linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16705 +++ linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16706 @@ -32,8 +32,8 @@ out:
16707 return error;
16708 }
16709
16710 -static void find_start_end(unsigned long flags, unsigned long *begin,
16711 - unsigned long *end)
16712 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16713 + unsigned long *begin, unsigned long *end)
16714 {
16715 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16716 unsigned long new_begin;
16717 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16718 *begin = new_begin;
16719 }
16720 } else {
16721 - *begin = TASK_UNMAPPED_BASE;
16722 + *begin = mm->mmap_base;
16723 *end = TASK_SIZE;
16724 }
16725 }
16726 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16727 if (flags & MAP_FIXED)
16728 return addr;
16729
16730 - find_start_end(flags, &begin, &end);
16731 + find_start_end(mm, flags, &begin, &end);
16732
16733 if (len > end)
16734 return -ENOMEM;
16735
16736 +#ifdef CONFIG_PAX_RANDMMAP
16737 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16738 +#endif
16739 +
16740 if (addr) {
16741 addr = PAGE_ALIGN(addr);
16742 vma = find_vma(mm, addr);
16743 - if (end - len >= addr &&
16744 - (!vma || addr + len <= vma->vm_start))
16745 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16746 return addr;
16747 }
16748 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16749 @@ -106,7 +109,7 @@ full_search:
16750 }
16751 return -ENOMEM;
16752 }
16753 - if (!vma || addr + len <= vma->vm_start) {
16754 + if (check_heap_stack_gap(vma, addr, len)) {
16755 /*
16756 * Remember the place where we stopped the search:
16757 */
16758 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16759 {
16760 struct vm_area_struct *vma;
16761 struct mm_struct *mm = current->mm;
16762 - unsigned long addr = addr0;
16763 + unsigned long base = mm->mmap_base, addr = addr0;
16764
16765 /* requested length too big for entire address space */
16766 if (len > TASK_SIZE)
16767 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16768 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16769 goto bottomup;
16770
16771 +#ifdef CONFIG_PAX_RANDMMAP
16772 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16773 +#endif
16774 +
16775 /* requesting a specific address */
16776 if (addr) {
16777 addr = PAGE_ALIGN(addr);
16778 - vma = find_vma(mm, addr);
16779 - if (TASK_SIZE - len >= addr &&
16780 - (!vma || addr + len <= vma->vm_start))
16781 - return addr;
16782 + if (TASK_SIZE - len >= addr) {
16783 + vma = find_vma(mm, addr);
16784 + if (check_heap_stack_gap(vma, addr, len))
16785 + return addr;
16786 + }
16787 }
16788
16789 /* check if free_area_cache is useful for us */
16790 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16791 /* make sure it can fit in the remaining address space */
16792 if (addr > len) {
16793 vma = find_vma(mm, addr-len);
16794 - if (!vma || addr <= vma->vm_start)
16795 + if (check_heap_stack_gap(vma, addr - len, len))
16796 /* remember the address as a hint for next time */
16797 return mm->free_area_cache = addr-len;
16798 }
16799 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16800 * return with success:
16801 */
16802 vma = find_vma(mm, addr);
16803 - if (!vma || addr+len <= vma->vm_start)
16804 + if (check_heap_stack_gap(vma, addr, len))
16805 /* remember the address as a hint for next time */
16806 return mm->free_area_cache = addr;
16807
16808 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16809 mm->cached_hole_size = vma->vm_start - addr;
16810
16811 /* try just below the current vma->vm_start */
16812 - addr = vma->vm_start-len;
16813 - } while (len < vma->vm_start);
16814 + addr = skip_heap_stack_gap(vma, len);
16815 + } while (!IS_ERR_VALUE(addr));
16816
16817 bottomup:
16818 /*
16819 @@ -198,13 +206,21 @@ bottomup:
16820 * can happen with large stack limits and large mmap()
16821 * allocations.
16822 */
16823 + mm->mmap_base = TASK_UNMAPPED_BASE;
16824 +
16825 +#ifdef CONFIG_PAX_RANDMMAP
16826 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16827 + mm->mmap_base += mm->delta_mmap;
16828 +#endif
16829 +
16830 + mm->free_area_cache = mm->mmap_base;
16831 mm->cached_hole_size = ~0UL;
16832 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16833 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16834 /*
16835 * Restore the topdown base:
16836 */
16837 - mm->free_area_cache = mm->mmap_base;
16838 + mm->mmap_base = base;
16839 + mm->free_area_cache = base;
16840 mm->cached_hole_size = ~0UL;
16841
16842 return addr;
16843 diff -urNp linux-2.6.32.42/arch/x86/kernel/tboot.c linux-2.6.32.42/arch/x86/kernel/tboot.c
16844 --- linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16845 +++ linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16846 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16847
16848 void tboot_shutdown(u32 shutdown_type)
16849 {
16850 - void (*shutdown)(void);
16851 + void (* __noreturn shutdown)(void);
16852
16853 if (!tboot_enabled())
16854 return;
16855 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16856
16857 switch_to_tboot_pt();
16858
16859 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16860 + shutdown = (void *)tboot->shutdown_entry;
16861 shutdown();
16862
16863 /* should not reach here */
16864 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16865 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16866 }
16867
16868 -static atomic_t ap_wfs_count;
16869 +static atomic_unchecked_t ap_wfs_count;
16870
16871 static int tboot_wait_for_aps(int num_aps)
16872 {
16873 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16874 {
16875 switch (action) {
16876 case CPU_DYING:
16877 - atomic_inc(&ap_wfs_count);
16878 + atomic_inc_unchecked(&ap_wfs_count);
16879 if (num_online_cpus() == 1)
16880 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16881 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16882 return NOTIFY_BAD;
16883 break;
16884 }
16885 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16886
16887 tboot_create_trampoline();
16888
16889 - atomic_set(&ap_wfs_count, 0);
16890 + atomic_set_unchecked(&ap_wfs_count, 0);
16891 register_hotcpu_notifier(&tboot_cpu_notifier);
16892 return 0;
16893 }
16894 diff -urNp linux-2.6.32.42/arch/x86/kernel/time.c linux-2.6.32.42/arch/x86/kernel/time.c
16895 --- linux-2.6.32.42/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
16896 +++ linux-2.6.32.42/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
16897 @@ -26,17 +26,13 @@
16898 int timer_ack;
16899 #endif
16900
16901 -#ifdef CONFIG_X86_64
16902 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
16903 -#endif
16904 -
16905 unsigned long profile_pc(struct pt_regs *regs)
16906 {
16907 unsigned long pc = instruction_pointer(regs);
16908
16909 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16910 + if (!user_mode(regs) && in_lock_functions(pc)) {
16911 #ifdef CONFIG_FRAME_POINTER
16912 - return *(unsigned long *)(regs->bp + sizeof(long));
16913 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16914 #else
16915 unsigned long *sp =
16916 (unsigned long *)kernel_stack_pointer(regs);
16917 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16918 * or above a saved flags. Eflags has bits 22-31 zero,
16919 * kernel addresses don't.
16920 */
16921 +
16922 +#ifdef CONFIG_PAX_KERNEXEC
16923 + return ktla_ktva(sp[0]);
16924 +#else
16925 if (sp[0] >> 22)
16926 return sp[0];
16927 if (sp[1] >> 22)
16928 return sp[1];
16929 #endif
16930 +
16931 +#endif
16932 }
16933 return pc;
16934 }
16935 diff -urNp linux-2.6.32.42/arch/x86/kernel/tls.c linux-2.6.32.42/arch/x86/kernel/tls.c
16936 --- linux-2.6.32.42/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
16937 +++ linux-2.6.32.42/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
16938 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16939 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16940 return -EINVAL;
16941
16942 +#ifdef CONFIG_PAX_SEGMEXEC
16943 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16944 + return -EINVAL;
16945 +#endif
16946 +
16947 set_tls_desc(p, idx, &info, 1);
16948
16949 return 0;
16950 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_32.S linux-2.6.32.42/arch/x86/kernel/trampoline_32.S
16951 --- linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
16952 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
16953 @@ -32,6 +32,12 @@
16954 #include <asm/segment.h>
16955 #include <asm/page_types.h>
16956
16957 +#ifdef CONFIG_PAX_KERNEXEC
16958 +#define ta(X) (X)
16959 +#else
16960 +#define ta(X) ((X) - __PAGE_OFFSET)
16961 +#endif
16962 +
16963 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
16964 __CPUINITRODATA
16965 .code16
16966 @@ -60,7 +66,7 @@ r_base = .
16967 inc %ax # protected mode (PE) bit
16968 lmsw %ax # into protected mode
16969 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16970 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16971 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
16972
16973 # These need to be in the same 64K segment as the above;
16974 # hence we don't use the boot_gdt_descr defined in head.S
16975 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_64.S linux-2.6.32.42/arch/x86/kernel/trampoline_64.S
16976 --- linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
16977 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-04-17 15:56:46.000000000 -0400
16978 @@ -91,7 +91,7 @@ startup_32:
16979 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16980 movl %eax, %ds
16981
16982 - movl $X86_CR4_PAE, %eax
16983 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16984 movl %eax, %cr4 # Enable PAE mode
16985
16986 # Setup trampoline 4 level pagetables
16987 @@ -138,7 +138,7 @@ tidt:
16988 # so the kernel can live anywhere
16989 .balign 4
16990 tgdt:
16991 - .short tgdt_end - tgdt # gdt limit
16992 + .short tgdt_end - tgdt - 1 # gdt limit
16993 .long tgdt - r_base
16994 .short 0
16995 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16996 diff -urNp linux-2.6.32.42/arch/x86/kernel/traps.c linux-2.6.32.42/arch/x86/kernel/traps.c
16997 --- linux-2.6.32.42/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
16998 +++ linux-2.6.32.42/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
16999 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17000
17001 /* Do we ignore FPU interrupts ? */
17002 char ignore_fpu_irq;
17003 -
17004 -/*
17005 - * The IDT has to be page-aligned to simplify the Pentium
17006 - * F0 0F bug workaround.
17007 - */
17008 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17009 #endif
17010
17011 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17012 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17013 static inline void
17014 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17015 {
17016 - if (!user_mode_vm(regs))
17017 + if (!user_mode(regs))
17018 die(str, regs, err);
17019 }
17020 #endif
17021
17022 static void __kprobes
17023 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17024 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17025 long error_code, siginfo_t *info)
17026 {
17027 struct task_struct *tsk = current;
17028
17029 #ifdef CONFIG_X86_32
17030 - if (regs->flags & X86_VM_MASK) {
17031 + if (v8086_mode(regs)) {
17032 /*
17033 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17034 * On nmi (interrupt 2), do_trap should not be called.
17035 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17036 }
17037 #endif
17038
17039 - if (!user_mode(regs))
17040 + if (!user_mode_novm(regs))
17041 goto kernel_trap;
17042
17043 #ifdef CONFIG_X86_32
17044 @@ -158,7 +152,7 @@ trap_signal:
17045 printk_ratelimit()) {
17046 printk(KERN_INFO
17047 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17048 - tsk->comm, tsk->pid, str,
17049 + tsk->comm, task_pid_nr(tsk), str,
17050 regs->ip, regs->sp, error_code);
17051 print_vma_addr(" in ", regs->ip);
17052 printk("\n");
17053 @@ -175,8 +169,20 @@ kernel_trap:
17054 if (!fixup_exception(regs)) {
17055 tsk->thread.error_code = error_code;
17056 tsk->thread.trap_no = trapnr;
17057 +
17058 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17059 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17060 + str = "PAX: suspicious stack segment fault";
17061 +#endif
17062 +
17063 die(str, regs, error_code);
17064 }
17065 +
17066 +#ifdef CONFIG_PAX_REFCOUNT
17067 + if (trapnr == 4)
17068 + pax_report_refcount_overflow(regs);
17069 +#endif
17070 +
17071 return;
17072
17073 #ifdef CONFIG_X86_32
17074 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17075 conditional_sti(regs);
17076
17077 #ifdef CONFIG_X86_32
17078 - if (regs->flags & X86_VM_MASK)
17079 + if (v8086_mode(regs))
17080 goto gp_in_vm86;
17081 #endif
17082
17083 tsk = current;
17084 - if (!user_mode(regs))
17085 + if (!user_mode_novm(regs))
17086 goto gp_in_kernel;
17087
17088 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17089 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17090 + struct mm_struct *mm = tsk->mm;
17091 + unsigned long limit;
17092 +
17093 + down_write(&mm->mmap_sem);
17094 + limit = mm->context.user_cs_limit;
17095 + if (limit < TASK_SIZE) {
17096 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17097 + up_write(&mm->mmap_sem);
17098 + return;
17099 + }
17100 + up_write(&mm->mmap_sem);
17101 + }
17102 +#endif
17103 +
17104 tsk->thread.error_code = error_code;
17105 tsk->thread.trap_no = 13;
17106
17107 @@ -305,6 +327,13 @@ gp_in_kernel:
17108 if (notify_die(DIE_GPF, "general protection fault", regs,
17109 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17110 return;
17111 +
17112 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17113 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17114 + die("PAX: suspicious general protection fault", regs, error_code);
17115 + else
17116 +#endif
17117 +
17118 die("general protection fault", regs, error_code);
17119 }
17120
17121 @@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
17122 }
17123
17124 #ifdef CONFIG_X86_32
17125 - if (regs->flags & X86_VM_MASK)
17126 + if (v8086_mode(regs))
17127 goto debug_vm86;
17128 #endif
17129
17130 @@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
17131 * kernel space (but re-enable TF when returning to user mode).
17132 */
17133 if (condition & DR_STEP) {
17134 - if (!user_mode(regs))
17135 + if (!user_mode_novm(regs))
17136 goto clear_TF_reenable;
17137 }
17138
17139 @@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
17140 * Handle strange cache flush from user space exception
17141 * in all other cases. This is undocumented behaviour.
17142 */
17143 - if (regs->flags & X86_VM_MASK) {
17144 + if (v8086_mode(regs)) {
17145 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17146 return;
17147 }
17148 @@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
17149 void __math_state_restore(void)
17150 {
17151 struct thread_info *thread = current_thread_info();
17152 - struct task_struct *tsk = thread->task;
17153 + struct task_struct *tsk = current;
17154
17155 /*
17156 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17157 @@ -825,8 +854,7 @@ void __math_state_restore(void)
17158 */
17159 asmlinkage void math_state_restore(void)
17160 {
17161 - struct thread_info *thread = current_thread_info();
17162 - struct task_struct *tsk = thread->task;
17163 + struct task_struct *tsk = current;
17164
17165 if (!tsk_used_math(tsk)) {
17166 local_irq_enable();
17167 diff -urNp linux-2.6.32.42/arch/x86/kernel/vm86_32.c linux-2.6.32.42/arch/x86/kernel/vm86_32.c
17168 --- linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17169 +++ linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17170 @@ -41,6 +41,7 @@
17171 #include <linux/ptrace.h>
17172 #include <linux/audit.h>
17173 #include <linux/stddef.h>
17174 +#include <linux/grsecurity.h>
17175
17176 #include <asm/uaccess.h>
17177 #include <asm/io.h>
17178 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17179 do_exit(SIGSEGV);
17180 }
17181
17182 - tss = &per_cpu(init_tss, get_cpu());
17183 + tss = init_tss + get_cpu();
17184 current->thread.sp0 = current->thread.saved_sp0;
17185 current->thread.sysenter_cs = __KERNEL_CS;
17186 load_sp0(tss, &current->thread);
17187 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17188 struct task_struct *tsk;
17189 int tmp, ret = -EPERM;
17190
17191 +#ifdef CONFIG_GRKERNSEC_VM86
17192 + if (!capable(CAP_SYS_RAWIO)) {
17193 + gr_handle_vm86();
17194 + goto out;
17195 + }
17196 +#endif
17197 +
17198 tsk = current;
17199 if (tsk->thread.saved_sp0)
17200 goto out;
17201 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17202 int tmp, ret;
17203 struct vm86plus_struct __user *v86;
17204
17205 +#ifdef CONFIG_GRKERNSEC_VM86
17206 + if (!capable(CAP_SYS_RAWIO)) {
17207 + gr_handle_vm86();
17208 + ret = -EPERM;
17209 + goto out;
17210 + }
17211 +#endif
17212 +
17213 tsk = current;
17214 switch (regs->bx) {
17215 case VM86_REQUEST_IRQ:
17216 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17217 tsk->thread.saved_fs = info->regs32->fs;
17218 tsk->thread.saved_gs = get_user_gs(info->regs32);
17219
17220 - tss = &per_cpu(init_tss, get_cpu());
17221 + tss = init_tss + get_cpu();
17222 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17223 if (cpu_has_sep)
17224 tsk->thread.sysenter_cs = 0;
17225 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17226 goto cannot_handle;
17227 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17228 goto cannot_handle;
17229 - intr_ptr = (unsigned long __user *) (i << 2);
17230 + intr_ptr = (__force unsigned long __user *) (i << 2);
17231 if (get_user(segoffs, intr_ptr))
17232 goto cannot_handle;
17233 if ((segoffs >> 16) == BIOSSEG)
17234 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmi_32.c linux-2.6.32.42/arch/x86/kernel/vmi_32.c
17235 --- linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17236 +++ linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
17237 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17238 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17239
17240 #define call_vrom_func(rom,func) \
17241 - (((VROMFUNC *)(rom->func))())
17242 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17243
17244 #define call_vrom_long_func(rom,func,arg) \
17245 - (((VROMLONGFUNC *)(rom->func)) (arg))
17246 +({\
17247 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17248 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17249 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17250 + __reloc;\
17251 +})
17252
17253 -static struct vrom_header *vmi_rom;
17254 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17255 static int disable_pge;
17256 static int disable_pse;
17257 static int disable_sep;
17258 @@ -76,10 +81,10 @@ static struct {
17259 void (*set_initial_ap_state)(int, int);
17260 void (*halt)(void);
17261 void (*set_lazy_mode)(int mode);
17262 -} vmi_ops;
17263 +} vmi_ops __read_only;
17264
17265 /* Cached VMI operations */
17266 -struct vmi_timer_ops vmi_timer_ops;
17267 +struct vmi_timer_ops vmi_timer_ops __read_only;
17268
17269 /*
17270 * VMI patching routines.
17271 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17272 static inline void patch_offset(void *insnbuf,
17273 unsigned long ip, unsigned long dest)
17274 {
17275 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17276 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17277 }
17278
17279 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17280 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17281 {
17282 u64 reloc;
17283 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17284 +
17285 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17286 switch(rel->type) {
17287 case VMI_RELOCATION_CALL_REL:
17288 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17289
17290 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17291 {
17292 - const pte_t pte = { .pte = 0 };
17293 + const pte_t pte = __pte(0ULL);
17294 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17295 }
17296
17297 static void vmi_pmd_clear(pmd_t *pmd)
17298 {
17299 - const pte_t pte = { .pte = 0 };
17300 + const pte_t pte = __pte(0ULL);
17301 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17302 }
17303 #endif
17304 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17305 ap.ss = __KERNEL_DS;
17306 ap.esp = (unsigned long) start_esp;
17307
17308 - ap.ds = __USER_DS;
17309 - ap.es = __USER_DS;
17310 + ap.ds = __KERNEL_DS;
17311 + ap.es = __KERNEL_DS;
17312 ap.fs = __KERNEL_PERCPU;
17313 - ap.gs = __KERNEL_STACK_CANARY;
17314 + savesegment(gs, ap.gs);
17315
17316 ap.eflags = 0;
17317
17318 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17319 paravirt_leave_lazy_mmu();
17320 }
17321
17322 +#ifdef CONFIG_PAX_KERNEXEC
17323 +static unsigned long vmi_pax_open_kernel(void)
17324 +{
17325 + return 0;
17326 +}
17327 +
17328 +static unsigned long vmi_pax_close_kernel(void)
17329 +{
17330 + return 0;
17331 +}
17332 +#endif
17333 +
17334 static inline int __init check_vmi_rom(struct vrom_header *rom)
17335 {
17336 struct pci_header *pci;
17337 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17338 return 0;
17339 if (rom->vrom_signature != VMI_SIGNATURE)
17340 return 0;
17341 + if (rom->rom_length * 512 > sizeof(*rom)) {
17342 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17343 + return 0;
17344 + }
17345 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17346 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17347 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17348 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17349 struct vrom_header *romstart;
17350 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17351 if (check_vmi_rom(romstart)) {
17352 - vmi_rom = romstart;
17353 + vmi_rom = *romstart;
17354 return 1;
17355 }
17356 }
17357 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17358
17359 para_fill(pv_irq_ops.safe_halt, Halt);
17360
17361 +#ifdef CONFIG_PAX_KERNEXEC
17362 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17363 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17364 +#endif
17365 +
17366 /*
17367 * Alternative instruction rewriting doesn't happen soon enough
17368 * to convert VMI_IRET to a call instead of a jump; so we have
17369 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17370
17371 void __init vmi_init(void)
17372 {
17373 - if (!vmi_rom)
17374 + if (!vmi_rom.rom_signature)
17375 probe_vmi_rom();
17376 else
17377 - check_vmi_rom(vmi_rom);
17378 + check_vmi_rom(&vmi_rom);
17379
17380 /* In case probing for or validating the ROM failed, basil */
17381 - if (!vmi_rom)
17382 + if (!vmi_rom.rom_signature)
17383 return;
17384
17385 - reserve_top_address(-vmi_rom->virtual_top);
17386 + reserve_top_address(-vmi_rom.virtual_top);
17387
17388 #ifdef CONFIG_X86_IO_APIC
17389 /* This is virtual hardware; timer routing is wired correctly */
17390 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17391 {
17392 unsigned long flags;
17393
17394 - if (!vmi_rom)
17395 + if (!vmi_rom.rom_signature)
17396 return;
17397
17398 local_irq_save(flags);
17399 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S
17400 --- linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17401 +++ linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17402 @@ -26,6 +26,13 @@
17403 #include <asm/page_types.h>
17404 #include <asm/cache.h>
17405 #include <asm/boot.h>
17406 +#include <asm/segment.h>
17407 +
17408 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17409 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17410 +#else
17411 +#define __KERNEL_TEXT_OFFSET 0
17412 +#endif
17413
17414 #undef i386 /* in case the preprocessor is a 32bit one */
17415
17416 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17417 #ifdef CONFIG_X86_32
17418 OUTPUT_ARCH(i386)
17419 ENTRY(phys_startup_32)
17420 -jiffies = jiffies_64;
17421 #else
17422 OUTPUT_ARCH(i386:x86-64)
17423 ENTRY(phys_startup_64)
17424 -jiffies_64 = jiffies;
17425 #endif
17426
17427 PHDRS {
17428 text PT_LOAD FLAGS(5); /* R_E */
17429 - data PT_LOAD FLAGS(7); /* RWE */
17430 +#ifdef CONFIG_X86_32
17431 + module PT_LOAD FLAGS(5); /* R_E */
17432 +#endif
17433 +#ifdef CONFIG_XEN
17434 + rodata PT_LOAD FLAGS(5); /* R_E */
17435 +#else
17436 + rodata PT_LOAD FLAGS(4); /* R__ */
17437 +#endif
17438 + data PT_LOAD FLAGS(6); /* RW_ */
17439 #ifdef CONFIG_X86_64
17440 user PT_LOAD FLAGS(5); /* R_E */
17441 +#endif
17442 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17443 #ifdef CONFIG_SMP
17444 percpu PT_LOAD FLAGS(6); /* RW_ */
17445 #endif
17446 + text.init PT_LOAD FLAGS(5); /* R_E */
17447 + text.exit PT_LOAD FLAGS(5); /* R_E */
17448 init PT_LOAD FLAGS(7); /* RWE */
17449 -#endif
17450 note PT_NOTE FLAGS(0); /* ___ */
17451 }
17452
17453 SECTIONS
17454 {
17455 #ifdef CONFIG_X86_32
17456 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17457 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17458 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17459 #else
17460 - . = __START_KERNEL;
17461 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17462 + . = __START_KERNEL;
17463 #endif
17464
17465 /* Text and read-only data */
17466 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17467 - _text = .;
17468 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17469 /* bootstrapping code */
17470 +#ifdef CONFIG_X86_32
17471 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17472 +#else
17473 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17474 +#endif
17475 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17476 + _text = .;
17477 HEAD_TEXT
17478 #ifdef CONFIG_X86_32
17479 . = ALIGN(PAGE_SIZE);
17480 @@ -82,28 +102,71 @@ SECTIONS
17481 IRQENTRY_TEXT
17482 *(.fixup)
17483 *(.gnu.warning)
17484 - /* End of text section */
17485 - _etext = .;
17486 } :text = 0x9090
17487
17488 - NOTES :text :note
17489 + . += __KERNEL_TEXT_OFFSET;
17490 +
17491 +#ifdef CONFIG_X86_32
17492 + . = ALIGN(PAGE_SIZE);
17493 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17494 + *(.vmi.rom)
17495 + } :module
17496 +
17497 + . = ALIGN(PAGE_SIZE);
17498 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17499 +
17500 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17501 + MODULES_EXEC_VADDR = .;
17502 + BYTE(0)
17503 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17504 + . = ALIGN(HPAGE_SIZE);
17505 + MODULES_EXEC_END = . - 1;
17506 +#endif
17507 +
17508 + } :module
17509 +#endif
17510
17511 - EXCEPTION_TABLE(16) :text = 0x9090
17512 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17513 + /* End of text section */
17514 + _etext = . - __KERNEL_TEXT_OFFSET;
17515 + }
17516 +
17517 +#ifdef CONFIG_X86_32
17518 + . = ALIGN(PAGE_SIZE);
17519 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17520 + *(.idt)
17521 + . = ALIGN(PAGE_SIZE);
17522 + *(.empty_zero_page)
17523 + *(.swapper_pg_fixmap)
17524 + *(.swapper_pg_pmd)
17525 + *(.swapper_pg_dir)
17526 + *(.trampoline_pg_dir)
17527 + } :rodata
17528 +#endif
17529 +
17530 + . = ALIGN(PAGE_SIZE);
17531 + NOTES :rodata :note
17532 +
17533 + EXCEPTION_TABLE(16) :rodata
17534
17535 RO_DATA(PAGE_SIZE)
17536
17537 /* Data */
17538 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17539 +
17540 +#ifdef CONFIG_PAX_KERNEXEC
17541 + . = ALIGN(HPAGE_SIZE);
17542 +#else
17543 + . = ALIGN(PAGE_SIZE);
17544 +#endif
17545 +
17546 /* Start of data section */
17547 _sdata = .;
17548
17549 /* init_task */
17550 INIT_TASK_DATA(THREAD_SIZE)
17551
17552 -#ifdef CONFIG_X86_32
17553 - /* 32 bit has nosave before _edata */
17554 NOSAVE_DATA
17555 -#endif
17556
17557 PAGE_ALIGNED_DATA(PAGE_SIZE)
17558
17559 @@ -112,6 +175,8 @@ SECTIONS
17560 DATA_DATA
17561 CONSTRUCTORS
17562
17563 + jiffies = jiffies_64;
17564 +
17565 /* rarely changed data like cpu maps */
17566 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17567
17568 @@ -166,12 +231,6 @@ SECTIONS
17569 }
17570 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17571
17572 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17573 - .jiffies : AT(VLOAD(.jiffies)) {
17574 - *(.jiffies)
17575 - }
17576 - jiffies = VVIRT(.jiffies);
17577 -
17578 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17579 *(.vsyscall_3)
17580 }
17581 @@ -187,12 +246,19 @@ SECTIONS
17582 #endif /* CONFIG_X86_64 */
17583
17584 /* Init code and data - will be freed after init */
17585 - . = ALIGN(PAGE_SIZE);
17586 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17587 + BYTE(0)
17588 +
17589 +#ifdef CONFIG_PAX_KERNEXEC
17590 + . = ALIGN(HPAGE_SIZE);
17591 +#else
17592 + . = ALIGN(PAGE_SIZE);
17593 +#endif
17594 +
17595 __init_begin = .; /* paired with __init_end */
17596 - }
17597 + } :init.begin
17598
17599 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17600 +#ifdef CONFIG_SMP
17601 /*
17602 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17603 * output PHDR, so the next output section - .init.text - should
17604 @@ -201,12 +267,27 @@ SECTIONS
17605 PERCPU_VADDR(0, :percpu)
17606 #endif
17607
17608 - INIT_TEXT_SECTION(PAGE_SIZE)
17609 -#ifdef CONFIG_X86_64
17610 - :init
17611 -#endif
17612 + . = ALIGN(PAGE_SIZE);
17613 + init_begin = .;
17614 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17615 + VMLINUX_SYMBOL(_sinittext) = .;
17616 + INIT_TEXT
17617 + VMLINUX_SYMBOL(_einittext) = .;
17618 + . = ALIGN(PAGE_SIZE);
17619 + } :text.init
17620
17621 - INIT_DATA_SECTION(16)
17622 + /*
17623 + * .exit.text is discard at runtime, not link time, to deal with
17624 + * references from .altinstructions and .eh_frame
17625 + */
17626 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17627 + EXIT_TEXT
17628 + . = ALIGN(16);
17629 + } :text.exit
17630 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17631 +
17632 + . = ALIGN(PAGE_SIZE);
17633 + INIT_DATA_SECTION(16) :init
17634
17635 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
17636 __x86_cpu_dev_start = .;
17637 @@ -232,19 +313,11 @@ SECTIONS
17638 *(.altinstr_replacement)
17639 }
17640
17641 - /*
17642 - * .exit.text is discard at runtime, not link time, to deal with
17643 - * references from .altinstructions and .eh_frame
17644 - */
17645 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17646 - EXIT_TEXT
17647 - }
17648 -
17649 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17650 EXIT_DATA
17651 }
17652
17653 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17654 +#ifndef CONFIG_SMP
17655 PERCPU(PAGE_SIZE)
17656 #endif
17657
17658 @@ -267,12 +340,6 @@ SECTIONS
17659 . = ALIGN(PAGE_SIZE);
17660 }
17661
17662 -#ifdef CONFIG_X86_64
17663 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17664 - NOSAVE_DATA
17665 - }
17666 -#endif
17667 -
17668 /* BSS */
17669 . = ALIGN(PAGE_SIZE);
17670 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17671 @@ -288,6 +355,7 @@ SECTIONS
17672 __brk_base = .;
17673 . += 64 * 1024; /* 64k alignment slop space */
17674 *(.brk_reservation) /* areas brk users have reserved */
17675 + . = ALIGN(HPAGE_SIZE);
17676 __brk_limit = .;
17677 }
17678
17679 @@ -316,13 +384,12 @@ SECTIONS
17680 * for the boot processor.
17681 */
17682 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
17683 -INIT_PER_CPU(gdt_page);
17684 INIT_PER_CPU(irq_stack_union);
17685
17686 /*
17687 * Build-time check on the image size:
17688 */
17689 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17690 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17691 "kernel image bigger than KERNEL_IMAGE_SIZE");
17692
17693 #ifdef CONFIG_SMP
17694 diff -urNp linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c
17695 --- linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
17696 +++ linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
17697 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
17698
17699 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
17700 /* copy vsyscall data */
17701 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
17702 vsyscall_gtod_data.clock.vread = clock->vread;
17703 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
17704 vsyscall_gtod_data.clock.mask = clock->mask;
17705 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
17706 We do this here because otherwise user space would do it on
17707 its own in a likely inferior way (no access to jiffies).
17708 If you don't like it pass NULL. */
17709 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
17710 + if (tcache && tcache->blob[0] == (j = jiffies)) {
17711 p = tcache->blob[1];
17712 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
17713 /* Load per CPU data from RDTSCP */
17714 diff -urNp linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c
17715 --- linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
17716 +++ linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
17717 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
17718
17719 EXPORT_SYMBOL(copy_user_generic);
17720 EXPORT_SYMBOL(__copy_user_nocache);
17721 -EXPORT_SYMBOL(copy_from_user);
17722 -EXPORT_SYMBOL(copy_to_user);
17723 EXPORT_SYMBOL(__copy_from_user_inatomic);
17724
17725 EXPORT_SYMBOL(copy_page);
17726 diff -urNp linux-2.6.32.42/arch/x86/kernel/xsave.c linux-2.6.32.42/arch/x86/kernel/xsave.c
17727 --- linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
17728 +++ linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
17729 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
17730 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17731 return -1;
17732
17733 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17734 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17735 fx_sw_user->extended_size -
17736 FP_XSTATE_MAGIC2_SIZE));
17737 /*
17738 @@ -196,7 +196,7 @@ fx_only:
17739 * the other extended state.
17740 */
17741 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17742 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17743 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
17744 }
17745
17746 /*
17747 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
17748 if (task_thread_info(tsk)->status & TS_XSAVE)
17749 err = restore_user_xstate(buf);
17750 else
17751 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
17752 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
17753 buf);
17754 if (unlikely(err)) {
17755 /*
17756 diff -urNp linux-2.6.32.42/arch/x86/kvm/emulate.c linux-2.6.32.42/arch/x86/kvm/emulate.c
17757 --- linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
17758 +++ linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
17759 @@ -81,8 +81,8 @@
17760 #define Src2CL (1<<29)
17761 #define Src2ImmByte (2<<29)
17762 #define Src2One (3<<29)
17763 -#define Src2Imm16 (4<<29)
17764 -#define Src2Mask (7<<29)
17765 +#define Src2Imm16 (4U<<29)
17766 +#define Src2Mask (7U<<29)
17767
17768 enum {
17769 Group1_80, Group1_81, Group1_82, Group1_83,
17770 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
17771
17772 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
17773 do { \
17774 + unsigned long _tmp; \
17775 __asm__ __volatile__ ( \
17776 _PRE_EFLAGS("0", "4", "2") \
17777 _op _suffix " %"_x"3,%1; " \
17778 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
17779 /* Raw emulation: instruction has two explicit operands. */
17780 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17781 do { \
17782 - unsigned long _tmp; \
17783 - \
17784 switch ((_dst).bytes) { \
17785 case 2: \
17786 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
17787 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
17788
17789 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17790 do { \
17791 - unsigned long _tmp; \
17792 switch ((_dst).bytes) { \
17793 case 1: \
17794 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
17795 diff -urNp linux-2.6.32.42/arch/x86/kvm/lapic.c linux-2.6.32.42/arch/x86/kvm/lapic.c
17796 --- linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
17797 +++ linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
17798 @@ -52,7 +52,7 @@
17799 #define APIC_BUS_CYCLE_NS 1
17800
17801 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17802 -#define apic_debug(fmt, arg...)
17803 +#define apic_debug(fmt, arg...) do {} while (0)
17804
17805 #define APIC_LVT_NUM 6
17806 /* 14 is the version for Xeon and Pentium 8.4.8*/
17807 diff -urNp linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h
17808 --- linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
17809 +++ linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
17810 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
17811 int level = PT_PAGE_TABLE_LEVEL;
17812 unsigned long mmu_seq;
17813
17814 + pax_track_stack();
17815 +
17816 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17817 kvm_mmu_audit(vcpu, "pre page fault");
17818
17819 diff -urNp linux-2.6.32.42/arch/x86/kvm/svm.c linux-2.6.32.42/arch/x86/kvm/svm.c
17820 --- linux-2.6.32.42/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
17821 +++ linux-2.6.32.42/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
17822 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
17823 static void reload_tss(struct kvm_vcpu *vcpu)
17824 {
17825 int cpu = raw_smp_processor_id();
17826 -
17827 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
17828 +
17829 + pax_open_kernel();
17830 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
17831 + pax_close_kernel();
17832 +
17833 load_TR_desc();
17834 }
17835
17836 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
17837 return true;
17838 }
17839
17840 -static struct kvm_x86_ops svm_x86_ops = {
17841 +static const struct kvm_x86_ops svm_x86_ops = {
17842 .cpu_has_kvm_support = has_svm,
17843 .disabled_by_bios = is_disabled,
17844 .hardware_setup = svm_hardware_setup,
17845 diff -urNp linux-2.6.32.42/arch/x86/kvm/vmx.c linux-2.6.32.42/arch/x86/kvm/vmx.c
17846 --- linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
17847 +++ linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
17848 @@ -570,7 +570,11 @@ static void reload_tss(void)
17849
17850 kvm_get_gdt(&gdt);
17851 descs = (void *)gdt.base;
17852 +
17853 + pax_open_kernel();
17854 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17855 + pax_close_kernel();
17856 +
17857 load_TR_desc();
17858 }
17859
17860 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
17861 if (!cpu_has_vmx_flexpriority())
17862 flexpriority_enabled = 0;
17863
17864 - if (!cpu_has_vmx_tpr_shadow())
17865 - kvm_x86_ops->update_cr8_intercept = NULL;
17866 + if (!cpu_has_vmx_tpr_shadow()) {
17867 + pax_open_kernel();
17868 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17869 + pax_close_kernel();
17870 + }
17871
17872 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17873 kvm_disable_largepages();
17874 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
17875 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
17876
17877 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
17878 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
17879 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
17880 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
17881 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
17882 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
17883 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
17884 "jmp .Lkvm_vmx_return \n\t"
17885 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17886 ".Lkvm_vmx_return: "
17887 +
17888 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17889 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17890 + ".Lkvm_vmx_return2: "
17891 +#endif
17892 +
17893 /* Save guest registers, load host registers, keep flags */
17894 "xchg %0, (%%"R"sp) \n\t"
17895 "mov %%"R"ax, %c[rax](%0) \n\t"
17896 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
17897 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
17898 #endif
17899 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
17900 +
17901 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17902 + ,[cs]"i"(__KERNEL_CS)
17903 +#endif
17904 +
17905 : "cc", "memory"
17906 - , R"bx", R"di", R"si"
17907 + , R"ax", R"bx", R"di", R"si"
17908 #ifdef CONFIG_X86_64
17909 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
17910 #endif
17911 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
17912 if (vmx->rmode.irq.pending)
17913 fixup_rmode_irq(vmx);
17914
17915 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17916 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17917 +
17918 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17919 + loadsegment(fs, __KERNEL_PERCPU);
17920 +#endif
17921 +
17922 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17923 + __set_fs(current_thread_info()->addr_limit);
17924 +#endif
17925 +
17926 vmx->launched = 1;
17927
17928 vmx_complete_interrupts(vmx);
17929 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
17930 return false;
17931 }
17932
17933 -static struct kvm_x86_ops vmx_x86_ops = {
17934 +static const struct kvm_x86_ops vmx_x86_ops = {
17935 .cpu_has_kvm_support = cpu_has_kvm_support,
17936 .disabled_by_bios = vmx_disabled_by_bios,
17937 .hardware_setup = hardware_setup,
17938 diff -urNp linux-2.6.32.42/arch/x86/kvm/x86.c linux-2.6.32.42/arch/x86/kvm/x86.c
17939 --- linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
17940 +++ linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
17941 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
17942 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
17943 struct kvm_cpuid_entry2 __user *entries);
17944
17945 -struct kvm_x86_ops *kvm_x86_ops;
17946 +const struct kvm_x86_ops *kvm_x86_ops;
17947 EXPORT_SYMBOL_GPL(kvm_x86_ops);
17948
17949 int ignore_msrs = 0;
17950 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17951 struct kvm_cpuid2 *cpuid,
17952 struct kvm_cpuid_entry2 __user *entries)
17953 {
17954 - int r;
17955 + int r, i;
17956
17957 r = -E2BIG;
17958 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17959 goto out;
17960 r = -EFAULT;
17961 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17962 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17963 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17964 goto out;
17965 + for (i = 0; i < cpuid->nent; ++i) {
17966 + struct kvm_cpuid_entry2 cpuid_entry;
17967 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17968 + goto out;
17969 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
17970 + }
17971 vcpu->arch.cpuid_nent = cpuid->nent;
17972 kvm_apic_set_version(vcpu);
17973 return 0;
17974 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17975 struct kvm_cpuid2 *cpuid,
17976 struct kvm_cpuid_entry2 __user *entries)
17977 {
17978 - int r;
17979 + int r, i;
17980
17981 vcpu_load(vcpu);
17982 r = -E2BIG;
17983 if (cpuid->nent < vcpu->arch.cpuid_nent)
17984 goto out;
17985 r = -EFAULT;
17986 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17987 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17988 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17989 goto out;
17990 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17991 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17992 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17993 + goto out;
17994 + }
17995 return 0;
17996
17997 out:
17998 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17999 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18000 struct kvm_interrupt *irq)
18001 {
18002 - if (irq->irq < 0 || irq->irq >= 256)
18003 + if (irq->irq >= 256)
18004 return -EINVAL;
18005 if (irqchip_in_kernel(vcpu->kvm))
18006 return -ENXIO;
18007 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18008 .notifier_call = kvmclock_cpufreq_notifier
18009 };
18010
18011 -int kvm_arch_init(void *opaque)
18012 +int kvm_arch_init(const void *opaque)
18013 {
18014 int r, cpu;
18015 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18016 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18017
18018 if (kvm_x86_ops) {
18019 printk(KERN_ERR "kvm: already loaded the other module\n");
18020 diff -urNp linux-2.6.32.42/arch/x86/lib/atomic64_32.c linux-2.6.32.42/arch/x86/lib/atomic64_32.c
18021 --- linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18022 +++ linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18023 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18024 }
18025 EXPORT_SYMBOL(atomic64_cmpxchg);
18026
18027 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18028 +{
18029 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18030 +}
18031 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18032 +
18033 /**
18034 * atomic64_xchg - xchg atomic64 variable
18035 * @ptr: pointer to type atomic64_t
18036 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18037 EXPORT_SYMBOL(atomic64_xchg);
18038
18039 /**
18040 + * atomic64_xchg_unchecked - xchg atomic64 variable
18041 + * @ptr: pointer to type atomic64_unchecked_t
18042 + * @new_val: value to assign
18043 + *
18044 + * Atomically xchgs the value of @ptr to @new_val and returns
18045 + * the old value.
18046 + */
18047 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18048 +{
18049 + /*
18050 + * Try first with a (possibly incorrect) assumption about
18051 + * what we have there. We'll do two loops most likely,
18052 + * but we'll get an ownership MESI transaction straight away
18053 + * instead of a read transaction followed by a
18054 + * flush-for-ownership transaction:
18055 + */
18056 + u64 old_val, real_val = 0;
18057 +
18058 + do {
18059 + old_val = real_val;
18060 +
18061 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18062 +
18063 + } while (real_val != old_val);
18064 +
18065 + return old_val;
18066 +}
18067 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18068 +
18069 +/**
18070 * atomic64_set - set atomic64 variable
18071 * @ptr: pointer to type atomic64_t
18072 * @new_val: value to assign
18073 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18074 EXPORT_SYMBOL(atomic64_set);
18075
18076 /**
18077 -EXPORT_SYMBOL(atomic64_read);
18078 + * atomic64_unchecked_set - set atomic64 variable
18079 + * @ptr: pointer to type atomic64_unchecked_t
18080 + * @new_val: value to assign
18081 + *
18082 + * Atomically sets the value of @ptr to @new_val.
18083 + */
18084 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18085 +{
18086 + atomic64_xchg_unchecked(ptr, new_val);
18087 +}
18088 +EXPORT_SYMBOL(atomic64_set_unchecked);
18089 +
18090 +/**
18091 * atomic64_add_return - add and return
18092 * @delta: integer value to add
18093 * @ptr: pointer to type atomic64_t
18094 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18095 }
18096 EXPORT_SYMBOL(atomic64_add_return);
18097
18098 +/**
18099 + * atomic64_add_return_unchecked - add and return
18100 + * @delta: integer value to add
18101 + * @ptr: pointer to type atomic64_unchecked_t
18102 + *
18103 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18104 + */
18105 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18106 +{
18107 + /*
18108 + * Try first with a (possibly incorrect) assumption about
18109 + * what we have there. We'll do two loops most likely,
18110 + * but we'll get an ownership MESI transaction straight away
18111 + * instead of a read transaction followed by a
18112 + * flush-for-ownership transaction:
18113 + */
18114 + u64 old_val, new_val, real_val = 0;
18115 +
18116 + do {
18117 + old_val = real_val;
18118 + new_val = old_val + delta;
18119 +
18120 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18121 +
18122 + } while (real_val != old_val);
18123 +
18124 + return new_val;
18125 +}
18126 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18127 +
18128 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18129 {
18130 return atomic64_add_return(-delta, ptr);
18131 }
18132 EXPORT_SYMBOL(atomic64_sub_return);
18133
18134 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18135 +{
18136 + return atomic64_add_return_unchecked(-delta, ptr);
18137 +}
18138 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18139 +
18140 u64 atomic64_inc_return(atomic64_t *ptr)
18141 {
18142 return atomic64_add_return(1, ptr);
18143 }
18144 EXPORT_SYMBOL(atomic64_inc_return);
18145
18146 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18147 +{
18148 + return atomic64_add_return_unchecked(1, ptr);
18149 +}
18150 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18151 +
18152 u64 atomic64_dec_return(atomic64_t *ptr)
18153 {
18154 return atomic64_sub_return(1, ptr);
18155 }
18156 EXPORT_SYMBOL(atomic64_dec_return);
18157
18158 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18159 +{
18160 + return atomic64_sub_return_unchecked(1, ptr);
18161 +}
18162 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18163 +
18164 /**
18165 * atomic64_add - add integer to atomic64 variable
18166 * @delta: integer value to add
18167 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18168 EXPORT_SYMBOL(atomic64_add);
18169
18170 /**
18171 + * atomic64_add_unchecked - add integer to atomic64 variable
18172 + * @delta: integer value to add
18173 + * @ptr: pointer to type atomic64_unchecked_t
18174 + *
18175 + * Atomically adds @delta to @ptr.
18176 + */
18177 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18178 +{
18179 + atomic64_add_return_unchecked(delta, ptr);
18180 +}
18181 +EXPORT_SYMBOL(atomic64_add_unchecked);
18182 +
18183 +/**
18184 * atomic64_sub - subtract the atomic64 variable
18185 * @delta: integer value to subtract
18186 * @ptr: pointer to type atomic64_t
18187 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18188 EXPORT_SYMBOL(atomic64_sub);
18189
18190 /**
18191 + * atomic64_sub_unchecked - subtract the atomic64 variable
18192 + * @delta: integer value to subtract
18193 + * @ptr: pointer to type atomic64_unchecked_t
18194 + *
18195 + * Atomically subtracts @delta from @ptr.
18196 + */
18197 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18198 +{
18199 + atomic64_add_unchecked(-delta, ptr);
18200 +}
18201 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18202 +
18203 +/**
18204 * atomic64_sub_and_test - subtract value from variable and test result
18205 * @delta: integer value to subtract
18206 * @ptr: pointer to type atomic64_t
18207 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18208 EXPORT_SYMBOL(atomic64_inc);
18209
18210 /**
18211 + * atomic64_inc_unchecked - increment atomic64 variable
18212 + * @ptr: pointer to type atomic64_unchecked_t
18213 + *
18214 + * Atomically increments @ptr by 1.
18215 + */
18216 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18217 +{
18218 + atomic64_add_unchecked(1, ptr);
18219 +}
18220 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18221 +
18222 +/**
18223 * atomic64_dec - decrement atomic64 variable
18224 * @ptr: pointer to type atomic64_t
18225 *
18226 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18227 EXPORT_SYMBOL(atomic64_dec);
18228
18229 /**
18230 + * atomic64_dec_unchecked - decrement atomic64 variable
18231 + * @ptr: pointer to type atomic64_unchecked_t
18232 + *
18233 + * Atomically decrements @ptr by 1.
18234 + */
18235 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18236 +{
18237 + atomic64_sub_unchecked(1, ptr);
18238 +}
18239 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18240 +
18241 +/**
18242 * atomic64_dec_and_test - decrement and test
18243 * @ptr: pointer to type atomic64_t
18244 *
18245 diff -urNp linux-2.6.32.42/arch/x86/lib/checksum_32.S linux-2.6.32.42/arch/x86/lib/checksum_32.S
18246 --- linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18247 +++ linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18248 @@ -28,7 +28,8 @@
18249 #include <linux/linkage.h>
18250 #include <asm/dwarf2.h>
18251 #include <asm/errno.h>
18252 -
18253 +#include <asm/segment.h>
18254 +
18255 /*
18256 * computes a partial checksum, e.g. for TCP/UDP fragments
18257 */
18258 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18259
18260 #define ARGBASE 16
18261 #define FP 12
18262 -
18263 -ENTRY(csum_partial_copy_generic)
18264 +
18265 +ENTRY(csum_partial_copy_generic_to_user)
18266 CFI_STARTPROC
18267 +
18268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18269 + pushl %gs
18270 + CFI_ADJUST_CFA_OFFSET 4
18271 + popl %es
18272 + CFI_ADJUST_CFA_OFFSET -4
18273 + jmp csum_partial_copy_generic
18274 +#endif
18275 +
18276 +ENTRY(csum_partial_copy_generic_from_user)
18277 +
18278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18279 + pushl %gs
18280 + CFI_ADJUST_CFA_OFFSET 4
18281 + popl %ds
18282 + CFI_ADJUST_CFA_OFFSET -4
18283 +#endif
18284 +
18285 +ENTRY(csum_partial_copy_generic)
18286 subl $4,%esp
18287 CFI_ADJUST_CFA_OFFSET 4
18288 pushl %edi
18289 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18290 jmp 4f
18291 SRC(1: movw (%esi), %bx )
18292 addl $2, %esi
18293 -DST( movw %bx, (%edi) )
18294 +DST( movw %bx, %es:(%edi) )
18295 addl $2, %edi
18296 addw %bx, %ax
18297 adcl $0, %eax
18298 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18299 SRC(1: movl (%esi), %ebx )
18300 SRC( movl 4(%esi), %edx )
18301 adcl %ebx, %eax
18302 -DST( movl %ebx, (%edi) )
18303 +DST( movl %ebx, %es:(%edi) )
18304 adcl %edx, %eax
18305 -DST( movl %edx, 4(%edi) )
18306 +DST( movl %edx, %es:4(%edi) )
18307
18308 SRC( movl 8(%esi), %ebx )
18309 SRC( movl 12(%esi), %edx )
18310 adcl %ebx, %eax
18311 -DST( movl %ebx, 8(%edi) )
18312 +DST( movl %ebx, %es:8(%edi) )
18313 adcl %edx, %eax
18314 -DST( movl %edx, 12(%edi) )
18315 +DST( movl %edx, %es:12(%edi) )
18316
18317 SRC( movl 16(%esi), %ebx )
18318 SRC( movl 20(%esi), %edx )
18319 adcl %ebx, %eax
18320 -DST( movl %ebx, 16(%edi) )
18321 +DST( movl %ebx, %es:16(%edi) )
18322 adcl %edx, %eax
18323 -DST( movl %edx, 20(%edi) )
18324 +DST( movl %edx, %es:20(%edi) )
18325
18326 SRC( movl 24(%esi), %ebx )
18327 SRC( movl 28(%esi), %edx )
18328 adcl %ebx, %eax
18329 -DST( movl %ebx, 24(%edi) )
18330 +DST( movl %ebx, %es:24(%edi) )
18331 adcl %edx, %eax
18332 -DST( movl %edx, 28(%edi) )
18333 +DST( movl %edx, %es:28(%edi) )
18334
18335 lea 32(%esi), %esi
18336 lea 32(%edi), %edi
18337 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18338 shrl $2, %edx # This clears CF
18339 SRC(3: movl (%esi), %ebx )
18340 adcl %ebx, %eax
18341 -DST( movl %ebx, (%edi) )
18342 +DST( movl %ebx, %es:(%edi) )
18343 lea 4(%esi), %esi
18344 lea 4(%edi), %edi
18345 dec %edx
18346 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18347 jb 5f
18348 SRC( movw (%esi), %cx )
18349 leal 2(%esi), %esi
18350 -DST( movw %cx, (%edi) )
18351 +DST( movw %cx, %es:(%edi) )
18352 leal 2(%edi), %edi
18353 je 6f
18354 shll $16,%ecx
18355 SRC(5: movb (%esi), %cl )
18356 -DST( movb %cl, (%edi) )
18357 +DST( movb %cl, %es:(%edi) )
18358 6: addl %ecx, %eax
18359 adcl $0, %eax
18360 7:
18361 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18362
18363 6001:
18364 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18365 - movl $-EFAULT, (%ebx)
18366 + movl $-EFAULT, %ss:(%ebx)
18367
18368 # zero the complete destination - computing the rest
18369 # is too much work
18370 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18371
18372 6002:
18373 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18374 - movl $-EFAULT,(%ebx)
18375 + movl $-EFAULT,%ss:(%ebx)
18376 jmp 5000b
18377
18378 .previous
18379
18380 + pushl %ss
18381 + CFI_ADJUST_CFA_OFFSET 4
18382 + popl %ds
18383 + CFI_ADJUST_CFA_OFFSET -4
18384 + pushl %ss
18385 + CFI_ADJUST_CFA_OFFSET 4
18386 + popl %es
18387 + CFI_ADJUST_CFA_OFFSET -4
18388 popl %ebx
18389 CFI_ADJUST_CFA_OFFSET -4
18390 CFI_RESTORE ebx
18391 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18392 CFI_ADJUST_CFA_OFFSET -4
18393 ret
18394 CFI_ENDPROC
18395 -ENDPROC(csum_partial_copy_generic)
18396 +ENDPROC(csum_partial_copy_generic_to_user)
18397
18398 #else
18399
18400 /* Version for PentiumII/PPro */
18401
18402 #define ROUND1(x) \
18403 + nop; nop; nop; \
18404 SRC(movl x(%esi), %ebx ) ; \
18405 addl %ebx, %eax ; \
18406 - DST(movl %ebx, x(%edi) ) ;
18407 + DST(movl %ebx, %es:x(%edi)) ;
18408
18409 #define ROUND(x) \
18410 + nop; nop; nop; \
18411 SRC(movl x(%esi), %ebx ) ; \
18412 adcl %ebx, %eax ; \
18413 - DST(movl %ebx, x(%edi) ) ;
18414 + DST(movl %ebx, %es:x(%edi)) ;
18415
18416 #define ARGBASE 12
18417 -
18418 -ENTRY(csum_partial_copy_generic)
18419 +
18420 +ENTRY(csum_partial_copy_generic_to_user)
18421 CFI_STARTPROC
18422 +
18423 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18424 + pushl %gs
18425 + CFI_ADJUST_CFA_OFFSET 4
18426 + popl %es
18427 + CFI_ADJUST_CFA_OFFSET -4
18428 + jmp csum_partial_copy_generic
18429 +#endif
18430 +
18431 +ENTRY(csum_partial_copy_generic_from_user)
18432 +
18433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18434 + pushl %gs
18435 + CFI_ADJUST_CFA_OFFSET 4
18436 + popl %ds
18437 + CFI_ADJUST_CFA_OFFSET -4
18438 +#endif
18439 +
18440 +ENTRY(csum_partial_copy_generic)
18441 pushl %ebx
18442 CFI_ADJUST_CFA_OFFSET 4
18443 CFI_REL_OFFSET ebx, 0
18444 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18445 subl %ebx, %edi
18446 lea -1(%esi),%edx
18447 andl $-32,%edx
18448 - lea 3f(%ebx,%ebx), %ebx
18449 + lea 3f(%ebx,%ebx,2), %ebx
18450 testl %esi, %esi
18451 jmp *%ebx
18452 1: addl $64,%esi
18453 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18454 jb 5f
18455 SRC( movw (%esi), %dx )
18456 leal 2(%esi), %esi
18457 -DST( movw %dx, (%edi) )
18458 +DST( movw %dx, %es:(%edi) )
18459 leal 2(%edi), %edi
18460 je 6f
18461 shll $16,%edx
18462 5:
18463 SRC( movb (%esi), %dl )
18464 -DST( movb %dl, (%edi) )
18465 +DST( movb %dl, %es:(%edi) )
18466 6: addl %edx, %eax
18467 adcl $0, %eax
18468 7:
18469 .section .fixup, "ax"
18470 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18471 - movl $-EFAULT, (%ebx)
18472 + movl $-EFAULT, %ss:(%ebx)
18473 # zero the complete destination (computing the rest is too much work)
18474 movl ARGBASE+8(%esp),%edi # dst
18475 movl ARGBASE+12(%esp),%ecx # len
18476 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18477 rep; stosb
18478 jmp 7b
18479 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18480 - movl $-EFAULT, (%ebx)
18481 + movl $-EFAULT, %ss:(%ebx)
18482 jmp 7b
18483 .previous
18484
18485 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18486 + pushl %ss
18487 + CFI_ADJUST_CFA_OFFSET 4
18488 + popl %ds
18489 + CFI_ADJUST_CFA_OFFSET -4
18490 + pushl %ss
18491 + CFI_ADJUST_CFA_OFFSET 4
18492 + popl %es
18493 + CFI_ADJUST_CFA_OFFSET -4
18494 +#endif
18495 +
18496 popl %esi
18497 CFI_ADJUST_CFA_OFFSET -4
18498 CFI_RESTORE esi
18499 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18500 CFI_RESTORE ebx
18501 ret
18502 CFI_ENDPROC
18503 -ENDPROC(csum_partial_copy_generic)
18504 +ENDPROC(csum_partial_copy_generic_to_user)
18505
18506 #undef ROUND
18507 #undef ROUND1
18508 diff -urNp linux-2.6.32.42/arch/x86/lib/clear_page_64.S linux-2.6.32.42/arch/x86/lib/clear_page_64.S
18509 --- linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18510 +++ linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18511 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18512
18513 #include <asm/cpufeature.h>
18514
18515 - .section .altinstr_replacement,"ax"
18516 + .section .altinstr_replacement,"a"
18517 1: .byte 0xeb /* jmp <disp8> */
18518 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18519 2:
18520 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_page_64.S linux-2.6.32.42/arch/x86/lib/copy_page_64.S
18521 --- linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18522 +++ linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18523 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18524
18525 #include <asm/cpufeature.h>
18526
18527 - .section .altinstr_replacement,"ax"
18528 + .section .altinstr_replacement,"a"
18529 1: .byte 0xeb /* jmp <disp8> */
18530 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18531 2:
18532 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_64.S linux-2.6.32.42/arch/x86/lib/copy_user_64.S
18533 --- linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
18534 +++ linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
18535 @@ -15,13 +15,14 @@
18536 #include <asm/asm-offsets.h>
18537 #include <asm/thread_info.h>
18538 #include <asm/cpufeature.h>
18539 +#include <asm/pgtable.h>
18540
18541 .macro ALTERNATIVE_JUMP feature,orig,alt
18542 0:
18543 .byte 0xe9 /* 32bit jump */
18544 .long \orig-1f /* by default jump to orig */
18545 1:
18546 - .section .altinstr_replacement,"ax"
18547 + .section .altinstr_replacement,"a"
18548 2: .byte 0xe9 /* near jump with 32bit immediate */
18549 .long \alt-1b /* offset */ /* or alternatively to alt */
18550 .previous
18551 @@ -64,49 +65,19 @@
18552 #endif
18553 .endm
18554
18555 -/* Standard copy_to_user with segment limit checking */
18556 -ENTRY(copy_to_user)
18557 - CFI_STARTPROC
18558 - GET_THREAD_INFO(%rax)
18559 - movq %rdi,%rcx
18560 - addq %rdx,%rcx
18561 - jc bad_to_user
18562 - cmpq TI_addr_limit(%rax),%rcx
18563 - ja bad_to_user
18564 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18565 - CFI_ENDPROC
18566 -ENDPROC(copy_to_user)
18567 -
18568 -/* Standard copy_from_user with segment limit checking */
18569 -ENTRY(copy_from_user)
18570 - CFI_STARTPROC
18571 - GET_THREAD_INFO(%rax)
18572 - movq %rsi,%rcx
18573 - addq %rdx,%rcx
18574 - jc bad_from_user
18575 - cmpq TI_addr_limit(%rax),%rcx
18576 - ja bad_from_user
18577 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18578 - CFI_ENDPROC
18579 -ENDPROC(copy_from_user)
18580 -
18581 ENTRY(copy_user_generic)
18582 CFI_STARTPROC
18583 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18584 CFI_ENDPROC
18585 ENDPROC(copy_user_generic)
18586
18587 -ENTRY(__copy_from_user_inatomic)
18588 - CFI_STARTPROC
18589 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18590 - CFI_ENDPROC
18591 -ENDPROC(__copy_from_user_inatomic)
18592 -
18593 .section .fixup,"ax"
18594 /* must zero dest */
18595 ENTRY(bad_from_user)
18596 bad_from_user:
18597 CFI_STARTPROC
18598 + testl %edx,%edx
18599 + js bad_to_user
18600 movl %edx,%ecx
18601 xorl %eax,%eax
18602 rep
18603 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S
18604 --- linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18605 +++ linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18606 @@ -14,6 +14,7 @@
18607 #include <asm/current.h>
18608 #include <asm/asm-offsets.h>
18609 #include <asm/thread_info.h>
18610 +#include <asm/pgtable.h>
18611
18612 .macro ALIGN_DESTINATION
18613 #ifdef FIX_ALIGNMENT
18614 @@ -50,6 +51,15 @@
18615 */
18616 ENTRY(__copy_user_nocache)
18617 CFI_STARTPROC
18618 +
18619 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18620 + mov $PAX_USER_SHADOW_BASE,%rcx
18621 + cmp %rcx,%rsi
18622 + jae 1f
18623 + add %rcx,%rsi
18624 +1:
18625 +#endif
18626 +
18627 cmpl $8,%edx
18628 jb 20f /* less then 8 bytes, go to byte copy loop */
18629 ALIGN_DESTINATION
18630 diff -urNp linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c
18631 --- linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18632 +++ linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
18633 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
18634 len -= 2;
18635 }
18636 }
18637 +
18638 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18639 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18640 + src += PAX_USER_SHADOW_BASE;
18641 +#endif
18642 +
18643 isum = csum_partial_copy_generic((__force const void *)src,
18644 dst, len, isum, errp, NULL);
18645 if (unlikely(*errp))
18646 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
18647 }
18648
18649 *errp = 0;
18650 +
18651 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18652 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18653 + dst += PAX_USER_SHADOW_BASE;
18654 +#endif
18655 +
18656 return csum_partial_copy_generic(src, (void __force *)dst,
18657 len, isum, NULL, errp);
18658 }
18659 diff -urNp linux-2.6.32.42/arch/x86/lib/getuser.S linux-2.6.32.42/arch/x86/lib/getuser.S
18660 --- linux-2.6.32.42/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
18661 +++ linux-2.6.32.42/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
18662 @@ -33,14 +33,35 @@
18663 #include <asm/asm-offsets.h>
18664 #include <asm/thread_info.h>
18665 #include <asm/asm.h>
18666 +#include <asm/segment.h>
18667 +#include <asm/pgtable.h>
18668 +
18669 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18670 +#define __copyuser_seg gs;
18671 +#else
18672 +#define __copyuser_seg
18673 +#endif
18674
18675 .text
18676 ENTRY(__get_user_1)
18677 CFI_STARTPROC
18678 +
18679 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18680 GET_THREAD_INFO(%_ASM_DX)
18681 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18682 jae bad_get_user
18683 -1: movzb (%_ASM_AX),%edx
18684 +
18685 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18686 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18687 + cmp %_ASM_DX,%_ASM_AX
18688 + jae 1234f
18689 + add %_ASM_DX,%_ASM_AX
18690 +1234:
18691 +#endif
18692 +
18693 +#endif
18694 +
18695 +1: __copyuser_seg movzb (%_ASM_AX),%edx
18696 xor %eax,%eax
18697 ret
18698 CFI_ENDPROC
18699 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
18700 ENTRY(__get_user_2)
18701 CFI_STARTPROC
18702 add $1,%_ASM_AX
18703 +
18704 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18705 jc bad_get_user
18706 GET_THREAD_INFO(%_ASM_DX)
18707 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18708 jae bad_get_user
18709 -2: movzwl -1(%_ASM_AX),%edx
18710 +
18711 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18712 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18713 + cmp %_ASM_DX,%_ASM_AX
18714 + jae 1234f
18715 + add %_ASM_DX,%_ASM_AX
18716 +1234:
18717 +#endif
18718 +
18719 +#endif
18720 +
18721 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18722 xor %eax,%eax
18723 ret
18724 CFI_ENDPROC
18725 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
18726 ENTRY(__get_user_4)
18727 CFI_STARTPROC
18728 add $3,%_ASM_AX
18729 +
18730 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18731 jc bad_get_user
18732 GET_THREAD_INFO(%_ASM_DX)
18733 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18734 jae bad_get_user
18735 -3: mov -3(%_ASM_AX),%edx
18736 +
18737 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18738 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18739 + cmp %_ASM_DX,%_ASM_AX
18740 + jae 1234f
18741 + add %_ASM_DX,%_ASM_AX
18742 +1234:
18743 +#endif
18744 +
18745 +#endif
18746 +
18747 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
18748 xor %eax,%eax
18749 ret
18750 CFI_ENDPROC
18751 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
18752 GET_THREAD_INFO(%_ASM_DX)
18753 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18754 jae bad_get_user
18755 +
18756 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18757 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18758 + cmp %_ASM_DX,%_ASM_AX
18759 + jae 1234f
18760 + add %_ASM_DX,%_ASM_AX
18761 +1234:
18762 +#endif
18763 +
18764 4: movq -7(%_ASM_AX),%_ASM_DX
18765 xor %eax,%eax
18766 ret
18767 diff -urNp linux-2.6.32.42/arch/x86/lib/memcpy_64.S linux-2.6.32.42/arch/x86/lib/memcpy_64.S
18768 --- linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
18769 +++ linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
18770 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
18771 * It is also a lot simpler. Use this when possible:
18772 */
18773
18774 - .section .altinstr_replacement, "ax"
18775 + .section .altinstr_replacement, "a"
18776 1: .byte 0xeb /* jmp <disp8> */
18777 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
18778 2:
18779 diff -urNp linux-2.6.32.42/arch/x86/lib/memset_64.S linux-2.6.32.42/arch/x86/lib/memset_64.S
18780 --- linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
18781 +++ linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
18782 @@ -118,7 +118,7 @@ ENDPROC(__memset)
18783
18784 #include <asm/cpufeature.h>
18785
18786 - .section .altinstr_replacement,"ax"
18787 + .section .altinstr_replacement,"a"
18788 1: .byte 0xeb /* jmp <disp8> */
18789 .byte (memset_c - memset) - (2f - 1b) /* offset */
18790 2:
18791 diff -urNp linux-2.6.32.42/arch/x86/lib/mmx_32.c linux-2.6.32.42/arch/x86/lib/mmx_32.c
18792 --- linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
18793 +++ linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
18794 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18795 {
18796 void *p;
18797 int i;
18798 + unsigned long cr0;
18799
18800 if (unlikely(in_interrupt()))
18801 return __memcpy(to, from, len);
18802 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18803 kernel_fpu_begin();
18804
18805 __asm__ __volatile__ (
18806 - "1: prefetch (%0)\n" /* This set is 28 bytes */
18807 - " prefetch 64(%0)\n"
18808 - " prefetch 128(%0)\n"
18809 - " prefetch 192(%0)\n"
18810 - " prefetch 256(%0)\n"
18811 + "1: prefetch (%1)\n" /* This set is 28 bytes */
18812 + " prefetch 64(%1)\n"
18813 + " prefetch 128(%1)\n"
18814 + " prefetch 192(%1)\n"
18815 + " prefetch 256(%1)\n"
18816 "2: \n"
18817 ".section .fixup, \"ax\"\n"
18818 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18819 + "3: \n"
18820 +
18821 +#ifdef CONFIG_PAX_KERNEXEC
18822 + " movl %%cr0, %0\n"
18823 + " movl %0, %%eax\n"
18824 + " andl $0xFFFEFFFF, %%eax\n"
18825 + " movl %%eax, %%cr0\n"
18826 +#endif
18827 +
18828 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18829 +
18830 +#ifdef CONFIG_PAX_KERNEXEC
18831 + " movl %0, %%cr0\n"
18832 +#endif
18833 +
18834 " jmp 2b\n"
18835 ".previous\n"
18836 _ASM_EXTABLE(1b, 3b)
18837 - : : "r" (from));
18838 + : "=&r" (cr0) : "r" (from) : "ax");
18839
18840 for ( ; i > 5; i--) {
18841 __asm__ __volatile__ (
18842 - "1: prefetch 320(%0)\n"
18843 - "2: movq (%0), %%mm0\n"
18844 - " movq 8(%0), %%mm1\n"
18845 - " movq 16(%0), %%mm2\n"
18846 - " movq 24(%0), %%mm3\n"
18847 - " movq %%mm0, (%1)\n"
18848 - " movq %%mm1, 8(%1)\n"
18849 - " movq %%mm2, 16(%1)\n"
18850 - " movq %%mm3, 24(%1)\n"
18851 - " movq 32(%0), %%mm0\n"
18852 - " movq 40(%0), %%mm1\n"
18853 - " movq 48(%0), %%mm2\n"
18854 - " movq 56(%0), %%mm3\n"
18855 - " movq %%mm0, 32(%1)\n"
18856 - " movq %%mm1, 40(%1)\n"
18857 - " movq %%mm2, 48(%1)\n"
18858 - " movq %%mm3, 56(%1)\n"
18859 + "1: prefetch 320(%1)\n"
18860 + "2: movq (%1), %%mm0\n"
18861 + " movq 8(%1), %%mm1\n"
18862 + " movq 16(%1), %%mm2\n"
18863 + " movq 24(%1), %%mm3\n"
18864 + " movq %%mm0, (%2)\n"
18865 + " movq %%mm1, 8(%2)\n"
18866 + " movq %%mm2, 16(%2)\n"
18867 + " movq %%mm3, 24(%2)\n"
18868 + " movq 32(%1), %%mm0\n"
18869 + " movq 40(%1), %%mm1\n"
18870 + " movq 48(%1), %%mm2\n"
18871 + " movq 56(%1), %%mm3\n"
18872 + " movq %%mm0, 32(%2)\n"
18873 + " movq %%mm1, 40(%2)\n"
18874 + " movq %%mm2, 48(%2)\n"
18875 + " movq %%mm3, 56(%2)\n"
18876 ".section .fixup, \"ax\"\n"
18877 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18878 + "3:\n"
18879 +
18880 +#ifdef CONFIG_PAX_KERNEXEC
18881 + " movl %%cr0, %0\n"
18882 + " movl %0, %%eax\n"
18883 + " andl $0xFFFEFFFF, %%eax\n"
18884 + " movl %%eax, %%cr0\n"
18885 +#endif
18886 +
18887 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18888 +
18889 +#ifdef CONFIG_PAX_KERNEXEC
18890 + " movl %0, %%cr0\n"
18891 +#endif
18892 +
18893 " jmp 2b\n"
18894 ".previous\n"
18895 _ASM_EXTABLE(1b, 3b)
18896 - : : "r" (from), "r" (to) : "memory");
18897 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18898
18899 from += 64;
18900 to += 64;
18901 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18902 static void fast_copy_page(void *to, void *from)
18903 {
18904 int i;
18905 + unsigned long cr0;
18906
18907 kernel_fpu_begin();
18908
18909 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18910 * but that is for later. -AV
18911 */
18912 __asm__ __volatile__(
18913 - "1: prefetch (%0)\n"
18914 - " prefetch 64(%0)\n"
18915 - " prefetch 128(%0)\n"
18916 - " prefetch 192(%0)\n"
18917 - " prefetch 256(%0)\n"
18918 + "1: prefetch (%1)\n"
18919 + " prefetch 64(%1)\n"
18920 + " prefetch 128(%1)\n"
18921 + " prefetch 192(%1)\n"
18922 + " prefetch 256(%1)\n"
18923 "2: \n"
18924 ".section .fixup, \"ax\"\n"
18925 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18926 + "3: \n"
18927 +
18928 +#ifdef CONFIG_PAX_KERNEXEC
18929 + " movl %%cr0, %0\n"
18930 + " movl %0, %%eax\n"
18931 + " andl $0xFFFEFFFF, %%eax\n"
18932 + " movl %%eax, %%cr0\n"
18933 +#endif
18934 +
18935 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18936 +
18937 +#ifdef CONFIG_PAX_KERNEXEC
18938 + " movl %0, %%cr0\n"
18939 +#endif
18940 +
18941 " jmp 2b\n"
18942 ".previous\n"
18943 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18944 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18945
18946 for (i = 0; i < (4096-320)/64; i++) {
18947 __asm__ __volatile__ (
18948 - "1: prefetch 320(%0)\n"
18949 - "2: movq (%0), %%mm0\n"
18950 - " movntq %%mm0, (%1)\n"
18951 - " movq 8(%0), %%mm1\n"
18952 - " movntq %%mm1, 8(%1)\n"
18953 - " movq 16(%0), %%mm2\n"
18954 - " movntq %%mm2, 16(%1)\n"
18955 - " movq 24(%0), %%mm3\n"
18956 - " movntq %%mm3, 24(%1)\n"
18957 - " movq 32(%0), %%mm4\n"
18958 - " movntq %%mm4, 32(%1)\n"
18959 - " movq 40(%0), %%mm5\n"
18960 - " movntq %%mm5, 40(%1)\n"
18961 - " movq 48(%0), %%mm6\n"
18962 - " movntq %%mm6, 48(%1)\n"
18963 - " movq 56(%0), %%mm7\n"
18964 - " movntq %%mm7, 56(%1)\n"
18965 + "1: prefetch 320(%1)\n"
18966 + "2: movq (%1), %%mm0\n"
18967 + " movntq %%mm0, (%2)\n"
18968 + " movq 8(%1), %%mm1\n"
18969 + " movntq %%mm1, 8(%2)\n"
18970 + " movq 16(%1), %%mm2\n"
18971 + " movntq %%mm2, 16(%2)\n"
18972 + " movq 24(%1), %%mm3\n"
18973 + " movntq %%mm3, 24(%2)\n"
18974 + " movq 32(%1), %%mm4\n"
18975 + " movntq %%mm4, 32(%2)\n"
18976 + " movq 40(%1), %%mm5\n"
18977 + " movntq %%mm5, 40(%2)\n"
18978 + " movq 48(%1), %%mm6\n"
18979 + " movntq %%mm6, 48(%2)\n"
18980 + " movq 56(%1), %%mm7\n"
18981 + " movntq %%mm7, 56(%2)\n"
18982 ".section .fixup, \"ax\"\n"
18983 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18984 + "3:\n"
18985 +
18986 +#ifdef CONFIG_PAX_KERNEXEC
18987 + " movl %%cr0, %0\n"
18988 + " movl %0, %%eax\n"
18989 + " andl $0xFFFEFFFF, %%eax\n"
18990 + " movl %%eax, %%cr0\n"
18991 +#endif
18992 +
18993 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18994 +
18995 +#ifdef CONFIG_PAX_KERNEXEC
18996 + " movl %0, %%cr0\n"
18997 +#endif
18998 +
18999 " jmp 2b\n"
19000 ".previous\n"
19001 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19002 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19003
19004 from += 64;
19005 to += 64;
19006 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19007 static void fast_copy_page(void *to, void *from)
19008 {
19009 int i;
19010 + unsigned long cr0;
19011
19012 kernel_fpu_begin();
19013
19014 __asm__ __volatile__ (
19015 - "1: prefetch (%0)\n"
19016 - " prefetch 64(%0)\n"
19017 - " prefetch 128(%0)\n"
19018 - " prefetch 192(%0)\n"
19019 - " prefetch 256(%0)\n"
19020 + "1: prefetch (%1)\n"
19021 + " prefetch 64(%1)\n"
19022 + " prefetch 128(%1)\n"
19023 + " prefetch 192(%1)\n"
19024 + " prefetch 256(%1)\n"
19025 "2: \n"
19026 ".section .fixup, \"ax\"\n"
19027 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19028 + "3: \n"
19029 +
19030 +#ifdef CONFIG_PAX_KERNEXEC
19031 + " movl %%cr0, %0\n"
19032 + " movl %0, %%eax\n"
19033 + " andl $0xFFFEFFFF, %%eax\n"
19034 + " movl %%eax, %%cr0\n"
19035 +#endif
19036 +
19037 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19038 +
19039 +#ifdef CONFIG_PAX_KERNEXEC
19040 + " movl %0, %%cr0\n"
19041 +#endif
19042 +
19043 " jmp 2b\n"
19044 ".previous\n"
19045 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19046 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19047
19048 for (i = 0; i < 4096/64; i++) {
19049 __asm__ __volatile__ (
19050 - "1: prefetch 320(%0)\n"
19051 - "2: movq (%0), %%mm0\n"
19052 - " movq 8(%0), %%mm1\n"
19053 - " movq 16(%0), %%mm2\n"
19054 - " movq 24(%0), %%mm3\n"
19055 - " movq %%mm0, (%1)\n"
19056 - " movq %%mm1, 8(%1)\n"
19057 - " movq %%mm2, 16(%1)\n"
19058 - " movq %%mm3, 24(%1)\n"
19059 - " movq 32(%0), %%mm0\n"
19060 - " movq 40(%0), %%mm1\n"
19061 - " movq 48(%0), %%mm2\n"
19062 - " movq 56(%0), %%mm3\n"
19063 - " movq %%mm0, 32(%1)\n"
19064 - " movq %%mm1, 40(%1)\n"
19065 - " movq %%mm2, 48(%1)\n"
19066 - " movq %%mm3, 56(%1)\n"
19067 + "1: prefetch 320(%1)\n"
19068 + "2: movq (%1), %%mm0\n"
19069 + " movq 8(%1), %%mm1\n"
19070 + " movq 16(%1), %%mm2\n"
19071 + " movq 24(%1), %%mm3\n"
19072 + " movq %%mm0, (%2)\n"
19073 + " movq %%mm1, 8(%2)\n"
19074 + " movq %%mm2, 16(%2)\n"
19075 + " movq %%mm3, 24(%2)\n"
19076 + " movq 32(%1), %%mm0\n"
19077 + " movq 40(%1), %%mm1\n"
19078 + " movq 48(%1), %%mm2\n"
19079 + " movq 56(%1), %%mm3\n"
19080 + " movq %%mm0, 32(%2)\n"
19081 + " movq %%mm1, 40(%2)\n"
19082 + " movq %%mm2, 48(%2)\n"
19083 + " movq %%mm3, 56(%2)\n"
19084 ".section .fixup, \"ax\"\n"
19085 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19086 + "3:\n"
19087 +
19088 +#ifdef CONFIG_PAX_KERNEXEC
19089 + " movl %%cr0, %0\n"
19090 + " movl %0, %%eax\n"
19091 + " andl $0xFFFEFFFF, %%eax\n"
19092 + " movl %%eax, %%cr0\n"
19093 +#endif
19094 +
19095 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19096 +
19097 +#ifdef CONFIG_PAX_KERNEXEC
19098 + " movl %0, %%cr0\n"
19099 +#endif
19100 +
19101 " jmp 2b\n"
19102 ".previous\n"
19103 _ASM_EXTABLE(1b, 3b)
19104 - : : "r" (from), "r" (to) : "memory");
19105 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19106
19107 from += 64;
19108 to += 64;
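The MMX copy-page hunks above all follow the same KERNEXEC pattern. The .fixup code self-patches the copy loop (the "movw $0x05EB, 1b" stores a short jmp over the faulting prefetch so it is skipped from then on), and with CONFIG_PAX_KERNEXEC the kernel text being patched is mapped read-only. So the fixup first saves %cr0 into the new output operand (which is why every other operand shifts from %0/%1 to %1/%2), clears the write-protect bit with the 0xFFFEFFFF mask via a scratch copy in %eax, performs the store, and restores the saved %cr0; the added "=&r"(cr0) output and "ax" clobber in the constraints correspond to exactly those two registers. A minimal userspace sketch of just the bit arithmetic, with a made-up CR0 value:

#include <stdio.h>
#include <stdint.h>

#define X86_CR0_WP (1u << 16)   /* CR0 write-protect bit */

int main(void)
{
    uint32_t cr0 = 0x8005003bu;               /* made-up CR0 snapshot */
    uint32_t unprotected = cr0 & 0xFFFEFFFFu; /* mask used by the fixup path */

    printf("WP before: %d\n", (cr0 & X86_CR0_WP) ? 1 : 0);
    printf("WP after : %d\n", (unprotected & X86_CR0_WP) ? 1 : 0);
    printf("only bit 16 cleared: %s\n",
           (cr0 ^ unprotected) == X86_CR0_WP ? "yes" : "no");
    return 0;
}

0xFFFEFFFF is simply ~(1 << 16), so only CR0.WP changes while the write is made and the saved value puts it back afterwards.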
19109 diff -urNp linux-2.6.32.42/arch/x86/lib/putuser.S linux-2.6.32.42/arch/x86/lib/putuser.S
19110 --- linux-2.6.32.42/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19111 +++ linux-2.6.32.42/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19112 @@ -15,7 +15,8 @@
19113 #include <asm/thread_info.h>
19114 #include <asm/errno.h>
19115 #include <asm/asm.h>
19116 -
19117 +#include <asm/segment.h>
19118 +#include <asm/pgtable.h>
19119
19120 /*
19121 * __put_user_X
19122 @@ -29,52 +30,119 @@
19123 * as they get called from within inline assembly.
19124 */
19125
19126 -#define ENTER CFI_STARTPROC ; \
19127 - GET_THREAD_INFO(%_ASM_BX)
19128 +#define ENTER CFI_STARTPROC
19129 #define EXIT ret ; \
19130 CFI_ENDPROC
19131
19132 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19133 +#define _DEST %_ASM_CX,%_ASM_BX
19134 +#else
19135 +#define _DEST %_ASM_CX
19136 +#endif
19137 +
19138 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19139 +#define __copyuser_seg gs;
19140 +#else
19141 +#define __copyuser_seg
19142 +#endif
19143 +
19144 .text
19145 ENTRY(__put_user_1)
19146 ENTER
19147 +
19148 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19149 + GET_THREAD_INFO(%_ASM_BX)
19150 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19151 jae bad_put_user
19152 -1: movb %al,(%_ASM_CX)
19153 +
19154 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19155 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19156 + cmp %_ASM_BX,%_ASM_CX
19157 + jb 1234f
19158 + xor %ebx,%ebx
19159 +1234:
19160 +#endif
19161 +
19162 +#endif
19163 +
19164 +1: __copyuser_seg movb %al,(_DEST)
19165 xor %eax,%eax
19166 EXIT
19167 ENDPROC(__put_user_1)
19168
19169 ENTRY(__put_user_2)
19170 ENTER
19171 +
19172 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19173 + GET_THREAD_INFO(%_ASM_BX)
19174 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19175 sub $1,%_ASM_BX
19176 cmp %_ASM_BX,%_ASM_CX
19177 jae bad_put_user
19178 -2: movw %ax,(%_ASM_CX)
19179 +
19180 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19181 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19182 + cmp %_ASM_BX,%_ASM_CX
19183 + jb 1234f
19184 + xor %ebx,%ebx
19185 +1234:
19186 +#endif
19187 +
19188 +#endif
19189 +
19190 +2: __copyuser_seg movw %ax,(_DEST)
19191 xor %eax,%eax
19192 EXIT
19193 ENDPROC(__put_user_2)
19194
19195 ENTRY(__put_user_4)
19196 ENTER
19197 +
19198 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19199 + GET_THREAD_INFO(%_ASM_BX)
19200 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19201 sub $3,%_ASM_BX
19202 cmp %_ASM_BX,%_ASM_CX
19203 jae bad_put_user
19204 -3: movl %eax,(%_ASM_CX)
19205 +
19206 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19207 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19208 + cmp %_ASM_BX,%_ASM_CX
19209 + jb 1234f
19210 + xor %ebx,%ebx
19211 +1234:
19212 +#endif
19213 +
19214 +#endif
19215 +
19216 +3: __copyuser_seg movl %eax,(_DEST)
19217 xor %eax,%eax
19218 EXIT
19219 ENDPROC(__put_user_4)
19220
19221 ENTRY(__put_user_8)
19222 ENTER
19223 +
19224 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19225 + GET_THREAD_INFO(%_ASM_BX)
19226 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19227 sub $7,%_ASM_BX
19228 cmp %_ASM_BX,%_ASM_CX
19229 jae bad_put_user
19230 -4: mov %_ASM_AX,(%_ASM_CX)
19231 +
19232 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19233 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19234 + cmp %_ASM_BX,%_ASM_CX
19235 + jb 1234f
19236 + xor %ebx,%ebx
19237 +1234:
19238 +#endif
19239 +
19240 +#endif
19241 +
19242 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19243 #ifdef CONFIG_X86_32
19244 -5: movl %edx,4(%_ASM_CX)
19245 +5: __copyuser_seg movl %edx,4(_DEST)
19246 #endif
19247 xor %eax,%eax
19248 EXIT
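With PAX_MEMORY_UDEREF the __put_user_N helpers stop trusting the raw pointer in %_ASM_CX. On x86_64 the store goes through the "(%_ASM_CX,%_ASM_BX)" form of _DEST: %_ASM_BX is preloaded with PAX_USER_SHADOW_BASE and only zeroed when the pointer is already at or above that base, so userland addresses are reached through their shadow alias while higher addresses pass through unchanged. On x86_32 the same stores instead pick up the gs override from __copyuser_seg. A rough C rendering of the 64-bit address selection (the shadow base value below is invented for the example):

#include <stdio.h>
#include <stdint.h>

#define PAX_USER_SHADOW_BASE 0x0000100000000000ULL   /* invented for the demo */

/* Mirrors the mov/cmp/jb/xor sequence added to __put_user_*: keep the
 * shadow base for pointers below it, use a zero base for anything else. */
static uint64_t uderef_target(uint64_t ptr)
{
    uint64_t base = PAX_USER_SHADOW_BASE;   /* mov $PAX_USER_SHADOW_BASE,%bx */
    if (ptr >= base)                        /* cmp %bx,%cx ; jb 1234f        */
        base = 0;                           /* xor %ebx,%ebx                 */
    return ptr + base;                      /* store goes to (%cx,%bx)       */
}

int main(void)
{
    uint64_t user_ptr   = 0x00007f0000001000ULL;   /* typical userland address  */
    uint64_t kernel_ptr = 0xffff880012345000ULL;   /* typical direct-map address */

    printf("%#llx -> %#llx\n", (unsigned long long)user_ptr,
           (unsigned long long)uderef_target(user_ptr));
    printf("%#llx -> %#llx\n", (unsigned long long)kernel_ptr,
           (unsigned long long)uderef_target(kernel_ptr));
    return 0;
}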
19249 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_32.c linux-2.6.32.42/arch/x86/lib/usercopy_32.c
19250 --- linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19251 +++ linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19252 @@ -43,7 +43,7 @@ do { \
19253 __asm__ __volatile__( \
19254 " testl %1,%1\n" \
19255 " jz 2f\n" \
19256 - "0: lodsb\n" \
19257 + "0: "__copyuser_seg"lodsb\n" \
19258 " stosb\n" \
19259 " testb %%al,%%al\n" \
19260 " jz 1f\n" \
19261 @@ -128,10 +128,12 @@ do { \
19262 int __d0; \
19263 might_fault(); \
19264 __asm__ __volatile__( \
19265 + __COPYUSER_SET_ES \
19266 "0: rep; stosl\n" \
19267 " movl %2,%0\n" \
19268 "1: rep; stosb\n" \
19269 "2:\n" \
19270 + __COPYUSER_RESTORE_ES \
19271 ".section .fixup,\"ax\"\n" \
19272 "3: lea 0(%2,%0,4),%0\n" \
19273 " jmp 2b\n" \
19274 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19275 might_fault();
19276
19277 __asm__ __volatile__(
19278 + __COPYUSER_SET_ES
19279 " testl %0, %0\n"
19280 " jz 3f\n"
19281 " andl %0,%%ecx\n"
19282 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19283 " subl %%ecx,%0\n"
19284 " addl %0,%%eax\n"
19285 "1:\n"
19286 + __COPYUSER_RESTORE_ES
19287 ".section .fixup,\"ax\"\n"
19288 "2: xorl %%eax,%%eax\n"
19289 " jmp 1b\n"
19290 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19291
19292 #ifdef CONFIG_X86_INTEL_USERCOPY
19293 static unsigned long
19294 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19295 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19296 {
19297 int d0, d1;
19298 __asm__ __volatile__(
19299 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19300 " .align 2,0x90\n"
19301 "3: movl 0(%4), %%eax\n"
19302 "4: movl 4(%4), %%edx\n"
19303 - "5: movl %%eax, 0(%3)\n"
19304 - "6: movl %%edx, 4(%3)\n"
19305 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19306 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19307 "7: movl 8(%4), %%eax\n"
19308 "8: movl 12(%4),%%edx\n"
19309 - "9: movl %%eax, 8(%3)\n"
19310 - "10: movl %%edx, 12(%3)\n"
19311 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19312 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19313 "11: movl 16(%4), %%eax\n"
19314 "12: movl 20(%4), %%edx\n"
19315 - "13: movl %%eax, 16(%3)\n"
19316 - "14: movl %%edx, 20(%3)\n"
19317 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19318 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19319 "15: movl 24(%4), %%eax\n"
19320 "16: movl 28(%4), %%edx\n"
19321 - "17: movl %%eax, 24(%3)\n"
19322 - "18: movl %%edx, 28(%3)\n"
19323 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19324 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19325 "19: movl 32(%4), %%eax\n"
19326 "20: movl 36(%4), %%edx\n"
19327 - "21: movl %%eax, 32(%3)\n"
19328 - "22: movl %%edx, 36(%3)\n"
19329 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19330 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19331 "23: movl 40(%4), %%eax\n"
19332 "24: movl 44(%4), %%edx\n"
19333 - "25: movl %%eax, 40(%3)\n"
19334 - "26: movl %%edx, 44(%3)\n"
19335 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19336 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19337 "27: movl 48(%4), %%eax\n"
19338 "28: movl 52(%4), %%edx\n"
19339 - "29: movl %%eax, 48(%3)\n"
19340 - "30: movl %%edx, 52(%3)\n"
19341 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19342 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19343 "31: movl 56(%4), %%eax\n"
19344 "32: movl 60(%4), %%edx\n"
19345 - "33: movl %%eax, 56(%3)\n"
19346 - "34: movl %%edx, 60(%3)\n"
19347 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19348 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19349 " addl $-64, %0\n"
19350 " addl $64, %4\n"
19351 " addl $64, %3\n"
19352 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19353 " shrl $2, %0\n"
19354 " andl $3, %%eax\n"
19355 " cld\n"
19356 + __COPYUSER_SET_ES
19357 "99: rep; movsl\n"
19358 "36: movl %%eax, %0\n"
19359 "37: rep; movsb\n"
19360 "100:\n"
19361 + __COPYUSER_RESTORE_ES
19362 + ".section .fixup,\"ax\"\n"
19363 + "101: lea 0(%%eax,%0,4),%0\n"
19364 + " jmp 100b\n"
19365 + ".previous\n"
19366 + ".section __ex_table,\"a\"\n"
19367 + " .align 4\n"
19368 + " .long 1b,100b\n"
19369 + " .long 2b,100b\n"
19370 + " .long 3b,100b\n"
19371 + " .long 4b,100b\n"
19372 + " .long 5b,100b\n"
19373 + " .long 6b,100b\n"
19374 + " .long 7b,100b\n"
19375 + " .long 8b,100b\n"
19376 + " .long 9b,100b\n"
19377 + " .long 10b,100b\n"
19378 + " .long 11b,100b\n"
19379 + " .long 12b,100b\n"
19380 + " .long 13b,100b\n"
19381 + " .long 14b,100b\n"
19382 + " .long 15b,100b\n"
19383 + " .long 16b,100b\n"
19384 + " .long 17b,100b\n"
19385 + " .long 18b,100b\n"
19386 + " .long 19b,100b\n"
19387 + " .long 20b,100b\n"
19388 + " .long 21b,100b\n"
19389 + " .long 22b,100b\n"
19390 + " .long 23b,100b\n"
19391 + " .long 24b,100b\n"
19392 + " .long 25b,100b\n"
19393 + " .long 26b,100b\n"
19394 + " .long 27b,100b\n"
19395 + " .long 28b,100b\n"
19396 + " .long 29b,100b\n"
19397 + " .long 30b,100b\n"
19398 + " .long 31b,100b\n"
19399 + " .long 32b,100b\n"
19400 + " .long 33b,100b\n"
19401 + " .long 34b,100b\n"
19402 + " .long 35b,100b\n"
19403 + " .long 36b,100b\n"
19404 + " .long 37b,100b\n"
19405 + " .long 99b,101b\n"
19406 + ".previous"
19407 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19408 + : "1"(to), "2"(from), "0"(size)
19409 + : "eax", "edx", "memory");
19410 + return size;
19411 +}
19412 +
19413 +static unsigned long
19414 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19415 +{
19416 + int d0, d1;
19417 + __asm__ __volatile__(
19418 + " .align 2,0x90\n"
19419 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19420 + " cmpl $67, %0\n"
19421 + " jbe 3f\n"
19422 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19423 + " .align 2,0x90\n"
19424 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19425 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19426 + "5: movl %%eax, 0(%3)\n"
19427 + "6: movl %%edx, 4(%3)\n"
19428 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19429 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19430 + "9: movl %%eax, 8(%3)\n"
19431 + "10: movl %%edx, 12(%3)\n"
19432 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19433 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19434 + "13: movl %%eax, 16(%3)\n"
19435 + "14: movl %%edx, 20(%3)\n"
19436 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19437 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19438 + "17: movl %%eax, 24(%3)\n"
19439 + "18: movl %%edx, 28(%3)\n"
19440 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19441 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19442 + "21: movl %%eax, 32(%3)\n"
19443 + "22: movl %%edx, 36(%3)\n"
19444 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19445 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19446 + "25: movl %%eax, 40(%3)\n"
19447 + "26: movl %%edx, 44(%3)\n"
19448 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19449 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19450 + "29: movl %%eax, 48(%3)\n"
19451 + "30: movl %%edx, 52(%3)\n"
19452 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19453 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19454 + "33: movl %%eax, 56(%3)\n"
19455 + "34: movl %%edx, 60(%3)\n"
19456 + " addl $-64, %0\n"
19457 + " addl $64, %4\n"
19458 + " addl $64, %3\n"
19459 + " cmpl $63, %0\n"
19460 + " ja 1b\n"
19461 + "35: movl %0, %%eax\n"
19462 + " shrl $2, %0\n"
19463 + " andl $3, %%eax\n"
19464 + " cld\n"
19465 + "99: rep; "__copyuser_seg" movsl\n"
19466 + "36: movl %%eax, %0\n"
19467 + "37: rep; "__copyuser_seg" movsb\n"
19468 + "100:\n"
19469 ".section .fixup,\"ax\"\n"
19470 "101: lea 0(%%eax,%0,4),%0\n"
19471 " jmp 100b\n"
19472 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19473 int d0, d1;
19474 __asm__ __volatile__(
19475 " .align 2,0x90\n"
19476 - "0: movl 32(%4), %%eax\n"
19477 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19478 " cmpl $67, %0\n"
19479 " jbe 2f\n"
19480 - "1: movl 64(%4), %%eax\n"
19481 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19482 " .align 2,0x90\n"
19483 - "2: movl 0(%4), %%eax\n"
19484 - "21: movl 4(%4), %%edx\n"
19485 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19486 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19487 " movl %%eax, 0(%3)\n"
19488 " movl %%edx, 4(%3)\n"
19489 - "3: movl 8(%4), %%eax\n"
19490 - "31: movl 12(%4),%%edx\n"
19491 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19492 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19493 " movl %%eax, 8(%3)\n"
19494 " movl %%edx, 12(%3)\n"
19495 - "4: movl 16(%4), %%eax\n"
19496 - "41: movl 20(%4), %%edx\n"
19497 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19498 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19499 " movl %%eax, 16(%3)\n"
19500 " movl %%edx, 20(%3)\n"
19501 - "10: movl 24(%4), %%eax\n"
19502 - "51: movl 28(%4), %%edx\n"
19503 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19504 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19505 " movl %%eax, 24(%3)\n"
19506 " movl %%edx, 28(%3)\n"
19507 - "11: movl 32(%4), %%eax\n"
19508 - "61: movl 36(%4), %%edx\n"
19509 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19510 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19511 " movl %%eax, 32(%3)\n"
19512 " movl %%edx, 36(%3)\n"
19513 - "12: movl 40(%4), %%eax\n"
19514 - "71: movl 44(%4), %%edx\n"
19515 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19516 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19517 " movl %%eax, 40(%3)\n"
19518 " movl %%edx, 44(%3)\n"
19519 - "13: movl 48(%4), %%eax\n"
19520 - "81: movl 52(%4), %%edx\n"
19521 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19522 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19523 " movl %%eax, 48(%3)\n"
19524 " movl %%edx, 52(%3)\n"
19525 - "14: movl 56(%4), %%eax\n"
19526 - "91: movl 60(%4), %%edx\n"
19527 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19528 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19529 " movl %%eax, 56(%3)\n"
19530 " movl %%edx, 60(%3)\n"
19531 " addl $-64, %0\n"
19532 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19533 " shrl $2, %0\n"
19534 " andl $3, %%eax\n"
19535 " cld\n"
19536 - "6: rep; movsl\n"
19537 + "6: rep; "__copyuser_seg" movsl\n"
19538 " movl %%eax,%0\n"
19539 - "7: rep; movsb\n"
19540 + "7: rep; "__copyuser_seg" movsb\n"
19541 "8:\n"
19542 ".section .fixup,\"ax\"\n"
19543 "9: lea 0(%%eax,%0,4),%0\n"
19544 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19545
19546 __asm__ __volatile__(
19547 " .align 2,0x90\n"
19548 - "0: movl 32(%4), %%eax\n"
19549 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19550 " cmpl $67, %0\n"
19551 " jbe 2f\n"
19552 - "1: movl 64(%4), %%eax\n"
19553 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19554 " .align 2,0x90\n"
19555 - "2: movl 0(%4), %%eax\n"
19556 - "21: movl 4(%4), %%edx\n"
19557 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19558 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19559 " movnti %%eax, 0(%3)\n"
19560 " movnti %%edx, 4(%3)\n"
19561 - "3: movl 8(%4), %%eax\n"
19562 - "31: movl 12(%4),%%edx\n"
19563 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19564 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19565 " movnti %%eax, 8(%3)\n"
19566 " movnti %%edx, 12(%3)\n"
19567 - "4: movl 16(%4), %%eax\n"
19568 - "41: movl 20(%4), %%edx\n"
19569 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19570 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19571 " movnti %%eax, 16(%3)\n"
19572 " movnti %%edx, 20(%3)\n"
19573 - "10: movl 24(%4), %%eax\n"
19574 - "51: movl 28(%4), %%edx\n"
19575 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19576 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19577 " movnti %%eax, 24(%3)\n"
19578 " movnti %%edx, 28(%3)\n"
19579 - "11: movl 32(%4), %%eax\n"
19580 - "61: movl 36(%4), %%edx\n"
19581 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19582 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19583 " movnti %%eax, 32(%3)\n"
19584 " movnti %%edx, 36(%3)\n"
19585 - "12: movl 40(%4), %%eax\n"
19586 - "71: movl 44(%4), %%edx\n"
19587 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19588 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19589 " movnti %%eax, 40(%3)\n"
19590 " movnti %%edx, 44(%3)\n"
19591 - "13: movl 48(%4), %%eax\n"
19592 - "81: movl 52(%4), %%edx\n"
19593 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19594 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19595 " movnti %%eax, 48(%3)\n"
19596 " movnti %%edx, 52(%3)\n"
19597 - "14: movl 56(%4), %%eax\n"
19598 - "91: movl 60(%4), %%edx\n"
19599 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19600 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19601 " movnti %%eax, 56(%3)\n"
19602 " movnti %%edx, 60(%3)\n"
19603 " addl $-64, %0\n"
19604 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19605 " shrl $2, %0\n"
19606 " andl $3, %%eax\n"
19607 " cld\n"
19608 - "6: rep; movsl\n"
19609 + "6: rep; "__copyuser_seg" movsl\n"
19610 " movl %%eax,%0\n"
19611 - "7: rep; movsb\n"
19612 + "7: rep; "__copyuser_seg" movsb\n"
19613 "8:\n"
19614 ".section .fixup,\"ax\"\n"
19615 "9: lea 0(%%eax,%0,4),%0\n"
19616 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19617
19618 __asm__ __volatile__(
19619 " .align 2,0x90\n"
19620 - "0: movl 32(%4), %%eax\n"
19621 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19622 " cmpl $67, %0\n"
19623 " jbe 2f\n"
19624 - "1: movl 64(%4), %%eax\n"
19625 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19626 " .align 2,0x90\n"
19627 - "2: movl 0(%4), %%eax\n"
19628 - "21: movl 4(%4), %%edx\n"
19629 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19630 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19631 " movnti %%eax, 0(%3)\n"
19632 " movnti %%edx, 4(%3)\n"
19633 - "3: movl 8(%4), %%eax\n"
19634 - "31: movl 12(%4),%%edx\n"
19635 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19636 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19637 " movnti %%eax, 8(%3)\n"
19638 " movnti %%edx, 12(%3)\n"
19639 - "4: movl 16(%4), %%eax\n"
19640 - "41: movl 20(%4), %%edx\n"
19641 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19642 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19643 " movnti %%eax, 16(%3)\n"
19644 " movnti %%edx, 20(%3)\n"
19645 - "10: movl 24(%4), %%eax\n"
19646 - "51: movl 28(%4), %%edx\n"
19647 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19648 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19649 " movnti %%eax, 24(%3)\n"
19650 " movnti %%edx, 28(%3)\n"
19651 - "11: movl 32(%4), %%eax\n"
19652 - "61: movl 36(%4), %%edx\n"
19653 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19654 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19655 " movnti %%eax, 32(%3)\n"
19656 " movnti %%edx, 36(%3)\n"
19657 - "12: movl 40(%4), %%eax\n"
19658 - "71: movl 44(%4), %%edx\n"
19659 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19660 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19661 " movnti %%eax, 40(%3)\n"
19662 " movnti %%edx, 44(%3)\n"
19663 - "13: movl 48(%4), %%eax\n"
19664 - "81: movl 52(%4), %%edx\n"
19665 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19666 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19667 " movnti %%eax, 48(%3)\n"
19668 " movnti %%edx, 52(%3)\n"
19669 - "14: movl 56(%4), %%eax\n"
19670 - "91: movl 60(%4), %%edx\n"
19671 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19672 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19673 " movnti %%eax, 56(%3)\n"
19674 " movnti %%edx, 60(%3)\n"
19675 " addl $-64, %0\n"
19676 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19677 " shrl $2, %0\n"
19678 " andl $3, %%eax\n"
19679 " cld\n"
19680 - "6: rep; movsl\n"
19681 + "6: rep; "__copyuser_seg" movsl\n"
19682 " movl %%eax,%0\n"
19683 - "7: rep; movsb\n"
19684 + "7: rep; "__copyuser_seg" movsb\n"
19685 "8:\n"
19686 ".section .fixup,\"ax\"\n"
19687 "9: lea 0(%%eax,%0,4),%0\n"
19688 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19689 */
19690 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19691 unsigned long size);
19692 -unsigned long __copy_user_intel(void __user *to, const void *from,
19693 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19694 + unsigned long size);
19695 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19696 unsigned long size);
19697 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19698 const void __user *from, unsigned long size);
19699 #endif /* CONFIG_X86_INTEL_USERCOPY */
19700
19701 /* Generic arbitrary sized copy. */
19702 -#define __copy_user(to, from, size) \
19703 +#define __copy_user(to, from, size, prefix, set, restore) \
19704 do { \
19705 int __d0, __d1, __d2; \
19706 __asm__ __volatile__( \
19707 + set \
19708 " cmp $7,%0\n" \
19709 " jbe 1f\n" \
19710 " movl %1,%0\n" \
19711 " negl %0\n" \
19712 " andl $7,%0\n" \
19713 " subl %0,%3\n" \
19714 - "4: rep; movsb\n" \
19715 + "4: rep; "prefix"movsb\n" \
19716 " movl %3,%0\n" \
19717 " shrl $2,%0\n" \
19718 " andl $3,%3\n" \
19719 " .align 2,0x90\n" \
19720 - "0: rep; movsl\n" \
19721 + "0: rep; "prefix"movsl\n" \
19722 " movl %3,%0\n" \
19723 - "1: rep; movsb\n" \
19724 + "1: rep; "prefix"movsb\n" \
19725 "2:\n" \
19726 + restore \
19727 ".section .fixup,\"ax\"\n" \
19728 "5: addl %3,%0\n" \
19729 " jmp 2b\n" \
19730 @@ -682,14 +799,14 @@ do { \
19731 " negl %0\n" \
19732 " andl $7,%0\n" \
19733 " subl %0,%3\n" \
19734 - "4: rep; movsb\n" \
19735 + "4: rep; "__copyuser_seg"movsb\n" \
19736 " movl %3,%0\n" \
19737 " shrl $2,%0\n" \
19738 " andl $3,%3\n" \
19739 " .align 2,0x90\n" \
19740 - "0: rep; movsl\n" \
19741 + "0: rep; "__copyuser_seg"movsl\n" \
19742 " movl %3,%0\n" \
19743 - "1: rep; movsb\n" \
19744 + "1: rep; "__copyuser_seg"movsb\n" \
19745 "2:\n" \
19746 ".section .fixup,\"ax\"\n" \
19747 "5: addl %3,%0\n" \
19748 @@ -775,9 +892,9 @@ survive:
19749 }
19750 #endif
19751 if (movsl_is_ok(to, from, n))
19752 - __copy_user(to, from, n);
19753 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19754 else
19755 - n = __copy_user_intel(to, from, n);
19756 + n = __generic_copy_to_user_intel(to, from, n);
19757 return n;
19758 }
19759 EXPORT_SYMBOL(__copy_to_user_ll);
19760 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19761 unsigned long n)
19762 {
19763 if (movsl_is_ok(to, from, n))
19764 - __copy_user(to, from, n);
19765 + __copy_user(to, from, n, __copyuser_seg, "", "");
19766 else
19767 - n = __copy_user_intel((void __user *)to,
19768 - (const void *)from, n);
19769 + n = __generic_copy_from_user_intel(to, from, n);
19770 return n;
19771 }
19772 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19773 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
19774 if (n > 64 && cpu_has_xmm2)
19775 n = __copy_user_intel_nocache(to, from, n);
19776 else
19777 - __copy_user(to, from, n);
19778 + __copy_user(to, from, n, __copyuser_seg, "", "");
19779 #else
19780 - __copy_user(to, from, n);
19781 + __copy_user(to, from, n, __copyuser_seg, "", "");
19782 #endif
19783 return n;
19784 }
19785 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19786
19787 -/**
19788 - * copy_to_user: - Copy a block of data into user space.
19789 - * @to: Destination address, in user space.
19790 - * @from: Source address, in kernel space.
19791 - * @n: Number of bytes to copy.
19792 - *
19793 - * Context: User context only. This function may sleep.
19794 - *
19795 - * Copy data from kernel space to user space.
19796 - *
19797 - * Returns number of bytes that could not be copied.
19798 - * On success, this will be zero.
19799 - */
19800 -unsigned long
19801 -copy_to_user(void __user *to, const void *from, unsigned long n)
19802 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19803 +void __set_fs(mm_segment_t x)
19804 {
19805 - if (access_ok(VERIFY_WRITE, to, n))
19806 - n = __copy_to_user(to, from, n);
19807 - return n;
19808 + switch (x.seg) {
19809 + case 0:
19810 + loadsegment(gs, 0);
19811 + break;
19812 + case TASK_SIZE_MAX:
19813 + loadsegment(gs, __USER_DS);
19814 + break;
19815 + case -1UL:
19816 + loadsegment(gs, __KERNEL_DS);
19817 + break;
19818 + default:
19819 + BUG();
19820 + }
19821 + return;
19822 }
19823 -EXPORT_SYMBOL(copy_to_user);
19824 +EXPORT_SYMBOL(__set_fs);
19825
19826 -/**
19827 - * copy_from_user: - Copy a block of data from user space.
19828 - * @to: Destination address, in kernel space.
19829 - * @from: Source address, in user space.
19830 - * @n: Number of bytes to copy.
19831 - *
19832 - * Context: User context only. This function may sleep.
19833 - *
19834 - * Copy data from user space to kernel space.
19835 - *
19836 - * Returns number of bytes that could not be copied.
19837 - * On success, this will be zero.
19838 - *
19839 - * If some data could not be copied, this function will pad the copied
19840 - * data to the requested size using zero bytes.
19841 - */
19842 -unsigned long
19843 -copy_from_user(void *to, const void __user *from, unsigned long n)
19844 +void set_fs(mm_segment_t x)
19845 {
19846 - if (access_ok(VERIFY_READ, from, n))
19847 - n = __copy_from_user(to, from, n);
19848 - else
19849 - memset(to, 0, n);
19850 - return n;
19851 + current_thread_info()->addr_limit = x;
19852 + __set_fs(x);
19853 }
19854 -EXPORT_SYMBOL(copy_from_user);
19855 +EXPORT_SYMBOL(set_fs);
19856 +#endif
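The usercopy_32.c changes have two parts. First, every user-memory access inside the copy templates gains either a "__copyuser_seg" (gs) prefix or an __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES bracket, and __copy_user() becomes a template parameterised on that prefix so __copy_to_user_ll() and the __copy_from_user_ll_*() variants can instantiate it for the direction they need (the old __copy_user_intel() likewise splits into __generic_copy_to_user_intel() and __generic_copy_from_user_intel()). Second, the copy_to_user()/copy_from_user() wrappers disappear from this file (they are presumably reintroduced elsewhere in the patch), and under PAX_MEMORY_UDEREF set_fs() is redefined so that changing the address limit also reloads %gs, the segment all those prefixed accesses go through. A compact sketch of that addr_limit-to-segment mapping (selector names and the TASK_SIZE_MAX value are placeholders):

#include <stdio.h>

#define TASK_SIZE_MAX 0xc0000000UL          /* placeholder 3G/1G split */

enum gs_segment { GS_NULL, GS_USER_DS, GS_KERNEL_DS, GS_BUG };

/* Mirrors the switch in the patched __set_fs(): the addr_limit installed by
 * set_fs() decides which segment %gs is loaded with, and therefore what the
 * __copyuser_seg-prefixed accessors can reach. */
static enum gs_segment gs_for_addr_limit(unsigned long limit)
{
    switch (limit) {
    case 0:                 return GS_NULL;       /* userland access disabled */
    case TASK_SIZE_MAX:     return GS_USER_DS;    /* normal USER_DS           */
    case (unsigned long)-1: return GS_KERNEL_DS;  /* KERNEL_DS                */
    default:                return GS_BUG;        /* would hit BUG()          */
    }
}

int main(void)
{
    static const char *const name[] = { "NULL", "__USER_DS", "__KERNEL_DS", "BUG()" };

    printf("set_fs(USER_DS)   -> %s\n", name[gs_for_addr_limit(TASK_SIZE_MAX)]);
    printf("set_fs(KERNEL_DS) -> %s\n", name[gs_for_addr_limit((unsigned long)-1)]);
    return 0;
}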
19857 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_64.c linux-2.6.32.42/arch/x86/lib/usercopy_64.c
19858 --- linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
19859 +++ linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
19860 @@ -42,6 +42,12 @@ long
19861 __strncpy_from_user(char *dst, const char __user *src, long count)
19862 {
19863 long res;
19864 +
19865 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19866 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19867 + src += PAX_USER_SHADOW_BASE;
19868 +#endif
19869 +
19870 __do_strncpy_from_user(dst, src, count, res);
19871 return res;
19872 }
19873 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19874 {
19875 long __d0;
19876 might_fault();
19877 +
19878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19879 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19880 + addr += PAX_USER_SHADOW_BASE;
19881 +#endif
19882 +
19883 /* no memory constraint because it doesn't change any memory gcc knows
19884 about */
19885 asm volatile(
19886 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19887
19888 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19889 {
19890 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19891 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19892 +
19893 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19894 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19895 + to += PAX_USER_SHADOW_BASE;
19896 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19897 + from += PAX_USER_SHADOW_BASE;
19898 +#endif
19899 +
19900 return copy_user_generic((__force void *)to, (__force void *)from, len);
19901 - }
19902 - return len;
19903 + }
19904 + return len;
19905 }
19906 EXPORT_SYMBOL(copy_in_user);
19907
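usercopy_64.c gets the C-side counterpart of the putuser.S change: __strncpy_from_user(), __clear_user() and copy_in_user() all lift any pointer still below PAX_USER_SHADOW_BASE into the shadow alias before the underlying primitive runs (the surrounding copy_in_user() lines change only in whitespace). Roughly, with the kernel helpers stubbed out and an invented base value:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAX_USER_SHADOW_BASE 0x0000100000000000ULL   /* invented for the demo */

/* Stand-ins for the kernel primitives, so the sketch runs in userspace. */
static int access_ok(uint64_t ptr, size_t len) { (void)ptr; (void)len; return 1; }

static size_t copy_user_generic(uint64_t to, uint64_t from, size_t len)
{
    printf("copy_user_generic(to=%#llx, from=%#llx, len=%zu)\n",
           (unsigned long long)to, (unsigned long long)from, len);
    return 0;   /* bytes left uncopied */
}

/* Same shape as the patched copy_in_user(): range-check both pointers, then
 * shift each one into the shadow alias before the generic copier runs. */
static size_t copy_in_user_sketch(uint64_t to, uint64_t from, size_t len)
{
    if (access_ok(to, len) && access_ok(from, len)) {
        if (to < PAX_USER_SHADOW_BASE)
            to += PAX_USER_SHADOW_BASE;
        if (from < PAX_USER_SHADOW_BASE)
            from += PAX_USER_SHADOW_BASE;
        return copy_user_generic(to, from, len);
    }
    return len;
}

int main(void)
{
    return (int)copy_in_user_sketch(0x7f0000002000ULL, 0x7f0000001000ULL, 64);
}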
19908 diff -urNp linux-2.6.32.42/arch/x86/Makefile linux-2.6.32.42/arch/x86/Makefile
19909 --- linux-2.6.32.42/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
19910 +++ linux-2.6.32.42/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
19911 @@ -189,3 +189,12 @@ define archhelp
19912 echo ' FDARGS="..." arguments for the booted kernel'
19913 echo ' FDINITRD=file initrd for the booted kernel'
19914 endef
19915 +
19916 +define OLD_LD
19917 +
19918 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19919 +*** Please upgrade your binutils to 2.18 or newer
19920 +endef
19921 +
19922 +archprepare:
19923 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19924 diff -urNp linux-2.6.32.42/arch/x86/mm/extable.c linux-2.6.32.42/arch/x86/mm/extable.c
19925 --- linux-2.6.32.42/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
19926 +++ linux-2.6.32.42/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
19927 @@ -1,14 +1,71 @@
19928 #include <linux/module.h>
19929 #include <linux/spinlock.h>
19930 +#include <linux/sort.h>
19931 #include <asm/uaccess.h>
19932 +#include <asm/pgtable.h>
19933
19934 +/*
19935 + * The exception table needs to be sorted so that the binary
19936 + * search that we use to find entries in it works properly.
19937 + * This is used both for the kernel exception table and for
19938 + * the exception tables of modules that get loaded.
19939 + */
19940 +static int cmp_ex(const void *a, const void *b)
19941 +{
19942 + const struct exception_table_entry *x = a, *y = b;
19943 +
19944 + /* avoid overflow */
19945 + if (x->insn > y->insn)
19946 + return 1;
19947 + if (x->insn < y->insn)
19948 + return -1;
19949 + return 0;
19950 +}
19951 +
19952 +static void swap_ex(void *a, void *b, int size)
19953 +{
19954 + struct exception_table_entry t, *x = a, *y = b;
19955 +
19956 + t = *x;
19957 +
19958 + pax_open_kernel();
19959 + *x = *y;
19960 + *y = t;
19961 + pax_close_kernel();
19962 +}
19963 +
19964 +void sort_extable(struct exception_table_entry *start,
19965 + struct exception_table_entry *finish)
19966 +{
19967 + sort(start, finish - start, sizeof(struct exception_table_entry),
19968 + cmp_ex, swap_ex);
19969 +}
19970 +
19971 +#ifdef CONFIG_MODULES
19972 +/*
19973 + * If the exception table is sorted, any referring to the module init
19974 + * will be at the beginning or the end.
19975 + */
19976 +void trim_init_extable(struct module *m)
19977 +{
19978 + /*trim the beginning*/
19979 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
19980 + m->extable++;
19981 + m->num_exentries--;
19982 + }
19983 + /*trim the end*/
19984 + while (m->num_exentries &&
19985 + within_module_init(m->extable[m->num_exentries-1].insn, m))
19986 + m->num_exentries--;
19987 +}
19988 +#endif /* CONFIG_MODULES */
19989
19990 int fixup_exception(struct pt_regs *regs)
19991 {
19992 const struct exception_table_entry *fixup;
19993
19994 #ifdef CONFIG_PNPBIOS
19995 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19996 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19997 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19998 extern u32 pnp_bios_is_utter_crap;
19999 pnp_bios_is_utter_crap = 1;
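The rewritten extable.c brings exception-table sorting into the arch code. cmp_ex() deliberately compares the insn addresses instead of subtracting them, since the comparator's int return cannot hold a full address difference, and swap_ex() wraps the actual writes in pax_open_kernel()/pax_close_kernel() because under KERNEXEC the table being rearranged is mapped read-only. trim_init_extable() then relies on the sorted order to strip init-section entries from either end, and the fixup_exception() one-liner keeps vm86-mode faults from being mistaken for PNP BIOS ones. The comparator itself can be exercised with plain qsort():

#include <stdio.h>
#include <stdlib.h>

/* Reduced exception_table_entry, enough to exercise the comparator. */
struct exception_table_entry {
    unsigned long insn;
    unsigned long fixup;
};

/* Same rule as cmp_ex() in the patch: compare, never subtract, so a large
 * address difference cannot overflow the comparator's int result. */
static int cmp_ex(const void *a, const void *b)
{
    const struct exception_table_entry *x = a, *y = b;

    if (x->insn > y->insn)
        return 1;
    if (x->insn < y->insn)
        return -1;
    return 0;
}

int main(void)
{
    struct exception_table_entry table[] = {        /* made-up addresses */
        { 0xc1030000UL, 0xc1900010UL },
        { 0xc1010000UL, 0xc1900000UL },
        { 0xc1020000UL, 0xc1900008UL },
    };
    size_t i, n = sizeof(table) / sizeof(table[0]);

    /* qsort() stands in for the kernel's sort(); the kernel also needs the
     * custom swap_ex() so the writes happen with the page made writable. */
    qsort(table, n, sizeof(table[0]), cmp_ex);

    for (i = 0; i < n; i++)
        printf("insn %#lx -> fixup %#lx\n", table[i].insn, table[i].fixup);
    return 0;
}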
20000 diff -urNp linux-2.6.32.42/arch/x86/mm/fault.c linux-2.6.32.42/arch/x86/mm/fault.c
20001 --- linux-2.6.32.42/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20002 +++ linux-2.6.32.42/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20003 @@ -11,10 +11,19 @@
20004 #include <linux/kprobes.h> /* __kprobes, ... */
20005 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20006 #include <linux/perf_event.h> /* perf_sw_event */
20007 +#include <linux/unistd.h>
20008 +#include <linux/compiler.h>
20009
20010 #include <asm/traps.h> /* dotraplinkage, ... */
20011 #include <asm/pgalloc.h> /* pgd_*(), ... */
20012 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20013 +#include <asm/vsyscall.h>
20014 +#include <asm/tlbflush.h>
20015 +
20016 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20017 +#include <asm/stacktrace.h>
20018 +#include "../kernel/dumpstack.h"
20019 +#endif
20020
20021 /*
20022 * Page fault error code bits:
20023 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20024 int ret = 0;
20025
20026 /* kprobe_running() needs smp_processor_id() */
20027 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20028 + if (kprobes_built_in() && !user_mode(regs)) {
20029 preempt_disable();
20030 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20031 ret = 1;
20032 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20033 return !instr_lo || (instr_lo>>1) == 1;
20034 case 0x00:
20035 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20036 - if (probe_kernel_address(instr, opcode))
20037 + if (user_mode(regs)) {
20038 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20039 + return 0;
20040 + } else if (probe_kernel_address(instr, opcode))
20041 return 0;
20042
20043 *prefetch = (instr_lo == 0xF) &&
20044 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20045 while (instr < max_instr) {
20046 unsigned char opcode;
20047
20048 - if (probe_kernel_address(instr, opcode))
20049 + if (user_mode(regs)) {
20050 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20051 + break;
20052 + } else if (probe_kernel_address(instr, opcode))
20053 break;
20054
20055 instr++;
20056 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20057 force_sig_info(si_signo, &info, tsk);
20058 }
20059
20060 +#ifdef CONFIG_PAX_EMUTRAMP
20061 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20062 +#endif
20063 +
20064 +#ifdef CONFIG_PAX_PAGEEXEC
20065 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20066 +{
20067 + pgd_t *pgd;
20068 + pud_t *pud;
20069 + pmd_t *pmd;
20070 +
20071 + pgd = pgd_offset(mm, address);
20072 + if (!pgd_present(*pgd))
20073 + return NULL;
20074 + pud = pud_offset(pgd, address);
20075 + if (!pud_present(*pud))
20076 + return NULL;
20077 + pmd = pmd_offset(pud, address);
20078 + if (!pmd_present(*pmd))
20079 + return NULL;
20080 + return pmd;
20081 +}
20082 +#endif
20083 +
20084 DEFINE_SPINLOCK(pgd_lock);
20085 LIST_HEAD(pgd_list);
20086
20087 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20088 address += PMD_SIZE) {
20089
20090 unsigned long flags;
20091 +
20092 +#ifdef CONFIG_PAX_PER_CPU_PGD
20093 + unsigned long cpu;
20094 +#else
20095 struct page *page;
20096 +#endif
20097
20098 spin_lock_irqsave(&pgd_lock, flags);
20099 +
20100 +#ifdef CONFIG_PAX_PER_CPU_PGD
20101 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20102 + pgd_t *pgd = get_cpu_pgd(cpu);
20103 +#else
20104 list_for_each_entry(page, &pgd_list, lru) {
20105 - if (!vmalloc_sync_one(page_address(page), address))
20106 + pgd_t *pgd = page_address(page);
20107 +#endif
20108 +
20109 + if (!vmalloc_sync_one(pgd, address))
20110 break;
20111 }
20112 spin_unlock_irqrestore(&pgd_lock, flags);
20113 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20114 * an interrupt in the middle of a task switch..
20115 */
20116 pgd_paddr = read_cr3();
20117 +
20118 +#ifdef CONFIG_PAX_PER_CPU_PGD
20119 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20120 +#endif
20121 +
20122 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20123 if (!pmd_k)
20124 return -1;
20125 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20126
20127 const pgd_t *pgd_ref = pgd_offset_k(address);
20128 unsigned long flags;
20129 +
20130 +#ifdef CONFIG_PAX_PER_CPU_PGD
20131 + unsigned long cpu;
20132 +#else
20133 struct page *page;
20134 +#endif
20135
20136 if (pgd_none(*pgd_ref))
20137 continue;
20138
20139 spin_lock_irqsave(&pgd_lock, flags);
20140 +
20141 +#ifdef CONFIG_PAX_PER_CPU_PGD
20142 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20143 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20144 +#else
20145 list_for_each_entry(page, &pgd_list, lru) {
20146 pgd_t *pgd;
20147 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20148 +#endif
20149 +
20150 if (pgd_none(*pgd))
20151 set_pgd(pgd, *pgd_ref);
20152 else
20153 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20154 * happen within a race in page table update. In the later
20155 * case just flush:
20156 */
20157 +
20158 +#ifdef CONFIG_PAX_PER_CPU_PGD
20159 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20160 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20161 +#else
20162 pgd = pgd_offset(current->active_mm, address);
20163 +#endif
20164 +
20165 pgd_ref = pgd_offset_k(address);
20166 if (pgd_none(*pgd_ref))
20167 return -1;
20168 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20169 static int is_errata100(struct pt_regs *regs, unsigned long address)
20170 {
20171 #ifdef CONFIG_X86_64
20172 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20173 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20174 return 1;
20175 #endif
20176 return 0;
20177 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20178 }
20179
20180 static const char nx_warning[] = KERN_CRIT
20181 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20182 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20183
20184 static void
20185 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20186 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20187 if (!oops_may_print())
20188 return;
20189
20190 - if (error_code & PF_INSTR) {
20191 + if (nx_enabled && (error_code & PF_INSTR)) {
20192 unsigned int level;
20193
20194 pte_t *pte = lookup_address(address, &level);
20195
20196 if (pte && pte_present(*pte) && !pte_exec(*pte))
20197 - printk(nx_warning, current_uid());
20198 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20199 }
20200
20201 +#ifdef CONFIG_PAX_KERNEXEC
20202 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20203 + if (current->signal->curr_ip)
20204 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20205 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20206 + else
20207 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20208 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20209 + }
20210 +#endif
20211 +
20212 printk(KERN_ALERT "BUG: unable to handle kernel ");
20213 if (address < PAGE_SIZE)
20214 printk(KERN_CONT "NULL pointer dereference");
20215 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20216 unsigned long address, int si_code)
20217 {
20218 struct task_struct *tsk = current;
20219 + struct mm_struct *mm = tsk->mm;
20220 +
20221 +#ifdef CONFIG_X86_64
20222 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20223 + if (regs->ip == (unsigned long)vgettimeofday) {
20224 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20225 + return;
20226 + } else if (regs->ip == (unsigned long)vtime) {
20227 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20228 + return;
20229 + } else if (regs->ip == (unsigned long)vgetcpu) {
20230 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20231 + return;
20232 + }
20233 + }
20234 +#endif
20235 +
20236 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20237 + if (mm && (error_code & PF_USER)) {
20238 + unsigned long ip = regs->ip;
20239 +
20240 + if (v8086_mode(regs))
20241 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20242 +
20243 + /*
20244 + * It's possible to have interrupts off here:
20245 + */
20246 + local_irq_enable();
20247 +
20248 +#ifdef CONFIG_PAX_PAGEEXEC
20249 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20250 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20251 +
20252 +#ifdef CONFIG_PAX_EMUTRAMP
20253 + switch (pax_handle_fetch_fault(regs)) {
20254 + case 2:
20255 + return;
20256 + }
20257 +#endif
20258 +
20259 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20260 + do_group_exit(SIGKILL);
20261 + }
20262 +#endif
20263 +
20264 +#ifdef CONFIG_PAX_SEGMEXEC
20265 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20266 +
20267 +#ifdef CONFIG_PAX_EMUTRAMP
20268 + switch (pax_handle_fetch_fault(regs)) {
20269 + case 2:
20270 + return;
20271 + }
20272 +#endif
20273 +
20274 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20275 + do_group_exit(SIGKILL);
20276 + }
20277 +#endif
20278 +
20279 + }
20280 +#endif
20281
20282 /* User mode accesses just cause a SIGSEGV */
20283 if (error_code & PF_USER) {
20284 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20285 return 1;
20286 }
20287
20288 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20289 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20290 +{
20291 + pte_t *pte;
20292 + pmd_t *pmd;
20293 + spinlock_t *ptl;
20294 + unsigned char pte_mask;
20295 +
20296 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20297 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20298 + return 0;
20299 +
20300 + /* PaX: it's our fault, let's handle it if we can */
20301 +
20302 + /* PaX: take a look at read faults before acquiring any locks */
20303 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20304 + /* instruction fetch attempt from a protected page in user mode */
20305 + up_read(&mm->mmap_sem);
20306 +
20307 +#ifdef CONFIG_PAX_EMUTRAMP
20308 + switch (pax_handle_fetch_fault(regs)) {
20309 + case 2:
20310 + return 1;
20311 + }
20312 +#endif
20313 +
20314 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20315 + do_group_exit(SIGKILL);
20316 + }
20317 +
20318 + pmd = pax_get_pmd(mm, address);
20319 + if (unlikely(!pmd))
20320 + return 0;
20321 +
20322 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20323 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20324 + pte_unmap_unlock(pte, ptl);
20325 + return 0;
20326 + }
20327 +
20328 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20329 + /* write attempt to a protected page in user mode */
20330 + pte_unmap_unlock(pte, ptl);
20331 + return 0;
20332 + }
20333 +
20334 +#ifdef CONFIG_SMP
20335 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20336 +#else
20337 + if (likely(address > get_limit(regs->cs)))
20338 +#endif
20339 + {
20340 + set_pte(pte, pte_mkread(*pte));
20341 + __flush_tlb_one(address);
20342 + pte_unmap_unlock(pte, ptl);
20343 + up_read(&mm->mmap_sem);
20344 + return 1;
20345 + }
20346 +
20347 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20348 +
20349 + /*
20350 + * PaX: fill DTLB with user rights and retry
20351 + */
20352 + __asm__ __volatile__ (
20353 + "orb %2,(%1)\n"
20354 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20355 +/*
20356 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20357 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20358 + * page fault when examined during a TLB load attempt. this is true not only
20359 + * for PTEs holding a non-present entry but also present entries that will
20360 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20361 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20362 + * for our target pages since their PTEs are simply not in the TLBs at all.
20363 +
20364 + * the best thing in omitting it is that we gain around 15-20% speed in the
20365 + * fast path of the page fault handler and can get rid of tracing since we
20366 + * can no longer flush unintended entries.
20367 + */
20368 + "invlpg (%0)\n"
20369 +#endif
20370 + __copyuser_seg"testb $0,(%0)\n"
20371 + "xorb %3,(%1)\n"
20372 + :
20373 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20374 + : "memory", "cc");
20375 + pte_unmap_unlock(pte, ptl);
20376 + up_read(&mm->mmap_sem);
20377 + return 1;
20378 +}
20379 +#endif
20380 +
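pax_handle_pageexec_fault() above is the 32-bit PAGEEXEC fast path for CPUs without NX. Pages that must not be executable are kept without the user bit in their PTEs; an instruction fetch (a read fault with ip equal to the fault address) is reported and the task killed, while a legitimate data access is serviced by briefly OR-ing a small mask into the PTE, touching the page through the user segment so the data TLB caches the now-permitted translation, and XOR-ing _PAGE_USER back out. The mask folds the write/dirty case into one expression by shifting the PF_WRITE error-code bit (bit 1) up to the dirty bit (bit 6). A quick check of that arithmetic with the standard x86 bit positions:

#include <stdio.h>

/* Standard x86 PTE and page-fault error-code bit positions. */
#define _PAGE_BIT_DIRTY 6
#define _PAGE_USER      (1u << 2)
#define _PAGE_ACCESSED  (1u << 5)
#define _PAGE_DIRTY     (1u << _PAGE_BIT_DIRTY)
#define PF_WRITE        (1u << 1)

int main(void)
{
    unsigned int error_code;

    for (error_code = 0; error_code <= PF_WRITE; error_code += PF_WRITE) {
        /* Same expression as in the patch: PF_WRITE (bit 1) shifted up by
         * _PAGE_BIT_DIRTY - 1 lands exactly on _PAGE_DIRTY (bit 6). */
        unsigned int pte_mask = _PAGE_ACCESSED | _PAGE_USER |
                                ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));

        printf("%s fault: pte_mask = %#04x (dirty bit %s)\n",
               (error_code & PF_WRITE) ? "write" : "read ",
               pte_mask, (pte_mask & _PAGE_DIRTY) ? "set" : "clear");
    }
    return 0;
}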
20381 /*
20382 * Handle a spurious fault caused by a stale TLB entry.
20383 *
20384 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20385 static inline int
20386 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20387 {
20388 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20389 + return 1;
20390 +
20391 if (write) {
20392 /* write, present and write, not present: */
20393 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20394 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20395 {
20396 struct vm_area_struct *vma;
20397 struct task_struct *tsk;
20398 - unsigned long address;
20399 struct mm_struct *mm;
20400 int write;
20401 int fault;
20402
20403 + /* Get the faulting address: */
20404 + unsigned long address = read_cr2();
20405 +
20406 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20407 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20408 + if (!search_exception_tables(regs->ip)) {
20409 + bad_area_nosemaphore(regs, error_code, address);
20410 + return;
20411 + }
20412 + if (address < PAX_USER_SHADOW_BASE) {
20413 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20414 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20415 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20416 + } else
20417 + address -= PAX_USER_SHADOW_BASE;
20418 + }
20419 +#endif
20420 +
20421 tsk = current;
20422 mm = tsk->mm;
20423
20424 - /* Get the faulting address: */
20425 - address = read_cr2();
20426 -
20427 /*
20428 * Detect and handle instructions that would cause a page fault for
20429 * both a tracked kernel page and a userspace page.
20430 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20431 * User-mode registers count as a user access even for any
20432 * potential system fault or CPU buglet:
20433 */
20434 - if (user_mode_vm(regs)) {
20435 + if (user_mode(regs)) {
20436 local_irq_enable();
20437 error_code |= PF_USER;
20438 } else {
20439 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20440 might_sleep();
20441 }
20442
20443 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20444 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20445 + return;
20446 +#endif
20447 +
20448 vma = find_vma(mm, address);
20449 if (unlikely(!vma)) {
20450 bad_area(regs, error_code, address);
20451 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20452 bad_area(regs, error_code, address);
20453 return;
20454 }
20455 - if (error_code & PF_USER) {
20456 - /*
20457 - * Accessing the stack below %sp is always a bug.
20458 - * The large cushion allows instructions like enter
20459 - * and pusha to work. ("enter $65535, $31" pushes
20460 - * 32 pointers and then decrements %sp by 65535.)
20461 - */
20462 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20463 - bad_area(regs, error_code, address);
20464 - return;
20465 - }
20466 + /*
20467 + * Accessing the stack below %sp is always a bug.
20468 + * The large cushion allows instructions like enter
20469 + * and pusha to work. ("enter $65535, $31" pushes
20470 + * 32 pointers and then decrements %sp by 65535.)
20471 + */
20472 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20473 + bad_area(regs, error_code, address);
20474 + return;
20475 + }
20476 +
20477 +#ifdef CONFIG_PAX_SEGMEXEC
20478 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20479 + bad_area(regs, error_code, address);
20480 + return;
20481 }
20482 +#endif
20483 +
20484 if (unlikely(expand_stack(vma, address))) {
20485 bad_area(regs, error_code, address);
20486 return;
20487 @@ -1146,3 +1416,199 @@ good_area:
20488
20489 up_read(&mm->mmap_sem);
20490 }
20491 +
20492 +#ifdef CONFIG_PAX_EMUTRAMP
20493 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20494 +{
20495 + int err;
20496 +
20497 + do { /* PaX: gcc trampoline emulation #1 */
20498 + unsigned char mov1, mov2;
20499 + unsigned short jmp;
20500 + unsigned int addr1, addr2;
20501 +
20502 +#ifdef CONFIG_X86_64
20503 + if ((regs->ip + 11) >> 32)
20504 + break;
20505 +#endif
20506 +
20507 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20508 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20509 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20510 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20511 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20512 +
20513 + if (err)
20514 + break;
20515 +
20516 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20517 + regs->cx = addr1;
20518 + regs->ax = addr2;
20519 + regs->ip = addr2;
20520 + return 2;
20521 + }
20522 + } while (0);
20523 +
20524 + do { /* PaX: gcc trampoline emulation #2 */
20525 + unsigned char mov, jmp;
20526 + unsigned int addr1, addr2;
20527 +
20528 +#ifdef CONFIG_X86_64
20529 + if ((regs->ip + 9) >> 32)
20530 + break;
20531 +#endif
20532 +
20533 + err = get_user(mov, (unsigned char __user *)regs->ip);
20534 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20535 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20536 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20537 +
20538 + if (err)
20539 + break;
20540 +
20541 + if (mov == 0xB9 && jmp == 0xE9) {
20542 + regs->cx = addr1;
20543 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20544 + return 2;
20545 + }
20546 + } while (0);
20547 +
20548 + return 1; /* PaX in action */
20549 +}
20550 +
20551 +#ifdef CONFIG_X86_64
20552 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20553 +{
20554 + int err;
20555 +
20556 + do { /* PaX: gcc trampoline emulation #1 */
20557 + unsigned short mov1, mov2, jmp1;
20558 + unsigned char jmp2;
20559 + unsigned int addr1;
20560 + unsigned long addr2;
20561 +
20562 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20563 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20564 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20565 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20566 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20567 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20568 +
20569 + if (err)
20570 + break;
20571 +
20572 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20573 + regs->r11 = addr1;
20574 + regs->r10 = addr2;
20575 + regs->ip = addr1;
20576 + return 2;
20577 + }
20578 + } while (0);
20579 +
20580 + do { /* PaX: gcc trampoline emulation #2 */
20581 + unsigned short mov1, mov2, jmp1;
20582 + unsigned char jmp2;
20583 + unsigned long addr1, addr2;
20584 +
20585 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20586 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20587 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20588 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20589 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20590 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20591 +
20592 + if (err)
20593 + break;
20594 +
20595 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20596 + regs->r11 = addr1;
20597 + regs->r10 = addr2;
20598 + regs->ip = addr1;
20599 + return 2;
20600 + }
20601 + } while (0);
20602 +
20603 + return 1; /* PaX in action */
20604 +}
20605 +#endif
20606 +
20607 +/*
20608 + * PaX: decide what to do with offenders (regs->ip = fault address)
20609 + *
20610 + * returns 1 when task should be killed
20611 + * 2 when gcc trampoline was detected
20612 + */
20613 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20614 +{
20615 + if (v8086_mode(regs))
20616 + return 1;
20617 +
20618 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20619 + return 1;
20620 +
20621 +#ifdef CONFIG_X86_32
20622 + return pax_handle_fetch_fault_32(regs);
20623 +#else
20624 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20625 + return pax_handle_fetch_fault_32(regs);
20626 + else
20627 + return pax_handle_fetch_fault_64(regs);
20628 +#endif
20629 +}
20630 +#endif
20631 +
20632 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20633 +void pax_report_insns(void *pc, void *sp)
20634 +{
20635 + long i;
20636 +
20637 + printk(KERN_ERR "PAX: bytes at PC: ");
20638 + for (i = 0; i < 20; i++) {
20639 + unsigned char c;
20640 + if (get_user(c, (__force unsigned char __user *)pc+i))
20641 + printk(KERN_CONT "?? ");
20642 + else
20643 + printk(KERN_CONT "%02x ", c);
20644 + }
20645 + printk("\n");
20646 +
20647 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20648 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
20649 + unsigned long c;
20650 + if (get_user(c, (__force unsigned long __user *)sp+i))
20651 +#ifdef CONFIG_X86_32
20652 + printk(KERN_CONT "???????? ");
20653 +#else
20654 + printk(KERN_CONT "???????????????? ");
20655 +#endif
20656 + else
20657 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20658 + }
20659 + printk("\n");
20660 +}
20661 +#endif
20662 +
20663 +/**
20664 + * probe_kernel_write(): safely attempt to write to a location
20665 + * @dst: address to write to
20666 + * @src: pointer to the data that shall be written
20667 + * @size: size of the data chunk
20668 + *
20669 + * Safely write to address @dst from the buffer at @src. If a kernel fault
20670 + * happens, handle that and return -EFAULT.
20671 + */
20672 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20673 +{
20674 + long ret;
20675 + mm_segment_t old_fs = get_fs();
20676 +
20677 + set_fs(KERNEL_DS);
20678 + pagefault_disable();
20679 + pax_open_kernel();
20680 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
20681 + pax_close_kernel();
20682 + pagefault_enable();
20683 + set_fs(old_fs);
20684 +
20685 + return ret ? -EFAULT : 0;
20686 +}
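
The emulation above keys on the exact instruction bytes gcc emits for a nested-function trampoline on x86-64: two mov-immediate instructions loading %r11 and %r10, followed by an indirect jump through %r11. The little-endian 16-bit loads explain constants such as 0xBB49 (bytes 49 BB, i.e. movabs imm64,%r11). A minimal user-space sketch of the same pattern match for case #2, operating on a plain byte buffer instead of get_user(), is shown below; the helper name and buffer handling are illustrative only.

/* Illustrative sketch: recognize the x86-64 gcc trampoline pattern
 * matched by emulation case #2 above (movabs %r11 / movabs %r10 /
 * jmp *%r11) in a byte buffer.  Offsets mirror the get_user() reads. */
#include <stdint.h>
#include <string.h>

static int looks_like_gcc_trampoline(const uint8_t *buf, size_t len,
                                     uint64_t *target)
{
        uint16_t mov1, mov2, jmp1;

        if (len < 23)
                return 0;
        memcpy(&mov1, buf +  0, 2);     /* expect 49 BB: movabs imm64,%r11 */
        memcpy(&mov2, buf + 10, 2);     /* expect 49 BA: movabs imm64,%r10 */
        memcpy(&jmp1, buf + 20, 2);     /* expect 49 FF E3: jmp *%r11      */
        if (mov1 != 0xBB49 || mov2 != 0xBA49 || jmp1 != 0xFF49 || buf[22] != 0xE3)
                return 0;
        memcpy(target, buf + 2, 8);     /* imm64 destined for %r11 = jump target */
        return 1;
}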
20687 diff -urNp linux-2.6.32.42/arch/x86/mm/gup.c linux-2.6.32.42/arch/x86/mm/gup.c
20688 --- linux-2.6.32.42/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
20689 +++ linux-2.6.32.42/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
20690 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
20691 addr = start;
20692 len = (unsigned long) nr_pages << PAGE_SHIFT;
20693 end = start + len;
20694 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20695 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20696 (void __user *)start, len)))
20697 return 0;
20698
20699 diff -urNp linux-2.6.32.42/arch/x86/mm/highmem_32.c linux-2.6.32.42/arch/x86/mm/highmem_32.c
20700 --- linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
20701 +++ linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
20702 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
20703 idx = type + KM_TYPE_NR*smp_processor_id();
20704 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20705 BUG_ON(!pte_none(*(kmap_pte-idx)));
20706 +
20707 + pax_open_kernel();
20708 set_pte(kmap_pte-idx, mk_pte(page, prot));
20709 + pax_close_kernel();
20710
20711 return (void *)vaddr;
20712 }
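
The pax_open_kernel()/pax_close_kernel() pair wrapped around set_pte() here (and around the other page-table writes further down) temporarily lifts the KERNEXEC write protection on otherwise read-only kernel page tables. A minimal sketch of one plausible native-x86 implementation follows, assuming the protection is enforced through CR0.WP; the real definitions live elsewhere in the patch and also have to cover paravirt.

/* Sketch only: one way pax_open_kernel()/pax_close_kernel() can work on
 * native x86 - toggle CR0.WP around the write so read-only page-table
 * pages become temporarily writable from ring 0. */
static inline unsigned long sketch_pax_open_kernel(void)
{
        unsigned long cr0;

        preempt_disable();
        cr0 = read_cr0();
        write_cr0(cr0 & ~X86_CR0_WP);   /* allow writes to RO pages */
        return cr0;
}

static inline void sketch_pax_close_kernel(unsigned long cr0)
{
        write_cr0(cr0);                 /* restore WP */
        preempt_enable();
}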
20713 diff -urNp linux-2.6.32.42/arch/x86/mm/hugetlbpage.c linux-2.6.32.42/arch/x86/mm/hugetlbpage.c
20714 --- linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
20715 +++ linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
20716 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
20717 struct hstate *h = hstate_file(file);
20718 struct mm_struct *mm = current->mm;
20719 struct vm_area_struct *vma;
20720 - unsigned long start_addr;
20721 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20722 +
20723 +#ifdef CONFIG_PAX_SEGMEXEC
20724 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20725 + pax_task_size = SEGMEXEC_TASK_SIZE;
20726 +#endif
20727 +
20728 + pax_task_size -= PAGE_SIZE;
20729
20730 if (len > mm->cached_hole_size) {
20731 - start_addr = mm->free_area_cache;
20732 + start_addr = mm->free_area_cache;
20733 } else {
20734 - start_addr = TASK_UNMAPPED_BASE;
20735 - mm->cached_hole_size = 0;
20736 + start_addr = mm->mmap_base;
20737 + mm->cached_hole_size = 0;
20738 }
20739
20740 full_search:
20741 @@ -281,26 +288,27 @@ full_search:
20742
20743 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20744 /* At this point: (!vma || addr < vma->vm_end). */
20745 - if (TASK_SIZE - len < addr) {
20746 + if (pax_task_size - len < addr) {
20747 /*
20748 * Start a new search - just in case we missed
20749 * some holes.
20750 */
20751 - if (start_addr != TASK_UNMAPPED_BASE) {
20752 - start_addr = TASK_UNMAPPED_BASE;
20753 + if (start_addr != mm->mmap_base) {
20754 + start_addr = mm->mmap_base;
20755 mm->cached_hole_size = 0;
20756 goto full_search;
20757 }
20758 return -ENOMEM;
20759 }
20760 - if (!vma || addr + len <= vma->vm_start) {
20761 - mm->free_area_cache = addr + len;
20762 - return addr;
20763 - }
20764 + if (check_heap_stack_gap(vma, addr, len))
20765 + break;
20766 if (addr + mm->cached_hole_size < vma->vm_start)
20767 mm->cached_hole_size = vma->vm_start - addr;
20768 addr = ALIGN(vma->vm_end, huge_page_size(h));
20769 }
20770 +
20771 + mm->free_area_cache = addr + len;
20772 + return addr;
20773 }
20774
20775 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20776 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
20777 {
20778 struct hstate *h = hstate_file(file);
20779 struct mm_struct *mm = current->mm;
20780 - struct vm_area_struct *vma, *prev_vma;
20781 - unsigned long base = mm->mmap_base, addr = addr0;
20782 + struct vm_area_struct *vma;
20783 + unsigned long base = mm->mmap_base, addr;
20784 unsigned long largest_hole = mm->cached_hole_size;
20785 - int first_time = 1;
20786
20787 /* don't allow allocations above current base */
20788 if (mm->free_area_cache > base)
20789 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
20790 largest_hole = 0;
20791 mm->free_area_cache = base;
20792 }
20793 -try_again:
20794 +
20795 /* make sure it can fit in the remaining address space */
20796 if (mm->free_area_cache < len)
20797 goto fail;
20798
20799 /* either no address requested or cant fit in requested address hole */
20800 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20801 + addr = (mm->free_area_cache - len);
20802 do {
20803 + addr &= huge_page_mask(h);
20804 + vma = find_vma(mm, addr);
20805 /*
20806 * Lookup failure means no vma is above this address,
20807 * i.e. return with success:
20808 - */
20809 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20810 - return addr;
20811 -
20812 - /*
20813 * new region fits between prev_vma->vm_end and
20814 * vma->vm_start, use it:
20815 */
20816 - if (addr + len <= vma->vm_start &&
20817 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20818 + if (check_heap_stack_gap(vma, addr, len)) {
20819 /* remember the address as a hint for next time */
20820 - mm->cached_hole_size = largest_hole;
20821 - return (mm->free_area_cache = addr);
20822 - } else {
20823 - /* pull free_area_cache down to the first hole */
20824 - if (mm->free_area_cache == vma->vm_end) {
20825 - mm->free_area_cache = vma->vm_start;
20826 - mm->cached_hole_size = largest_hole;
20827 - }
20828 + mm->cached_hole_size = largest_hole;
20829 + return (mm->free_area_cache = addr);
20830 + }
20831 + /* pull free_area_cache down to the first hole */
20832 + if (mm->free_area_cache == vma->vm_end) {
20833 + mm->free_area_cache = vma->vm_start;
20834 + mm->cached_hole_size = largest_hole;
20835 }
20836
20837 /* remember the largest hole we saw so far */
20838 if (addr + largest_hole < vma->vm_start)
20839 - largest_hole = vma->vm_start - addr;
20840 + largest_hole = vma->vm_start - addr;
20841
20842 /* try just below the current vma->vm_start */
20843 - addr = (vma->vm_start - len) & huge_page_mask(h);
20844 - } while (len <= vma->vm_start);
20845 + addr = skip_heap_stack_gap(vma, len);
20846 + } while (!IS_ERR_VALUE(addr));
20847
20848 fail:
20849 /*
20850 - * if hint left us with no space for the requested
20851 - * mapping then try again:
20852 - */
20853 - if (first_time) {
20854 - mm->free_area_cache = base;
20855 - largest_hole = 0;
20856 - first_time = 0;
20857 - goto try_again;
20858 - }
20859 - /*
20860 * A failed mmap() very likely causes application failure,
20861 * so fall back to the bottom-up function here. This scenario
20862 * can happen with large stack limits and large mmap()
20863 * allocations.
20864 */
20865 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20866 +
20867 +#ifdef CONFIG_PAX_SEGMEXEC
20868 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20869 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20870 + else
20871 +#endif
20872 +
20873 + mm->mmap_base = TASK_UNMAPPED_BASE;
20874 +
20875 +#ifdef CONFIG_PAX_RANDMMAP
20876 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20877 + mm->mmap_base += mm->delta_mmap;
20878 +#endif
20879 +
20880 + mm->free_area_cache = mm->mmap_base;
20881 mm->cached_hole_size = ~0UL;
20882 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20883 len, pgoff, flags);
20884 @@ -387,6 +393,7 @@ fail:
20885 /*
20886 * Restore the topdown base:
20887 */
20888 + mm->mmap_base = base;
20889 mm->free_area_cache = base;
20890 mm->cached_hole_size = ~0UL;
20891
20892 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
20893 struct hstate *h = hstate_file(file);
20894 struct mm_struct *mm = current->mm;
20895 struct vm_area_struct *vma;
20896 + unsigned long pax_task_size = TASK_SIZE;
20897
20898 if (len & ~huge_page_mask(h))
20899 return -EINVAL;
20900 - if (len > TASK_SIZE)
20901 +
20902 +#ifdef CONFIG_PAX_SEGMEXEC
20903 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20904 + pax_task_size = SEGMEXEC_TASK_SIZE;
20905 +#endif
20906 +
20907 + pax_task_size -= PAGE_SIZE;
20908 +
20909 + if (len > pax_task_size)
20910 return -ENOMEM;
20911
20912 if (flags & MAP_FIXED) {
20913 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
20914 if (addr) {
20915 addr = ALIGN(addr, huge_page_size(h));
20916 vma = find_vma(mm, addr);
20917 - if (TASK_SIZE - len >= addr &&
20918 - (!vma || addr + len <= vma->vm_start))
20919 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20920 return addr;
20921 }
20922 if (mm->get_unmapped_area == arch_get_unmapped_area)
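
Both hugetlb allocators above now defer the fit test to check_heap_stack_gap() instead of the bare addr + len <= vma->vm_start comparison, so a guard gap can be kept between a new mapping and a grows-down stack VMA. A rough sketch of what such a helper has to decide is given below; the gap size and helper name are assumptions for illustration, not the patch's actual definition.

/* Rough sketch of a heap/stack gap check: the candidate range
 * [addr, addr+len) must end below the next VMA, and when that VMA is a
 * grows-down stack an extra guard gap must stay free as well. */
static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                       unsigned long addr, unsigned long len)
{
        const unsigned long gap = 64UL << 10;   /* assumed 64K guard gap */

        if (!vma)                               /* nothing above: it fits */
                return 1;
        if (addr + len > vma->vm_start)         /* overlaps the next VMA */
                return 0;
        if (vma->vm_flags & VM_GROWSDOWN)       /* keep a gap below stacks */
                return addr + len + gap <= vma->vm_start;
        return 1;
}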
20923 diff -urNp linux-2.6.32.42/arch/x86/mm/init_32.c linux-2.6.32.42/arch/x86/mm/init_32.c
20924 --- linux-2.6.32.42/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
20925 +++ linux-2.6.32.42/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
20926 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
20927 }
20928
20929 /*
20930 - * Creates a middle page table and puts a pointer to it in the
20931 - * given global directory entry. This only returns the gd entry
20932 - * in non-PAE compilation mode, since the middle layer is folded.
20933 - */
20934 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20935 -{
20936 - pud_t *pud;
20937 - pmd_t *pmd_table;
20938 -
20939 -#ifdef CONFIG_X86_PAE
20940 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20941 - if (after_bootmem)
20942 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20943 - else
20944 - pmd_table = (pmd_t *)alloc_low_page();
20945 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20946 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20947 - pud = pud_offset(pgd, 0);
20948 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20949 -
20950 - return pmd_table;
20951 - }
20952 -#endif
20953 - pud = pud_offset(pgd, 0);
20954 - pmd_table = pmd_offset(pud, 0);
20955 -
20956 - return pmd_table;
20957 -}
20958 -
20959 -/*
20960 * Create a page table and place a pointer to it in a middle page
20961 * directory entry:
20962 */
20963 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
20964 page_table = (pte_t *)alloc_low_page();
20965
20966 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20967 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20968 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20969 +#else
20970 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20971 +#endif
20972 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20973 }
20974
20975 return pte_offset_kernel(pmd, 0);
20976 }
20977
20978 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20979 +{
20980 + pud_t *pud;
20981 + pmd_t *pmd_table;
20982 +
20983 + pud = pud_offset(pgd, 0);
20984 + pmd_table = pmd_offset(pud, 0);
20985 +
20986 + return pmd_table;
20987 +}
20988 +
20989 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20990 {
20991 int pgd_idx = pgd_index(vaddr);
20992 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
20993 int pgd_idx, pmd_idx;
20994 unsigned long vaddr;
20995 pgd_t *pgd;
20996 + pud_t *pud;
20997 pmd_t *pmd;
20998 pte_t *pte = NULL;
20999
21000 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21001 pgd = pgd_base + pgd_idx;
21002
21003 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21004 - pmd = one_md_table_init(pgd);
21005 - pmd = pmd + pmd_index(vaddr);
21006 + pud = pud_offset(pgd, vaddr);
21007 + pmd = pmd_offset(pud, vaddr);
21008 +
21009 +#ifdef CONFIG_X86_PAE
21010 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21011 +#endif
21012 +
21013 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21014 pmd++, pmd_idx++) {
21015 pte = page_table_kmap_check(one_page_table_init(pmd),
21016 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21017 }
21018 }
21019
21020 -static inline int is_kernel_text(unsigned long addr)
21021 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21022 {
21023 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21024 - return 1;
21025 - return 0;
21026 + if ((start > ktla_ktva((unsigned long)_etext) ||
21027 + end <= ktla_ktva((unsigned long)_stext)) &&
21028 + (start > ktla_ktva((unsigned long)_einittext) ||
21029 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21030 +
21031 +#ifdef CONFIG_ACPI_SLEEP
21032 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21033 +#endif
21034 +
21035 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21036 + return 0;
21037 + return 1;
21038 }
21039
21040 /*
21041 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21042 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21043 unsigned long start_pfn, end_pfn;
21044 pgd_t *pgd_base = swapper_pg_dir;
21045 - int pgd_idx, pmd_idx, pte_ofs;
21046 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21047 unsigned long pfn;
21048 pgd_t *pgd;
21049 + pud_t *pud;
21050 pmd_t *pmd;
21051 pte_t *pte;
21052 unsigned pages_2m, pages_4k;
21053 @@ -278,8 +279,13 @@ repeat:
21054 pfn = start_pfn;
21055 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21056 pgd = pgd_base + pgd_idx;
21057 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21058 - pmd = one_md_table_init(pgd);
21059 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21060 + pud = pud_offset(pgd, 0);
21061 + pmd = pmd_offset(pud, 0);
21062 +
21063 +#ifdef CONFIG_X86_PAE
21064 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21065 +#endif
21066
21067 if (pfn >= end_pfn)
21068 continue;
21069 @@ -291,14 +297,13 @@ repeat:
21070 #endif
21071 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21072 pmd++, pmd_idx++) {
21073 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21074 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21075
21076 /*
21077 * Map with big pages if possible, otherwise
21078 * create normal page tables:
21079 */
21080 if (use_pse) {
21081 - unsigned int addr2;
21082 pgprot_t prot = PAGE_KERNEL_LARGE;
21083 /*
21084 * first pass will use the same initial
21085 @@ -308,11 +313,7 @@ repeat:
21086 __pgprot(PTE_IDENT_ATTR |
21087 _PAGE_PSE);
21088
21089 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21090 - PAGE_OFFSET + PAGE_SIZE-1;
21091 -
21092 - if (is_kernel_text(addr) ||
21093 - is_kernel_text(addr2))
21094 + if (is_kernel_text(address, address + PMD_SIZE))
21095 prot = PAGE_KERNEL_LARGE_EXEC;
21096
21097 pages_2m++;
21098 @@ -329,7 +330,7 @@ repeat:
21099 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21100 pte += pte_ofs;
21101 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21102 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21103 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21104 pgprot_t prot = PAGE_KERNEL;
21105 /*
21106 * first pass will use the same initial
21107 @@ -337,7 +338,7 @@ repeat:
21108 */
21109 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21110
21111 - if (is_kernel_text(addr))
21112 + if (is_kernel_text(address, address + PAGE_SIZE))
21113 prot = PAGE_KERNEL_EXEC;
21114
21115 pages_4k++;
21116 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21117
21118 pud = pud_offset(pgd, va);
21119 pmd = pmd_offset(pud, va);
21120 - if (!pmd_present(*pmd))
21121 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21122 break;
21123
21124 pte = pte_offset_kernel(pmd, va);
21125 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21126
21127 static void __init pagetable_init(void)
21128 {
21129 - pgd_t *pgd_base = swapper_pg_dir;
21130 -
21131 - permanent_kmaps_init(pgd_base);
21132 + permanent_kmaps_init(swapper_pg_dir);
21133 }
21134
21135 #ifdef CONFIG_ACPI_SLEEP
21136 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21137 * ACPI suspend needs this for resume, because things like the intel-agp
21138 * driver might have split up a kernel 4MB mapping.
21139 */
21140 -char swsusp_pg_dir[PAGE_SIZE]
21141 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21142 __attribute__ ((aligned(PAGE_SIZE)));
21143
21144 static inline void save_pg_dir(void)
21145 {
21146 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21147 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21148 }
21149 #else /* !CONFIG_ACPI_SLEEP */
21150 static inline void save_pg_dir(void)
21151 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21152 flush_tlb_all();
21153 }
21154
21155 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21156 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21157 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21158
21159 /* user-defined highmem size */
21160 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21161 * Initialize the boot-time allocator (with low memory only):
21162 */
21163 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21164 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21165 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21166 PAGE_SIZE);
21167 if (bootmap == -1L)
21168 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21169 @@ -864,6 +863,12 @@ void __init mem_init(void)
21170
21171 pci_iommu_alloc();
21172
21173 +#ifdef CONFIG_PAX_PER_CPU_PGD
21174 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21175 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21176 + KERNEL_PGD_PTRS);
21177 +#endif
21178 +
21179 #ifdef CONFIG_FLATMEM
21180 BUG_ON(!mem_map);
21181 #endif
21182 @@ -881,7 +886,7 @@ void __init mem_init(void)
21183 set_highmem_pages_init();
21184
21185 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21186 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21187 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21188 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21189
21190 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21191 @@ -923,10 +928,10 @@ void __init mem_init(void)
21192 ((unsigned long)&__init_end -
21193 (unsigned long)&__init_begin) >> 10,
21194
21195 - (unsigned long)&_etext, (unsigned long)&_edata,
21196 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21197 + (unsigned long)&_sdata, (unsigned long)&_edata,
21198 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21199
21200 - (unsigned long)&_text, (unsigned long)&_etext,
21201 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21202 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21203
21204 /*
21205 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21206 if (!kernel_set_to_readonly)
21207 return;
21208
21209 + start = ktla_ktva(start);
21210 pr_debug("Set kernel text: %lx - %lx for read write\n",
21211 start, start+size);
21212
21213 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21214 if (!kernel_set_to_readonly)
21215 return;
21216
21217 + start = ktla_ktva(start);
21218 pr_debug("Set kernel text: %lx - %lx for read only\n",
21219 start, start+size);
21220
21221 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21222 unsigned long start = PFN_ALIGN(_text);
21223 unsigned long size = PFN_ALIGN(_etext) - start;
21224
21225 + start = ktla_ktva(start);
21226 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21227 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21228 size >> 10);
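
The rewritten is_kernel_text() above takes a [start, end) range and reports whether it touches any executable region (.text, .init.text, the ACPI wakeup trampoline, the low BIOS/video window), with ktla_ktva() translating linear kernel addresses into the KERNEXEC text alias. Each parenthesised clause is the same overlap test, restated compactly below.

/* Each clause in is_kernel_text() above is this test: the range
 * [start, end) misses [lo, hi] iff it lies entirely above hi or
 * entirely below lo; the address counts as kernel text as soon as
 * one region is hit. */
static inline int overlaps(unsigned long start, unsigned long end,
                           unsigned long lo, unsigned long hi)
{
        return !(start > hi || end <= lo);
}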
21229 diff -urNp linux-2.6.32.42/arch/x86/mm/init_64.c linux-2.6.32.42/arch/x86/mm/init_64.c
21230 --- linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21231 +++ linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21232 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21233 pmd = fill_pmd(pud, vaddr);
21234 pte = fill_pte(pmd, vaddr);
21235
21236 + pax_open_kernel();
21237 set_pte(pte, new_pte);
21238 + pax_close_kernel();
21239
21240 /*
21241 * It's enough to flush this one mapping.
21242 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21243 pgd = pgd_offset_k((unsigned long)__va(phys));
21244 if (pgd_none(*pgd)) {
21245 pud = (pud_t *) spp_getpage();
21246 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21247 - _PAGE_USER));
21248 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21249 }
21250 pud = pud_offset(pgd, (unsigned long)__va(phys));
21251 if (pud_none(*pud)) {
21252 pmd = (pmd_t *) spp_getpage();
21253 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21254 - _PAGE_USER));
21255 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21256 }
21257 pmd = pmd_offset(pud, phys);
21258 BUG_ON(!pmd_none(*pmd));
21259 @@ -675,6 +675,12 @@ void __init mem_init(void)
21260
21261 pci_iommu_alloc();
21262
21263 +#ifdef CONFIG_PAX_PER_CPU_PGD
21264 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21265 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21266 + KERNEL_PGD_PTRS);
21267 +#endif
21268 +
21269 /* clear_bss() already clear the empty_zero_page */
21270
21271 reservedpages = 0;
21272 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21273 static struct vm_area_struct gate_vma = {
21274 .vm_start = VSYSCALL_START,
21275 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21276 - .vm_page_prot = PAGE_READONLY_EXEC,
21277 - .vm_flags = VM_READ | VM_EXEC
21278 + .vm_page_prot = PAGE_READONLY,
21279 + .vm_flags = VM_READ
21280 };
21281
21282 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21283 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21284
21285 const char *arch_vma_name(struct vm_area_struct *vma)
21286 {
21287 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21288 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21289 return "[vdso]";
21290 if (vma == &gate_vma)
21291 return "[vsyscall]";
21292 diff -urNp linux-2.6.32.42/arch/x86/mm/init.c linux-2.6.32.42/arch/x86/mm/init.c
21293 --- linux-2.6.32.42/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21294 +++ linux-2.6.32.42/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21295 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21296 * cause a hotspot and fill up ZONE_DMA. The page tables
21297 * need roughly 0.5KB per GB.
21298 */
21299 -#ifdef CONFIG_X86_32
21300 - start = 0x7000;
21301 -#else
21302 - start = 0x8000;
21303 -#endif
21304 + start = 0x100000;
21305 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21306 tables, PAGE_SIZE);
21307 if (e820_table_start == -1UL)
21308 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21309 #endif
21310
21311 set_nx();
21312 - if (nx_enabled)
21313 + if (nx_enabled && cpu_has_nx)
21314 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21315
21316 /* Enable PSE if available */
21317 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21318 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21319 * mmio resources as well as potential bios/acpi data regions.
21320 */
21321 +
21322 int devmem_is_allowed(unsigned long pagenr)
21323 {
21324 +#ifdef CONFIG_GRKERNSEC_KMEM
21325 + /* allow BDA */
21326 + if (!pagenr)
21327 + return 1;
21328 + /* allow EBDA */
21329 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21330 + return 1;
21331 + /* allow ISA/video mem */
21332 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21333 + return 1;
21334 + /* throw out everything else below 1MB */
21335 + if (pagenr <= 256)
21336 + return 0;
21337 +#else
21338 if (pagenr <= 256)
21339 return 1;
21340 +#endif
21341 +
21342 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21343 return 0;
21344 if (!page_is_ram(pagenr))
21345 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21346
21347 void free_initmem(void)
21348 {
21349 +
21350 +#ifdef CONFIG_PAX_KERNEXEC
21351 +#ifdef CONFIG_X86_32
21352 + /* PaX: limit KERNEL_CS to actual size */
21353 + unsigned long addr, limit;
21354 + struct desc_struct d;
21355 + int cpu;
21356 +
21357 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21358 + limit = (limit - 1UL) >> PAGE_SHIFT;
21359 +
21360 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21361 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21362 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21363 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21364 + }
21365 +
21366 + /* PaX: make KERNEL_CS read-only */
21367 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21368 + if (!paravirt_enabled())
21369 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21370 +/*
21371 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21372 + pgd = pgd_offset_k(addr);
21373 + pud = pud_offset(pgd, addr);
21374 + pmd = pmd_offset(pud, addr);
21375 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21376 + }
21377 +*/
21378 +#ifdef CONFIG_X86_PAE
21379 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21380 +/*
21381 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21382 + pgd = pgd_offset_k(addr);
21383 + pud = pud_offset(pgd, addr);
21384 + pmd = pmd_offset(pud, addr);
21385 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21386 + }
21387 +*/
21388 +#endif
21389 +
21390 +#ifdef CONFIG_MODULES
21391 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21392 +#endif
21393 +
21394 +#else
21395 + pgd_t *pgd;
21396 + pud_t *pud;
21397 + pmd_t *pmd;
21398 + unsigned long addr, end;
21399 +
21400 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21401 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21402 + pgd = pgd_offset_k(addr);
21403 + pud = pud_offset(pgd, addr);
21404 + pmd = pmd_offset(pud, addr);
21405 + if (!pmd_present(*pmd))
21406 + continue;
21407 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21408 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21409 + else
21410 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21411 + }
21412 +
21413 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21414 + end = addr + KERNEL_IMAGE_SIZE;
21415 + for (; addr < end; addr += PMD_SIZE) {
21416 + pgd = pgd_offset_k(addr);
21417 + pud = pud_offset(pgd, addr);
21418 + pmd = pmd_offset(pud, addr);
21419 + if (!pmd_present(*pmd))
21420 + continue;
21421 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21422 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21423 + }
21424 +#endif
21425 +
21426 + flush_tlb_all();
21427 +#endif
21428 +
21429 free_init_pages("unused kernel memory",
21430 (unsigned long)(&__init_begin),
21431 (unsigned long)(&__init_end));
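
On the 64-bit side of the KERNEXEC block above, free_initmem() walks the kernel image one PMD at a time, stripping _PAGE_RW from text/rodata and adding _PAGE_NX everywhere else. The same loop, condensed into a standalone helper (parameter names are illustrative, the logic mirrors the code above):

/* Condensed form of the 64-bit PMD walk in free_initmem() above. */
static void protect_kernel_image(unsigned long image_start, unsigned long image_size,
                                 unsigned long text_start, unsigned long data_start)
{
        unsigned long addr;

        for (addr = image_start; addr < image_start + image_size; addr += PMD_SIZE) {
                pgd_t *pgd = pgd_offset_k(addr);
                pud_t *pud = pud_offset(pgd, addr);
                pmd_t *pmd = pmd_offset(pud, addr);

                if (!pmd_present(*pmd))
                        continue;
                if (text_start <= addr && addr < data_start)
                        set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));   /* text/rodata: read-only */
                else
                        set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
        }
        flush_tlb_all();
}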
21432 diff -urNp linux-2.6.32.42/arch/x86/mm/iomap_32.c linux-2.6.32.42/arch/x86/mm/iomap_32.c
21433 --- linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21434 +++ linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21435 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21436 debug_kmap_atomic(type);
21437 idx = type + KM_TYPE_NR * smp_processor_id();
21438 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21439 +
21440 + pax_open_kernel();
21441 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21442 + pax_close_kernel();
21443 +
21444 arch_flush_lazy_mmu_mode();
21445
21446 return (void *)vaddr;
21447 diff -urNp linux-2.6.32.42/arch/x86/mm/ioremap.c linux-2.6.32.42/arch/x86/mm/ioremap.c
21448 --- linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21449 +++ linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21450 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21451 * Second special case: Some BIOSen report the PC BIOS
21452 * area (640->1Mb) as ram even though it is not.
21453 */
21454 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21455 - pagenr < (BIOS_END >> PAGE_SHIFT))
21456 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21457 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21458 return 0;
21459
21460 for (i = 0; i < e820.nr_map; i++) {
21461 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21462 /*
21463 * Don't allow anybody to remap normal RAM that we're using..
21464 */
21465 - for (pfn = phys_addr >> PAGE_SHIFT;
21466 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21467 - pfn++) {
21468 -
21469 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21470 int is_ram = page_is_ram(pfn);
21471
21472 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21473 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21474 return NULL;
21475 WARN_ON_ONCE(is_ram);
21476 }
21477 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21478 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21479
21480 static __initdata int after_paging_init;
21481 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21482 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21483
21484 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21485 {
21486 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21487 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21488
21489 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21490 - memset(bm_pte, 0, sizeof(bm_pte));
21491 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21492 + pmd_populate_user(&init_mm, pmd, bm_pte);
21493
21494 /*
21495 * The boot-ioremap range spans multiple pmds, for which
21496 diff -urNp linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c
21497 --- linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21498 +++ linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21499 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21500 * memory (e.g. tracked pages)? For now, we need this to avoid
21501 * invoking kmemcheck for PnP BIOS calls.
21502 */
21503 - if (regs->flags & X86_VM_MASK)
21504 + if (v8086_mode(regs))
21505 return false;
21506 - if (regs->cs != __KERNEL_CS)
21507 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21508 return false;
21509
21510 pte = kmemcheck_pte_lookup(address);
21511 diff -urNp linux-2.6.32.42/arch/x86/mm/mmap.c linux-2.6.32.42/arch/x86/mm/mmap.c
21512 --- linux-2.6.32.42/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21513 +++ linux-2.6.32.42/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21514 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21515 * Leave an at least ~128 MB hole with possible stack randomization.
21516 */
21517 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21518 -#define MAX_GAP (TASK_SIZE/6*5)
21519 +#define MAX_GAP (pax_task_size/6*5)
21520
21521 /*
21522 * True on X86_32 or when emulating IA32 on X86_64
21523 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21524 return rnd << PAGE_SHIFT;
21525 }
21526
21527 -static unsigned long mmap_base(void)
21528 +static unsigned long mmap_base(struct mm_struct *mm)
21529 {
21530 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21531 + unsigned long pax_task_size = TASK_SIZE;
21532 +
21533 +#ifdef CONFIG_PAX_SEGMEXEC
21534 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21535 + pax_task_size = SEGMEXEC_TASK_SIZE;
21536 +#endif
21537
21538 if (gap < MIN_GAP)
21539 gap = MIN_GAP;
21540 else if (gap > MAX_GAP)
21541 gap = MAX_GAP;
21542
21543 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21544 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21545 }
21546
21547 /*
21548 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21549 * does, but not when emulating X86_32
21550 */
21551 -static unsigned long mmap_legacy_base(void)
21552 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21553 {
21554 - if (mmap_is_ia32())
21555 + if (mmap_is_ia32()) {
21556 +
21557 +#ifdef CONFIG_PAX_SEGMEXEC
21558 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21559 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21560 + else
21561 +#endif
21562 +
21563 return TASK_UNMAPPED_BASE;
21564 - else
21565 + } else
21566 return TASK_UNMAPPED_BASE + mmap_rnd();
21567 }
21568
21569 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21570 void arch_pick_mmap_layout(struct mm_struct *mm)
21571 {
21572 if (mmap_is_legacy()) {
21573 - mm->mmap_base = mmap_legacy_base();
21574 + mm->mmap_base = mmap_legacy_base(mm);
21575 +
21576 +#ifdef CONFIG_PAX_RANDMMAP
21577 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21578 + mm->mmap_base += mm->delta_mmap;
21579 +#endif
21580 +
21581 mm->get_unmapped_area = arch_get_unmapped_area;
21582 mm->unmap_area = arch_unmap_area;
21583 } else {
21584 - mm->mmap_base = mmap_base();
21585 + mm->mmap_base = mmap_base(mm);
21586 +
21587 +#ifdef CONFIG_PAX_RANDMMAP
21588 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21589 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21590 +#endif
21591 +
21592 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21593 mm->unmap_area = arch_unmap_area_topdown;
21594 }
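
With PAX_RANDMMAP the layout chosen above is shifted by a per-process delta: added to the bottom-up legacy base, subtracted (together with delta_stack) from the top-down base. A sketch of how such a page-aligned delta could be derived from a random source is shown below; pax_get_random_long() is used here only as a stand-in name for the kernel's RNG.

/* Sketch: derive a page-aligned randomization delta of delta_len bits,
 * of the kind added to / subtracted from mm->mmap_base above. */
static unsigned long compute_mmap_delta(unsigned int delta_len)
{
        return (pax_get_random_long() & ((1UL << delta_len) - 1)) << PAGE_SHIFT;
}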
21595 diff -urNp linux-2.6.32.42/arch/x86/mm/mmio-mod.c linux-2.6.32.42/arch/x86/mm/mmio-mod.c
21596 --- linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21597 +++ linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21598 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21599 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21600 void __iomem *addr)
21601 {
21602 - static atomic_t next_id;
21603 + static atomic_unchecked_t next_id;
21604 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21605 /* These are page-unaligned. */
21606 struct mmiotrace_map map = {
21607 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21608 .private = trace
21609 },
21610 .phys = offset,
21611 - .id = atomic_inc_return(&next_id)
21612 + .id = atomic_inc_return_unchecked(&next_id)
21613 };
21614 map.map_id = trace->id;
21615
21616 diff -urNp linux-2.6.32.42/arch/x86/mm/numa_32.c linux-2.6.32.42/arch/x86/mm/numa_32.c
21617 --- linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21618 +++ linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21619 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21620 }
21621 #endif
21622
21623 -extern unsigned long find_max_low_pfn(void);
21624 extern unsigned long highend_pfn, highstart_pfn;
21625
21626 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21627 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr.c linux-2.6.32.42/arch/x86/mm/pageattr.c
21628 --- linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21629 +++ linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21630 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21631 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21632 */
21633 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21634 - pgprot_val(forbidden) |= _PAGE_NX;
21635 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21636
21637 /*
21638 * The kernel text needs to be executable for obvious reasons
21639 * Does not cover __inittext since that is gone later on. On
21640 * 64bit we do not enforce !NX on the low mapping
21641 */
21642 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
21643 - pgprot_val(forbidden) |= _PAGE_NX;
21644 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21645 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21646
21647 +#ifdef CONFIG_DEBUG_RODATA
21648 /*
21649 * The .rodata section needs to be read-only. Using the pfn
21650 * catches all aliases.
21651 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
21652 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21653 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21654 pgprot_val(forbidden) |= _PAGE_RW;
21655 +#endif
21656 +
21657 +#ifdef CONFIG_PAX_KERNEXEC
21658 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21659 + pgprot_val(forbidden) |= _PAGE_RW;
21660 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21661 + }
21662 +#endif
21663
21664 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21665
21666 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21667 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21668 {
21669 /* change init_mm */
21670 + pax_open_kernel();
21671 set_pte_atomic(kpte, pte);
21672 +
21673 #ifdef CONFIG_X86_32
21674 if (!SHARED_KERNEL_PMD) {
21675 +
21676 +#ifdef CONFIG_PAX_PER_CPU_PGD
21677 + unsigned long cpu;
21678 +#else
21679 struct page *page;
21680 +#endif
21681
21682 +#ifdef CONFIG_PAX_PER_CPU_PGD
21683 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21684 + pgd_t *pgd = get_cpu_pgd(cpu);
21685 +#else
21686 list_for_each_entry(page, &pgd_list, lru) {
21687 - pgd_t *pgd;
21688 + pgd_t *pgd = (pgd_t *)page_address(page);
21689 +#endif
21690 +
21691 pud_t *pud;
21692 pmd_t *pmd;
21693
21694 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
21695 + pgd += pgd_index(address);
21696 pud = pud_offset(pgd, address);
21697 pmd = pmd_offset(pud, address);
21698 set_pte_atomic((pte_t *)pmd, pte);
21699 }
21700 }
21701 #endif
21702 + pax_close_kernel();
21703 }
21704
21705 static int
21706 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr-test.c linux-2.6.32.42/arch/x86/mm/pageattr-test.c
21707 --- linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
21708 +++ linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
21709 @@ -36,7 +36,7 @@ enum {
21710
21711 static int pte_testbit(pte_t pte)
21712 {
21713 - return pte_flags(pte) & _PAGE_UNUSED1;
21714 + return pte_flags(pte) & _PAGE_CPA_TEST;
21715 }
21716
21717 struct split_state {
21718 diff -urNp linux-2.6.32.42/arch/x86/mm/pat.c linux-2.6.32.42/arch/x86/mm/pat.c
21719 --- linux-2.6.32.42/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
21720 +++ linux-2.6.32.42/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
21721 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
21722
21723 conflict:
21724 printk(KERN_INFO "%s:%d conflicting memory types "
21725 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
21726 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
21727 new->end, cattr_name(new->type), cattr_name(entry->type));
21728 return -EBUSY;
21729 }
21730 @@ -559,7 +559,7 @@ unlock_ret:
21731
21732 if (err) {
21733 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21734 - current->comm, current->pid, start, end);
21735 + current->comm, task_pid_nr(current), start, end);
21736 }
21737
21738 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
21739 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
21740 while (cursor < to) {
21741 if (!devmem_is_allowed(pfn)) {
21742 printk(KERN_INFO
21743 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21744 - current->comm, from, to);
21745 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21746 + current->comm, from, to, cursor);
21747 return 0;
21748 }
21749 cursor += PAGE_SIZE;
21750 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
21751 printk(KERN_INFO
21752 "%s:%d ioremap_change_attr failed %s "
21753 "for %Lx-%Lx\n",
21754 - current->comm, current->pid,
21755 + current->comm, task_pid_nr(current),
21756 cattr_name(flags),
21757 base, (unsigned long long)(base + size));
21758 return -EINVAL;
21759 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
21760 free_memtype(paddr, paddr + size);
21761 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21762 " for %Lx-%Lx, got %s\n",
21763 - current->comm, current->pid,
21764 + current->comm, task_pid_nr(current),
21765 cattr_name(want_flags),
21766 (unsigned long long)paddr,
21767 (unsigned long long)(paddr + size),
21768 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable_32.c linux-2.6.32.42/arch/x86/mm/pgtable_32.c
21769 --- linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
21770 +++ linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
21771 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
21772 return;
21773 }
21774 pte = pte_offset_kernel(pmd, vaddr);
21775 +
21776 + pax_open_kernel();
21777 if (pte_val(pteval))
21778 set_pte_at(&init_mm, vaddr, pte, pteval);
21779 else
21780 pte_clear(&init_mm, vaddr, pte);
21781 + pax_close_kernel();
21782
21783 /*
21784 * It's enough to flush this one mapping.
21785 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable.c linux-2.6.32.42/arch/x86/mm/pgtable.c
21786 --- linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
21787 +++ linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
21788 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
21789 list_del(&page->lru);
21790 }
21791
21792 -#define UNSHARED_PTRS_PER_PGD \
21793 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21794 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21795 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21796
21797 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21798 +{
21799 + while (count--)
21800 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21801 +}
21802 +#endif
21803 +
21804 +#ifdef CONFIG_PAX_PER_CPU_PGD
21805 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21806 +{
21807 + while (count--)
21808 +
21809 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21810 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21811 +#else
21812 + *dst++ = *src++;
21813 +#endif
21814 +
21815 +}
21816 +#endif
21817 +
21818 +#ifdef CONFIG_X86_64
21819 +#define pxd_t pud_t
21820 +#define pyd_t pgd_t
21821 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21822 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21823 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21824 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21825 +#define PYD_SIZE PGDIR_SIZE
21826 +#else
21827 +#define pxd_t pmd_t
21828 +#define pyd_t pud_t
21829 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21830 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21831 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21832 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
21833 +#define PYD_SIZE PUD_SIZE
21834 +#endif
21835 +
21836 +#ifdef CONFIG_PAX_PER_CPU_PGD
21837 +static inline void pgd_ctor(pgd_t *pgd) {}
21838 +static inline void pgd_dtor(pgd_t *pgd) {}
21839 +#else
21840 static void pgd_ctor(pgd_t *pgd)
21841 {
21842 /* If the pgd points to a shared pagetable level (either the
21843 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
21844 pgd_list_del(pgd);
21845 spin_unlock_irqrestore(&pgd_lock, flags);
21846 }
21847 +#endif
21848
21849 /*
21850 * List of all pgd's needed for non-PAE so it can invalidate entries
21851 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
21852 * -- wli
21853 */
21854
21855 -#ifdef CONFIG_X86_PAE
21856 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21857 /*
21858 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21859 * updating the top-level pagetable entries to guarantee the
21860 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
21861 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21862 * and initialize the kernel pmds here.
21863 */
21864 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21865 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21866
21867 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21868 {
21869 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
21870 */
21871 flush_tlb_mm(mm);
21872 }
21873 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21874 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21875 #else /* !CONFIG_X86_PAE */
21876
21877 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21878 -#define PREALLOCATED_PMDS 0
21879 +#define PREALLOCATED_PXDS 0
21880
21881 #endif /* CONFIG_X86_PAE */
21882
21883 -static void free_pmds(pmd_t *pmds[])
21884 +static void free_pxds(pxd_t *pxds[])
21885 {
21886 int i;
21887
21888 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21889 - if (pmds[i])
21890 - free_page((unsigned long)pmds[i]);
21891 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21892 + if (pxds[i])
21893 + free_page((unsigned long)pxds[i]);
21894 }
21895
21896 -static int preallocate_pmds(pmd_t *pmds[])
21897 +static int preallocate_pxds(pxd_t *pxds[])
21898 {
21899 int i;
21900 bool failed = false;
21901
21902 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21903 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21904 - if (pmd == NULL)
21905 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21906 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21907 + if (pxd == NULL)
21908 failed = true;
21909 - pmds[i] = pmd;
21910 + pxds[i] = pxd;
21911 }
21912
21913 if (failed) {
21914 - free_pmds(pmds);
21915 + free_pxds(pxds);
21916 return -ENOMEM;
21917 }
21918
21919 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
21920 * preallocate which never got a corresponding vma will need to be
21921 * freed manually.
21922 */
21923 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21924 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21925 {
21926 int i;
21927
21928 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21929 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21930 pgd_t pgd = pgdp[i];
21931
21932 if (pgd_val(pgd) != 0) {
21933 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21934 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21935
21936 - pgdp[i] = native_make_pgd(0);
21937 + set_pgd(pgdp + i, native_make_pgd(0));
21938
21939 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21940 - pmd_free(mm, pmd);
21941 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21942 + pxd_free(mm, pxd);
21943 }
21944 }
21945 }
21946
21947 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21948 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21949 {
21950 - pud_t *pud;
21951 + pyd_t *pyd;
21952 unsigned long addr;
21953 int i;
21954
21955 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21956 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21957 return;
21958
21959 - pud = pud_offset(pgd, 0);
21960 +#ifdef CONFIG_X86_64
21961 + pyd = pyd_offset(mm, 0L);
21962 +#else
21963 + pyd = pyd_offset(pgd, 0L);
21964 +#endif
21965
21966 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21967 - i++, pud++, addr += PUD_SIZE) {
21968 - pmd_t *pmd = pmds[i];
21969 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21970 + i++, pyd++, addr += PYD_SIZE) {
21971 + pxd_t *pxd = pxds[i];
21972
21973 if (i >= KERNEL_PGD_BOUNDARY)
21974 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21975 - sizeof(pmd_t) * PTRS_PER_PMD);
21976 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21977 + sizeof(pxd_t) * PTRS_PER_PMD);
21978
21979 - pud_populate(mm, pud, pmd);
21980 + pyd_populate(mm, pyd, pxd);
21981 }
21982 }
21983
21984 pgd_t *pgd_alloc(struct mm_struct *mm)
21985 {
21986 pgd_t *pgd;
21987 - pmd_t *pmds[PREALLOCATED_PMDS];
21988 + pxd_t *pxds[PREALLOCATED_PXDS];
21989 +
21990 unsigned long flags;
21991
21992 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21993 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21994
21995 mm->pgd = pgd;
21996
21997 - if (preallocate_pmds(pmds) != 0)
21998 + if (preallocate_pxds(pxds) != 0)
21999 goto out_free_pgd;
22000
22001 if (paravirt_pgd_alloc(mm) != 0)
22002 - goto out_free_pmds;
22003 + goto out_free_pxds;
22004
22005 /*
22006 * Make sure that pre-populating the pmds is atomic with
22007 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22008 spin_lock_irqsave(&pgd_lock, flags);
22009
22010 pgd_ctor(pgd);
22011 - pgd_prepopulate_pmd(mm, pgd, pmds);
22012 + pgd_prepopulate_pxd(mm, pgd, pxds);
22013
22014 spin_unlock_irqrestore(&pgd_lock, flags);
22015
22016 return pgd;
22017
22018 -out_free_pmds:
22019 - free_pmds(pmds);
22020 +out_free_pxds:
22021 + free_pxds(pxds);
22022 out_free_pgd:
22023 free_page((unsigned long)pgd);
22024 out:
22025 @@ -287,7 +338,7 @@ out:
22026
22027 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22028 {
22029 - pgd_mop_up_pmds(mm, pgd);
22030 + pgd_mop_up_pxds(mm, pgd);
22031 pgd_dtor(pgd);
22032 paravirt_pgd_free(mm, pgd);
22033 free_page((unsigned long)pgd);
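
The pxd_t/pyd_t aliases introduced above let a single copy of the preallocation code serve both page-table layouts; roughly how they expand:

/* Expansion of the pxd/pyd aliases used by pgd_prepopulate_pxd() above:
 *
 *   CONFIG_X86_64:   pxd_t = pud_t, pyd_t = pgd_t, PYD_SIZE = PGDIR_SIZE
 *                    -> PUDs are preallocated and hung off the PGD
 *   32-bit (PAE):    pxd_t = pmd_t, pyd_t = pud_t, PYD_SIZE = PUD_SIZE
 *                    -> PMDs are preallocated and hung off the folded PUD
 */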
22034 diff -urNp linux-2.6.32.42/arch/x86/mm/setup_nx.c linux-2.6.32.42/arch/x86/mm/setup_nx.c
22035 --- linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22036 +++ linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22037 @@ -4,11 +4,10 @@
22038
22039 #include <asm/pgtable.h>
22040
22041 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22042 int nx_enabled;
22043
22044 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22045 -static int disable_nx __cpuinitdata;
22046 -
22047 +#ifndef CONFIG_PAX_PAGEEXEC
22048 /*
22049 * noexec = on|off
22050 *
22051 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22052 if (!str)
22053 return -EINVAL;
22054 if (!strncmp(str, "on", 2)) {
22055 - __supported_pte_mask |= _PAGE_NX;
22056 - disable_nx = 0;
22057 + nx_enabled = 1;
22058 } else if (!strncmp(str, "off", 3)) {
22059 - disable_nx = 1;
22060 - __supported_pte_mask &= ~_PAGE_NX;
22061 + nx_enabled = 0;
22062 }
22063 return 0;
22064 }
22065 early_param("noexec", noexec_setup);
22066 #endif
22067 +#endif
22068
22069 #ifdef CONFIG_X86_PAE
22070 void __init set_nx(void)
22071 {
22072 - unsigned int v[4], l, h;
22073 + if (!nx_enabled && cpu_has_nx) {
22074 + unsigned l, h;
22075
22076 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22077 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22078 -
22079 - if ((v[3] & (1 << 20)) && !disable_nx) {
22080 - rdmsr(MSR_EFER, l, h);
22081 - l |= EFER_NX;
22082 - wrmsr(MSR_EFER, l, h);
22083 - nx_enabled = 1;
22084 - __supported_pte_mask |= _PAGE_NX;
22085 - }
22086 + __supported_pte_mask &= ~_PAGE_NX;
22087 + rdmsr(MSR_EFER, l, h);
22088 + l &= ~EFER_NX;
22089 + wrmsr(MSR_EFER, l, h);
22090 }
22091 }
22092 #else
22093 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22094 unsigned long efer;
22095
22096 rdmsrl(MSR_EFER, efer);
22097 - if (!(efer & EFER_NX) || disable_nx)
22098 + if (!(efer & EFER_NX) || !nx_enabled)
22099 __supported_pte_mask &= ~_PAGE_NX;
22100 }
22101 #endif
22102 diff -urNp linux-2.6.32.42/arch/x86/mm/tlb.c linux-2.6.32.42/arch/x86/mm/tlb.c
22103 --- linux-2.6.32.42/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22104 +++ linux-2.6.32.42/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22105 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
22106 BUG();
22107 cpumask_clear_cpu(cpu,
22108 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22109 +
22110 +#ifndef CONFIG_PAX_PER_CPU_PGD
22111 load_cr3(swapper_pg_dir);
22112 +#endif
22113 +
22114 }
22115 EXPORT_SYMBOL_GPL(leave_mm);
22116
22117 diff -urNp linux-2.6.32.42/arch/x86/oprofile/backtrace.c linux-2.6.32.42/arch/x86/oprofile/backtrace.c
22118 --- linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22119 +++ linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22120 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22121 struct frame_head bufhead[2];
22122
22123 /* Also check accessibility of one struct frame_head beyond */
22124 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22125 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22126 return NULL;
22127 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22128 return NULL;
22129 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22130 {
22131 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22132
22133 - if (!user_mode_vm(regs)) {
22134 + if (!user_mode(regs)) {
22135 unsigned long stack = kernel_stack_pointer(regs);
22136 if (depth)
22137 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22138 diff -urNp linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c
22139 --- linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22140 +++ linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22141 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22142 #endif
22143 }
22144
22145 -static int inline addr_increment(void)
22146 +static inline int addr_increment(void)
22147 {
22148 #ifdef CONFIG_SMP
22149 return smp_num_siblings == 2 ? 2 : 1;
22150 diff -urNp linux-2.6.32.42/arch/x86/pci/common.c linux-2.6.32.42/arch/x86/pci/common.c
22151 --- linux-2.6.32.42/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22152 +++ linux-2.6.32.42/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22153 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
22154 int pcibios_last_bus = -1;
22155 unsigned long pirq_table_addr;
22156 struct pci_bus *pci_root_bus;
22157 -struct pci_raw_ops *raw_pci_ops;
22158 -struct pci_raw_ops *raw_pci_ext_ops;
22159 +const struct pci_raw_ops *raw_pci_ops;
22160 +const struct pci_raw_ops *raw_pci_ext_ops;
22161
22162 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22163 int reg, int len, u32 *val)
22164 diff -urNp linux-2.6.32.42/arch/x86/pci/direct.c linux-2.6.32.42/arch/x86/pci/direct.c
22165 --- linux-2.6.32.42/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22166 +++ linux-2.6.32.42/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22167 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22168
22169 #undef PCI_CONF1_ADDRESS
22170
22171 -struct pci_raw_ops pci_direct_conf1 = {
22172 +const struct pci_raw_ops pci_direct_conf1 = {
22173 .read = pci_conf1_read,
22174 .write = pci_conf1_write,
22175 };
22176 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22177
22178 #undef PCI_CONF2_ADDRESS
22179
22180 -struct pci_raw_ops pci_direct_conf2 = {
22181 +const struct pci_raw_ops pci_direct_conf2 = {
22182 .read = pci_conf2_read,
22183 .write = pci_conf2_write,
22184 };
22185 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22186 * This should be close to trivial, but it isn't, because there are buggy
22187 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22188 */
22189 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22190 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22191 {
22192 u32 x = 0;
22193 int year, devfn;
22194 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_32.c linux-2.6.32.42/arch/x86/pci/mmconfig_32.c
22195 --- linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22196 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22197 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22198 return 0;
22199 }
22200
22201 -static struct pci_raw_ops pci_mmcfg = {
22202 +static const struct pci_raw_ops pci_mmcfg = {
22203 .read = pci_mmcfg_read,
22204 .write = pci_mmcfg_write,
22205 };
22206 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_64.c linux-2.6.32.42/arch/x86/pci/mmconfig_64.c
22207 --- linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22208 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22209 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22210 return 0;
22211 }
22212
22213 -static struct pci_raw_ops pci_mmcfg = {
22214 +static const struct pci_raw_ops pci_mmcfg = {
22215 .read = pci_mmcfg_read,
22216 .write = pci_mmcfg_write,
22217 };
22218 diff -urNp linux-2.6.32.42/arch/x86/pci/numaq_32.c linux-2.6.32.42/arch/x86/pci/numaq_32.c
22219 --- linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22220 +++ linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22221 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22222
22223 #undef PCI_CONF1_MQ_ADDRESS
22224
22225 -static struct pci_raw_ops pci_direct_conf1_mq = {
22226 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22227 .read = pci_conf1_mq_read,
22228 .write = pci_conf1_mq_write
22229 };
22230 diff -urNp linux-2.6.32.42/arch/x86/pci/olpc.c linux-2.6.32.42/arch/x86/pci/olpc.c
22231 --- linux-2.6.32.42/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22232 +++ linux-2.6.32.42/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22233 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22234 return 0;
22235 }
22236
22237 -static struct pci_raw_ops pci_olpc_conf = {
22238 +static const struct pci_raw_ops pci_olpc_conf = {
22239 .read = pci_olpc_read,
22240 .write = pci_olpc_write,
22241 };
22242 diff -urNp linux-2.6.32.42/arch/x86/pci/pcbios.c linux-2.6.32.42/arch/x86/pci/pcbios.c
22243 --- linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22244 +++ linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22245 @@ -56,50 +56,93 @@ union bios32 {
22246 static struct {
22247 unsigned long address;
22248 unsigned short segment;
22249 -} bios32_indirect = { 0, __KERNEL_CS };
22250 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22251
22252 /*
22253 * Returns the entry point for the given service, NULL on error
22254 */
22255
22256 -static unsigned long bios32_service(unsigned long service)
22257 +static unsigned long __devinit bios32_service(unsigned long service)
22258 {
22259 unsigned char return_code; /* %al */
22260 unsigned long address; /* %ebx */
22261 unsigned long length; /* %ecx */
22262 unsigned long entry; /* %edx */
22263 unsigned long flags;
22264 + struct desc_struct d, *gdt;
22265
22266 local_irq_save(flags);
22267 - __asm__("lcall *(%%edi); cld"
22268 +
22269 + gdt = get_cpu_gdt_table(smp_processor_id());
22270 +
22271 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22272 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22273 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22274 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22275 +
22276 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22277 : "=a" (return_code),
22278 "=b" (address),
22279 "=c" (length),
22280 "=d" (entry)
22281 : "0" (service),
22282 "1" (0),
22283 - "D" (&bios32_indirect));
22284 + "D" (&bios32_indirect),
22285 + "r"(__PCIBIOS_DS)
22286 + : "memory");
22287 +
22288 + pax_open_kernel();
22289 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22290 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22291 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22292 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22293 + pax_close_kernel();
22294 +
22295 local_irq_restore(flags);
22296
22297 switch (return_code) {
22298 - case 0:
22299 - return address + entry;
22300 - case 0x80: /* Not present */
22301 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22302 - return 0;
22303 - default: /* Shouldn't happen */
22304 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22305 - service, return_code);
22306 + case 0: {
22307 + int cpu;
22308 + unsigned char flags;
22309 +
22310 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22311 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22312 + printk(KERN_WARNING "bios32_service: not valid\n");
22313 return 0;
22314 + }
22315 + address = address + PAGE_OFFSET;
22316 + length += 16UL; /* some BIOSs underreport this... */
22317 + flags = 4;
22318 + if (length >= 64*1024*1024) {
22319 + length >>= PAGE_SHIFT;
22320 + flags |= 8;
22321 + }
22322 +
22323 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22324 + gdt = get_cpu_gdt_table(cpu);
22325 + pack_descriptor(&d, address, length, 0x9b, flags);
22326 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22327 + pack_descriptor(&d, address, length, 0x93, flags);
22328 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22329 + }
22330 + return entry;
22331 + }
22332 + case 0x80: /* Not present */
22333 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22334 + return 0;
22335 + default: /* Shouldn't happen */
22336 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22337 + service, return_code);
22338 + return 0;
22339 }
22340 }
22341
22342 static struct {
22343 unsigned long address;
22344 unsigned short segment;
22345 -} pci_indirect = { 0, __KERNEL_CS };
22346 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22347
22348 -static int pci_bios_present;
22349 +static int pci_bios_present __read_only;
22350
22351 static int __devinit check_pcibios(void)
22352 {
22353 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22354 unsigned long flags, pcibios_entry;
22355
22356 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22357 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22358 + pci_indirect.address = pcibios_entry;
22359
22360 local_irq_save(flags);
22361 - __asm__(
22362 - "lcall *(%%edi); cld\n\t"
22363 + __asm__("movw %w6, %%ds\n\t"
22364 + "lcall *%%ss:(%%edi); cld\n\t"
22365 + "push %%ss\n\t"
22366 + "pop %%ds\n\t"
22367 "jc 1f\n\t"
22368 "xor %%ah, %%ah\n"
22369 "1:"
22370 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22371 "=b" (ebx),
22372 "=c" (ecx)
22373 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22374 - "D" (&pci_indirect)
22375 + "D" (&pci_indirect),
22376 + "r" (__PCIBIOS_DS)
22377 : "memory");
22378 local_irq_restore(flags);
22379
22380 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22381
22382 switch (len) {
22383 case 1:
22384 - __asm__("lcall *(%%esi); cld\n\t"
22385 + __asm__("movw %w6, %%ds\n\t"
22386 + "lcall *%%ss:(%%esi); cld\n\t"
22387 + "push %%ss\n\t"
22388 + "pop %%ds\n\t"
22389 "jc 1f\n\t"
22390 "xor %%ah, %%ah\n"
22391 "1:"
22392 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22393 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22394 "b" (bx),
22395 "D" ((long)reg),
22396 - "S" (&pci_indirect));
22397 + "S" (&pci_indirect),
22398 + "r" (__PCIBIOS_DS));
22399 /*
22400 * Zero-extend the result beyond 8 bits, do not trust the
22401 * BIOS having done it:
22402 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22403 *value &= 0xff;
22404 break;
22405 case 2:
22406 - __asm__("lcall *(%%esi); cld\n\t"
22407 + __asm__("movw %w6, %%ds\n\t"
22408 + "lcall *%%ss:(%%esi); cld\n\t"
22409 + "push %%ss\n\t"
22410 + "pop %%ds\n\t"
22411 "jc 1f\n\t"
22412 "xor %%ah, %%ah\n"
22413 "1:"
22414 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22415 : "1" (PCIBIOS_READ_CONFIG_WORD),
22416 "b" (bx),
22417 "D" ((long)reg),
22418 - "S" (&pci_indirect));
22419 + "S" (&pci_indirect),
22420 + "r" (__PCIBIOS_DS));
22421 /*
22422 * Zero-extend the result beyond 16 bits, do not trust the
22423 * BIOS having done it:
22424 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22425 *value &= 0xffff;
22426 break;
22427 case 4:
22428 - __asm__("lcall *(%%esi); cld\n\t"
22429 + __asm__("movw %w6, %%ds\n\t"
22430 + "lcall *%%ss:(%%esi); cld\n\t"
22431 + "push %%ss\n\t"
22432 + "pop %%ds\n\t"
22433 "jc 1f\n\t"
22434 "xor %%ah, %%ah\n"
22435 "1:"
22436 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22437 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22438 "b" (bx),
22439 "D" ((long)reg),
22440 - "S" (&pci_indirect));
22441 + "S" (&pci_indirect),
22442 + "r" (__PCIBIOS_DS));
22443 break;
22444 }
22445
22446 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22447
22448 switch (len) {
22449 case 1:
22450 - __asm__("lcall *(%%esi); cld\n\t"
22451 + __asm__("movw %w6, %%ds\n\t"
22452 + "lcall *%%ss:(%%esi); cld\n\t"
22453 + "push %%ss\n\t"
22454 + "pop %%ds\n\t"
22455 "jc 1f\n\t"
22456 "xor %%ah, %%ah\n"
22457 "1:"
22458 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22459 "c" (value),
22460 "b" (bx),
22461 "D" ((long)reg),
22462 - "S" (&pci_indirect));
22463 + "S" (&pci_indirect),
22464 + "r" (__PCIBIOS_DS));
22465 break;
22466 case 2:
22467 - __asm__("lcall *(%%esi); cld\n\t"
22468 + __asm__("movw %w6, %%ds\n\t"
22469 + "lcall *%%ss:(%%esi); cld\n\t"
22470 + "push %%ss\n\t"
22471 + "pop %%ds\n\t"
22472 "jc 1f\n\t"
22473 "xor %%ah, %%ah\n"
22474 "1:"
22475 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22476 "c" (value),
22477 "b" (bx),
22478 "D" ((long)reg),
22479 - "S" (&pci_indirect));
22480 + "S" (&pci_indirect),
22481 + "r" (__PCIBIOS_DS));
22482 break;
22483 case 4:
22484 - __asm__("lcall *(%%esi); cld\n\t"
22485 + __asm__("movw %w6, %%ds\n\t"
22486 + "lcall *%%ss:(%%esi); cld\n\t"
22487 + "push %%ss\n\t"
22488 + "pop %%ds\n\t"
22489 "jc 1f\n\t"
22490 "xor %%ah, %%ah\n"
22491 "1:"
22492 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22493 "c" (value),
22494 "b" (bx),
22495 "D" ((long)reg),
22496 - "S" (&pci_indirect));
22497 + "S" (&pci_indirect),
22498 + "r" (__PCIBIOS_DS));
22499 break;
22500 }
22501
22502 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22503 * Function table for BIOS32 access
22504 */
22505
22506 -static struct pci_raw_ops pci_bios_access = {
22507 +static const struct pci_raw_ops pci_bios_access = {
22508 .read = pci_bios_read,
22509 .write = pci_bios_write
22510 };
22511 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22512 * Try to find PCI BIOS.
22513 */
22514
22515 -static struct pci_raw_ops * __devinit pci_find_bios(void)
22516 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
22517 {
22518 union bios32 *check;
22519 unsigned char sum;
22520 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22521
22522 DBG("PCI: Fetching IRQ routing table... ");
22523 __asm__("push %%es\n\t"
22524 + "movw %w8, %%ds\n\t"
22525 "push %%ds\n\t"
22526 "pop %%es\n\t"
22527 - "lcall *(%%esi); cld\n\t"
22528 + "lcall *%%ss:(%%esi); cld\n\t"
22529 "pop %%es\n\t"
22530 + "push %%ss\n\t"
22531 + "pop %%ds\n"
22532 "jc 1f\n\t"
22533 "xor %%ah, %%ah\n"
22534 "1:"
22535 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22536 "1" (0),
22537 "D" ((long) &opt),
22538 "S" (&pci_indirect),
22539 - "m" (opt)
22540 + "m" (opt),
22541 + "r" (__PCIBIOS_DS)
22542 : "memory");
22543 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22544 if (ret & 0xff00)
22545 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22546 {
22547 int ret;
22548
22549 - __asm__("lcall *(%%esi); cld\n\t"
22550 + __asm__("movw %w5, %%ds\n\t"
22551 + "lcall *%%ss:(%%esi); cld\n\t"
22552 + "push %%ss\n\t"
22553 + "pop %%ds\n"
22554 "jc 1f\n\t"
22555 "xor %%ah, %%ah\n"
22556 "1:"
22557 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22558 : "0" (PCIBIOS_SET_PCI_HW_INT),
22559 "b" ((dev->bus->number << 8) | dev->devfn),
22560 "c" ((irq << 8) | (pin + 10)),
22561 - "S" (&pci_indirect));
22562 + "S" (&pci_indirect),
22563 + "r" (__PCIBIOS_DS));
22564 return !(ret & 0xff00);
22565 }
22566 EXPORT_SYMBOL(pcibios_set_irq_routing);
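Aside (not part of the patch itself): the pcbios.c hunks above stop far-calling the PCI BIOS through __KERNEL_CS and instead install temporary GDT_ENTRY_PCIBIOS_CS/DS descriptors via pack_descriptor(&d, base, limit, 0x9B/0x93, flags). As a minimal user-space sketch of what those arguments encode, assuming the standard 8-byte x86 segment-descriptor layout (the helper and program below are illustrative only, not kernel code):

/* sketch: pack an x86 GDT descriptor the way the kernel's pack_descriptor() does */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_desc(uint32_t base, uint32_t limit, uint8_t access, uint8_t flags)
{
        uint64_t d = 0;
        d |= (uint64_t)(limit & 0xffffu);               /* limit 15:0   */
        d |= (uint64_t)(base & 0xffffu) << 16;          /* base  15:0   */
        d |= (uint64_t)((base >> 16) & 0xffu) << 32;    /* base  23:16  */
        d |= (uint64_t)access << 40;                    /* type/access  */
        d |= (uint64_t)((limit >> 16) & 0xfu) << 48;    /* limit 19:16  */
        d |= (uint64_t)(flags & 0xfu) << 52;            /* G/DB/L/AVL   */
        d |= (uint64_t)((base >> 24) & 0xffu) << 56;    /* base  31:24  */
        return d;
}

int main(void)
{
        /* the parameters bios32_service() uses for the temporary code segment:
         * base 0, 20-bit limit 0xFFFFF, access byte 0x9B, flags 0xC */
        printf("%#018llx\n", (unsigned long long)pack_desc(0, 0xFFFFF, 0x9B, 0xC));
        return 0;
}

This prints 0x00cf9b000000ffff, the flat 4 GiB ring-0 code descriptor used for the initial BIOS32 directory call; the case 0 branch above then re-packs the same slots with the service's own base and length before returning the entry offset.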
22567 diff -urNp linux-2.6.32.42/arch/x86/power/cpu.c linux-2.6.32.42/arch/x86/power/cpu.c
22568 --- linux-2.6.32.42/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22569 +++ linux-2.6.32.42/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22570 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
22571 static void fix_processor_context(void)
22572 {
22573 int cpu = smp_processor_id();
22574 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22575 + struct tss_struct *t = init_tss + cpu;
22576
22577 set_tss_desc(cpu, t); /*
22578 * This just modifies memory; should not be
22579 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
22580 */
22581
22582 #ifdef CONFIG_X86_64
22583 + pax_open_kernel();
22584 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22585 + pax_close_kernel();
22586
22587 syscall_init(); /* This sets MSR_*STAR and related */
22588 #endif
22589 diff -urNp linux-2.6.32.42/arch/x86/vdso/Makefile linux-2.6.32.42/arch/x86/vdso/Makefile
22590 --- linux-2.6.32.42/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22591 +++ linux-2.6.32.42/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22592 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22593 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22594 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22595
22596 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22597 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22598 GCOV_PROFILE := n
22599
22600 #
22601 diff -urNp linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c
22602 --- linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22603 +++ linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22604 @@ -22,24 +22,48 @@
22605 #include <asm/hpet.h>
22606 #include <asm/unistd.h>
22607 #include <asm/io.h>
22608 +#include <asm/fixmap.h>
22609 #include "vextern.h"
22610
22611 #define gtod vdso_vsyscall_gtod_data
22612
22613 +notrace noinline long __vdso_fallback_time(long *t)
22614 +{
22615 + long secs;
22616 + asm volatile("syscall"
22617 + : "=a" (secs)
22618 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22619 + return secs;
22620 +}
22621 +
22622 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22623 {
22624 long ret;
22625 asm("syscall" : "=a" (ret) :
22626 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22627 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22628 return ret;
22629 }
22630
22631 +notrace static inline cycle_t __vdso_vread_hpet(void)
22632 +{
22633 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
22634 +}
22635 +
22636 +notrace static inline cycle_t __vdso_vread_tsc(void)
22637 +{
22638 + cycle_t ret = (cycle_t)vget_cycles();
22639 +
22640 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
22641 +}
22642 +
22643 notrace static inline long vgetns(void)
22644 {
22645 long v;
22646 - cycles_t (*vread)(void);
22647 - vread = gtod->clock.vread;
22648 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
22649 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
22650 + v = __vdso_vread_tsc();
22651 + else
22652 + v = __vdso_vread_hpet();
22653 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
22654 return (v * gtod->clock.mult) >> gtod->clock.shift;
22655 }
22656
22657 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
22658
22659 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
22660 {
22661 - if (likely(gtod->sysctl_enabled))
22662 + if (likely(gtod->sysctl_enabled &&
22663 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22664 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22665 switch (clock) {
22666 case CLOCK_REALTIME:
22667 if (likely(gtod->clock.vread))
22668 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
22669 int clock_gettime(clockid_t, struct timespec *)
22670 __attribute__((weak, alias("__vdso_clock_gettime")));
22671
22672 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22673 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
22674 {
22675 long ret;
22676 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
22677 + asm("syscall" : "=a" (ret) :
22678 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
22679 + return ret;
22680 +}
22681 +
22682 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22683 +{
22684 + if (likely(gtod->sysctl_enabled &&
22685 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22686 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22687 + {
22688 if (likely(tv != NULL)) {
22689 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
22690 offsetof(struct timespec, tv_nsec) ||
22691 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
22692 }
22693 return 0;
22694 }
22695 - asm("syscall" : "=a" (ret) :
22696 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
22697 - return ret;
22698 + return __vdso_fallback_gettimeofday(tv, tz);
22699 }
22700 int gettimeofday(struct timeval *, struct timezone *)
22701 __attribute__((weak, alias("__vdso_gettimeofday")));
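As an aside that is not part of the patch: the vclock_gettime.c hunks add "r11" and "cx" to the clobber lists of the vDSO fallback syscalls because the syscall instruction itself overwrites %rcx (return RIP) and %r11 (saved RFLAGS). A minimal stand-alone sketch of the same idiom, assuming an x86-64 Linux userland (the wrapper is illustrative, not kernel code):

/* sketch: raw clock_gettime(2) with the clobbers that syscall requires */
#include <stdio.h>
#include <time.h>

static long raw_clock_gettime(long clk, struct timespec *ts)
{
        long ret;
        asm volatile("syscall"
                     : "=a" (ret)
                     : "0" (228L /* __NR_clock_gettime on x86-64 */),
                       "D" (clk), "S" (ts)
                     : "rcx", "r11", "memory");  /* rcx/r11 are trashed by syscall */
        return ret;
}

int main(void)
{
        struct timespec ts;
        if (raw_clock_gettime(0 /* CLOCK_REALTIME */, &ts) == 0)
                printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}

Omitting the clobbers, as the pre-patch fallbacks did, only works for as long as the compiler happens not to keep a live value in %rcx or %r11 across the asm statement.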
22702 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c
22703 --- linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
22704 +++ linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
22705 @@ -25,6 +25,7 @@
22706 #include <asm/tlbflush.h>
22707 #include <asm/vdso.h>
22708 #include <asm/proto.h>
22709 +#include <asm/mman.h>
22710
22711 enum {
22712 VDSO_DISABLED = 0,
22713 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22714 void enable_sep_cpu(void)
22715 {
22716 int cpu = get_cpu();
22717 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22718 + struct tss_struct *tss = init_tss + cpu;
22719
22720 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22721 put_cpu();
22722 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22723 gate_vma.vm_start = FIXADDR_USER_START;
22724 gate_vma.vm_end = FIXADDR_USER_END;
22725 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22726 - gate_vma.vm_page_prot = __P101;
22727 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22728 /*
22729 * Make sure the vDSO gets into every core dump.
22730 * Dumping its contents makes post-mortem fully interpretable later
22731 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22732 if (compat)
22733 addr = VDSO_HIGH_BASE;
22734 else {
22735 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22736 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22737 if (IS_ERR_VALUE(addr)) {
22738 ret = addr;
22739 goto up_fail;
22740 }
22741 }
22742
22743 - current->mm->context.vdso = (void *)addr;
22744 + current->mm->context.vdso = addr;
22745
22746 if (compat_uses_vma || !compat) {
22747 /*
22748 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22749 }
22750
22751 current_thread_info()->sysenter_return =
22752 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22753 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22754
22755 up_fail:
22756 if (ret)
22757 - current->mm->context.vdso = NULL;
22758 + current->mm->context.vdso = 0;
22759
22760 up_write(&mm->mmap_sem);
22761
22762 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
22763
22764 const char *arch_vma_name(struct vm_area_struct *vma)
22765 {
22766 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22767 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22768 return "[vdso]";
22769 +
22770 +#ifdef CONFIG_PAX_SEGMEXEC
22771 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22772 + return "[vdso]";
22773 +#endif
22774 +
22775 return NULL;
22776 }
22777
22778 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22779 struct mm_struct *mm = tsk->mm;
22780
22781 /* Check to see if this task was created in compat vdso mode */
22782 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22783 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22784 return &gate_vma;
22785 return NULL;
22786 }
22787 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso.lds.S linux-2.6.32.42/arch/x86/vdso/vdso.lds.S
22788 --- linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
22789 +++ linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
22790 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
22791 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
22792 #include "vextern.h"
22793 #undef VEXTERN
22794 +
22795 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
22796 +VEXTERN(fallback_gettimeofday)
22797 +VEXTERN(fallback_time)
22798 +VEXTERN(getcpu)
22799 +#undef VEXTERN
22800 diff -urNp linux-2.6.32.42/arch/x86/vdso/vextern.h linux-2.6.32.42/arch/x86/vdso/vextern.h
22801 --- linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
22802 +++ linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
22803 @@ -11,6 +11,5 @@
22804 put into vextern.h and be referenced as a pointer with vdso prefix.
22805 The main kernel later fills in the values. */
22806
22807 -VEXTERN(jiffies)
22808 VEXTERN(vgetcpu_mode)
22809 VEXTERN(vsyscall_gtod_data)
22810 diff -urNp linux-2.6.32.42/arch/x86/vdso/vma.c linux-2.6.32.42/arch/x86/vdso/vma.c
22811 --- linux-2.6.32.42/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
22812 +++ linux-2.6.32.42/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
22813 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
22814 if (!vbase)
22815 goto oom;
22816
22817 - if (memcmp(vbase, "\177ELF", 4)) {
22818 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
22819 printk("VDSO: I'm broken; not ELF\n");
22820 vdso_enabled = 0;
22821 }
22822 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
22823 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
22824 #include "vextern.h"
22825 #undef VEXTERN
22826 + vunmap(vbase);
22827 return 0;
22828
22829 oom:
22830 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
22831 goto up_fail;
22832 }
22833
22834 - current->mm->context.vdso = (void *)addr;
22835 + current->mm->context.vdso = addr;
22836
22837 ret = install_special_mapping(mm, addr, vdso_size,
22838 VM_READ|VM_EXEC|
22839 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
22840 VM_ALWAYSDUMP,
22841 vdso_pages);
22842 if (ret) {
22843 - current->mm->context.vdso = NULL;
22844 + current->mm->context.vdso = 0;
22845 goto up_fail;
22846 }
22847
22848 @@ -132,10 +133,3 @@ up_fail:
22849 up_write(&mm->mmap_sem);
22850 return ret;
22851 }
22852 -
22853 -static __init int vdso_setup(char *s)
22854 -{
22855 - vdso_enabled = simple_strtoul(s, NULL, 0);
22856 - return 0;
22857 -}
22858 -__setup("vdso=", vdso_setup);
22859 diff -urNp linux-2.6.32.42/arch/x86/xen/enlighten.c linux-2.6.32.42/arch/x86/xen/enlighten.c
22860 --- linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
22861 +++ linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
22862 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22863
22864 struct shared_info xen_dummy_shared_info;
22865
22866 -void *xen_initial_gdt;
22867 -
22868 /*
22869 * Point at some empty memory to start with. We map the real shared_info
22870 * page as soon as fixmap is up and running.
22871 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
22872
22873 preempt_disable();
22874
22875 - start = __get_cpu_var(idt_desc).address;
22876 + start = (unsigned long)__get_cpu_var(idt_desc).address;
22877 end = start + __get_cpu_var(idt_desc).size + 1;
22878
22879 xen_mc_flush();
22880 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
22881 #endif
22882 };
22883
22884 -static void xen_reboot(int reason)
22885 +static __noreturn void xen_reboot(int reason)
22886 {
22887 struct sched_shutdown r = { .reason = reason };
22888
22889 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
22890 BUG();
22891 }
22892
22893 -static void xen_restart(char *msg)
22894 +static __noreturn void xen_restart(char *msg)
22895 {
22896 xen_reboot(SHUTDOWN_reboot);
22897 }
22898
22899 -static void xen_emergency_restart(void)
22900 +static __noreturn void xen_emergency_restart(void)
22901 {
22902 xen_reboot(SHUTDOWN_reboot);
22903 }
22904
22905 -static void xen_machine_halt(void)
22906 +static __noreturn void xen_machine_halt(void)
22907 {
22908 xen_reboot(SHUTDOWN_poweroff);
22909 }
22910 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
22911 */
22912 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22913
22914 -#ifdef CONFIG_X86_64
22915 /* Work out if we support NX */
22916 - check_efer();
22917 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22918 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22919 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22920 + unsigned l, h;
22921 +
22922 +#ifdef CONFIG_X86_PAE
22923 + nx_enabled = 1;
22924 +#endif
22925 + __supported_pte_mask |= _PAGE_NX;
22926 + rdmsr(MSR_EFER, l, h);
22927 + l |= EFER_NX;
22928 + wrmsr(MSR_EFER, l, h);
22929 + }
22930 #endif
22931
22932 xen_setup_features();
22933 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
22934
22935 machine_ops = xen_machine_ops;
22936
22937 - /*
22938 - * The only reliable way to retain the initial address of the
22939 - * percpu gdt_page is to remember it here, so we can go and
22940 - * mark it RW later, when the initial percpu area is freed.
22941 - */
22942 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22943 -
22944 xen_smp_init();
22945
22946 pgd = (pgd_t *)xen_start_info->pt_base;
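Aside (not part of the patch itself): the xen_start_kernel() hunk replaces check_efer() with an explicit probe — confirm the extended CPUID leaves exist, test the NX bit of leaf 0x80000001 EDX, then set EFER.NX (and nx_enabled on PAE). A minimal user-space sketch of the same CPUID test, assuming GCC's <cpuid.h>; user space obviously cannot write EFER, so the program is illustrative only:

/* sketch: CPUID probe for the NX/XD bit, leaf 0x80000001, EDX bit 20 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 when the requested leaf is unsupported */
        if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) && (edx & (1u << 20)))
                puts("NX reported by CPUID");
        else
                puts("NX not reported");
        return 0;
}

Bit 20 here is the same bit the hunk selects with (X86_FEATURE_NX & 31).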
22947 diff -urNp linux-2.6.32.42/arch/x86/xen/mmu.c linux-2.6.32.42/arch/x86/xen/mmu.c
22948 --- linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:55:34.000000000 -0400
22949 +++ linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:56:37.000000000 -0400
22950 @@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
22951 convert_pfn_mfn(init_level4_pgt);
22952 convert_pfn_mfn(level3_ident_pgt);
22953 convert_pfn_mfn(level3_kernel_pgt);
22954 + convert_pfn_mfn(level3_vmalloc_pgt);
22955 + convert_pfn_mfn(level3_vmemmap_pgt);
22956
22957 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22958 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22959 @@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
22960 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22961 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22962 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22963 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22964 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22965 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22966 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22967 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22968 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22969
22970 diff -urNp linux-2.6.32.42/arch/x86/xen/smp.c linux-2.6.32.42/arch/x86/xen/smp.c
22971 --- linux-2.6.32.42/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
22972 +++ linux-2.6.32.42/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
22973 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
22974 {
22975 BUG_ON(smp_processor_id() != 0);
22976 native_smp_prepare_boot_cpu();
22977 -
22978 - /* We've switched to the "real" per-cpu gdt, so make sure the
22979 - old memory can be recycled */
22980 - make_lowmem_page_readwrite(xen_initial_gdt);
22981 -
22982 xen_setup_vcpu_info_placement();
22983 }
22984
22985 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
22986 gdt = get_cpu_gdt_table(cpu);
22987
22988 ctxt->flags = VGCF_IN_KERNEL;
22989 - ctxt->user_regs.ds = __USER_DS;
22990 - ctxt->user_regs.es = __USER_DS;
22991 + ctxt->user_regs.ds = __KERNEL_DS;
22992 + ctxt->user_regs.es = __KERNEL_DS;
22993 ctxt->user_regs.ss = __KERNEL_DS;
22994 #ifdef CONFIG_X86_32
22995 ctxt->user_regs.fs = __KERNEL_PERCPU;
22996 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22997 + savesegment(gs, ctxt->user_regs.gs);
22998 #else
22999 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23000 #endif
23001 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23002 int rc;
23003
23004 per_cpu(current_task, cpu) = idle;
23005 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23006 #ifdef CONFIG_X86_32
23007 irq_ctx_init(cpu);
23008 #else
23009 clear_tsk_thread_flag(idle, TIF_FORK);
23010 - per_cpu(kernel_stack, cpu) =
23011 - (unsigned long)task_stack_page(idle) -
23012 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23013 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23014 #endif
23015 xen_setup_runstate_info(cpu);
23016 xen_setup_timer(cpu);
23017 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-asm_32.S linux-2.6.32.42/arch/x86/xen/xen-asm_32.S
23018 --- linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23019 +++ linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23020 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23021 ESP_OFFSET=4 # bytes pushed onto stack
23022
23023 /*
23024 - * Store vcpu_info pointer for easy access. Do it this way to
23025 - * avoid having to reload %fs
23026 + * Store vcpu_info pointer for easy access.
23027 */
23028 #ifdef CONFIG_SMP
23029 - GET_THREAD_INFO(%eax)
23030 - movl TI_cpu(%eax), %eax
23031 - movl __per_cpu_offset(,%eax,4), %eax
23032 - mov per_cpu__xen_vcpu(%eax), %eax
23033 + push %fs
23034 + mov $(__KERNEL_PERCPU), %eax
23035 + mov %eax, %fs
23036 + mov PER_CPU_VAR(xen_vcpu), %eax
23037 + pop %fs
23038 #else
23039 movl per_cpu__xen_vcpu, %eax
23040 #endif
23041 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-head.S linux-2.6.32.42/arch/x86/xen/xen-head.S
23042 --- linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23043 +++ linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23044 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23045 #ifdef CONFIG_X86_32
23046 mov %esi,xen_start_info
23047 mov $init_thread_union+THREAD_SIZE,%esp
23048 +#ifdef CONFIG_SMP
23049 + movl $cpu_gdt_table,%edi
23050 + movl $__per_cpu_load,%eax
23051 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23052 + rorl $16,%eax
23053 + movb %al,__KERNEL_PERCPU + 4(%edi)
23054 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23055 + movl $__per_cpu_end - 1,%eax
23056 + subl $__per_cpu_start,%eax
23057 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23058 +#endif
23059 #else
23060 mov %rsi,xen_start_info
23061 mov $init_thread_union+THREAD_SIZE,%rsp
23062 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-ops.h linux-2.6.32.42/arch/x86/xen/xen-ops.h
23063 --- linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23064 +++ linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23065 @@ -10,8 +10,6 @@
23066 extern const char xen_hypervisor_callback[];
23067 extern const char xen_failsafe_callback[];
23068
23069 -extern void *xen_initial_gdt;
23070 -
23071 struct trap_info;
23072 void xen_copy_trap_info(struct trap_info *traps);
23073
23074 diff -urNp linux-2.6.32.42/block/blk-integrity.c linux-2.6.32.42/block/blk-integrity.c
23075 --- linux-2.6.32.42/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23076 +++ linux-2.6.32.42/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23077 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23078 NULL,
23079 };
23080
23081 -static struct sysfs_ops integrity_ops = {
23082 +static const struct sysfs_ops integrity_ops = {
23083 .show = &integrity_attr_show,
23084 .store = &integrity_attr_store,
23085 };
23086 diff -urNp linux-2.6.32.42/block/blk-iopoll.c linux-2.6.32.42/block/blk-iopoll.c
23087 --- linux-2.6.32.42/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23088 +++ linux-2.6.32.42/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23089 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23090 }
23091 EXPORT_SYMBOL(blk_iopoll_complete);
23092
23093 -static void blk_iopoll_softirq(struct softirq_action *h)
23094 +static void blk_iopoll_softirq(void)
23095 {
23096 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23097 int rearm = 0, budget = blk_iopoll_budget;
23098 diff -urNp linux-2.6.32.42/block/blk-map.c linux-2.6.32.42/block/blk-map.c
23099 --- linux-2.6.32.42/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23100 +++ linux-2.6.32.42/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23101 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23102 * direct dma. else, set up kernel bounce buffers
23103 */
23104 uaddr = (unsigned long) ubuf;
23105 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
23106 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23107 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23108 else
23109 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23110 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23111 for (i = 0; i < iov_count; i++) {
23112 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23113
23114 + if (!iov[i].iov_len)
23115 + return -EINVAL;
23116 +
23117 if (uaddr & queue_dma_alignment(q)) {
23118 unaligned = 1;
23119 break;
23120 }
23121 - if (!iov[i].iov_len)
23122 - return -EINVAL;
23123 }
23124
23125 if (unaligned || (q->dma_pad_mask & len) || map_data)
23126 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23127 if (!len || !kbuf)
23128 return -EINVAL;
23129
23130 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23131 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23132 if (do_copy)
23133 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23134 else
23135 diff -urNp linux-2.6.32.42/block/blk-softirq.c linux-2.6.32.42/block/blk-softirq.c
23136 --- linux-2.6.32.42/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23137 +++ linux-2.6.32.42/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23138 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23139 * Softirq action handler - move entries to local list and loop over them
23140 * while passing them to the queue registered handler.
23141 */
23142 -static void blk_done_softirq(struct softirq_action *h)
23143 +static void blk_done_softirq(void)
23144 {
23145 struct list_head *cpu_list, local_list;
23146
23147 diff -urNp linux-2.6.32.42/block/blk-sysfs.c linux-2.6.32.42/block/blk-sysfs.c
23148 --- linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23149 +++ linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23150 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23151 kmem_cache_free(blk_requestq_cachep, q);
23152 }
23153
23154 -static struct sysfs_ops queue_sysfs_ops = {
23155 +static const struct sysfs_ops queue_sysfs_ops = {
23156 .show = queue_attr_show,
23157 .store = queue_attr_store,
23158 };
23159 diff -urNp linux-2.6.32.42/block/bsg.c linux-2.6.32.42/block/bsg.c
23160 --- linux-2.6.32.42/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23161 +++ linux-2.6.32.42/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23162 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23163 struct sg_io_v4 *hdr, struct bsg_device *bd,
23164 fmode_t has_write_perm)
23165 {
23166 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23167 + unsigned char *cmdptr;
23168 +
23169 if (hdr->request_len > BLK_MAX_CDB) {
23170 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23171 if (!rq->cmd)
23172 return -ENOMEM;
23173 - }
23174 + cmdptr = rq->cmd;
23175 + } else
23176 + cmdptr = tmpcmd;
23177
23178 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23179 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23180 hdr->request_len))
23181 return -EFAULT;
23182
23183 + if (cmdptr != rq->cmd)
23184 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23185 +
23186 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23187 if (blk_verify_command(rq->cmd, has_write_perm))
23188 return -EPERM;
23189 diff -urNp linux-2.6.32.42/block/elevator.c linux-2.6.32.42/block/elevator.c
23190 --- linux-2.6.32.42/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23191 +++ linux-2.6.32.42/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23192 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23193 return error;
23194 }
23195
23196 -static struct sysfs_ops elv_sysfs_ops = {
23197 +static const struct sysfs_ops elv_sysfs_ops = {
23198 .show = elv_attr_show,
23199 .store = elv_attr_store,
23200 };
23201 diff -urNp linux-2.6.32.42/block/scsi_ioctl.c linux-2.6.32.42/block/scsi_ioctl.c
23202 --- linux-2.6.32.42/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23203 +++ linux-2.6.32.42/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23204 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23205 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23206 struct sg_io_hdr *hdr, fmode_t mode)
23207 {
23208 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23209 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23210 + unsigned char *cmdptr;
23211 +
23212 + if (rq->cmd != rq->__cmd)
23213 + cmdptr = rq->cmd;
23214 + else
23215 + cmdptr = tmpcmd;
23216 +
23217 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23218 return -EFAULT;
23219 +
23220 + if (cmdptr != rq->cmd)
23221 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23222 +
23223 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23224 return -EPERM;
23225
23226 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23227 int err;
23228 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23229 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23230 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23231 + unsigned char *cmdptr;
23232
23233 if (!sic)
23234 return -EINVAL;
23235 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23236 */
23237 err = -EFAULT;
23238 rq->cmd_len = cmdlen;
23239 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23240 +
23241 + if (rq->cmd != rq->__cmd)
23242 + cmdptr = rq->cmd;
23243 + else
23244 + cmdptr = tmpcmd;
23245 +
23246 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23247 goto error;
23248
23249 + if (rq->cmd != cmdptr)
23250 + memcpy(rq->cmd, cmdptr, cmdlen);
23251 +
23252 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23253 goto error;
23254
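As an aside that is not part of the patch: both the bsg.c and scsi_ioctl.c hunks above route the user-supplied CDB through a stack buffer sized like rq->__cmd and only memcpy it into rq->cmd afterwards; one plausible reading is that the user copy then lands in a fixed-size stack object instead of inside the slab-allocated struct request. A minimal user-space model of that shape, with copy_in() standing in for copy_from_user() (all names below are illustrative, not kernel code):

/* sketch: bounce-buffer pattern mirroring blk_fill_sghdr_rq()/sg_scsi_ioctl() */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define BLK_MAX_CDB 16                  /* size of the embedded __cmd array in this model */

struct fake_request {
        unsigned char __cmd[BLK_MAX_CDB];
        unsigned char *cmd;             /* points at __cmd or at a separately allocated buffer */
};

static int copy_in(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);          /* stands in for copy_from_user(); 0 means success */
        return 0;
}

static int fill_cmd(struct fake_request *rq, const void *user, size_t len)
{
        unsigned char tmpcmd[sizeof(rq->__cmd)];
        unsigned char *cmdptr;

        /* copy straight into rq->cmd only when it is the separate allocation */
        cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;
        if (cmdptr == tmpcmd && len > sizeof(tmpcmd))
                return -EINVAL;

        if (copy_in(cmdptr, user, len))
                return -EFAULT;

        if (cmdptr != rq->cmd)
                memcpy(rq->cmd, cmdptr, len);   /* commit only after a full, clean copy */
        return 0;
}

int main(void)
{
        struct fake_request rq;
        const unsigned char cdb[6] = { 0x12, 0, 0, 0, 36, 0 }; /* SCSI INQUIRY */

        rq.cmd = rq.__cmd;
        if (fill_cmd(&rq, cdb, sizeof(cdb)) == 0)
                printf("opcode %#x staged via the bounce buffer\n", rq.cmd[0]);
        return 0;
}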
23255 diff -urNp linux-2.6.32.42/crypto/serpent.c linux-2.6.32.42/crypto/serpent.c
23256 --- linux-2.6.32.42/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23257 +++ linux-2.6.32.42/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23258 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23259 u32 r0,r1,r2,r3,r4;
23260 int i;
23261
23262 + pax_track_stack();
23263 +
23264 /* Copy key, add padding */
23265
23266 for (i = 0; i < keylen; ++i)
23267 diff -urNp linux-2.6.32.42/Documentation/dontdiff linux-2.6.32.42/Documentation/dontdiff
23268 --- linux-2.6.32.42/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23269 +++ linux-2.6.32.42/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23270 @@ -1,13 +1,16 @@
23271 *.a
23272 *.aux
23273 *.bin
23274 +*.cis
23275 *.cpio
23276 *.csp
23277 +*.dbg
23278 *.dsp
23279 *.dvi
23280 *.elf
23281 *.eps
23282 *.fw
23283 +*.gcno
23284 *.gen.S
23285 *.gif
23286 *.grep
23287 @@ -38,8 +41,10 @@
23288 *.tab.h
23289 *.tex
23290 *.ver
23291 +*.vim
23292 *.xml
23293 *_MODULES
23294 +*_reg_safe.h
23295 *_vga16.c
23296 *~
23297 *.9
23298 @@ -49,11 +54,16 @@
23299 53c700_d.h
23300 CVS
23301 ChangeSet
23302 +GPATH
23303 +GRTAGS
23304 +GSYMS
23305 +GTAGS
23306 Image
23307 Kerntypes
23308 Module.markers
23309 Module.symvers
23310 PENDING
23311 +PERF*
23312 SCCS
23313 System.map*
23314 TAGS
23315 @@ -76,7 +86,11 @@ btfixupprep
23316 build
23317 bvmlinux
23318 bzImage*
23319 +capability_names.h
23320 +capflags.c
23321 classlist.h*
23322 +clut_vga16.c
23323 +common-cmds.h
23324 comp*.log
23325 compile.h*
23326 conf
23327 @@ -103,13 +117,14 @@ gen_crc32table
23328 gen_init_cpio
23329 genksyms
23330 *_gray256.c
23331 +hash
23332 ihex2fw
23333 ikconfig.h*
23334 initramfs_data.cpio
23335 +initramfs_data.cpio.bz2
23336 initramfs_data.cpio.gz
23337 initramfs_list
23338 kallsyms
23339 -kconfig
23340 keywords.c
23341 ksym.c*
23342 ksym.h*
23343 @@ -133,7 +148,9 @@ mkboot
23344 mkbugboot
23345 mkcpustr
23346 mkdep
23347 +mkpiggy
23348 mkprep
23349 +mkregtable
23350 mktables
23351 mktree
23352 modpost
23353 @@ -149,6 +166,7 @@ patches*
23354 pca200e.bin
23355 pca200e_ecd.bin2
23356 piggy.gz
23357 +piggy.S
23358 piggyback
23359 pnmtologo
23360 ppc_defs.h*
23361 @@ -157,12 +175,15 @@ qconf
23362 raid6altivec*.c
23363 raid6int*.c
23364 raid6tables.c
23365 +regdb.c
23366 relocs
23367 +rlim_names.h
23368 series
23369 setup
23370 setup.bin
23371 setup.elf
23372 sImage
23373 +slabinfo
23374 sm_tbl*
23375 split-include
23376 syscalltab.h
23377 @@ -186,14 +207,20 @@ version.h*
23378 vmlinux
23379 vmlinux-*
23380 vmlinux.aout
23381 +vmlinux.bin.all
23382 +vmlinux.bin.bz2
23383 vmlinux.lds
23384 +vmlinux.relocs
23385 +voffset.h
23386 vsyscall.lds
23387 vsyscall_32.lds
23388 wanxlfw.inc
23389 uImage
23390 unifdef
23391 +utsrelease.h
23392 wakeup.bin
23393 wakeup.elf
23394 wakeup.lds
23395 zImage*
23396 zconf.hash.c
23397 +zoffset.h
23398 diff -urNp linux-2.6.32.42/Documentation/kernel-parameters.txt linux-2.6.32.42/Documentation/kernel-parameters.txt
23399 --- linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23400 +++ linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23401 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23402 the specified number of seconds. This is to be used if
23403 your oopses keep scrolling off the screen.
23404
23405 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23406 + virtualization environments that don't cope well with the
23407 + expand down segment used by UDEREF on X86-32 or the frequent
23408 + page table updates on X86-64.
23409 +
23410 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23411 +
23412 pcbit= [HW,ISDN]
23413
23414 pcd. [PARIDE]
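Aside (not part of the patch itself): the kernel-parameters.txt hunk above documents two PaX boot parameters. Purely as an illustration of how they would sit on a boot line — the bootloader entry, kernel image name and root device below are hypothetical — a GRUB-legacy style line enabling soft mode and disabling UDEREF might read:

        kernel /boot/vmlinuz-2.6.32.42-grsec root=/dev/sda1 ro pax_softmode=1 pax_nouderef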
23415 diff -urNp linux-2.6.32.42/drivers/acpi/acpi_pad.c linux-2.6.32.42/drivers/acpi/acpi_pad.c
23416 --- linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23417 +++ linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23418 @@ -30,7 +30,7 @@
23419 #include <acpi/acpi_bus.h>
23420 #include <acpi/acpi_drivers.h>
23421
23422 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23423 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23424 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23425 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23426 static DEFINE_MUTEX(isolated_cpus_lock);
23427 diff -urNp linux-2.6.32.42/drivers/acpi/battery.c linux-2.6.32.42/drivers/acpi/battery.c
23428 --- linux-2.6.32.42/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23429 +++ linux-2.6.32.42/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23430 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23431 }
23432
23433 static struct battery_file {
23434 - struct file_operations ops;
23435 + const struct file_operations ops;
23436 mode_t mode;
23437 const char *name;
23438 } acpi_battery_file[] = {
23439 diff -urNp linux-2.6.32.42/drivers/acpi/dock.c linux-2.6.32.42/drivers/acpi/dock.c
23440 --- linux-2.6.32.42/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23441 +++ linux-2.6.32.42/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23442 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23443 struct list_head list;
23444 struct list_head hotplug_list;
23445 acpi_handle handle;
23446 - struct acpi_dock_ops *ops;
23447 + const struct acpi_dock_ops *ops;
23448 void *context;
23449 };
23450
23451 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23452 * the dock driver after _DCK is executed.
23453 */
23454 int
23455 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23456 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23457 void *context)
23458 {
23459 struct dock_dependent_device *dd;
23460 diff -urNp linux-2.6.32.42/drivers/acpi/osl.c linux-2.6.32.42/drivers/acpi/osl.c
23461 --- linux-2.6.32.42/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23462 +++ linux-2.6.32.42/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23463 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23464 void __iomem *virt_addr;
23465
23466 virt_addr = ioremap(phys_addr, width);
23467 + if (!virt_addr)
23468 + return AE_NO_MEMORY;
23469 if (!value)
23470 value = &dummy;
23471
23472 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23473 void __iomem *virt_addr;
23474
23475 virt_addr = ioremap(phys_addr, width);
23476 + if (!virt_addr)
23477 + return AE_NO_MEMORY;
23478
23479 switch (width) {
23480 case 8:
23481 diff -urNp linux-2.6.32.42/drivers/acpi/power_meter.c linux-2.6.32.42/drivers/acpi/power_meter.c
23482 --- linux-2.6.32.42/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23483 +++ linux-2.6.32.42/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23484 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23485 return res;
23486
23487 temp /= 1000;
23488 - if (temp < 0)
23489 - return -EINVAL;
23490
23491 mutex_lock(&resource->lock);
23492 resource->trip[attr->index - 7] = temp;
23493 diff -urNp linux-2.6.32.42/drivers/acpi/proc.c linux-2.6.32.42/drivers/acpi/proc.c
23494 --- linux-2.6.32.42/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23495 +++ linux-2.6.32.42/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23496 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23497 size_t count, loff_t * ppos)
23498 {
23499 struct list_head *node, *next;
23500 - char strbuf[5];
23501 - char str[5] = "";
23502 - unsigned int len = count;
23503 + char strbuf[5] = {0};
23504 struct acpi_device *found_dev = NULL;
23505
23506 - if (len > 4)
23507 - len = 4;
23508 - if (len < 0)
23509 - return -EFAULT;
23510 + if (count > 4)
23511 + count = 4;
23512
23513 - if (copy_from_user(strbuf, buffer, len))
23514 + if (copy_from_user(strbuf, buffer, count))
23515 return -EFAULT;
23516 - strbuf[len] = '\0';
23517 - sscanf(strbuf, "%s", str);
23518 + strbuf[count] = '\0';
23519
23520 mutex_lock(&acpi_device_lock);
23521 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23522 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23523 if (!dev->wakeup.flags.valid)
23524 continue;
23525
23526 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23527 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23528 dev->wakeup.state.enabled =
23529 dev->wakeup.state.enabled ? 0 : 1;
23530 found_dev = dev;
23531 diff -urNp linux-2.6.32.42/drivers/acpi/processor_core.c linux-2.6.32.42/drivers/acpi/processor_core.c
23532 --- linux-2.6.32.42/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23533 +++ linux-2.6.32.42/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23534 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23535 return 0;
23536 }
23537
23538 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23539 + BUG_ON(pr->id >= nr_cpu_ids);
23540
23541 /*
23542 * Buggy BIOS check
23543 diff -urNp linux-2.6.32.42/drivers/acpi/sbshc.c linux-2.6.32.42/drivers/acpi/sbshc.c
23544 --- linux-2.6.32.42/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23545 +++ linux-2.6.32.42/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23546 @@ -17,7 +17,7 @@
23547
23548 #define PREFIX "ACPI: "
23549
23550 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23551 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23552 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23553
23554 struct acpi_smb_hc {
23555 diff -urNp linux-2.6.32.42/drivers/acpi/sleep.c linux-2.6.32.42/drivers/acpi/sleep.c
23556 --- linux-2.6.32.42/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23557 +++ linux-2.6.32.42/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23558 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23559 }
23560 }
23561
23562 -static struct platform_suspend_ops acpi_suspend_ops = {
23563 +static const struct platform_suspend_ops acpi_suspend_ops = {
23564 .valid = acpi_suspend_state_valid,
23565 .begin = acpi_suspend_begin,
23566 .prepare_late = acpi_pm_prepare,
23567 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23568 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23569 * been requested.
23570 */
23571 -static struct platform_suspend_ops acpi_suspend_ops_old = {
23572 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
23573 .valid = acpi_suspend_state_valid,
23574 .begin = acpi_suspend_begin_old,
23575 .prepare_late = acpi_pm_disable_gpes,
23576 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23577 acpi_enable_all_runtime_gpes();
23578 }
23579
23580 -static struct platform_hibernation_ops acpi_hibernation_ops = {
23581 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
23582 .begin = acpi_hibernation_begin,
23583 .end = acpi_pm_end,
23584 .pre_snapshot = acpi_hibernation_pre_snapshot,
23585 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23586 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23587 * been requested.
23588 */
23589 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23590 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23591 .begin = acpi_hibernation_begin_old,
23592 .end = acpi_pm_end,
23593 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23594 diff -urNp linux-2.6.32.42/drivers/acpi/video.c linux-2.6.32.42/drivers/acpi/video.c
23595 --- linux-2.6.32.42/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23596 +++ linux-2.6.32.42/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23597 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23598 vd->brightness->levels[request_level]);
23599 }
23600
23601 -static struct backlight_ops acpi_backlight_ops = {
23602 +static const struct backlight_ops acpi_backlight_ops = {
23603 .get_brightness = acpi_video_get_brightness,
23604 .update_status = acpi_video_set_brightness,
23605 };
23606 diff -urNp linux-2.6.32.42/drivers/ata/ahci.c linux-2.6.32.42/drivers/ata/ahci.c
23607 --- linux-2.6.32.42/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23608 +++ linux-2.6.32.42/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23609 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23610 .sdev_attrs = ahci_sdev_attrs,
23611 };
23612
23613 -static struct ata_port_operations ahci_ops = {
23614 +static const struct ata_port_operations ahci_ops = {
23615 .inherits = &sata_pmp_port_ops,
23616
23617 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23618 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23619 .port_stop = ahci_port_stop,
23620 };
23621
23622 -static struct ata_port_operations ahci_vt8251_ops = {
23623 +static const struct ata_port_operations ahci_vt8251_ops = {
23624 .inherits = &ahci_ops,
23625 .hardreset = ahci_vt8251_hardreset,
23626 };
23627
23628 -static struct ata_port_operations ahci_p5wdh_ops = {
23629 +static const struct ata_port_operations ahci_p5wdh_ops = {
23630 .inherits = &ahci_ops,
23631 .hardreset = ahci_p5wdh_hardreset,
23632 };
23633
23634 -static struct ata_port_operations ahci_sb600_ops = {
23635 +static const struct ata_port_operations ahci_sb600_ops = {
23636 .inherits = &ahci_ops,
23637 .softreset = ahci_sb600_softreset,
23638 .pmp_softreset = ahci_sb600_softreset,
23639 diff -urNp linux-2.6.32.42/drivers/ata/ata_generic.c linux-2.6.32.42/drivers/ata/ata_generic.c
23640 --- linux-2.6.32.42/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
23641 +++ linux-2.6.32.42/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
23642 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
23643 ATA_BMDMA_SHT(DRV_NAME),
23644 };
23645
23646 -static struct ata_port_operations generic_port_ops = {
23647 +static const struct ata_port_operations generic_port_ops = {
23648 .inherits = &ata_bmdma_port_ops,
23649 .cable_detect = ata_cable_unknown,
23650 .set_mode = generic_set_mode,
23651 diff -urNp linux-2.6.32.42/drivers/ata/ata_piix.c linux-2.6.32.42/drivers/ata/ata_piix.c
23652 --- linux-2.6.32.42/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
23653 +++ linux-2.6.32.42/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
23654 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
23655 ATA_BMDMA_SHT(DRV_NAME),
23656 };
23657
23658 -static struct ata_port_operations piix_pata_ops = {
23659 +static const struct ata_port_operations piix_pata_ops = {
23660 .inherits = &ata_bmdma32_port_ops,
23661 .cable_detect = ata_cable_40wire,
23662 .set_piomode = piix_set_piomode,
23663 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
23664 .prereset = piix_pata_prereset,
23665 };
23666
23667 -static struct ata_port_operations piix_vmw_ops = {
23668 +static const struct ata_port_operations piix_vmw_ops = {
23669 .inherits = &piix_pata_ops,
23670 .bmdma_status = piix_vmw_bmdma_status,
23671 };
23672
23673 -static struct ata_port_operations ich_pata_ops = {
23674 +static const struct ata_port_operations ich_pata_ops = {
23675 .inherits = &piix_pata_ops,
23676 .cable_detect = ich_pata_cable_detect,
23677 .set_dmamode = ich_set_dmamode,
23678 };
23679
23680 -static struct ata_port_operations piix_sata_ops = {
23681 +static const struct ata_port_operations piix_sata_ops = {
23682 .inherits = &ata_bmdma_port_ops,
23683 };
23684
23685 -static struct ata_port_operations piix_sidpr_sata_ops = {
23686 +static const struct ata_port_operations piix_sidpr_sata_ops = {
23687 .inherits = &piix_sata_ops,
23688 .hardreset = sata_std_hardreset,
23689 .scr_read = piix_sidpr_scr_read,
23690 diff -urNp linux-2.6.32.42/drivers/ata/libata-acpi.c linux-2.6.32.42/drivers/ata/libata-acpi.c
23691 --- linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
23692 +++ linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
23693 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
23694 ata_acpi_uevent(dev->link->ap, dev, event);
23695 }
23696
23697 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23698 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23699 .handler = ata_acpi_dev_notify_dock,
23700 .uevent = ata_acpi_dev_uevent,
23701 };
23702
23703 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23704 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23705 .handler = ata_acpi_ap_notify_dock,
23706 .uevent = ata_acpi_ap_uevent,
23707 };
23708 diff -urNp linux-2.6.32.42/drivers/ata/libata-core.c linux-2.6.32.42/drivers/ata/libata-core.c
23709 --- linux-2.6.32.42/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
23710 +++ linux-2.6.32.42/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
23711 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
23712 struct ata_port *ap;
23713 unsigned int tag;
23714
23715 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23716 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23717 ap = qc->ap;
23718
23719 qc->flags = 0;
23720 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
23721 struct ata_port *ap;
23722 struct ata_link *link;
23723
23724 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23725 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23726 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23727 ap = qc->ap;
23728 link = qc->dev->link;
23729 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
23730 * LOCKING:
23731 * None.
23732 */
23733 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
23734 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
23735 {
23736 static DEFINE_SPINLOCK(lock);
23737 const struct ata_port_operations *cur;
23738 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
23739 return;
23740
23741 spin_lock(&lock);
23742 + pax_open_kernel();
23743
23744 for (cur = ops->inherits; cur; cur = cur->inherits) {
23745 void **inherit = (void **)cur;
23746 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
23747 if (IS_ERR(*pp))
23748 *pp = NULL;
23749
23750 - ops->inherits = NULL;
23751 + ((struct ata_port_operations *)ops)->inherits = NULL;
23752
23753 + pax_close_kernel();
23754 spin_unlock(&lock);
23755 }
23756
23757 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
23758 */
23759 /* KILLME - the only user left is ipr */
23760 void ata_host_init(struct ata_host *host, struct device *dev,
23761 - unsigned long flags, struct ata_port_operations *ops)
23762 + unsigned long flags, const struct ata_port_operations *ops)
23763 {
23764 spin_lock_init(&host->lock);
23765 host->dev = dev;
23766 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
23767 /* truly dummy */
23768 }
23769
23770 -struct ata_port_operations ata_dummy_port_ops = {
23771 +const struct ata_port_operations ata_dummy_port_ops = {
23772 .qc_prep = ata_noop_qc_prep,
23773 .qc_issue = ata_dummy_qc_issue,
23774 .error_handler = ata_dummy_error_handler,
23775 diff -urNp linux-2.6.32.42/drivers/ata/libata-eh.c linux-2.6.32.42/drivers/ata/libata-eh.c
23776 --- linux-2.6.32.42/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
23777 +++ linux-2.6.32.42/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
23778 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
23779 {
23780 struct ata_link *link;
23781
23782 + pax_track_stack();
23783 +
23784 ata_for_each_link(link, ap, HOST_FIRST)
23785 ata_eh_link_report(link);
23786 }
23787 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
23788 */
23789 void ata_std_error_handler(struct ata_port *ap)
23790 {
23791 - struct ata_port_operations *ops = ap->ops;
23792 + const struct ata_port_operations *ops = ap->ops;
23793 ata_reset_fn_t hardreset = ops->hardreset;
23794
23795 /* ignore built-in hardreset if SCR access is not available */
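Two small things happen in libata-eh.c: ata_eh_report() gains a pax_track_stack() call, a grsecurity helper placed in functions with large stack frames so that an overflow is caught rather than silently clobbering the neighbouring page, and ata_std_error_handler() consumes the port's ops through a pointer to const. The sketch below mirrors only the second change; reset_fn_t, port_ops and the reset handlers are invented names, and the scr_ok flag stands in for libata's SCR-access test.

#include <stdio.h>

typedef void (*reset_fn_t)(void);

struct port_ops {
	reset_fn_t hardreset;   /* may be unusable without SCR access */
	reset_fn_t softreset;
};

static void do_hardreset(void) { puts("hard reset"); }
static void do_softreset(void) { puts("soft reset"); }

static const struct port_ops example_ops = {
	.hardreset = do_hardreset,
	.softreset = do_softreset,
};

/* Shaped like ata_std_error_handler() after the patch: the table arrives
 * as a pointer to const, one method is copied into a local, the local is
 * overridden if need be, and the table itself is never written. */
static void std_error_handler(const struct port_ops *ops, int scr_ok)
{
	reset_fn_t hardreset = ops->hardreset;

	if (!scr_ok)
		hardreset = NULL;      /* ignore built-in hardreset */

	(hardreset ? hardreset : ops->softreset)();
}

int main(void)
{
	std_error_handler(&example_ops, 1);
	std_error_handler(&example_ops, 0);
	return 0;
}

Copying ops->hardreset into a local and overriding the local keeps the table itself untouched, which is what allows it to be const in the first place.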
23796 diff -urNp linux-2.6.32.42/drivers/ata/libata-pmp.c linux-2.6.32.42/drivers/ata/libata-pmp.c
23797 --- linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
23798 +++ linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
23799 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
23800 */
23801 static int sata_pmp_eh_recover(struct ata_port *ap)
23802 {
23803 - struct ata_port_operations *ops = ap->ops;
23804 + const struct ata_port_operations *ops = ap->ops;
23805 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
23806 struct ata_link *pmp_link = &ap->link;
23807 struct ata_device *pmp_dev = pmp_link->device;
23808 diff -urNp linux-2.6.32.42/drivers/ata/pata_acpi.c linux-2.6.32.42/drivers/ata/pata_acpi.c
23809 --- linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
23810 +++ linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
23811 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
23812 ATA_BMDMA_SHT(DRV_NAME),
23813 };
23814
23815 -static struct ata_port_operations pacpi_ops = {
23816 +static const struct ata_port_operations pacpi_ops = {
23817 .inherits = &ata_bmdma_port_ops,
23818 .qc_issue = pacpi_qc_issue,
23819 .cable_detect = pacpi_cable_detect,
23820 diff -urNp linux-2.6.32.42/drivers/ata/pata_ali.c linux-2.6.32.42/drivers/ata/pata_ali.c
23821 --- linux-2.6.32.42/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
23822 +++ linux-2.6.32.42/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
23823 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
23824 * Port operations for PIO only ALi
23825 */
23826
23827 -static struct ata_port_operations ali_early_port_ops = {
23828 +static const struct ata_port_operations ali_early_port_ops = {
23829 .inherits = &ata_sff_port_ops,
23830 .cable_detect = ata_cable_40wire,
23831 .set_piomode = ali_set_piomode,
23832 @@ -382,7 +382,7 @@ static const struct ata_port_operations
23833 * Port operations for DMA capable ALi without cable
23834 * detect
23835 */
23836 -static struct ata_port_operations ali_20_port_ops = {
23837 +static const struct ata_port_operations ali_20_port_ops = {
23838 .inherits = &ali_dma_base_ops,
23839 .cable_detect = ata_cable_40wire,
23840 .mode_filter = ali_20_filter,
23841 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
23842 /*
23843 * Port operations for DMA capable ALi with cable detect
23844 */
23845 -static struct ata_port_operations ali_c2_port_ops = {
23846 +static const struct ata_port_operations ali_c2_port_ops = {
23847 .inherits = &ali_dma_base_ops,
23848 .check_atapi_dma = ali_check_atapi_dma,
23849 .cable_detect = ali_c2_cable_detect,
23850 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
23851 /*
23852 * Port operations for DMA capable ALi with cable detect
23853 */
23854 -static struct ata_port_operations ali_c4_port_ops = {
23855 +static const struct ata_port_operations ali_c4_port_ops = {
23856 .inherits = &ali_dma_base_ops,
23857 .check_atapi_dma = ali_check_atapi_dma,
23858 .cable_detect = ali_c2_cable_detect,
23859 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
23860 /*
23861 * Port operations for DMA capable ALi with cable detect and LBA48
23862 */
23863 -static struct ata_port_operations ali_c5_port_ops = {
23864 +static const struct ata_port_operations ali_c5_port_ops = {
23865 .inherits = &ali_dma_base_ops,
23866 .check_atapi_dma = ali_check_atapi_dma,
23867 .dev_config = ali_warn_atapi_dma,
23868 diff -urNp linux-2.6.32.42/drivers/ata/pata_amd.c linux-2.6.32.42/drivers/ata/pata_amd.c
23869 --- linux-2.6.32.42/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
23870 +++ linux-2.6.32.42/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
23871 @@ -397,28 +397,28 @@ static const struct ata_port_operations
23872 .prereset = amd_pre_reset,
23873 };
23874
23875 -static struct ata_port_operations amd33_port_ops = {
23876 +static const struct ata_port_operations amd33_port_ops = {
23877 .inherits = &amd_base_port_ops,
23878 .cable_detect = ata_cable_40wire,
23879 .set_piomode = amd33_set_piomode,
23880 .set_dmamode = amd33_set_dmamode,
23881 };
23882
23883 -static struct ata_port_operations amd66_port_ops = {
23884 +static const struct ata_port_operations amd66_port_ops = {
23885 .inherits = &amd_base_port_ops,
23886 .cable_detect = ata_cable_unknown,
23887 .set_piomode = amd66_set_piomode,
23888 .set_dmamode = amd66_set_dmamode,
23889 };
23890
23891 -static struct ata_port_operations amd100_port_ops = {
23892 +static const struct ata_port_operations amd100_port_ops = {
23893 .inherits = &amd_base_port_ops,
23894 .cable_detect = ata_cable_unknown,
23895 .set_piomode = amd100_set_piomode,
23896 .set_dmamode = amd100_set_dmamode,
23897 };
23898
23899 -static struct ata_port_operations amd133_port_ops = {
23900 +static const struct ata_port_operations amd133_port_ops = {
23901 .inherits = &amd_base_port_ops,
23902 .cable_detect = amd_cable_detect,
23903 .set_piomode = amd133_set_piomode,
23904 @@ -433,13 +433,13 @@ static const struct ata_port_operations
23905 .host_stop = nv_host_stop,
23906 };
23907
23908 -static struct ata_port_operations nv100_port_ops = {
23909 +static const struct ata_port_operations nv100_port_ops = {
23910 .inherits = &nv_base_port_ops,
23911 .set_piomode = nv100_set_piomode,
23912 .set_dmamode = nv100_set_dmamode,
23913 };
23914
23915 -static struct ata_port_operations nv133_port_ops = {
23916 +static const struct ata_port_operations nv133_port_ops = {
23917 .inherits = &nv_base_port_ops,
23918 .set_piomode = nv133_set_piomode,
23919 .set_dmamode = nv133_set_dmamode,
23920 diff -urNp linux-2.6.32.42/drivers/ata/pata_artop.c linux-2.6.32.42/drivers/ata/pata_artop.c
23921 --- linux-2.6.32.42/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
23922 +++ linux-2.6.32.42/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
23923 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
23924 ATA_BMDMA_SHT(DRV_NAME),
23925 };
23926
23927 -static struct ata_port_operations artop6210_ops = {
23928 +static const struct ata_port_operations artop6210_ops = {
23929 .inherits = &ata_bmdma_port_ops,
23930 .cable_detect = ata_cable_40wire,
23931 .set_piomode = artop6210_set_piomode,
23932 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
23933 .qc_defer = artop6210_qc_defer,
23934 };
23935
23936 -static struct ata_port_operations artop6260_ops = {
23937 +static const struct ata_port_operations artop6260_ops = {
23938 .inherits = &ata_bmdma_port_ops,
23939 .cable_detect = artop6260_cable_detect,
23940 .set_piomode = artop6260_set_piomode,
23941 diff -urNp linux-2.6.32.42/drivers/ata/pata_at32.c linux-2.6.32.42/drivers/ata/pata_at32.c
23942 --- linux-2.6.32.42/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
23943 +++ linux-2.6.32.42/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
23944 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
23945 ATA_PIO_SHT(DRV_NAME),
23946 };
23947
23948 -static struct ata_port_operations at32_port_ops = {
23949 +static const struct ata_port_operations at32_port_ops = {
23950 .inherits = &ata_sff_port_ops,
23951 .cable_detect = ata_cable_40wire,
23952 .set_piomode = pata_at32_set_piomode,
23953 diff -urNp linux-2.6.32.42/drivers/ata/pata_at91.c linux-2.6.32.42/drivers/ata/pata_at91.c
23954 --- linux-2.6.32.42/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
23955 +++ linux-2.6.32.42/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
23956 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
23957 ATA_PIO_SHT(DRV_NAME),
23958 };
23959
23960 -static struct ata_port_operations pata_at91_port_ops = {
23961 +static const struct ata_port_operations pata_at91_port_ops = {
23962 .inherits = &ata_sff_port_ops,
23963
23964 .sff_data_xfer = pata_at91_data_xfer_noirq,
23965 diff -urNp linux-2.6.32.42/drivers/ata/pata_atiixp.c linux-2.6.32.42/drivers/ata/pata_atiixp.c
23966 --- linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
23967 +++ linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
23968 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
23969 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23970 };
23971
23972 -static struct ata_port_operations atiixp_port_ops = {
23973 +static const struct ata_port_operations atiixp_port_ops = {
23974 .inherits = &ata_bmdma_port_ops,
23975
23976 .qc_prep = ata_sff_dumb_qc_prep,
23977 diff -urNp linux-2.6.32.42/drivers/ata/pata_atp867x.c linux-2.6.32.42/drivers/ata/pata_atp867x.c
23978 --- linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
23979 +++ linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
23980 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
23981 ATA_BMDMA_SHT(DRV_NAME),
23982 };
23983
23984 -static struct ata_port_operations atp867x_ops = {
23985 +static const struct ata_port_operations atp867x_ops = {
23986 .inherits = &ata_bmdma_port_ops,
23987 .cable_detect = atp867x_cable_detect,
23988 .set_piomode = atp867x_set_piomode,
23989 diff -urNp linux-2.6.32.42/drivers/ata/pata_bf54x.c linux-2.6.32.42/drivers/ata/pata_bf54x.c
23990 --- linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
23991 +++ linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
23992 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
23993 .dma_boundary = ATA_DMA_BOUNDARY,
23994 };
23995
23996 -static struct ata_port_operations bfin_pata_ops = {
23997 +static const struct ata_port_operations bfin_pata_ops = {
23998 .inherits = &ata_sff_port_ops,
23999
24000 .set_piomode = bfin_set_piomode,
24001 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd640.c linux-2.6.32.42/drivers/ata/pata_cmd640.c
24002 --- linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24003 +++ linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24004 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24005 ATA_BMDMA_SHT(DRV_NAME),
24006 };
24007
24008 -static struct ata_port_operations cmd640_port_ops = {
24009 +static const struct ata_port_operations cmd640_port_ops = {
24010 .inherits = &ata_bmdma_port_ops,
24011 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24012 .sff_data_xfer = ata_sff_data_xfer_noirq,
24013 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd64x.c linux-2.6.32.42/drivers/ata/pata_cmd64x.c
24014 --- linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24015 +++ linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24016 @@ -271,18 +271,18 @@ static const struct ata_port_operations
24017 .set_dmamode = cmd64x_set_dmamode,
24018 };
24019
24020 -static struct ata_port_operations cmd64x_port_ops = {
24021 +static const struct ata_port_operations cmd64x_port_ops = {
24022 .inherits = &cmd64x_base_ops,
24023 .cable_detect = ata_cable_40wire,
24024 };
24025
24026 -static struct ata_port_operations cmd646r1_port_ops = {
24027 +static const struct ata_port_operations cmd646r1_port_ops = {
24028 .inherits = &cmd64x_base_ops,
24029 .bmdma_stop = cmd646r1_bmdma_stop,
24030 .cable_detect = ata_cable_40wire,
24031 };
24032
24033 -static struct ata_port_operations cmd648_port_ops = {
24034 +static const struct ata_port_operations cmd648_port_ops = {
24035 .inherits = &cmd64x_base_ops,
24036 .bmdma_stop = cmd648_bmdma_stop,
24037 .cable_detect = cmd648_cable_detect,
24038 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5520.c linux-2.6.32.42/drivers/ata/pata_cs5520.c
24039 --- linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24040 +++ linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24041 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24042 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24043 };
24044
24045 -static struct ata_port_operations cs5520_port_ops = {
24046 +static const struct ata_port_operations cs5520_port_ops = {
24047 .inherits = &ata_bmdma_port_ops,
24048 .qc_prep = ata_sff_dumb_qc_prep,
24049 .cable_detect = ata_cable_40wire,
24050 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5530.c linux-2.6.32.42/drivers/ata/pata_cs5530.c
24051 --- linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24052 +++ linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24053 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24054 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24055 };
24056
24057 -static struct ata_port_operations cs5530_port_ops = {
24058 +static const struct ata_port_operations cs5530_port_ops = {
24059 .inherits = &ata_bmdma_port_ops,
24060
24061 .qc_prep = ata_sff_dumb_qc_prep,
24062 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5535.c linux-2.6.32.42/drivers/ata/pata_cs5535.c
24063 --- linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24064 +++ linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24065 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24066 ATA_BMDMA_SHT(DRV_NAME),
24067 };
24068
24069 -static struct ata_port_operations cs5535_port_ops = {
24070 +static const struct ata_port_operations cs5535_port_ops = {
24071 .inherits = &ata_bmdma_port_ops,
24072 .cable_detect = cs5535_cable_detect,
24073 .set_piomode = cs5535_set_piomode,
24074 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5536.c linux-2.6.32.42/drivers/ata/pata_cs5536.c
24075 --- linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24076 +++ linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24077 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24078 ATA_BMDMA_SHT(DRV_NAME),
24079 };
24080
24081 -static struct ata_port_operations cs5536_port_ops = {
24082 +static const struct ata_port_operations cs5536_port_ops = {
24083 .inherits = &ata_bmdma_port_ops,
24084 .cable_detect = cs5536_cable_detect,
24085 .set_piomode = cs5536_set_piomode,
24086 diff -urNp linux-2.6.32.42/drivers/ata/pata_cypress.c linux-2.6.32.42/drivers/ata/pata_cypress.c
24087 --- linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24088 +++ linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24089 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24090 ATA_BMDMA_SHT(DRV_NAME),
24091 };
24092
24093 -static struct ata_port_operations cy82c693_port_ops = {
24094 +static const struct ata_port_operations cy82c693_port_ops = {
24095 .inherits = &ata_bmdma_port_ops,
24096 .cable_detect = ata_cable_40wire,
24097 .set_piomode = cy82c693_set_piomode,
24098 diff -urNp linux-2.6.32.42/drivers/ata/pata_efar.c linux-2.6.32.42/drivers/ata/pata_efar.c
24099 --- linux-2.6.32.42/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24100 +++ linux-2.6.32.42/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24101 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24102 ATA_BMDMA_SHT(DRV_NAME),
24103 };
24104
24105 -static struct ata_port_operations efar_ops = {
24106 +static const struct ata_port_operations efar_ops = {
24107 .inherits = &ata_bmdma_port_ops,
24108 .cable_detect = efar_cable_detect,
24109 .set_piomode = efar_set_piomode,
24110 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt366.c linux-2.6.32.42/drivers/ata/pata_hpt366.c
24111 --- linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24112 +++ linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24113 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24114 * Configuration for HPT366/68
24115 */
24116
24117 -static struct ata_port_operations hpt366_port_ops = {
24118 +static const struct ata_port_operations hpt366_port_ops = {
24119 .inherits = &ata_bmdma_port_ops,
24120 .cable_detect = hpt36x_cable_detect,
24121 .mode_filter = hpt366_filter,
24122 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt37x.c linux-2.6.32.42/drivers/ata/pata_hpt37x.c
24123 --- linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24124 +++ linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24125 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24126 * Configuration for HPT370
24127 */
24128
24129 -static struct ata_port_operations hpt370_port_ops = {
24130 +static const struct ata_port_operations hpt370_port_ops = {
24131 .inherits = &ata_bmdma_port_ops,
24132
24133 .bmdma_stop = hpt370_bmdma_stop,
24134 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24135 * Configuration for HPT370A. Close to 370 but less filters
24136 */
24137
24138 -static struct ata_port_operations hpt370a_port_ops = {
24139 +static const struct ata_port_operations hpt370a_port_ops = {
24140 .inherits = &hpt370_port_ops,
24141 .mode_filter = hpt370a_filter,
24142 };
24143 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24144 * and DMA mode setting functionality.
24145 */
24146
24147 -static struct ata_port_operations hpt372_port_ops = {
24148 +static const struct ata_port_operations hpt372_port_ops = {
24149 .inherits = &ata_bmdma_port_ops,
24150
24151 .bmdma_stop = hpt37x_bmdma_stop,
24152 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24153 * but we have a different cable detection procedure for function 1.
24154 */
24155
24156 -static struct ata_port_operations hpt374_fn1_port_ops = {
24157 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24158 .inherits = &hpt372_port_ops,
24159 .prereset = hpt374_fn1_pre_reset,
24160 };
24161 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c
24162 --- linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24163 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24164 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24165 * Configuration for HPT3x2n.
24166 */
24167
24168 -static struct ata_port_operations hpt3x2n_port_ops = {
24169 +static const struct ata_port_operations hpt3x2n_port_ops = {
24170 .inherits = &ata_bmdma_port_ops,
24171
24172 .bmdma_stop = hpt3x2n_bmdma_stop,
24173 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x3.c linux-2.6.32.42/drivers/ata/pata_hpt3x3.c
24174 --- linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24175 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24176 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24177 ATA_BMDMA_SHT(DRV_NAME),
24178 };
24179
24180 -static struct ata_port_operations hpt3x3_port_ops = {
24181 +static const struct ata_port_operations hpt3x3_port_ops = {
24182 .inherits = &ata_bmdma_port_ops,
24183 .cable_detect = ata_cable_40wire,
24184 .set_piomode = hpt3x3_set_piomode,
24185 diff -urNp linux-2.6.32.42/drivers/ata/pata_icside.c linux-2.6.32.42/drivers/ata/pata_icside.c
24186 --- linux-2.6.32.42/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24187 +++ linux-2.6.32.42/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24188 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24189 }
24190 }
24191
24192 -static struct ata_port_operations pata_icside_port_ops = {
24193 +static const struct ata_port_operations pata_icside_port_ops = {
24194 .inherits = &ata_sff_port_ops,
24195 /* no need to build any PRD tables for DMA */
24196 .qc_prep = ata_noop_qc_prep,
24197 diff -urNp linux-2.6.32.42/drivers/ata/pata_isapnp.c linux-2.6.32.42/drivers/ata/pata_isapnp.c
24198 --- linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24199 +++ linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24200 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24201 ATA_PIO_SHT(DRV_NAME),
24202 };
24203
24204 -static struct ata_port_operations isapnp_port_ops = {
24205 +static const struct ata_port_operations isapnp_port_ops = {
24206 .inherits = &ata_sff_port_ops,
24207 .cable_detect = ata_cable_40wire,
24208 };
24209
24210 -static struct ata_port_operations isapnp_noalt_port_ops = {
24211 +static const struct ata_port_operations isapnp_noalt_port_ops = {
24212 .inherits = &ata_sff_port_ops,
24213 .cable_detect = ata_cable_40wire,
24214 /* No altstatus so we don't want to use the lost interrupt poll */
24215 diff -urNp linux-2.6.32.42/drivers/ata/pata_it8213.c linux-2.6.32.42/drivers/ata/pata_it8213.c
24216 --- linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24217 +++ linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24218 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24219 };
24220
24221
24222 -static struct ata_port_operations it8213_ops = {
24223 +static const struct ata_port_operations it8213_ops = {
24224 .inherits = &ata_bmdma_port_ops,
24225 .cable_detect = it8213_cable_detect,
24226 .set_piomode = it8213_set_piomode,
24227 diff -urNp linux-2.6.32.42/drivers/ata/pata_it821x.c linux-2.6.32.42/drivers/ata/pata_it821x.c
24228 --- linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24229 +++ linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24230 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24231 ATA_BMDMA_SHT(DRV_NAME),
24232 };
24233
24234 -static struct ata_port_operations it821x_smart_port_ops = {
24235 +static const struct ata_port_operations it821x_smart_port_ops = {
24236 .inherits = &ata_bmdma_port_ops,
24237
24238 .check_atapi_dma= it821x_check_atapi_dma,
24239 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24240 .port_start = it821x_port_start,
24241 };
24242
24243 -static struct ata_port_operations it821x_passthru_port_ops = {
24244 +static const struct ata_port_operations it821x_passthru_port_ops = {
24245 .inherits = &ata_bmdma_port_ops,
24246
24247 .check_atapi_dma= it821x_check_atapi_dma,
24248 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24249 .port_start = it821x_port_start,
24250 };
24251
24252 -static struct ata_port_operations it821x_rdc_port_ops = {
24253 +static const struct ata_port_operations it821x_rdc_port_ops = {
24254 .inherits = &ata_bmdma_port_ops,
24255
24256 .check_atapi_dma= it821x_check_atapi_dma,
24257 diff -urNp linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c
24258 --- linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24259 +++ linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24260 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24261 ATA_PIO_SHT(DRV_NAME),
24262 };
24263
24264 -static struct ata_port_operations ixp4xx_port_ops = {
24265 +static const struct ata_port_operations ixp4xx_port_ops = {
24266 .inherits = &ata_sff_port_ops,
24267 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24268 .cable_detect = ata_cable_40wire,
24269 diff -urNp linux-2.6.32.42/drivers/ata/pata_jmicron.c linux-2.6.32.42/drivers/ata/pata_jmicron.c
24270 --- linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24271 +++ linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24272 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24273 ATA_BMDMA_SHT(DRV_NAME),
24274 };
24275
24276 -static struct ata_port_operations jmicron_ops = {
24277 +static const struct ata_port_operations jmicron_ops = {
24278 .inherits = &ata_bmdma_port_ops,
24279 .prereset = jmicron_pre_reset,
24280 };
24281 diff -urNp linux-2.6.32.42/drivers/ata/pata_legacy.c linux-2.6.32.42/drivers/ata/pata_legacy.c
24282 --- linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24283 +++ linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24284 @@ -106,7 +106,7 @@ struct legacy_probe {
24285
24286 struct legacy_controller {
24287 const char *name;
24288 - struct ata_port_operations *ops;
24289 + const struct ata_port_operations *ops;
24290 unsigned int pio_mask;
24291 unsigned int flags;
24292 unsigned int pflags;
24293 @@ -223,12 +223,12 @@ static const struct ata_port_operations
24294 * pio_mask as well.
24295 */
24296
24297 -static struct ata_port_operations simple_port_ops = {
24298 +static const struct ata_port_operations simple_port_ops = {
24299 .inherits = &legacy_base_port_ops,
24300 .sff_data_xfer = ata_sff_data_xfer_noirq,
24301 };
24302
24303 -static struct ata_port_operations legacy_port_ops = {
24304 +static const struct ata_port_operations legacy_port_ops = {
24305 .inherits = &legacy_base_port_ops,
24306 .sff_data_xfer = ata_sff_data_xfer_noirq,
24307 .set_mode = legacy_set_mode,
24308 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24309 return buflen;
24310 }
24311
24312 -static struct ata_port_operations pdc20230_port_ops = {
24313 +static const struct ata_port_operations pdc20230_port_ops = {
24314 .inherits = &legacy_base_port_ops,
24315 .set_piomode = pdc20230_set_piomode,
24316 .sff_data_xfer = pdc_data_xfer_vlb,
24317 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24318 ioread8(ap->ioaddr.status_addr);
24319 }
24320
24321 -static struct ata_port_operations ht6560a_port_ops = {
24322 +static const struct ata_port_operations ht6560a_port_ops = {
24323 .inherits = &legacy_base_port_ops,
24324 .set_piomode = ht6560a_set_piomode,
24325 };
24326 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24327 ioread8(ap->ioaddr.status_addr);
24328 }
24329
24330 -static struct ata_port_operations ht6560b_port_ops = {
24331 +static const struct ata_port_operations ht6560b_port_ops = {
24332 .inherits = &legacy_base_port_ops,
24333 .set_piomode = ht6560b_set_piomode,
24334 };
24335 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24336 }
24337
24338
24339 -static struct ata_port_operations opti82c611a_port_ops = {
24340 +static const struct ata_port_operations opti82c611a_port_ops = {
24341 .inherits = &legacy_base_port_ops,
24342 .set_piomode = opti82c611a_set_piomode,
24343 };
24344 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24345 return ata_sff_qc_issue(qc);
24346 }
24347
24348 -static struct ata_port_operations opti82c46x_port_ops = {
24349 +static const struct ata_port_operations opti82c46x_port_ops = {
24350 .inherits = &legacy_base_port_ops,
24351 .set_piomode = opti82c46x_set_piomode,
24352 .qc_issue = opti82c46x_qc_issue,
24353 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24354 return 0;
24355 }
24356
24357 -static struct ata_port_operations qdi6500_port_ops = {
24358 +static const struct ata_port_operations qdi6500_port_ops = {
24359 .inherits = &legacy_base_port_ops,
24360 .set_piomode = qdi6500_set_piomode,
24361 .qc_issue = qdi_qc_issue,
24362 .sff_data_xfer = vlb32_data_xfer,
24363 };
24364
24365 -static struct ata_port_operations qdi6580_port_ops = {
24366 +static const struct ata_port_operations qdi6580_port_ops = {
24367 .inherits = &legacy_base_port_ops,
24368 .set_piomode = qdi6580_set_piomode,
24369 .sff_data_xfer = vlb32_data_xfer,
24370 };
24371
24372 -static struct ata_port_operations qdi6580dp_port_ops = {
24373 +static const struct ata_port_operations qdi6580dp_port_ops = {
24374 .inherits = &legacy_base_port_ops,
24375 .set_piomode = qdi6580dp_set_piomode,
24376 .sff_data_xfer = vlb32_data_xfer,
24377 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24378 return 0;
24379 }
24380
24381 -static struct ata_port_operations winbond_port_ops = {
24382 +static const struct ata_port_operations winbond_port_ops = {
24383 .inherits = &legacy_base_port_ops,
24384 .set_piomode = winbond_set_piomode,
24385 .sff_data_xfer = vlb32_data_xfer,
24386 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24387 int pio_modes = controller->pio_mask;
24388 unsigned long io = probe->port;
24389 u32 mask = (1 << probe->slot);
24390 - struct ata_port_operations *ops = controller->ops;
24391 + const struct ata_port_operations *ops = controller->ops;
24392 struct legacy_data *ld = &legacy_data[probe->slot];
24393 struct ata_host *host = NULL;
24394 struct ata_port *ap;
24395 diff -urNp linux-2.6.32.42/drivers/ata/pata_marvell.c linux-2.6.32.42/drivers/ata/pata_marvell.c
24396 --- linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24397 +++ linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24398 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24399 ATA_BMDMA_SHT(DRV_NAME),
24400 };
24401
24402 -static struct ata_port_operations marvell_ops = {
24403 +static const struct ata_port_operations marvell_ops = {
24404 .inherits = &ata_bmdma_port_ops,
24405 .cable_detect = marvell_cable_detect,
24406 .prereset = marvell_pre_reset,
24407 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpc52xx.c linux-2.6.32.42/drivers/ata/pata_mpc52xx.c
24408 --- linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24409 +++ linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24410 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24411 ATA_PIO_SHT(DRV_NAME),
24412 };
24413
24414 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24415 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24416 .inherits = &ata_bmdma_port_ops,
24417 .sff_dev_select = mpc52xx_ata_dev_select,
24418 .set_piomode = mpc52xx_ata_set_piomode,
24419 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpiix.c linux-2.6.32.42/drivers/ata/pata_mpiix.c
24420 --- linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24421 +++ linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24422 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24423 ATA_PIO_SHT(DRV_NAME),
24424 };
24425
24426 -static struct ata_port_operations mpiix_port_ops = {
24427 +static const struct ata_port_operations mpiix_port_ops = {
24428 .inherits = &ata_sff_port_ops,
24429 .qc_issue = mpiix_qc_issue,
24430 .cable_detect = ata_cable_40wire,
24431 diff -urNp linux-2.6.32.42/drivers/ata/pata_netcell.c linux-2.6.32.42/drivers/ata/pata_netcell.c
24432 --- linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24433 +++ linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24434 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24435 ATA_BMDMA_SHT(DRV_NAME),
24436 };
24437
24438 -static struct ata_port_operations netcell_ops = {
24439 +static const struct ata_port_operations netcell_ops = {
24440 .inherits = &ata_bmdma_port_ops,
24441 .cable_detect = ata_cable_80wire,
24442 .read_id = netcell_read_id,
24443 diff -urNp linux-2.6.32.42/drivers/ata/pata_ninja32.c linux-2.6.32.42/drivers/ata/pata_ninja32.c
24444 --- linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24445 +++ linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24446 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24447 ATA_BMDMA_SHT(DRV_NAME),
24448 };
24449
24450 -static struct ata_port_operations ninja32_port_ops = {
24451 +static const struct ata_port_operations ninja32_port_ops = {
24452 .inherits = &ata_bmdma_port_ops,
24453 .sff_dev_select = ninja32_dev_select,
24454 .cable_detect = ata_cable_40wire,
24455 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87410.c linux-2.6.32.42/drivers/ata/pata_ns87410.c
24456 --- linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24457 +++ linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24458 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24459 ATA_PIO_SHT(DRV_NAME),
24460 };
24461
24462 -static struct ata_port_operations ns87410_port_ops = {
24463 +static const struct ata_port_operations ns87410_port_ops = {
24464 .inherits = &ata_sff_port_ops,
24465 .qc_issue = ns87410_qc_issue,
24466 .cable_detect = ata_cable_40wire,
24467 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87415.c linux-2.6.32.42/drivers/ata/pata_ns87415.c
24468 --- linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24469 +++ linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24470 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24471 }
24472 #endif /* 87560 SuperIO Support */
24473
24474 -static struct ata_port_operations ns87415_pata_ops = {
24475 +static const struct ata_port_operations ns87415_pata_ops = {
24476 .inherits = &ata_bmdma_port_ops,
24477
24478 .check_atapi_dma = ns87415_check_atapi_dma,
24479 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24480 };
24481
24482 #if defined(CONFIG_SUPERIO)
24483 -static struct ata_port_operations ns87560_pata_ops = {
24484 +static const struct ata_port_operations ns87560_pata_ops = {
24485 .inherits = &ns87415_pata_ops,
24486 .sff_tf_read = ns87560_tf_read,
24487 .sff_check_status = ns87560_check_status,
24488 diff -urNp linux-2.6.32.42/drivers/ata/pata_octeon_cf.c linux-2.6.32.42/drivers/ata/pata_octeon_cf.c
24489 --- linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24490 +++ linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24491 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24492 return 0;
24493 }
24494
24495 +/* cannot be const */
24496 static struct ata_port_operations octeon_cf_ops = {
24497 .inherits = &ata_sff_port_ops,
24498 .check_atapi_dma = octeon_cf_check_atapi_dma,
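pata_octeon_cf.c is the one driver in this series whose ops table is deliberately left writable, and the added comment records why: presumably the driver still assigns into the structure at run time, which is the usual reason this marker appears throughout the patch. A small sketch of that situation follows; cf_ops, probe() and the has_dma flag are illustrative names, not taken from the driver.

#include <stdio.h>

struct cf_ops {
	void (*data_xfer)(void);
	void (*dma_setup)(void);   /* only wired up when DMA is present */
};

static void pio_xfer(void)     { puts("PIO transfer"); }
static void dma_setup_hw(void) { puts("DMA setup"); }

/* Deliberately NOT const: which methods exist is only known after the
 * hardware has been probed, so probe() must be able to write here. That
 * run-time write is what the "cannot be const" marker records. */
static struct cf_ops board_ops = {
	.data_xfer = pio_xfer,
};

static void probe(int has_dma)
{
	if (has_dma)
		board_ops.dma_setup = dma_setup_hw;   /* run-time patching */
}

int main(void)
{
	probe(1);
	board_ops.data_xfer();
	if (board_ops.dma_setup)
		board_ops.dma_setup();
	return 0;
}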
24499 diff -urNp linux-2.6.32.42/drivers/ata/pata_oldpiix.c linux-2.6.32.42/drivers/ata/pata_oldpiix.c
24500 --- linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24501 +++ linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24502 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24503 ATA_BMDMA_SHT(DRV_NAME),
24504 };
24505
24506 -static struct ata_port_operations oldpiix_pata_ops = {
24507 +static const struct ata_port_operations oldpiix_pata_ops = {
24508 .inherits = &ata_bmdma_port_ops,
24509 .qc_issue = oldpiix_qc_issue,
24510 .cable_detect = ata_cable_40wire,
24511 diff -urNp linux-2.6.32.42/drivers/ata/pata_opti.c linux-2.6.32.42/drivers/ata/pata_opti.c
24512 --- linux-2.6.32.42/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24513 +++ linux-2.6.32.42/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24514 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24515 ATA_PIO_SHT(DRV_NAME),
24516 };
24517
24518 -static struct ata_port_operations opti_port_ops = {
24519 +static const struct ata_port_operations opti_port_ops = {
24520 .inherits = &ata_sff_port_ops,
24521 .cable_detect = ata_cable_40wire,
24522 .set_piomode = opti_set_piomode,
24523 diff -urNp linux-2.6.32.42/drivers/ata/pata_optidma.c linux-2.6.32.42/drivers/ata/pata_optidma.c
24524 --- linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24525 +++ linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24526 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24527 ATA_BMDMA_SHT(DRV_NAME),
24528 };
24529
24530 -static struct ata_port_operations optidma_port_ops = {
24531 +static const struct ata_port_operations optidma_port_ops = {
24532 .inherits = &ata_bmdma_port_ops,
24533 .cable_detect = ata_cable_40wire,
24534 .set_piomode = optidma_set_pio_mode,
24535 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24536 .prereset = optidma_pre_reset,
24537 };
24538
24539 -static struct ata_port_operations optiplus_port_ops = {
24540 +static const struct ata_port_operations optiplus_port_ops = {
24541 .inherits = &optidma_port_ops,
24542 .set_piomode = optiplus_set_pio_mode,
24543 .set_dmamode = optiplus_set_dma_mode,
24544 diff -urNp linux-2.6.32.42/drivers/ata/pata_palmld.c linux-2.6.32.42/drivers/ata/pata_palmld.c
24545 --- linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24546 +++ linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24547 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24548 ATA_PIO_SHT(DRV_NAME),
24549 };
24550
24551 -static struct ata_port_operations palmld_port_ops = {
24552 +static const struct ata_port_operations palmld_port_ops = {
24553 .inherits = &ata_sff_port_ops,
24554 .sff_data_xfer = ata_sff_data_xfer_noirq,
24555 .cable_detect = ata_cable_40wire,
24556 diff -urNp linux-2.6.32.42/drivers/ata/pata_pcmcia.c linux-2.6.32.42/drivers/ata/pata_pcmcia.c
24557 --- linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24558 +++ linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24559 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24560 ATA_PIO_SHT(DRV_NAME),
24561 };
24562
24563 -static struct ata_port_operations pcmcia_port_ops = {
24564 +static const struct ata_port_operations pcmcia_port_ops = {
24565 .inherits = &ata_sff_port_ops,
24566 .sff_data_xfer = ata_sff_data_xfer_noirq,
24567 .cable_detect = ata_cable_40wire,
24568 .set_mode = pcmcia_set_mode,
24569 };
24570
24571 -static struct ata_port_operations pcmcia_8bit_port_ops = {
24572 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
24573 .inherits = &ata_sff_port_ops,
24574 .sff_data_xfer = ata_data_xfer_8bit,
24575 .cable_detect = ata_cable_40wire,
24576 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24577 unsigned long io_base, ctl_base;
24578 void __iomem *io_addr, *ctl_addr;
24579 int n_ports = 1;
24580 - struct ata_port_operations *ops = &pcmcia_port_ops;
24581 + const struct ata_port_operations *ops = &pcmcia_port_ops;
24582
24583 info = kzalloc(sizeof(*info), GFP_KERNEL);
24584 if (info == NULL)
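pata_pcmcia.c shows the other half of the pattern: when a driver has to choose between several operation tables at probe time, both tables stay const and only a pointer to const is repointed, as pcmcia_init_one() now does with pcmcia_port_ops and pcmcia_8bit_port_ops. A compressed sketch follows; port_ops, pick_ops and the is_8bit flag are illustrative names rather than the driver's.

#include <stdio.h>

struct port_ops {
	void (*data_xfer)(void);
};

static void xfer_16bit(void) { puts("16-bit transfers"); }
static void xfer_8bit(void)  { puts("8-bit transfers"); }

/* Both variants can live in read-only memory... */
static const struct port_ops pcmcia_ops      = { .data_xfer = xfer_16bit };
static const struct port_ops pcmcia_8bit_ops = { .data_xfer = xfer_8bit };

/* ...because probing only repoints a pointer-to-const at one of them and
 * never writes into the tables themselves. */
static const struct port_ops *pick_ops(int is_8bit)
{
	return is_8bit ? &pcmcia_8bit_ops : &pcmcia_ops;
}

int main(void)
{
	const struct port_ops *ops = pick_ops(0);
	ops->data_xfer();

	ops = pick_ops(1);
	ops->data_xfer();
	return 0;
}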
24585 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc2027x.c linux-2.6.32.42/drivers/ata/pata_pdc2027x.c
24586 --- linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24587 +++ linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24588 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24589 ATA_BMDMA_SHT(DRV_NAME),
24590 };
24591
24592 -static struct ata_port_operations pdc2027x_pata100_ops = {
24593 +static const struct ata_port_operations pdc2027x_pata100_ops = {
24594 .inherits = &ata_bmdma_port_ops,
24595 .check_atapi_dma = pdc2027x_check_atapi_dma,
24596 .cable_detect = pdc2027x_cable_detect,
24597 .prereset = pdc2027x_prereset,
24598 };
24599
24600 -static struct ata_port_operations pdc2027x_pata133_ops = {
24601 +static const struct ata_port_operations pdc2027x_pata133_ops = {
24602 .inherits = &pdc2027x_pata100_ops,
24603 .mode_filter = pdc2027x_mode_filter,
24604 .set_piomode = pdc2027x_set_piomode,
24605 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c
24606 --- linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24607 +++ linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24608 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24609 ATA_BMDMA_SHT(DRV_NAME),
24610 };
24611
24612 -static struct ata_port_operations pdc2024x_port_ops = {
24613 +static const struct ata_port_operations pdc2024x_port_ops = {
24614 .inherits = &ata_bmdma_port_ops,
24615
24616 .cable_detect = ata_cable_40wire,
24617 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24618 .sff_exec_command = pdc202xx_exec_command,
24619 };
24620
24621 -static struct ata_port_operations pdc2026x_port_ops = {
24622 +static const struct ata_port_operations pdc2026x_port_ops = {
24623 .inherits = &pdc2024x_port_ops,
24624
24625 .check_atapi_dma = pdc2026x_check_atapi_dma,
24626 diff -urNp linux-2.6.32.42/drivers/ata/pata_platform.c linux-2.6.32.42/drivers/ata/pata_platform.c
24627 --- linux-2.6.32.42/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24628 +++ linux-2.6.32.42/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24629 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24630 ATA_PIO_SHT(DRV_NAME),
24631 };
24632
24633 -static struct ata_port_operations pata_platform_port_ops = {
24634 +static const struct ata_port_operations pata_platform_port_ops = {
24635 .inherits = &ata_sff_port_ops,
24636 .sff_data_xfer = ata_sff_data_xfer_noirq,
24637 .cable_detect = ata_cable_unknown,
24638 diff -urNp linux-2.6.32.42/drivers/ata/pata_qdi.c linux-2.6.32.42/drivers/ata/pata_qdi.c
24639 --- linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
24640 +++ linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
24641 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
24642 ATA_PIO_SHT(DRV_NAME),
24643 };
24644
24645 -static struct ata_port_operations qdi6500_port_ops = {
24646 +static const struct ata_port_operations qdi6500_port_ops = {
24647 .inherits = &ata_sff_port_ops,
24648 .qc_issue = qdi_qc_issue,
24649 .sff_data_xfer = qdi_data_xfer,
24650 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
24651 .set_piomode = qdi6500_set_piomode,
24652 };
24653
24654 -static struct ata_port_operations qdi6580_port_ops = {
24655 +static const struct ata_port_operations qdi6580_port_ops = {
24656 .inherits = &qdi6500_port_ops,
24657 .set_piomode = qdi6580_set_piomode,
24658 };
24659 diff -urNp linux-2.6.32.42/drivers/ata/pata_radisys.c linux-2.6.32.42/drivers/ata/pata_radisys.c
24660 --- linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
24661 +++ linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
24662 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
24663 ATA_BMDMA_SHT(DRV_NAME),
24664 };
24665
24666 -static struct ata_port_operations radisys_pata_ops = {
24667 +static const struct ata_port_operations radisys_pata_ops = {
24668 .inherits = &ata_bmdma_port_ops,
24669 .qc_issue = radisys_qc_issue,
24670 .cable_detect = ata_cable_unknown,
24671 diff -urNp linux-2.6.32.42/drivers/ata/pata_rb532_cf.c linux-2.6.32.42/drivers/ata/pata_rb532_cf.c
24672 --- linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
24673 +++ linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
24674 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
24675 return IRQ_HANDLED;
24676 }
24677
24678 -static struct ata_port_operations rb532_pata_port_ops = {
24679 +static const struct ata_port_operations rb532_pata_port_ops = {
24680 .inherits = &ata_sff_port_ops,
24681 .sff_data_xfer = ata_sff_data_xfer32,
24682 };
24683 diff -urNp linux-2.6.32.42/drivers/ata/pata_rdc.c linux-2.6.32.42/drivers/ata/pata_rdc.c
24684 --- linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
24685 +++ linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
24686 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
24687 pci_write_config_byte(dev, 0x48, udma_enable);
24688 }
24689
24690 -static struct ata_port_operations rdc_pata_ops = {
24691 +static const struct ata_port_operations rdc_pata_ops = {
24692 .inherits = &ata_bmdma32_port_ops,
24693 .cable_detect = rdc_pata_cable_detect,
24694 .set_piomode = rdc_set_piomode,
24695 diff -urNp linux-2.6.32.42/drivers/ata/pata_rz1000.c linux-2.6.32.42/drivers/ata/pata_rz1000.c
24696 --- linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
24697 +++ linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
24698 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
24699 ATA_PIO_SHT(DRV_NAME),
24700 };
24701
24702 -static struct ata_port_operations rz1000_port_ops = {
24703 +static const struct ata_port_operations rz1000_port_ops = {
24704 .inherits = &ata_sff_port_ops,
24705 .cable_detect = ata_cable_40wire,
24706 .set_mode = rz1000_set_mode,
24707 diff -urNp linux-2.6.32.42/drivers/ata/pata_sc1200.c linux-2.6.32.42/drivers/ata/pata_sc1200.c
24708 --- linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
24709 +++ linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
24710 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
24711 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24712 };
24713
24714 -static struct ata_port_operations sc1200_port_ops = {
24715 +static const struct ata_port_operations sc1200_port_ops = {
24716 .inherits = &ata_bmdma_port_ops,
24717 .qc_prep = ata_sff_dumb_qc_prep,
24718 .qc_issue = sc1200_qc_issue,
24719 diff -urNp linux-2.6.32.42/drivers/ata/pata_scc.c linux-2.6.32.42/drivers/ata/pata_scc.c
24720 --- linux-2.6.32.42/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
24721 +++ linux-2.6.32.42/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
24722 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
24723 ATA_BMDMA_SHT(DRV_NAME),
24724 };
24725
24726 -static struct ata_port_operations scc_pata_ops = {
24727 +static const struct ata_port_operations scc_pata_ops = {
24728 .inherits = &ata_bmdma_port_ops,
24729
24730 .set_piomode = scc_set_piomode,
24731 diff -urNp linux-2.6.32.42/drivers/ata/pata_sch.c linux-2.6.32.42/drivers/ata/pata_sch.c
24732 --- linux-2.6.32.42/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
24733 +++ linux-2.6.32.42/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
24734 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
24735 ATA_BMDMA_SHT(DRV_NAME),
24736 };
24737
24738 -static struct ata_port_operations sch_pata_ops = {
24739 +static const struct ata_port_operations sch_pata_ops = {
24740 .inherits = &ata_bmdma_port_ops,
24741 .cable_detect = ata_cable_unknown,
24742 .set_piomode = sch_set_piomode,
24743 diff -urNp linux-2.6.32.42/drivers/ata/pata_serverworks.c linux-2.6.32.42/drivers/ata/pata_serverworks.c
24744 --- linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
24745 +++ linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
24746 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
24747 ATA_BMDMA_SHT(DRV_NAME),
24748 };
24749
24750 -static struct ata_port_operations serverworks_osb4_port_ops = {
24751 +static const struct ata_port_operations serverworks_osb4_port_ops = {
24752 .inherits = &ata_bmdma_port_ops,
24753 .cable_detect = serverworks_cable_detect,
24754 .mode_filter = serverworks_osb4_filter,
24755 @@ -307,7 +307,7 @@ static struct ata_port_operations server
24756 .set_dmamode = serverworks_set_dmamode,
24757 };
24758
24759 -static struct ata_port_operations serverworks_csb_port_ops = {
24760 +static const struct ata_port_operations serverworks_csb_port_ops = {
24761 .inherits = &serverworks_osb4_port_ops,
24762 .mode_filter = serverworks_csb_filter,
24763 };
24764 diff -urNp linux-2.6.32.42/drivers/ata/pata_sil680.c linux-2.6.32.42/drivers/ata/pata_sil680.c
24765 --- linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
24766 +++ linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
24767 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
24768 ATA_BMDMA_SHT(DRV_NAME),
24769 };
24770
24771 -static struct ata_port_operations sil680_port_ops = {
24772 +static const struct ata_port_operations sil680_port_ops = {
24773 .inherits = &ata_bmdma32_port_ops,
24774 .cable_detect = sil680_cable_detect,
24775 .set_piomode = sil680_set_piomode,
24776 diff -urNp linux-2.6.32.42/drivers/ata/pata_sis.c linux-2.6.32.42/drivers/ata/pata_sis.c
24777 --- linux-2.6.32.42/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
24778 +++ linux-2.6.32.42/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
24779 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
24780 ATA_BMDMA_SHT(DRV_NAME),
24781 };
24782
24783 -static struct ata_port_operations sis_133_for_sata_ops = {
24784 +static const struct ata_port_operations sis_133_for_sata_ops = {
24785 .inherits = &ata_bmdma_port_ops,
24786 .set_piomode = sis_133_set_piomode,
24787 .set_dmamode = sis_133_set_dmamode,
24788 .cable_detect = sis_133_cable_detect,
24789 };
24790
24791 -static struct ata_port_operations sis_base_ops = {
24792 +static const struct ata_port_operations sis_base_ops = {
24793 .inherits = &ata_bmdma_port_ops,
24794 .prereset = sis_pre_reset,
24795 };
24796
24797 -static struct ata_port_operations sis_133_ops = {
24798 +static const struct ata_port_operations sis_133_ops = {
24799 .inherits = &sis_base_ops,
24800 .set_piomode = sis_133_set_piomode,
24801 .set_dmamode = sis_133_set_dmamode,
24802 .cable_detect = sis_133_cable_detect,
24803 };
24804
24805 -static struct ata_port_operations sis_133_early_ops = {
24806 +static const struct ata_port_operations sis_133_early_ops = {
24807 .inherits = &sis_base_ops,
24808 .set_piomode = sis_100_set_piomode,
24809 .set_dmamode = sis_133_early_set_dmamode,
24810 .cable_detect = sis_66_cable_detect,
24811 };
24812
24813 -static struct ata_port_operations sis_100_ops = {
24814 +static const struct ata_port_operations sis_100_ops = {
24815 .inherits = &sis_base_ops,
24816 .set_piomode = sis_100_set_piomode,
24817 .set_dmamode = sis_100_set_dmamode,
24818 .cable_detect = sis_66_cable_detect,
24819 };
24820
24821 -static struct ata_port_operations sis_66_ops = {
24822 +static const struct ata_port_operations sis_66_ops = {
24823 .inherits = &sis_base_ops,
24824 .set_piomode = sis_old_set_piomode,
24825 .set_dmamode = sis_66_set_dmamode,
24826 .cable_detect = sis_66_cable_detect,
24827 };
24828
24829 -static struct ata_port_operations sis_old_ops = {
24830 +static const struct ata_port_operations sis_old_ops = {
24831 .inherits = &sis_base_ops,
24832 .set_piomode = sis_old_set_piomode,
24833 .set_dmamode = sis_old_set_dmamode,
24834 diff -urNp linux-2.6.32.42/drivers/ata/pata_sl82c105.c linux-2.6.32.42/drivers/ata/pata_sl82c105.c
24835 --- linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
24836 +++ linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
24837 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
24838 ATA_BMDMA_SHT(DRV_NAME),
24839 };
24840
24841 -static struct ata_port_operations sl82c105_port_ops = {
24842 +static const struct ata_port_operations sl82c105_port_ops = {
24843 .inherits = &ata_bmdma_port_ops,
24844 .qc_defer = sl82c105_qc_defer,
24845 .bmdma_start = sl82c105_bmdma_start,
24846 diff -urNp linux-2.6.32.42/drivers/ata/pata_triflex.c linux-2.6.32.42/drivers/ata/pata_triflex.c
24847 --- linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
24848 +++ linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
24849 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
24850 ATA_BMDMA_SHT(DRV_NAME),
24851 };
24852
24853 -static struct ata_port_operations triflex_port_ops = {
24854 +static const struct ata_port_operations triflex_port_ops = {
24855 .inherits = &ata_bmdma_port_ops,
24856 .bmdma_start = triflex_bmdma_start,
24857 .bmdma_stop = triflex_bmdma_stop,
24858 diff -urNp linux-2.6.32.42/drivers/ata/pata_via.c linux-2.6.32.42/drivers/ata/pata_via.c
24859 --- linux-2.6.32.42/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
24860 +++ linux-2.6.32.42/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
24861 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
24862 ATA_BMDMA_SHT(DRV_NAME),
24863 };
24864
24865 -static struct ata_port_operations via_port_ops = {
24866 +static const struct ata_port_operations via_port_ops = {
24867 .inherits = &ata_bmdma_port_ops,
24868 .cable_detect = via_cable_detect,
24869 .set_piomode = via_set_piomode,
24870 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
24871 .port_start = via_port_start,
24872 };
24873
24874 -static struct ata_port_operations via_port_ops_noirq = {
24875 +static const struct ata_port_operations via_port_ops_noirq = {
24876 .inherits = &via_port_ops,
24877 .sff_data_xfer = ata_sff_data_xfer_noirq,
24878 };
24879 diff -urNp linux-2.6.32.42/drivers/ata/pata_winbond.c linux-2.6.32.42/drivers/ata/pata_winbond.c
24880 --- linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
24881 +++ linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
24882 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
24883 ATA_PIO_SHT(DRV_NAME),
24884 };
24885
24886 -static struct ata_port_operations winbond_port_ops = {
24887 +static const struct ata_port_operations winbond_port_ops = {
24888 .inherits = &ata_sff_port_ops,
24889 .sff_data_xfer = winbond_data_xfer,
24890 .cable_detect = ata_cable_40wire,
24891 diff -urNp linux-2.6.32.42/drivers/ata/pdc_adma.c linux-2.6.32.42/drivers/ata/pdc_adma.c
24892 --- linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
24893 +++ linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
24894 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
24895 .dma_boundary = ADMA_DMA_BOUNDARY,
24896 };
24897
24898 -static struct ata_port_operations adma_ata_ops = {
24899 +static const struct ata_port_operations adma_ata_ops = {
24900 .inherits = &ata_sff_port_ops,
24901
24902 .lost_interrupt = ATA_OP_NULL,
24903 diff -urNp linux-2.6.32.42/drivers/ata/sata_fsl.c linux-2.6.32.42/drivers/ata/sata_fsl.c
24904 --- linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
24905 +++ linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
24906 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
24907 .dma_boundary = ATA_DMA_BOUNDARY,
24908 };
24909
24910 -static struct ata_port_operations sata_fsl_ops = {
24911 +static const struct ata_port_operations sata_fsl_ops = {
24912 .inherits = &sata_pmp_port_ops,
24913
24914 .qc_defer = ata_std_qc_defer,
24915 diff -urNp linux-2.6.32.42/drivers/ata/sata_inic162x.c linux-2.6.32.42/drivers/ata/sata_inic162x.c
24916 --- linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
24917 +++ linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
24918 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
24919 return 0;
24920 }
24921
24922 -static struct ata_port_operations inic_port_ops = {
24923 +static const struct ata_port_operations inic_port_ops = {
24924 .inherits = &sata_port_ops,
24925
24926 .check_atapi_dma = inic_check_atapi_dma,
24927 diff -urNp linux-2.6.32.42/drivers/ata/sata_mv.c linux-2.6.32.42/drivers/ata/sata_mv.c
24928 --- linux-2.6.32.42/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
24929 +++ linux-2.6.32.42/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
24930 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
24931 .dma_boundary = MV_DMA_BOUNDARY,
24932 };
24933
24934 -static struct ata_port_operations mv5_ops = {
24935 +static const struct ata_port_operations mv5_ops = {
24936 .inherits = &ata_sff_port_ops,
24937
24938 .lost_interrupt = ATA_OP_NULL,
24939 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
24940 .port_stop = mv_port_stop,
24941 };
24942
24943 -static struct ata_port_operations mv6_ops = {
24944 +static const struct ata_port_operations mv6_ops = {
24945 .inherits = &mv5_ops,
24946 .dev_config = mv6_dev_config,
24947 .scr_read = mv_scr_read,
24948 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
24949 .bmdma_status = mv_bmdma_status,
24950 };
24951
24952 -static struct ata_port_operations mv_iie_ops = {
24953 +static const struct ata_port_operations mv_iie_ops = {
24954 .inherits = &mv6_ops,
24955 .dev_config = ATA_OP_NULL,
24956 .qc_prep = mv_qc_prep_iie,
24957 diff -urNp linux-2.6.32.42/drivers/ata/sata_nv.c linux-2.6.32.42/drivers/ata/sata_nv.c
24958 --- linux-2.6.32.42/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
24959 +++ linux-2.6.32.42/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
24960 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
24961 * cases. Define nv_hardreset() which only kicks in for post-boot
24962 * probing and use it for all variants.
24963 */
24964 -static struct ata_port_operations nv_generic_ops = {
24965 +static const struct ata_port_operations nv_generic_ops = {
24966 .inherits = &ata_bmdma_port_ops,
24967 .lost_interrupt = ATA_OP_NULL,
24968 .scr_read = nv_scr_read,
24969 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
24970 .hardreset = nv_hardreset,
24971 };
24972
24973 -static struct ata_port_operations nv_nf2_ops = {
24974 +static const struct ata_port_operations nv_nf2_ops = {
24975 .inherits = &nv_generic_ops,
24976 .freeze = nv_nf2_freeze,
24977 .thaw = nv_nf2_thaw,
24978 };
24979
24980 -static struct ata_port_operations nv_ck804_ops = {
24981 +static const struct ata_port_operations nv_ck804_ops = {
24982 .inherits = &nv_generic_ops,
24983 .freeze = nv_ck804_freeze,
24984 .thaw = nv_ck804_thaw,
24985 .host_stop = nv_ck804_host_stop,
24986 };
24987
24988 -static struct ata_port_operations nv_adma_ops = {
24989 +static const struct ata_port_operations nv_adma_ops = {
24990 .inherits = &nv_ck804_ops,
24991
24992 .check_atapi_dma = nv_adma_check_atapi_dma,
24993 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
24994 .host_stop = nv_adma_host_stop,
24995 };
24996
24997 -static struct ata_port_operations nv_swncq_ops = {
24998 +static const struct ata_port_operations nv_swncq_ops = {
24999 .inherits = &nv_generic_ops,
25000
25001 .qc_defer = ata_std_qc_defer,
25002 diff -urNp linux-2.6.32.42/drivers/ata/sata_promise.c linux-2.6.32.42/drivers/ata/sata_promise.c
25003 --- linux-2.6.32.42/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25004 +++ linux-2.6.32.42/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25005 @@ -195,7 +195,7 @@ static const struct ata_port_operations
25006 .error_handler = pdc_error_handler,
25007 };
25008
25009 -static struct ata_port_operations pdc_sata_ops = {
25010 +static const struct ata_port_operations pdc_sata_ops = {
25011 .inherits = &pdc_common_ops,
25012 .cable_detect = pdc_sata_cable_detect,
25013 .freeze = pdc_sata_freeze,
25014 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25015
25016 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25017 and ->freeze/thaw that ignore the hotplug controls. */
25018 -static struct ata_port_operations pdc_old_sata_ops = {
25019 +static const struct ata_port_operations pdc_old_sata_ops = {
25020 .inherits = &pdc_sata_ops,
25021 .freeze = pdc_freeze,
25022 .thaw = pdc_thaw,
25023 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25024 };
25025
25026 -static struct ata_port_operations pdc_pata_ops = {
25027 +static const struct ata_port_operations pdc_pata_ops = {
25028 .inherits = &pdc_common_ops,
25029 .cable_detect = pdc_pata_cable_detect,
25030 .freeze = pdc_freeze,
25031 diff -urNp linux-2.6.32.42/drivers/ata/sata_qstor.c linux-2.6.32.42/drivers/ata/sata_qstor.c
25032 --- linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25033 +++ linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25034 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25035 .dma_boundary = QS_DMA_BOUNDARY,
25036 };
25037
25038 -static struct ata_port_operations qs_ata_ops = {
25039 +static const struct ata_port_operations qs_ata_ops = {
25040 .inherits = &ata_sff_port_ops,
25041
25042 .check_atapi_dma = qs_check_atapi_dma,
25043 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil24.c linux-2.6.32.42/drivers/ata/sata_sil24.c
25044 --- linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25045 +++ linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25046 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25047 .dma_boundary = ATA_DMA_BOUNDARY,
25048 };
25049
25050 -static struct ata_port_operations sil24_ops = {
25051 +static const struct ata_port_operations sil24_ops = {
25052 .inherits = &sata_pmp_port_ops,
25053
25054 .qc_defer = sil24_qc_defer,
25055 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil.c linux-2.6.32.42/drivers/ata/sata_sil.c
25056 --- linux-2.6.32.42/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25057 +++ linux-2.6.32.42/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25058 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25059 .sg_tablesize = ATA_MAX_PRD
25060 };
25061
25062 -static struct ata_port_operations sil_ops = {
25063 +static const struct ata_port_operations sil_ops = {
25064 .inherits = &ata_bmdma32_port_ops,
25065 .dev_config = sil_dev_config,
25066 .set_mode = sil_set_mode,
25067 diff -urNp linux-2.6.32.42/drivers/ata/sata_sis.c linux-2.6.32.42/drivers/ata/sata_sis.c
25068 --- linux-2.6.32.42/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25069 +++ linux-2.6.32.42/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25070 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25071 ATA_BMDMA_SHT(DRV_NAME),
25072 };
25073
25074 -static struct ata_port_operations sis_ops = {
25075 +static const struct ata_port_operations sis_ops = {
25076 .inherits = &ata_bmdma_port_ops,
25077 .scr_read = sis_scr_read,
25078 .scr_write = sis_scr_write,
25079 diff -urNp linux-2.6.32.42/drivers/ata/sata_svw.c linux-2.6.32.42/drivers/ata/sata_svw.c
25080 --- linux-2.6.32.42/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25081 +++ linux-2.6.32.42/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25082 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25083 };
25084
25085
25086 -static struct ata_port_operations k2_sata_ops = {
25087 +static const struct ata_port_operations k2_sata_ops = {
25088 .inherits = &ata_bmdma_port_ops,
25089 .sff_tf_load = k2_sata_tf_load,
25090 .sff_tf_read = k2_sata_tf_read,
25091 diff -urNp linux-2.6.32.42/drivers/ata/sata_sx4.c linux-2.6.32.42/drivers/ata/sata_sx4.c
25092 --- linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25093 +++ linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25094 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25095 };
25096
25097 /* TODO: inherit from base port_ops after converting to new EH */
25098 -static struct ata_port_operations pdc_20621_ops = {
25099 +static const struct ata_port_operations pdc_20621_ops = {
25100 .inherits = &ata_sff_port_ops,
25101
25102 .check_atapi_dma = pdc_check_atapi_dma,
25103 diff -urNp linux-2.6.32.42/drivers/ata/sata_uli.c linux-2.6.32.42/drivers/ata/sata_uli.c
25104 --- linux-2.6.32.42/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25105 +++ linux-2.6.32.42/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25106 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25107 ATA_BMDMA_SHT(DRV_NAME),
25108 };
25109
25110 -static struct ata_port_operations uli_ops = {
25111 +static const struct ata_port_operations uli_ops = {
25112 .inherits = &ata_bmdma_port_ops,
25113 .scr_read = uli_scr_read,
25114 .scr_write = uli_scr_write,
25115 diff -urNp linux-2.6.32.42/drivers/ata/sata_via.c linux-2.6.32.42/drivers/ata/sata_via.c
25116 --- linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25117 +++ linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25118 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25119 ATA_BMDMA_SHT(DRV_NAME),
25120 };
25121
25122 -static struct ata_port_operations svia_base_ops = {
25123 +static const struct ata_port_operations svia_base_ops = {
25124 .inherits = &ata_bmdma_port_ops,
25125 .sff_tf_load = svia_tf_load,
25126 };
25127
25128 -static struct ata_port_operations vt6420_sata_ops = {
25129 +static const struct ata_port_operations vt6420_sata_ops = {
25130 .inherits = &svia_base_ops,
25131 .freeze = svia_noop_freeze,
25132 .prereset = vt6420_prereset,
25133 .bmdma_start = vt6420_bmdma_start,
25134 };
25135
25136 -static struct ata_port_operations vt6421_pata_ops = {
25137 +static const struct ata_port_operations vt6421_pata_ops = {
25138 .inherits = &svia_base_ops,
25139 .cable_detect = vt6421_pata_cable_detect,
25140 .set_piomode = vt6421_set_pio_mode,
25141 .set_dmamode = vt6421_set_dma_mode,
25142 };
25143
25144 -static struct ata_port_operations vt6421_sata_ops = {
25145 +static const struct ata_port_operations vt6421_sata_ops = {
25146 .inherits = &svia_base_ops,
25147 .scr_read = svia_scr_read,
25148 .scr_write = svia_scr_write,
25149 };
25150
25151 -static struct ata_port_operations vt8251_ops = {
25152 +static const struct ata_port_operations vt8251_ops = {
25153 .inherits = &svia_base_ops,
25154 .hardreset = sata_std_hardreset,
25155 .scr_read = vt8251_scr_read,
25156 diff -urNp linux-2.6.32.42/drivers/ata/sata_vsc.c linux-2.6.32.42/drivers/ata/sata_vsc.c
25157 --- linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25158 +++ linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25159 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25160 };
25161
25162
25163 -static struct ata_port_operations vsc_sata_ops = {
25164 +static const struct ata_port_operations vsc_sata_ops = {
25165 .inherits = &ata_bmdma_port_ops,
25166 /* The IRQ handling is not quite standard SFF behaviour so we
25167 cannot use the default lost interrupt handler */
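The libata hunks above, from winbond_port_ops at the top of this stretch through sata_vsc, apply a single pattern: each driver's ata_port_operations table holds nothing but function pointers, is never written after initialisation, and is therefore declared const so the toolchain can place it in a read-only section. Below is a minimal standalone C sketch of that idea, illustrative only; the demo_ops/demo_port_ops names are invented and are not code from the patch.

#include <stdio.h>

struct demo_ops {                       /* stand-in for an ops table */
        int  (*probe)(int id);
        void (*remove)(int id);
};

static int  demo_probe(int id)  { printf("probe %d\n", id);  return 0; }
static void demo_remove(int id) { printf("remove %d\n", id); }

/* const lets the whole pointer table live in a read-only section, so it
 * cannot be overwritten at run time and accidental writes fail to build. */
static const struct demo_ops demo_port_ops = {
        .probe  = demo_probe,
        .remove = demo_remove,
};

int main(void)
{
        demo_port_ops.probe(1);
        demo_port_ops.remove(1);
        /* demo_port_ops.probe = NULL;   -- rejected by the compiler */
        return 0;
}

The same constification shows up again further down for the sysfs_ops, kset_uevent_ops and hv_ops tables in drivers/base and the hvc console code.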
25168 diff -urNp linux-2.6.32.42/drivers/atm/adummy.c linux-2.6.32.42/drivers/atm/adummy.c
25169 --- linux-2.6.32.42/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25170 +++ linux-2.6.32.42/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25171 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25172 vcc->pop(vcc, skb);
25173 else
25174 dev_kfree_skb_any(skb);
25175 - atomic_inc(&vcc->stats->tx);
25176 + atomic_inc_unchecked(&vcc->stats->tx);
25177
25178 return 0;
25179 }
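This adummy hunk is the first of a long run of ATM driver changes (ambassador, atmtcp, eni, firestream, fore200e, he, horizon, idt77252, iphase, lanai, nicstar, solos-pci, suni, uPD98402, zatm) that replace the plain atomic helpers on the per-VCC statistics counters with atomic_inc_unchecked(), atomic_add_unchecked() and atomic_read_unchecked(). Under the PaX REFCOUNT hardening elsewhere in this patch, operations on atomic_t are checked against overflow because atomic_t usually backs reference counts; counters like vcc->stats->tx are pure statistics that may legitimately wrap, so they are switched to the unchecked variants instead. The following is only a rough userspace model of that distinction, with invented type and function names, not the kernel implementation:

#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } checked_t;       /* refcount-style counter   */
typedef struct { int counter; } unchecked_t;     /* statistics-style counter */

static void inc_checked(checked_t *v)
{
        if (v->counter == INT_MAX) {             /* the next ++ would wrap   */
                fprintf(stderr, "overflow caught on a checked counter\n");
                return;                          /* refuse to wrap           */
        }
        v->counter++;
}

static void inc_unchecked(unchecked_t *v)
{
        v->counter++;                            /* wrapping is acceptable   */
}

int main(void)
{
        checked_t   ref  = { INT_MAX };          /* a suspicious refcount    */
        unchecked_t stat = { 41 };               /* an ordinary statistic    */

        inc_checked(&ref);                       /* detected, not wrapped    */
        inc_unchecked(&stat);
        printf("ref=%d stat=%d\n", ref.counter, stat.counter);
        return 0;
}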
25180 diff -urNp linux-2.6.32.42/drivers/atm/ambassador.c linux-2.6.32.42/drivers/atm/ambassador.c
25181 --- linux-2.6.32.42/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25182 +++ linux-2.6.32.42/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25183 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25184 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25185
25186 // VC layer stats
25187 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25188 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25189
25190 // free the descriptor
25191 kfree (tx_descr);
25192 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25193 dump_skb ("<<<", vc, skb);
25194
25195 // VC layer stats
25196 - atomic_inc(&atm_vcc->stats->rx);
25197 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25198 __net_timestamp(skb);
25199 // end of our responsability
25200 atm_vcc->push (atm_vcc, skb);
25201 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25202 } else {
25203 PRINTK (KERN_INFO, "dropped over-size frame");
25204 // should we count this?
25205 - atomic_inc(&atm_vcc->stats->rx_drop);
25206 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25207 }
25208
25209 } else {
25210 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25211 }
25212
25213 if (check_area (skb->data, skb->len)) {
25214 - atomic_inc(&atm_vcc->stats->tx_err);
25215 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25216 return -ENOMEM; // ?
25217 }
25218
25219 diff -urNp linux-2.6.32.42/drivers/atm/atmtcp.c linux-2.6.32.42/drivers/atm/atmtcp.c
25220 --- linux-2.6.32.42/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25221 +++ linux-2.6.32.42/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25222 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25223 if (vcc->pop) vcc->pop(vcc,skb);
25224 else dev_kfree_skb(skb);
25225 if (dev_data) return 0;
25226 - atomic_inc(&vcc->stats->tx_err);
25227 + atomic_inc_unchecked(&vcc->stats->tx_err);
25228 return -ENOLINK;
25229 }
25230 size = skb->len+sizeof(struct atmtcp_hdr);
25231 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25232 if (!new_skb) {
25233 if (vcc->pop) vcc->pop(vcc,skb);
25234 else dev_kfree_skb(skb);
25235 - atomic_inc(&vcc->stats->tx_err);
25236 + atomic_inc_unchecked(&vcc->stats->tx_err);
25237 return -ENOBUFS;
25238 }
25239 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25240 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25241 if (vcc->pop) vcc->pop(vcc,skb);
25242 else dev_kfree_skb(skb);
25243 out_vcc->push(out_vcc,new_skb);
25244 - atomic_inc(&vcc->stats->tx);
25245 - atomic_inc(&out_vcc->stats->rx);
25246 + atomic_inc_unchecked(&vcc->stats->tx);
25247 + atomic_inc_unchecked(&out_vcc->stats->rx);
25248 return 0;
25249 }
25250
25251 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25252 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25253 read_unlock(&vcc_sklist_lock);
25254 if (!out_vcc) {
25255 - atomic_inc(&vcc->stats->tx_err);
25256 + atomic_inc_unchecked(&vcc->stats->tx_err);
25257 goto done;
25258 }
25259 skb_pull(skb,sizeof(struct atmtcp_hdr));
25260 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25261 __net_timestamp(new_skb);
25262 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25263 out_vcc->push(out_vcc,new_skb);
25264 - atomic_inc(&vcc->stats->tx);
25265 - atomic_inc(&out_vcc->stats->rx);
25266 + atomic_inc_unchecked(&vcc->stats->tx);
25267 + atomic_inc_unchecked(&out_vcc->stats->rx);
25268 done:
25269 if (vcc->pop) vcc->pop(vcc,skb);
25270 else dev_kfree_skb(skb);
25271 diff -urNp linux-2.6.32.42/drivers/atm/eni.c linux-2.6.32.42/drivers/atm/eni.c
25272 --- linux-2.6.32.42/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25273 +++ linux-2.6.32.42/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25274 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25275 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25276 vcc->dev->number);
25277 length = 0;
25278 - atomic_inc(&vcc->stats->rx_err);
25279 + atomic_inc_unchecked(&vcc->stats->rx_err);
25280 }
25281 else {
25282 length = ATM_CELL_SIZE-1; /* no HEC */
25283 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25284 size);
25285 }
25286 eff = length = 0;
25287 - atomic_inc(&vcc->stats->rx_err);
25288 + atomic_inc_unchecked(&vcc->stats->rx_err);
25289 }
25290 else {
25291 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25292 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25293 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25294 vcc->dev->number,vcc->vci,length,size << 2,descr);
25295 length = eff = 0;
25296 - atomic_inc(&vcc->stats->rx_err);
25297 + atomic_inc_unchecked(&vcc->stats->rx_err);
25298 }
25299 }
25300 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25301 @@ -770,7 +770,7 @@ rx_dequeued++;
25302 vcc->push(vcc,skb);
25303 pushed++;
25304 }
25305 - atomic_inc(&vcc->stats->rx);
25306 + atomic_inc_unchecked(&vcc->stats->rx);
25307 }
25308 wake_up(&eni_dev->rx_wait);
25309 }
25310 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25311 PCI_DMA_TODEVICE);
25312 if (vcc->pop) vcc->pop(vcc,skb);
25313 else dev_kfree_skb_irq(skb);
25314 - atomic_inc(&vcc->stats->tx);
25315 + atomic_inc_unchecked(&vcc->stats->tx);
25316 wake_up(&eni_dev->tx_wait);
25317 dma_complete++;
25318 }
25319 diff -urNp linux-2.6.32.42/drivers/atm/firestream.c linux-2.6.32.42/drivers/atm/firestream.c
25320 --- linux-2.6.32.42/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25321 +++ linux-2.6.32.42/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25322 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25323 }
25324 }
25325
25326 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25327 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25328
25329 fs_dprintk (FS_DEBUG_TXMEM, "i");
25330 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25331 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25332 #endif
25333 skb_put (skb, qe->p1 & 0xffff);
25334 ATM_SKB(skb)->vcc = atm_vcc;
25335 - atomic_inc(&atm_vcc->stats->rx);
25336 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25337 __net_timestamp(skb);
25338 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25339 atm_vcc->push (atm_vcc, skb);
25340 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25341 kfree (pe);
25342 }
25343 if (atm_vcc)
25344 - atomic_inc(&atm_vcc->stats->rx_drop);
25345 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25346 break;
25347 case 0x1f: /* Reassembly abort: no buffers. */
25348 /* Silently increment error counter. */
25349 if (atm_vcc)
25350 - atomic_inc(&atm_vcc->stats->rx_drop);
25351 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25352 break;
25353 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25354 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25355 diff -urNp linux-2.6.32.42/drivers/atm/fore200e.c linux-2.6.32.42/drivers/atm/fore200e.c
25356 --- linux-2.6.32.42/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25357 +++ linux-2.6.32.42/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25358 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25359 #endif
25360 /* check error condition */
25361 if (*entry->status & STATUS_ERROR)
25362 - atomic_inc(&vcc->stats->tx_err);
25363 + atomic_inc_unchecked(&vcc->stats->tx_err);
25364 else
25365 - atomic_inc(&vcc->stats->tx);
25366 + atomic_inc_unchecked(&vcc->stats->tx);
25367 }
25368 }
25369
25370 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25371 if (skb == NULL) {
25372 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25373
25374 - atomic_inc(&vcc->stats->rx_drop);
25375 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25376 return -ENOMEM;
25377 }
25378
25379 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25380
25381 dev_kfree_skb_any(skb);
25382
25383 - atomic_inc(&vcc->stats->rx_drop);
25384 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25385 return -ENOMEM;
25386 }
25387
25388 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25389
25390 vcc->push(vcc, skb);
25391 - atomic_inc(&vcc->stats->rx);
25392 + atomic_inc_unchecked(&vcc->stats->rx);
25393
25394 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25395
25396 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25397 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25398 fore200e->atm_dev->number,
25399 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25400 - atomic_inc(&vcc->stats->rx_err);
25401 + atomic_inc_unchecked(&vcc->stats->rx_err);
25402 }
25403 }
25404
25405 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25406 goto retry_here;
25407 }
25408
25409 - atomic_inc(&vcc->stats->tx_err);
25410 + atomic_inc_unchecked(&vcc->stats->tx_err);
25411
25412 fore200e->tx_sat++;
25413 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25414 diff -urNp linux-2.6.32.42/drivers/atm/he.c linux-2.6.32.42/drivers/atm/he.c
25415 --- linux-2.6.32.42/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25416 +++ linux-2.6.32.42/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25417 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25418
25419 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25420 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25421 - atomic_inc(&vcc->stats->rx_drop);
25422 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25423 goto return_host_buffers;
25424 }
25425
25426 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25427 RBRQ_LEN_ERR(he_dev->rbrq_head)
25428 ? "LEN_ERR" : "",
25429 vcc->vpi, vcc->vci);
25430 - atomic_inc(&vcc->stats->rx_err);
25431 + atomic_inc_unchecked(&vcc->stats->rx_err);
25432 goto return_host_buffers;
25433 }
25434
25435 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25436 vcc->push(vcc, skb);
25437 spin_lock(&he_dev->global_lock);
25438
25439 - atomic_inc(&vcc->stats->rx);
25440 + atomic_inc_unchecked(&vcc->stats->rx);
25441
25442 return_host_buffers:
25443 ++pdus_assembled;
25444 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25445 tpd->vcc->pop(tpd->vcc, tpd->skb);
25446 else
25447 dev_kfree_skb_any(tpd->skb);
25448 - atomic_inc(&tpd->vcc->stats->tx_err);
25449 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25450 }
25451 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25452 return;
25453 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25454 vcc->pop(vcc, skb);
25455 else
25456 dev_kfree_skb_any(skb);
25457 - atomic_inc(&vcc->stats->tx_err);
25458 + atomic_inc_unchecked(&vcc->stats->tx_err);
25459 return -EINVAL;
25460 }
25461
25462 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25463 vcc->pop(vcc, skb);
25464 else
25465 dev_kfree_skb_any(skb);
25466 - atomic_inc(&vcc->stats->tx_err);
25467 + atomic_inc_unchecked(&vcc->stats->tx_err);
25468 return -EINVAL;
25469 }
25470 #endif
25471 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25472 vcc->pop(vcc, skb);
25473 else
25474 dev_kfree_skb_any(skb);
25475 - atomic_inc(&vcc->stats->tx_err);
25476 + atomic_inc_unchecked(&vcc->stats->tx_err);
25477 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25478 return -ENOMEM;
25479 }
25480 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25481 vcc->pop(vcc, skb);
25482 else
25483 dev_kfree_skb_any(skb);
25484 - atomic_inc(&vcc->stats->tx_err);
25485 + atomic_inc_unchecked(&vcc->stats->tx_err);
25486 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25487 return -ENOMEM;
25488 }
25489 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25490 __enqueue_tpd(he_dev, tpd, cid);
25491 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25492
25493 - atomic_inc(&vcc->stats->tx);
25494 + atomic_inc_unchecked(&vcc->stats->tx);
25495
25496 return 0;
25497 }
25498 diff -urNp linux-2.6.32.42/drivers/atm/horizon.c linux-2.6.32.42/drivers/atm/horizon.c
25499 --- linux-2.6.32.42/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25500 +++ linux-2.6.32.42/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25501 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25502 {
25503 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25504 // VC layer stats
25505 - atomic_inc(&vcc->stats->rx);
25506 + atomic_inc_unchecked(&vcc->stats->rx);
25507 __net_timestamp(skb);
25508 // end of our responsability
25509 vcc->push (vcc, skb);
25510 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25511 dev->tx_iovec = NULL;
25512
25513 // VC layer stats
25514 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25515 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25516
25517 // free the skb
25518 hrz_kfree_skb (skb);
25519 diff -urNp linux-2.6.32.42/drivers/atm/idt77252.c linux-2.6.32.42/drivers/atm/idt77252.c
25520 --- linux-2.6.32.42/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25521 +++ linux-2.6.32.42/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25522 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25523 else
25524 dev_kfree_skb(skb);
25525
25526 - atomic_inc(&vcc->stats->tx);
25527 + atomic_inc_unchecked(&vcc->stats->tx);
25528 }
25529
25530 atomic_dec(&scq->used);
25531 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25532 if ((sb = dev_alloc_skb(64)) == NULL) {
25533 printk("%s: Can't allocate buffers for aal0.\n",
25534 card->name);
25535 - atomic_add(i, &vcc->stats->rx_drop);
25536 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25537 break;
25538 }
25539 if (!atm_charge(vcc, sb->truesize)) {
25540 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25541 card->name);
25542 - atomic_add(i - 1, &vcc->stats->rx_drop);
25543 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25544 dev_kfree_skb(sb);
25545 break;
25546 }
25547 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25548 ATM_SKB(sb)->vcc = vcc;
25549 __net_timestamp(sb);
25550 vcc->push(vcc, sb);
25551 - atomic_inc(&vcc->stats->rx);
25552 + atomic_inc_unchecked(&vcc->stats->rx);
25553
25554 cell += ATM_CELL_PAYLOAD;
25555 }
25556 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25557 "(CDC: %08x)\n",
25558 card->name, len, rpp->len, readl(SAR_REG_CDC));
25559 recycle_rx_pool_skb(card, rpp);
25560 - atomic_inc(&vcc->stats->rx_err);
25561 + atomic_inc_unchecked(&vcc->stats->rx_err);
25562 return;
25563 }
25564 if (stat & SAR_RSQE_CRC) {
25565 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25566 recycle_rx_pool_skb(card, rpp);
25567 - atomic_inc(&vcc->stats->rx_err);
25568 + atomic_inc_unchecked(&vcc->stats->rx_err);
25569 return;
25570 }
25571 if (skb_queue_len(&rpp->queue) > 1) {
25572 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25573 RXPRINTK("%s: Can't alloc RX skb.\n",
25574 card->name);
25575 recycle_rx_pool_skb(card, rpp);
25576 - atomic_inc(&vcc->stats->rx_err);
25577 + atomic_inc_unchecked(&vcc->stats->rx_err);
25578 return;
25579 }
25580 if (!atm_charge(vcc, skb->truesize)) {
25581 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25582 __net_timestamp(skb);
25583
25584 vcc->push(vcc, skb);
25585 - atomic_inc(&vcc->stats->rx);
25586 + atomic_inc_unchecked(&vcc->stats->rx);
25587
25588 return;
25589 }
25590 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25591 __net_timestamp(skb);
25592
25593 vcc->push(vcc, skb);
25594 - atomic_inc(&vcc->stats->rx);
25595 + atomic_inc_unchecked(&vcc->stats->rx);
25596
25597 if (skb->truesize > SAR_FB_SIZE_3)
25598 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25599 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25600 if (vcc->qos.aal != ATM_AAL0) {
25601 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25602 card->name, vpi, vci);
25603 - atomic_inc(&vcc->stats->rx_drop);
25604 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25605 goto drop;
25606 }
25607
25608 if ((sb = dev_alloc_skb(64)) == NULL) {
25609 printk("%s: Can't allocate buffers for AAL0.\n",
25610 card->name);
25611 - atomic_inc(&vcc->stats->rx_err);
25612 + atomic_inc_unchecked(&vcc->stats->rx_err);
25613 goto drop;
25614 }
25615
25616 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25617 ATM_SKB(sb)->vcc = vcc;
25618 __net_timestamp(sb);
25619 vcc->push(vcc, sb);
25620 - atomic_inc(&vcc->stats->rx);
25621 + atomic_inc_unchecked(&vcc->stats->rx);
25622
25623 drop:
25624 skb_pull(queue, 64);
25625 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25626
25627 if (vc == NULL) {
25628 printk("%s: NULL connection in send().\n", card->name);
25629 - atomic_inc(&vcc->stats->tx_err);
25630 + atomic_inc_unchecked(&vcc->stats->tx_err);
25631 dev_kfree_skb(skb);
25632 return -EINVAL;
25633 }
25634 if (!test_bit(VCF_TX, &vc->flags)) {
25635 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25636 - atomic_inc(&vcc->stats->tx_err);
25637 + atomic_inc_unchecked(&vcc->stats->tx_err);
25638 dev_kfree_skb(skb);
25639 return -EINVAL;
25640 }
25641 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25642 break;
25643 default:
25644 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25645 - atomic_inc(&vcc->stats->tx_err);
25646 + atomic_inc_unchecked(&vcc->stats->tx_err);
25647 dev_kfree_skb(skb);
25648 return -EINVAL;
25649 }
25650
25651 if (skb_shinfo(skb)->nr_frags != 0) {
25652 printk("%s: No scatter-gather yet.\n", card->name);
25653 - atomic_inc(&vcc->stats->tx_err);
25654 + atomic_inc_unchecked(&vcc->stats->tx_err);
25655 dev_kfree_skb(skb);
25656 return -EINVAL;
25657 }
25658 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25659
25660 err = queue_skb(card, vc, skb, oam);
25661 if (err) {
25662 - atomic_inc(&vcc->stats->tx_err);
25663 + atomic_inc_unchecked(&vcc->stats->tx_err);
25664 dev_kfree_skb(skb);
25665 return err;
25666 }
25667 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
25668 skb = dev_alloc_skb(64);
25669 if (!skb) {
25670 printk("%s: Out of memory in send_oam().\n", card->name);
25671 - atomic_inc(&vcc->stats->tx_err);
25672 + atomic_inc_unchecked(&vcc->stats->tx_err);
25673 return -ENOMEM;
25674 }
25675 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25676 diff -urNp linux-2.6.32.42/drivers/atm/iphase.c linux-2.6.32.42/drivers/atm/iphase.c
25677 --- linux-2.6.32.42/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
25678 +++ linux-2.6.32.42/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
25679 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
25680 status = (u_short) (buf_desc_ptr->desc_mode);
25681 if (status & (RX_CER | RX_PTE | RX_OFL))
25682 {
25683 - atomic_inc(&vcc->stats->rx_err);
25684 + atomic_inc_unchecked(&vcc->stats->rx_err);
25685 IF_ERR(printk("IA: bad packet, dropping it");)
25686 if (status & RX_CER) {
25687 IF_ERR(printk(" cause: packet CRC error\n");)
25688 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25689 len = dma_addr - buf_addr;
25690 if (len > iadev->rx_buf_sz) {
25691 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25692 - atomic_inc(&vcc->stats->rx_err);
25693 + atomic_inc_unchecked(&vcc->stats->rx_err);
25694 goto out_free_desc;
25695 }
25696
25697 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
25698 ia_vcc = INPH_IA_VCC(vcc);
25699 if (ia_vcc == NULL)
25700 {
25701 - atomic_inc(&vcc->stats->rx_err);
25702 + atomic_inc_unchecked(&vcc->stats->rx_err);
25703 dev_kfree_skb_any(skb);
25704 atm_return(vcc, atm_guess_pdu2truesize(len));
25705 goto INCR_DLE;
25706 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
25707 if ((length > iadev->rx_buf_sz) || (length >
25708 (skb->len - sizeof(struct cpcs_trailer))))
25709 {
25710 - atomic_inc(&vcc->stats->rx_err);
25711 + atomic_inc_unchecked(&vcc->stats->rx_err);
25712 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25713 length, skb->len);)
25714 dev_kfree_skb_any(skb);
25715 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
25716
25717 IF_RX(printk("rx_dle_intr: skb push");)
25718 vcc->push(vcc,skb);
25719 - atomic_inc(&vcc->stats->rx);
25720 + atomic_inc_unchecked(&vcc->stats->rx);
25721 iadev->rx_pkt_cnt++;
25722 }
25723 INCR_DLE:
25724 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
25725 {
25726 struct k_sonet_stats *stats;
25727 stats = &PRIV(_ia_dev[board])->sonet_stats;
25728 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25729 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25730 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25731 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25732 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25733 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25734 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25735 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25736 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25737 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25738 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25739 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25740 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25741 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25742 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25743 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25744 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25745 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25746 }
25747 ia_cmds.status = 0;
25748 break;
25749 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25750 if ((desc == 0) || (desc > iadev->num_tx_desc))
25751 {
25752 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25753 - atomic_inc(&vcc->stats->tx);
25754 + atomic_inc_unchecked(&vcc->stats->tx);
25755 if (vcc->pop)
25756 vcc->pop(vcc, skb);
25757 else
25758 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25759 ATM_DESC(skb) = vcc->vci;
25760 skb_queue_tail(&iadev->tx_dma_q, skb);
25761
25762 - atomic_inc(&vcc->stats->tx);
25763 + atomic_inc_unchecked(&vcc->stats->tx);
25764 iadev->tx_pkt_cnt++;
25765 /* Increment transaction counter */
25766 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25767
25768 #if 0
25769 /* add flow control logic */
25770 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25771 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25772 if (iavcc->vc_desc_cnt > 10) {
25773 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25774 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25775 diff -urNp linux-2.6.32.42/drivers/atm/lanai.c linux-2.6.32.42/drivers/atm/lanai.c
25776 --- linux-2.6.32.42/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
25777 +++ linux-2.6.32.42/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
25778 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
25779 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25780 lanai_endtx(lanai, lvcc);
25781 lanai_free_skb(lvcc->tx.atmvcc, skb);
25782 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25783 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25784 }
25785
25786 /* Try to fill the buffer - don't call unless there is backlog */
25787 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25788 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25789 __net_timestamp(skb);
25790 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25791 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25792 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25793 out:
25794 lvcc->rx.buf.ptr = end;
25795 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25796 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
25797 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25798 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25799 lanai->stats.service_rxnotaal5++;
25800 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25801 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25802 return 0;
25803 }
25804 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25805 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
25806 int bytes;
25807 read_unlock(&vcc_sklist_lock);
25808 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25809 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25810 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25811 lvcc->stats.x.aal5.service_trash++;
25812 bytes = (SERVICE_GET_END(s) * 16) -
25813 (((unsigned long) lvcc->rx.buf.ptr) -
25814 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
25815 }
25816 if (s & SERVICE_STREAM) {
25817 read_unlock(&vcc_sklist_lock);
25818 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25819 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25820 lvcc->stats.x.aal5.service_stream++;
25821 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25822 "PDU on VCI %d!\n", lanai->number, vci);
25823 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
25824 return 0;
25825 }
25826 DPRINTK("got rx crc error on vci %d\n", vci);
25827 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25828 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25829 lvcc->stats.x.aal5.service_rxcrc++;
25830 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25831 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25832 diff -urNp linux-2.6.32.42/drivers/atm/nicstar.c linux-2.6.32.42/drivers/atm/nicstar.c
25833 --- linux-2.6.32.42/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
25834 +++ linux-2.6.32.42/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
25835 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
25836 if ((vc = (vc_map *) vcc->dev_data) == NULL)
25837 {
25838 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
25839 - atomic_inc(&vcc->stats->tx_err);
25840 + atomic_inc_unchecked(&vcc->stats->tx_err);
25841 dev_kfree_skb_any(skb);
25842 return -EINVAL;
25843 }
25844 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
25845 if (!vc->tx)
25846 {
25847 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
25848 - atomic_inc(&vcc->stats->tx_err);
25849 + atomic_inc_unchecked(&vcc->stats->tx_err);
25850 dev_kfree_skb_any(skb);
25851 return -EINVAL;
25852 }
25853 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
25854 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
25855 {
25856 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
25857 - atomic_inc(&vcc->stats->tx_err);
25858 + atomic_inc_unchecked(&vcc->stats->tx_err);
25859 dev_kfree_skb_any(skb);
25860 return -EINVAL;
25861 }
25862 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
25863 if (skb_shinfo(skb)->nr_frags != 0)
25864 {
25865 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25866 - atomic_inc(&vcc->stats->tx_err);
25867 + atomic_inc_unchecked(&vcc->stats->tx_err);
25868 dev_kfree_skb_any(skb);
25869 return -EINVAL;
25870 }
25871 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
25872
25873 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
25874 {
25875 - atomic_inc(&vcc->stats->tx_err);
25876 + atomic_inc_unchecked(&vcc->stats->tx_err);
25877 dev_kfree_skb_any(skb);
25878 return -EIO;
25879 }
25880 - atomic_inc(&vcc->stats->tx);
25881 + atomic_inc_unchecked(&vcc->stats->tx);
25882
25883 return 0;
25884 }
25885 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
25886 {
25887 printk("nicstar%d: Can't allocate buffers for aal0.\n",
25888 card->index);
25889 - atomic_add(i,&vcc->stats->rx_drop);
25890 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
25891 break;
25892 }
25893 if (!atm_charge(vcc, sb->truesize))
25894 {
25895 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
25896 card->index);
25897 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25898 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25899 dev_kfree_skb_any(sb);
25900 break;
25901 }
25902 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
25903 ATM_SKB(sb)->vcc = vcc;
25904 __net_timestamp(sb);
25905 vcc->push(vcc, sb);
25906 - atomic_inc(&vcc->stats->rx);
25907 + atomic_inc_unchecked(&vcc->stats->rx);
25908 cell += ATM_CELL_PAYLOAD;
25909 }
25910
25911 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
25912 if (iovb == NULL)
25913 {
25914 printk("nicstar%d: Out of iovec buffers.\n", card->index);
25915 - atomic_inc(&vcc->stats->rx_drop);
25916 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25917 recycle_rx_buf(card, skb);
25918 return;
25919 }
25920 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
25921 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
25922 {
25923 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25924 - atomic_inc(&vcc->stats->rx_err);
25925 + atomic_inc_unchecked(&vcc->stats->rx_err);
25926 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
25927 NS_SKB(iovb)->iovcnt = 0;
25928 iovb->len = 0;
25929 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
25930 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
25931 card->index);
25932 which_list(card, skb);
25933 - atomic_inc(&vcc->stats->rx_err);
25934 + atomic_inc_unchecked(&vcc->stats->rx_err);
25935 recycle_rx_buf(card, skb);
25936 vc->rx_iov = NULL;
25937 recycle_iov_buf(card, iovb);
25938 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
25939 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
25940 card->index);
25941 which_list(card, skb);
25942 - atomic_inc(&vcc->stats->rx_err);
25943 + atomic_inc_unchecked(&vcc->stats->rx_err);
25944 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25945 NS_SKB(iovb)->iovcnt);
25946 vc->rx_iov = NULL;
25947 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
25948 printk(" - PDU size mismatch.\n");
25949 else
25950 printk(".\n");
25951 - atomic_inc(&vcc->stats->rx_err);
25952 + atomic_inc_unchecked(&vcc->stats->rx_err);
25953 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25954 NS_SKB(iovb)->iovcnt);
25955 vc->rx_iov = NULL;
25956 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
25957 if (!atm_charge(vcc, skb->truesize))
25958 {
25959 push_rxbufs(card, skb);
25960 - atomic_inc(&vcc->stats->rx_drop);
25961 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25962 }
25963 else
25964 {
25965 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
25966 ATM_SKB(skb)->vcc = vcc;
25967 __net_timestamp(skb);
25968 vcc->push(vcc, skb);
25969 - atomic_inc(&vcc->stats->rx);
25970 + atomic_inc_unchecked(&vcc->stats->rx);
25971 }
25972 }
25973 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
25974 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
25975 if (!atm_charge(vcc, sb->truesize))
25976 {
25977 push_rxbufs(card, sb);
25978 - atomic_inc(&vcc->stats->rx_drop);
25979 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25980 }
25981 else
25982 {
25983 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
25984 ATM_SKB(sb)->vcc = vcc;
25985 __net_timestamp(sb);
25986 vcc->push(vcc, sb);
25987 - atomic_inc(&vcc->stats->rx);
25988 + atomic_inc_unchecked(&vcc->stats->rx);
25989 }
25990
25991 push_rxbufs(card, skb);
25992 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
25993 if (!atm_charge(vcc, skb->truesize))
25994 {
25995 push_rxbufs(card, skb);
25996 - atomic_inc(&vcc->stats->rx_drop);
25997 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25998 }
25999 else
26000 {
26001 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26002 ATM_SKB(skb)->vcc = vcc;
26003 __net_timestamp(skb);
26004 vcc->push(vcc, skb);
26005 - atomic_inc(&vcc->stats->rx);
26006 + atomic_inc_unchecked(&vcc->stats->rx);
26007 }
26008
26009 push_rxbufs(card, sb);
26010 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26011 if (hb == NULL)
26012 {
26013 printk("nicstar%d: Out of huge buffers.\n", card->index);
26014 - atomic_inc(&vcc->stats->rx_drop);
26015 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26016 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26017 NS_SKB(iovb)->iovcnt);
26018 vc->rx_iov = NULL;
26019 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26020 }
26021 else
26022 dev_kfree_skb_any(hb);
26023 - atomic_inc(&vcc->stats->rx_drop);
26024 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26025 }
26026 else
26027 {
26028 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26029 #endif /* NS_USE_DESTRUCTORS */
26030 __net_timestamp(hb);
26031 vcc->push(vcc, hb);
26032 - atomic_inc(&vcc->stats->rx);
26033 + atomic_inc_unchecked(&vcc->stats->rx);
26034 }
26035 }
26036
26037 diff -urNp linux-2.6.32.42/drivers/atm/solos-pci.c linux-2.6.32.42/drivers/atm/solos-pci.c
26038 --- linux-2.6.32.42/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26039 +++ linux-2.6.32.42/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26040 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26041 }
26042 atm_charge(vcc, skb->truesize);
26043 vcc->push(vcc, skb);
26044 - atomic_inc(&vcc->stats->rx);
26045 + atomic_inc_unchecked(&vcc->stats->rx);
26046 break;
26047
26048 case PKT_STATUS:
26049 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26050 char msg[500];
26051 char item[10];
26052
26053 + pax_track_stack();
26054 +
26055 len = buf->len;
26056 for (i = 0; i < len; i++){
26057 if(i % 8 == 0)
26058 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26059 vcc = SKB_CB(oldskb)->vcc;
26060
26061 if (vcc) {
26062 - atomic_inc(&vcc->stats->tx);
26063 + atomic_inc_unchecked(&vcc->stats->tx);
26064 solos_pop(vcc, oldskb);
26065 } else
26066 dev_kfree_skb_irq(oldskb);
26067 diff -urNp linux-2.6.32.42/drivers/atm/suni.c linux-2.6.32.42/drivers/atm/suni.c
26068 --- linux-2.6.32.42/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26069 +++ linux-2.6.32.42/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26070 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26071
26072
26073 #define ADD_LIMITED(s,v) \
26074 - atomic_add((v),&stats->s); \
26075 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26076 + atomic_add_unchecked((v),&stats->s); \
26077 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26078
26079
26080 static void suni_hz(unsigned long from_timer)
26081 diff -urNp linux-2.6.32.42/drivers/atm/uPD98402.c linux-2.6.32.42/drivers/atm/uPD98402.c
26082 --- linux-2.6.32.42/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26083 +++ linux-2.6.32.42/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26084 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26085 struct sonet_stats tmp;
26086 int error = 0;
26087
26088 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26089 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26090 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26091 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26092 if (zero && !error) {
26093 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26094
26095
26096 #define ADD_LIMITED(s,v) \
26097 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26098 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26099 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26100 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26101 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26102 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26103
26104
26105 static void stat_event(struct atm_dev *dev)
26106 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26107 if (reason & uPD98402_INT_PFM) stat_event(dev);
26108 if (reason & uPD98402_INT_PCO) {
26109 (void) GET(PCOCR); /* clear interrupt cause */
26110 - atomic_add(GET(HECCT),
26111 + atomic_add_unchecked(GET(HECCT),
26112 &PRIV(dev)->sonet_stats.uncorr_hcs);
26113 }
26114 if ((reason & uPD98402_INT_RFO) &&
26115 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26116 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26117 uPD98402_INT_LOS),PIMR); /* enable them */
26118 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26119 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26120 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26121 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26122 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26123 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26124 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26125 return 0;
26126 }
26127
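The ADD_LIMITED() macros rewritten just above (suni and uPD98402) show why these counters have to opt out of the overflow check: the existing idiom adds first and, once a read observes the counter gone negative, clamps it back to INT_MAX. That deliberate wrap would look like a refcount bug to checked atomics, hence the switch to the _unchecked add/read/set helpers. A simplified plain-C version of the clamp, illustrative only and clamping slightly earlier than the kernel macro does:

#include <limits.h>
#include <stdio.h>

/* Saturating statistics add: never report a negative or wrapped value. */
static int add_limited(int counter, unsigned int delta)
{
        long long sum = (long long)counter + delta;

        return sum > INT_MAX ? INT_MAX : (int)sum;
}

int main(void)
{
        int rx_cells = INT_MAX - 2;

        rx_cells = add_limited(rx_cells, 5);     /* would otherwise wrap */
        printf("rx_cells clamped to %d\n", rx_cells);
        return 0;
}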
26128 diff -urNp linux-2.6.32.42/drivers/atm/zatm.c linux-2.6.32.42/drivers/atm/zatm.c
26129 --- linux-2.6.32.42/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26130 +++ linux-2.6.32.42/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26131 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26132 }
26133 if (!size) {
26134 dev_kfree_skb_irq(skb);
26135 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26136 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26137 continue;
26138 }
26139 if (!atm_charge(vcc,skb->truesize)) {
26140 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26141 skb->len = size;
26142 ATM_SKB(skb)->vcc = vcc;
26143 vcc->push(vcc,skb);
26144 - atomic_inc(&vcc->stats->rx);
26145 + atomic_inc_unchecked(&vcc->stats->rx);
26146 }
26147 zout(pos & 0xffff,MTA(mbx));
26148 #if 0 /* probably a stupid idea */
26149 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26150 skb_queue_head(&zatm_vcc->backlog,skb);
26151 break;
26152 }
26153 - atomic_inc(&vcc->stats->tx);
26154 + atomic_inc_unchecked(&vcc->stats->tx);
26155 wake_up(&zatm_vcc->tx_wait);
26156 }
26157
26158 diff -urNp linux-2.6.32.42/drivers/base/bus.c linux-2.6.32.42/drivers/base/bus.c
26159 --- linux-2.6.32.42/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26160 +++ linux-2.6.32.42/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26161 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26162 return ret;
26163 }
26164
26165 -static struct sysfs_ops driver_sysfs_ops = {
26166 +static const struct sysfs_ops driver_sysfs_ops = {
26167 .show = drv_attr_show,
26168 .store = drv_attr_store,
26169 };
26170 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26171 return ret;
26172 }
26173
26174 -static struct sysfs_ops bus_sysfs_ops = {
26175 +static const struct sysfs_ops bus_sysfs_ops = {
26176 .show = bus_attr_show,
26177 .store = bus_attr_store,
26178 };
26179 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26180 return 0;
26181 }
26182
26183 -static struct kset_uevent_ops bus_uevent_ops = {
26184 +static const struct kset_uevent_ops bus_uevent_ops = {
26185 .filter = bus_uevent_filter,
26186 };
26187
26188 diff -urNp linux-2.6.32.42/drivers/base/class.c linux-2.6.32.42/drivers/base/class.c
26189 --- linux-2.6.32.42/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26190 +++ linux-2.6.32.42/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26191 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26192 kfree(cp);
26193 }
26194
26195 -static struct sysfs_ops class_sysfs_ops = {
26196 +static const struct sysfs_ops class_sysfs_ops = {
26197 .show = class_attr_show,
26198 .store = class_attr_store,
26199 };
26200 diff -urNp linux-2.6.32.42/drivers/base/core.c linux-2.6.32.42/drivers/base/core.c
26201 --- linux-2.6.32.42/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26202 +++ linux-2.6.32.42/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26203 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26204 return ret;
26205 }
26206
26207 -static struct sysfs_ops dev_sysfs_ops = {
26208 +static const struct sysfs_ops dev_sysfs_ops = {
26209 .show = dev_attr_show,
26210 .store = dev_attr_store,
26211 };
26212 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26213 return retval;
26214 }
26215
26216 -static struct kset_uevent_ops device_uevent_ops = {
26217 +static const struct kset_uevent_ops device_uevent_ops = {
26218 .filter = dev_uevent_filter,
26219 .name = dev_uevent_name,
26220 .uevent = dev_uevent,
26221 diff -urNp linux-2.6.32.42/drivers/base/memory.c linux-2.6.32.42/drivers/base/memory.c
26222 --- linux-2.6.32.42/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26223 +++ linux-2.6.32.42/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26224 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26225 return retval;
26226 }
26227
26228 -static struct kset_uevent_ops memory_uevent_ops = {
26229 +static const struct kset_uevent_ops memory_uevent_ops = {
26230 .name = memory_uevent_name,
26231 .uevent = memory_uevent,
26232 };
26233 diff -urNp linux-2.6.32.42/drivers/base/sys.c linux-2.6.32.42/drivers/base/sys.c
26234 --- linux-2.6.32.42/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26235 +++ linux-2.6.32.42/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26236 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26237 return -EIO;
26238 }
26239
26240 -static struct sysfs_ops sysfs_ops = {
26241 +static const struct sysfs_ops sysfs_ops = {
26242 .show = sysdev_show,
26243 .store = sysdev_store,
26244 };
26245 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26246 return -EIO;
26247 }
26248
26249 -static struct sysfs_ops sysfs_class_ops = {
26250 +static const struct sysfs_ops sysfs_class_ops = {
26251 .show = sysdev_class_show,
26252 .store = sysdev_class_store,
26253 };
26254 diff -urNp linux-2.6.32.42/drivers/block/cciss.c linux-2.6.32.42/drivers/block/cciss.c
26255 --- linux-2.6.32.42/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26256 +++ linux-2.6.32.42/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26257 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26258 int err;
26259 u32 cp;
26260
26261 + memset(&arg64, 0, sizeof(arg64));
26262 +
26263 err = 0;
26264 err |=
26265 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26266 diff -urNp linux-2.6.32.42/drivers/block/cpqarray.c linux-2.6.32.42/drivers/block/cpqarray.c
26267 --- linux-2.6.32.42/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26268 +++ linux-2.6.32.42/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26269 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26270 struct scatterlist tmp_sg[SG_MAX];
26271 int i, dir, seg;
26272
26273 + pax_track_stack();
26274 +
26275 if (blk_queue_plugged(q))
26276 goto startio;
26277
26278 diff -urNp linux-2.6.32.42/drivers/block/DAC960.c linux-2.6.32.42/drivers/block/DAC960.c
26279 --- linux-2.6.32.42/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26280 +++ linux-2.6.32.42/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26281 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26282 unsigned long flags;
26283 int Channel, TargetID;
26284
26285 + pax_track_stack();
26286 +
26287 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26288 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26289 sizeof(DAC960_SCSI_Inquiry_T) +
26290 diff -urNp linux-2.6.32.42/drivers/block/nbd.c linux-2.6.32.42/drivers/block/nbd.c
26291 --- linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
26292 +++ linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
26293 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26294 struct kvec iov;
26295 sigset_t blocked, oldset;
26296
26297 + pax_track_stack();
26298 +
26299 if (unlikely(!sock)) {
26300 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26301 lo->disk->disk_name, (send ? "send" : "recv"));
26302 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26303 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26304 unsigned int cmd, unsigned long arg)
26305 {
26306 + pax_track_stack();
26307 +
26308 switch (cmd) {
26309 case NBD_DISCONNECT: {
26310 struct request sreq;
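The pax_track_stack() calls inserted above, in print_buffer() (solos-pci), do_ida_request() (cpqarray), DAC960_V1_ReadDeviceConfiguration(), sock_xmit() and __nbd_ioctl() (nbd), all sit in functions with unusually large stack frames (scatterlists, 500-byte message buffers and the like). The helper belongs to PaX's stack-sanitising support: as far as this pattern goes, it records how deep the kernel stack has grown in the current thread so the used region can be wiped before returning to user space instead of leaving stale data behind. A rough userspace model of that bookkeeping only, with invented names (lowest_sp_seen, track_stack_model):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uintptr_t lowest_sp_seen = UINTPTR_MAX;

static void track_stack_model(void)
{
        char marker;                              /* approximates the SP        */
        uintptr_t sp = (uintptr_t)&marker;

        if (sp < lowest_sp_seen)                  /* assumes a descending stack */
                lowest_sp_seen = sp;
}

static void big_frame_function(void)
{
        char buf[512];                            /* large on-stack buffer      */

        track_stack_model();                      /* what the patch inserts     */
        memset(buf, 0xAA, sizeof(buf));
}

int main(void)
{
        big_frame_function();
        printf("deepest stack pointer observed: %#lx\n",
               (unsigned long)lowest_sp_seen);
        return 0;
}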
26311 diff -urNp linux-2.6.32.42/drivers/block/pktcdvd.c linux-2.6.32.42/drivers/block/pktcdvd.c
26312 --- linux-2.6.32.42/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26313 +++ linux-2.6.32.42/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26314 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26315 return len;
26316 }
26317
26318 -static struct sysfs_ops kobj_pkt_ops = {
26319 +static const struct sysfs_ops kobj_pkt_ops = {
26320 .show = kobj_pkt_show,
26321 .store = kobj_pkt_store
26322 };
26323 diff -urNp linux-2.6.32.42/drivers/char/agp/frontend.c linux-2.6.32.42/drivers/char/agp/frontend.c
26324 --- linux-2.6.32.42/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26325 +++ linux-2.6.32.42/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26326 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26327 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26328 return -EFAULT;
26329
26330 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26331 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26332 return -EFAULT;
26333
26334 client = agp_find_client_by_pid(reserve.pid);
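The single-token agp change above is a bounds-check fix rather than a cleanup: the reservation described by reserve.seg_count is later expanded into an array of struct agp_segment_priv, the larger in-kernel type, so the wrap check on the multiplication has to be written against that element size; checking against the smaller user-visible struct agp_segment would still let the private allocation's size overflow. A generic standalone sketch of the rule, with made-up types (struct elem_priv, alloc_array):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct elem_priv { void *ptr; uint64_t pad[3]; };   /* what really gets allocated */

static void *alloc_array(unsigned int count)
{
        /* Reject counts for which count * sizeof(struct elem_priv) would
         * wrap; the check must use the element type of this allocation.  */
        if (count >= ~0U / sizeof(struct elem_priv))
                return NULL;
        return malloc((size_t)count * sizeof(struct elem_priv));
}

int main(void)
{
        void *ok  = alloc_array(16);
        void *bad = alloc_array(~0U / 8);           /* rejected by the check */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}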
26335 diff -urNp linux-2.6.32.42/drivers/char/briq_panel.c linux-2.6.32.42/drivers/char/briq_panel.c
26336 --- linux-2.6.32.42/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26337 +++ linux-2.6.32.42/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26338 @@ -10,6 +10,7 @@
26339 #include <linux/types.h>
26340 #include <linux/errno.h>
26341 #include <linux/tty.h>
26342 +#include <linux/mutex.h>
26343 #include <linux/timer.h>
26344 #include <linux/kernel.h>
26345 #include <linux/wait.h>
26346 @@ -36,6 +37,7 @@ static int vfd_is_open;
26347 static unsigned char vfd[40];
26348 static int vfd_cursor;
26349 static unsigned char ledpb, led;
26350 +static DEFINE_MUTEX(vfd_mutex);
26351
26352 static void update_vfd(void)
26353 {
26354 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26355 if (!vfd_is_open)
26356 return -EBUSY;
26357
26358 + mutex_lock(&vfd_mutex);
26359 for (;;) {
26360 char c;
26361 if (!indx)
26362 break;
26363 - if (get_user(c, buf))
26364 + if (get_user(c, buf)) {
26365 + mutex_unlock(&vfd_mutex);
26366 return -EFAULT;
26367 + }
26368 if (esc) {
26369 set_led(c);
26370 esc = 0;
26371 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26372 buf++;
26373 }
26374 update_vfd();
26375 + mutex_unlock(&vfd_mutex);
26376
26377 return len;
26378 }
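The briq_panel hunk above does two related things: it introduces vfd_mutex and holds it for the whole of briq_panel_write(), since concurrent writers otherwise race on the shared vfd[] buffer, vfd_cursor and LED state, and it is careful to release the mutex on the early get_user() failure path as well as at the end of the function. A stripped-down userspace model of that lock-on-every-exit shape, with invented names and a fake copy_byte() standing in for get_user():

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>

static pthread_mutex_t vfd_mutex = PTHREAD_MUTEX_INITIALIZER;
static char vfd[40];
static size_t vfd_cursor;

/* Stand-in for get_user(): "faults" when the source pointer is NULL. */
static int copy_byte(char *dst, const char *src)
{
        if (src == NULL)
                return -EFAULT;
        *dst = *src;
        return 0;
}

static ssize_t model_write(const char *buf, size_t len)
{
        pthread_mutex_lock(&vfd_mutex);
        for (size_t i = 0; i < len; i++) {
                char c;

                if (copy_byte(&c, buf ? buf + i : NULL)) {
                        pthread_mutex_unlock(&vfd_mutex);   /* error-path unlock */
                        return -EFAULT;
                }
                vfd[vfd_cursor++ % sizeof(vfd)] = c;
        }
        pthread_mutex_unlock(&vfd_mutex);
        return (ssize_t)len;
}

int main(void)
{
        printf("normal write   -> %zd\n", model_write("hello", 5));
        printf("faulting write -> %zd\n", model_write(NULL, 5));
        return 0;
}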
26379 diff -urNp linux-2.6.32.42/drivers/char/genrtc.c linux-2.6.32.42/drivers/char/genrtc.c
26380 --- linux-2.6.32.42/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26381 +++ linux-2.6.32.42/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26382 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26383 switch (cmd) {
26384
26385 case RTC_PLL_GET:
26386 + memset(&pll, 0, sizeof(pll));
26387 if (get_rtc_pll(&pll))
26388 return -EINVAL;
26389 else
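[Editorial note, not part of the patch: the genrtc and hpet hunks zero a stack structure before it is filled and copied out, so padding bytes and conditionally-set fields never leak stale kernel stack contents. A userspace sketch of the pattern, with a made-up structure:]

#include <string.h>

struct report {
        int value;
        int flags;      /* only written on some paths */
        char pad[6];    /* compiler padding would otherwise be uninitialized */
};

void fill_report(struct report *r, int have_flags)
{
        memset(r, 0, sizeof(*r));       /* no uninitialized bytes can escape */
        r->value = 42;
        if (have_flags)
                r->flags = 1;
}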
26390 diff -urNp linux-2.6.32.42/drivers/char/hpet.c linux-2.6.32.42/drivers/char/hpet.c
26391 --- linux-2.6.32.42/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26392 +++ linux-2.6.32.42/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26393 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26394 return 0;
26395 }
26396
26397 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26398 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26399
26400 static int
26401 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26402 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26403 }
26404
26405 static int
26406 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26407 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26408 {
26409 struct hpet_timer __iomem *timer;
26410 struct hpet __iomem *hpet;
26411 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26412 {
26413 struct hpet_info info;
26414
26415 + memset(&info, 0, sizeof(info));
26416 +
26417 if (devp->hd_ireqfreq)
26418 info.hi_ireqfreq =
26419 hpet_time_div(hpetp, devp->hd_ireqfreq);
26420 - else
26421 - info.hi_ireqfreq = 0;
26422 info.hi_flags =
26423 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26424 info.hi_hpet = hpetp->hp_which;
26425 diff -urNp linux-2.6.32.42/drivers/char/hvc_beat.c linux-2.6.32.42/drivers/char/hvc_beat.c
26426 --- linux-2.6.32.42/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26427 +++ linux-2.6.32.42/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26428 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26429 return cnt;
26430 }
26431
26432 -static struct hv_ops hvc_beat_get_put_ops = {
26433 +static const struct hv_ops hvc_beat_get_put_ops = {
26434 .get_chars = hvc_beat_get_chars,
26435 .put_chars = hvc_beat_put_chars,
26436 };
26437 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.c linux-2.6.32.42/drivers/char/hvc_console.c
26438 --- linux-2.6.32.42/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26439 +++ linux-2.6.32.42/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26440 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26441 * console interfaces but can still be used as a tty device. This has to be
26442 * static because kmalloc will not work during early console init.
26443 */
26444 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26445 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26446 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26447 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26448
26449 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26450 * vty adapters do NOT get an hvc_instantiate() callback since they
26451 * appear after early console init.
26452 */
26453 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26454 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26455 {
26456 struct hvc_struct *hp;
26457
26458 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26459 };
26460
26461 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26462 - struct hv_ops *ops, int outbuf_size)
26463 + const struct hv_ops *ops, int outbuf_size)
26464 {
26465 struct hvc_struct *hp;
26466 int i;
26467 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.h linux-2.6.32.42/drivers/char/hvc_console.h
26468 --- linux-2.6.32.42/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26469 +++ linux-2.6.32.42/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26470 @@ -55,7 +55,7 @@ struct hvc_struct {
26471 int outbuf_size;
26472 int n_outbuf;
26473 uint32_t vtermno;
26474 - struct hv_ops *ops;
26475 + const struct hv_ops *ops;
26476 int irq_requested;
26477 int data;
26478 struct winsize ws;
26479 @@ -76,11 +76,11 @@ struct hv_ops {
26480 };
26481
26482 /* Register a vterm and a slot index for use as a console (console_init) */
26483 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26484 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26485
26486 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26487 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26488 - struct hv_ops *ops, int outbuf_size);
26489 + const struct hv_ops *ops, int outbuf_size);
26490 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26491 extern int hvc_remove(struct hvc_struct *hp);
26492
26493 diff -urNp linux-2.6.32.42/drivers/char/hvc_iseries.c linux-2.6.32.42/drivers/char/hvc_iseries.c
26494 --- linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26495 +++ linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26496 @@ -197,7 +197,7 @@ done:
26497 return sent;
26498 }
26499
26500 -static struct hv_ops hvc_get_put_ops = {
26501 +static const struct hv_ops hvc_get_put_ops = {
26502 .get_chars = get_chars,
26503 .put_chars = put_chars,
26504 .notifier_add = notifier_add_irq,
26505 diff -urNp linux-2.6.32.42/drivers/char/hvc_iucv.c linux-2.6.32.42/drivers/char/hvc_iucv.c
26506 --- linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26507 +++ linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26508 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26509
26510
26511 /* HVC operations */
26512 -static struct hv_ops hvc_iucv_ops = {
26513 +static const struct hv_ops hvc_iucv_ops = {
26514 .get_chars = hvc_iucv_get_chars,
26515 .put_chars = hvc_iucv_put_chars,
26516 .notifier_add = hvc_iucv_notifier_add,
26517 diff -urNp linux-2.6.32.42/drivers/char/hvc_rtas.c linux-2.6.32.42/drivers/char/hvc_rtas.c
26518 --- linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26519 +++ linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26520 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26521 return i;
26522 }
26523
26524 -static struct hv_ops hvc_rtas_get_put_ops = {
26525 +static const struct hv_ops hvc_rtas_get_put_ops = {
26526 .get_chars = hvc_rtas_read_console,
26527 .put_chars = hvc_rtas_write_console,
26528 };
26529 diff -urNp linux-2.6.32.42/drivers/char/hvcs.c linux-2.6.32.42/drivers/char/hvcs.c
26530 --- linux-2.6.32.42/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26531 +++ linux-2.6.32.42/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26532 @@ -82,6 +82,7 @@
26533 #include <asm/hvcserver.h>
26534 #include <asm/uaccess.h>
26535 #include <asm/vio.h>
26536 +#include <asm/local.h>
26537
26538 /*
26539 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26540 @@ -269,7 +270,7 @@ struct hvcs_struct {
26541 unsigned int index;
26542
26543 struct tty_struct *tty;
26544 - int open_count;
26545 + local_t open_count;
26546
26547 /*
26548 * Used to tell the driver kernel_thread what operations need to take
26549 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26550
26551 spin_lock_irqsave(&hvcsd->lock, flags);
26552
26553 - if (hvcsd->open_count > 0) {
26554 + if (local_read(&hvcsd->open_count) > 0) {
26555 spin_unlock_irqrestore(&hvcsd->lock, flags);
26556 printk(KERN_INFO "HVCS: vterm state unchanged. "
26557 "The hvcs device node is still in use.\n");
26558 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26559 if ((retval = hvcs_partner_connect(hvcsd)))
26560 goto error_release;
26561
26562 - hvcsd->open_count = 1;
26563 + local_set(&hvcsd->open_count, 1);
26564 hvcsd->tty = tty;
26565 tty->driver_data = hvcsd;
26566
26567 @@ -1169,7 +1170,7 @@ fast_open:
26568
26569 spin_lock_irqsave(&hvcsd->lock, flags);
26570 kref_get(&hvcsd->kref);
26571 - hvcsd->open_count++;
26572 + local_inc(&hvcsd->open_count);
26573 hvcsd->todo_mask |= HVCS_SCHED_READ;
26574 spin_unlock_irqrestore(&hvcsd->lock, flags);
26575
26576 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26577 hvcsd = tty->driver_data;
26578
26579 spin_lock_irqsave(&hvcsd->lock, flags);
26580 - if (--hvcsd->open_count == 0) {
26581 + if (local_dec_and_test(&hvcsd->open_count)) {
26582
26583 vio_disable_interrupts(hvcsd->vdev);
26584
26585 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26586 free_irq(irq, hvcsd);
26587 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26588 return;
26589 - } else if (hvcsd->open_count < 0) {
26590 + } else if (local_read(&hvcsd->open_count) < 0) {
26591 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26592 " is missmanaged.\n",
26593 - hvcsd->vdev->unit_address, hvcsd->open_count);
26594 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26595 }
26596
26597 spin_unlock_irqrestore(&hvcsd->lock, flags);
26598 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26599
26600 spin_lock_irqsave(&hvcsd->lock, flags);
26601 /* Preserve this so that we know how many kref refs to put */
26602 - temp_open_count = hvcsd->open_count;
26603 + temp_open_count = local_read(&hvcsd->open_count);
26604
26605 /*
26606 * Don't kref put inside the spinlock because the destruction
26607 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26608 hvcsd->tty->driver_data = NULL;
26609 hvcsd->tty = NULL;
26610
26611 - hvcsd->open_count = 0;
26612 + local_set(&hvcsd->open_count, 0);
26613
26614 /* This will drop any buffered data on the floor which is OK in a hangup
26615 * scenario. */
26616 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26617 * the middle of a write operation? This is a crummy place to do this
26618 * but we want to keep it all in the spinlock.
26619 */
26620 - if (hvcsd->open_count <= 0) {
26621 + if (local_read(&hvcsd->open_count) <= 0) {
26622 spin_unlock_irqrestore(&hvcsd->lock, flags);
26623 return -ENODEV;
26624 }
26625 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26626 {
26627 struct hvcs_struct *hvcsd = tty->driver_data;
26628
26629 - if (!hvcsd || hvcsd->open_count <= 0)
26630 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26631 return 0;
26632
26633 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
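[Editorial note, not part of the patch: the hvcs hunks replace a plain int open count with a local_t accessed through local_read/local_inc/local_dec_and_test, so concurrent open and close paths cannot lose updates. A C11 userspace analogue using stdatomic (the kernel's local_t is per-CPU and cheaper, this only illustrates the counting pattern):]

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int open_count;   /* statics are zero-initialized */

void device_open(void)
{
        atomic_fetch_add(&open_count, 1);
}

bool device_close(void)
{
        /* true when the last user went away, mirroring local_dec_and_test() */
        return atomic_fetch_sub(&open_count, 1) == 1;
}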
26634 diff -urNp linux-2.6.32.42/drivers/char/hvc_udbg.c linux-2.6.32.42/drivers/char/hvc_udbg.c
26635 --- linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
26636 +++ linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
26637 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
26638 return i;
26639 }
26640
26641 -static struct hv_ops hvc_udbg_ops = {
26642 +static const struct hv_ops hvc_udbg_ops = {
26643 .get_chars = hvc_udbg_get,
26644 .put_chars = hvc_udbg_put,
26645 };
26646 diff -urNp linux-2.6.32.42/drivers/char/hvc_vio.c linux-2.6.32.42/drivers/char/hvc_vio.c
26647 --- linux-2.6.32.42/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
26648 +++ linux-2.6.32.42/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
26649 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
26650 return got;
26651 }
26652
26653 -static struct hv_ops hvc_get_put_ops = {
26654 +static const struct hv_ops hvc_get_put_ops = {
26655 .get_chars = filtered_get_chars,
26656 .put_chars = hvc_put_chars,
26657 .notifier_add = notifier_add_irq,
26658 diff -urNp linux-2.6.32.42/drivers/char/hvc_xen.c linux-2.6.32.42/drivers/char/hvc_xen.c
26659 --- linux-2.6.32.42/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
26660 +++ linux-2.6.32.42/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
26661 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
26662 return recv;
26663 }
26664
26665 -static struct hv_ops hvc_ops = {
26666 +static const struct hv_ops hvc_ops = {
26667 .get_chars = read_console,
26668 .put_chars = write_console,
26669 .notifier_add = notifier_add_irq,
26670 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c
26671 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
26672 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
26673 @@ -414,7 +414,7 @@ struct ipmi_smi {
26674 struct proc_dir_entry *proc_dir;
26675 char proc_dir_name[10];
26676
26677 - atomic_t stats[IPMI_NUM_STATS];
26678 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26679
26680 /*
26681 * run_to_completion duplicate of smb_info, smi_info
26682 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26683
26684
26685 #define ipmi_inc_stat(intf, stat) \
26686 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26687 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26688 #define ipmi_get_stat(intf, stat) \
26689 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26690 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26691
26692 static int is_lan_addr(struct ipmi_addr *addr)
26693 {
26694 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26695 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26696 init_waitqueue_head(&intf->waitq);
26697 for (i = 0; i < IPMI_NUM_STATS; i++)
26698 - atomic_set(&intf->stats[i], 0);
26699 + atomic_set_unchecked(&intf->stats[i], 0);
26700
26701 intf->proc_dir = NULL;
26702
26703 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
26704 struct ipmi_smi_msg smi_msg;
26705 struct ipmi_recv_msg recv_msg;
26706
26707 + pax_track_stack();
26708 +
26709 si = (struct ipmi_system_interface_addr *) &addr;
26710 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26711 si->channel = IPMI_BMC_CHANNEL;
26712 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c
26713 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
26714 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
26715 @@ -277,7 +277,7 @@ struct smi_info {
26716 unsigned char slave_addr;
26717
26718 /* Counters and things for the proc filesystem. */
26719 - atomic_t stats[SI_NUM_STATS];
26720 + atomic_unchecked_t stats[SI_NUM_STATS];
26721
26722 struct task_struct *thread;
26723
26724 @@ -285,9 +285,9 @@ struct smi_info {
26725 };
26726
26727 #define smi_inc_stat(smi, stat) \
26728 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26729 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26730 #define smi_get_stat(smi, stat) \
26731 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26732 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26733
26734 #define SI_MAX_PARMS 4
26735
26736 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
26737 atomic_set(&new_smi->req_events, 0);
26738 new_smi->run_to_completion = 0;
26739 for (i = 0; i < SI_NUM_STATS; i++)
26740 - atomic_set(&new_smi->stats[i], 0);
26741 + atomic_set_unchecked(&new_smi->stats[i], 0);
26742
26743 new_smi->interrupt_disabled = 0;
26744 atomic_set(&new_smi->stop_operation, 0);
26745 diff -urNp linux-2.6.32.42/drivers/char/istallion.c linux-2.6.32.42/drivers/char/istallion.c
26746 --- linux-2.6.32.42/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
26747 +++ linux-2.6.32.42/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
26748 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
26749 * re-used for each stats call.
26750 */
26751 static comstats_t stli_comstats;
26752 -static combrd_t stli_brdstats;
26753 static struct asystats stli_cdkstats;
26754
26755 /*****************************************************************************/
26756 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
26757 {
26758 struct stlibrd *brdp;
26759 unsigned int i;
26760 + combrd_t stli_brdstats;
26761
26762 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
26763 return -EFAULT;
26764 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
26765 struct stliport stli_dummyport;
26766 struct stliport *portp;
26767
26768 + pax_track_stack();
26769 +
26770 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
26771 return -EFAULT;
26772 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
26773 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
26774 struct stlibrd stli_dummybrd;
26775 struct stlibrd *brdp;
26776
26777 + pax_track_stack();
26778 +
26779 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
26780 return -EFAULT;
26781 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
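[Editorial note, not part of the patch: the istallion hunk moves stli_brdstats from a shared static into the ioctl handler's stack frame, so two concurrent callers can no longer clobber each other's scratch data. A trivial sketch of the same choice, names invented:]

#include <string.h>

struct stats { int boards; int ports; };

int get_stats_copy(struct stats *out)
{
        struct stats scratch;           /* per-call scratch, not a shared static */

        memset(&scratch, 0, sizeof(scratch));
        scratch.boards = 1;
        scratch.ports = 8;
        *out = scratch;
        return 0;
}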
26782 diff -urNp linux-2.6.32.42/drivers/char/Kconfig linux-2.6.32.42/drivers/char/Kconfig
26783 --- linux-2.6.32.42/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
26784 +++ linux-2.6.32.42/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
26785 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
26786
26787 config DEVKMEM
26788 bool "/dev/kmem virtual device support"
26789 - default y
26790 + default n
26791 + depends on !GRKERNSEC_KMEM
26792 help
26793 Say Y here if you want to support the /dev/kmem device. The
26794 /dev/kmem device is rarely used, but can be used for certain
26795 @@ -1114,6 +1115,7 @@ config DEVPORT
26796 bool
26797 depends on !M68K
26798 depends on ISA || PCI
26799 + depends on !GRKERNSEC_KMEM
26800 default y
26801
26802 source "drivers/s390/char/Kconfig"
26803 diff -urNp linux-2.6.32.42/drivers/char/keyboard.c linux-2.6.32.42/drivers/char/keyboard.c
26804 --- linux-2.6.32.42/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
26805 +++ linux-2.6.32.42/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
26806 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
26807 kbd->kbdmode == VC_MEDIUMRAW) &&
26808 value != KVAL(K_SAK))
26809 return; /* SAK is allowed even in raw mode */
26810 +
26811 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
26812 + {
26813 + void *func = fn_handler[value];
26814 + if (func == fn_show_state || func == fn_show_ptregs ||
26815 + func == fn_show_mem)
26816 + return;
26817 + }
26818 +#endif
26819 +
26820 fn_handler[value](vc);
26821 }
26822
26823 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
26824 .evbit = { BIT_MASK(EV_SND) },
26825 },
26826
26827 - { }, /* Terminating entry */
26828 + { 0 }, /* Terminating entry */
26829 };
26830
26831 MODULE_DEVICE_TABLE(input, kbd_ids);
26832 diff -urNp linux-2.6.32.42/drivers/char/mem.c linux-2.6.32.42/drivers/char/mem.c
26833 --- linux-2.6.32.42/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
26834 +++ linux-2.6.32.42/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
26835 @@ -18,6 +18,7 @@
26836 #include <linux/raw.h>
26837 #include <linux/tty.h>
26838 #include <linux/capability.h>
26839 +#include <linux/security.h>
26840 #include <linux/ptrace.h>
26841 #include <linux/device.h>
26842 #include <linux/highmem.h>
26843 @@ -35,6 +36,10 @@
26844 # include <linux/efi.h>
26845 #endif
26846
26847 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26848 +extern struct file_operations grsec_fops;
26849 +#endif
26850 +
26851 static inline unsigned long size_inside_page(unsigned long start,
26852 unsigned long size)
26853 {
26854 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
26855
26856 while (cursor < to) {
26857 if (!devmem_is_allowed(pfn)) {
26858 +#ifdef CONFIG_GRKERNSEC_KMEM
26859 + gr_handle_mem_readwrite(from, to);
26860 +#else
26861 printk(KERN_INFO
26862 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26863 current->comm, from, to);
26864 +#endif
26865 return 0;
26866 }
26867 cursor += PAGE_SIZE;
26868 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
26869 }
26870 return 1;
26871 }
26872 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26873 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26874 +{
26875 + return 0;
26876 +}
26877 #else
26878 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26879 {
26880 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
26881 #endif
26882
26883 while (count > 0) {
26884 + char *temp;
26885 +
26886 /*
26887 * Handle first page in case it's not aligned
26888 */
26889 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
26890 if (!ptr)
26891 return -EFAULT;
26892
26893 - if (copy_to_user(buf, ptr, sz)) {
26894 +#ifdef CONFIG_PAX_USERCOPY
26895 + temp = kmalloc(sz, GFP_KERNEL);
26896 + if (!temp) {
26897 + unxlate_dev_mem_ptr(p, ptr);
26898 + return -ENOMEM;
26899 + }
26900 + memcpy(temp, ptr, sz);
26901 +#else
26902 + temp = ptr;
26903 +#endif
26904 +
26905 + if (copy_to_user(buf, temp, sz)) {
26906 +
26907 +#ifdef CONFIG_PAX_USERCOPY
26908 + kfree(temp);
26909 +#endif
26910 +
26911 unxlate_dev_mem_ptr(p, ptr);
26912 return -EFAULT;
26913 }
26914
26915 +#ifdef CONFIG_PAX_USERCOPY
26916 + kfree(temp);
26917 +#endif
26918 +
26919 unxlate_dev_mem_ptr(p, ptr);
26920
26921 buf += sz;
26922 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
26923 size_t count, loff_t *ppos)
26924 {
26925 unsigned long p = *ppos;
26926 - ssize_t low_count, read, sz;
26927 + ssize_t low_count, read, sz, err = 0;
26928 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26929 - int err = 0;
26930
26931 read = 0;
26932 if (p < (unsigned long) high_memory) {
26933 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
26934 }
26935 #endif
26936 while (low_count > 0) {
26937 + char *temp;
26938 +
26939 sz = size_inside_page(p, low_count);
26940
26941 /*
26942 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
26943 */
26944 kbuf = xlate_dev_kmem_ptr((char *)p);
26945
26946 - if (copy_to_user(buf, kbuf, sz))
26947 +#ifdef CONFIG_PAX_USERCOPY
26948 + temp = kmalloc(sz, GFP_KERNEL);
26949 + if (!temp)
26950 + return -ENOMEM;
26951 + memcpy(temp, kbuf, sz);
26952 +#else
26953 + temp = kbuf;
26954 +#endif
26955 +
26956 + err = copy_to_user(buf, temp, sz);
26957 +
26958 +#ifdef CONFIG_PAX_USERCOPY
26959 + kfree(temp);
26960 +#endif
26961 +
26962 + if (err)
26963 return -EFAULT;
26964 buf += sz;
26965 p += sz;
26966 @@ -889,6 +941,9 @@ static const struct memdev {
26967 #ifdef CONFIG_CRASH_DUMP
26968 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26969 #endif
26970 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26971 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26972 +#endif
26973 };
26974
26975 static int memory_open(struct inode *inode, struct file *filp)
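[Editorial note, not part of the patch: under PAX_USERCOPY the mem.c hunks stage data through a freshly allocated heap buffer of exactly the requested size before copy_to_user(), so the usercopy checker only ever sees a heap object with a known length. A loose userspace analogue of such a bounce buffer:]

#include <stdlib.h>
#include <string.h>

int copy_via_bounce(char *dst, const char *src, size_t sz)
{
        char *temp = malloc(sz);        /* bounce buffer sized to the request */
        if (!temp)
                return -1;
        memcpy(temp, src, sz);          /* source may be an arbitrary mapping */
        memcpy(dst, temp, sz);          /* consumer only ever sees the bounce */
        free(temp);
        return 0;
}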
26976 diff -urNp linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c
26977 --- linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
26978 +++ linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
26979 @@ -29,6 +29,7 @@
26980 #include <linux/tty_driver.h>
26981 #include <linux/tty_flip.h>
26982 #include <linux/uaccess.h>
26983 +#include <asm/local.h>
26984
26985 #include "tty.h"
26986 #include "network.h"
26987 @@ -51,7 +52,7 @@ struct ipw_tty {
26988 int tty_type;
26989 struct ipw_network *network;
26990 struct tty_struct *linux_tty;
26991 - int open_count;
26992 + local_t open_count;
26993 unsigned int control_lines;
26994 struct mutex ipw_tty_mutex;
26995 int tx_bytes_queued;
26996 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
26997 mutex_unlock(&tty->ipw_tty_mutex);
26998 return -ENODEV;
26999 }
27000 - if (tty->open_count == 0)
27001 + if (local_read(&tty->open_count) == 0)
27002 tty->tx_bytes_queued = 0;
27003
27004 - tty->open_count++;
27005 + local_inc(&tty->open_count);
27006
27007 tty->linux_tty = linux_tty;
27008 linux_tty->driver_data = tty;
27009 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
27010
27011 static void do_ipw_close(struct ipw_tty *tty)
27012 {
27013 - tty->open_count--;
27014 -
27015 - if (tty->open_count == 0) {
27016 + if (local_dec_return(&tty->open_count) == 0) {
27017 struct tty_struct *linux_tty = tty->linux_tty;
27018
27019 if (linux_tty != NULL) {
27020 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
27021 return;
27022
27023 mutex_lock(&tty->ipw_tty_mutex);
27024 - if (tty->open_count == 0) {
27025 + if (local_read(&tty->open_count) == 0) {
27026 mutex_unlock(&tty->ipw_tty_mutex);
27027 return;
27028 }
27029 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
27030 return;
27031 }
27032
27033 - if (!tty->open_count) {
27034 + if (!local_read(&tty->open_count)) {
27035 mutex_unlock(&tty->ipw_tty_mutex);
27036 return;
27037 }
27038 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
27039 return -ENODEV;
27040
27041 mutex_lock(&tty->ipw_tty_mutex);
27042 - if (!tty->open_count) {
27043 + if (!local_read(&tty->open_count)) {
27044 mutex_unlock(&tty->ipw_tty_mutex);
27045 return -EINVAL;
27046 }
27047 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
27048 if (!tty)
27049 return -ENODEV;
27050
27051 - if (!tty->open_count)
27052 + if (!local_read(&tty->open_count))
27053 return -EINVAL;
27054
27055 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
27056 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
27057 if (!tty)
27058 return 0;
27059
27060 - if (!tty->open_count)
27061 + if (!local_read(&tty->open_count))
27062 return 0;
27063
27064 return tty->tx_bytes_queued;
27065 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
27066 if (!tty)
27067 return -ENODEV;
27068
27069 - if (!tty->open_count)
27070 + if (!local_read(&tty->open_count))
27071 return -EINVAL;
27072
27073 return get_control_lines(tty);
27074 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
27075 if (!tty)
27076 return -ENODEV;
27077
27078 - if (!tty->open_count)
27079 + if (!local_read(&tty->open_count))
27080 return -EINVAL;
27081
27082 return set_control_lines(tty, set, clear);
27083 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
27084 if (!tty)
27085 return -ENODEV;
27086
27087 - if (!tty->open_count)
27088 + if (!local_read(&tty->open_count))
27089 return -EINVAL;
27090
27091 /* FIXME: Exactly how is the tty object locked here .. */
27092 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
27093 against a parallel ioctl etc */
27094 mutex_lock(&ttyj->ipw_tty_mutex);
27095 }
27096 - while (ttyj->open_count)
27097 + while (local_read(&ttyj->open_count))
27098 do_ipw_close(ttyj);
27099 ipwireless_disassociate_network_ttys(network,
27100 ttyj->channel_idx);
27101 diff -urNp linux-2.6.32.42/drivers/char/pty.c linux-2.6.32.42/drivers/char/pty.c
27102 --- linux-2.6.32.42/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
27103 +++ linux-2.6.32.42/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
27104 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
27105 return ret;
27106 }
27107
27108 -static struct file_operations ptmx_fops;
27109 +static const struct file_operations ptmx_fops = {
27110 + .llseek = no_llseek,
27111 + .read = tty_read,
27112 + .write = tty_write,
27113 + .poll = tty_poll,
27114 + .unlocked_ioctl = tty_ioctl,
27115 + .compat_ioctl = tty_compat_ioctl,
27116 + .open = ptmx_open,
27117 + .release = tty_release,
27118 + .fasync = tty_fasync,
27119 +};
27120 +
27121
27122 static void __init unix98_pty_init(void)
27123 {
27124 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
27125 register_sysctl_table(pty_root_table);
27126
27127 /* Now create the /dev/ptmx special device */
27128 - tty_default_fops(&ptmx_fops);
27129 - ptmx_fops.open = ptmx_open;
27130 -
27131 cdev_init(&ptmx_cdev, &ptmx_fops);
27132 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
27133 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
27134 diff -urNp linux-2.6.32.42/drivers/char/random.c linux-2.6.32.42/drivers/char/random.c
27135 --- linux-2.6.32.42/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
27136 +++ linux-2.6.32.42/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
27137 @@ -254,8 +254,13 @@
27138 /*
27139 * Configuration information
27140 */
27141 +#ifdef CONFIG_GRKERNSEC_RANDNET
27142 +#define INPUT_POOL_WORDS 512
27143 +#define OUTPUT_POOL_WORDS 128
27144 +#else
27145 #define INPUT_POOL_WORDS 128
27146 #define OUTPUT_POOL_WORDS 32
27147 +#endif
27148 #define SEC_XFER_SIZE 512
27149
27150 /*
27151 @@ -292,10 +297,17 @@ static struct poolinfo {
27152 int poolwords;
27153 int tap1, tap2, tap3, tap4, tap5;
27154 } poolinfo_table[] = {
27155 +#ifdef CONFIG_GRKERNSEC_RANDNET
27156 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27157 + { 512, 411, 308, 208, 104, 1 },
27158 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27159 + { 128, 103, 76, 51, 25, 1 },
27160 +#else
27161 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27162 { 128, 103, 76, 51, 25, 1 },
27163 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27164 { 32, 26, 20, 14, 7, 1 },
27165 +#endif
27166 #if 0
27167 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27168 { 2048, 1638, 1231, 819, 411, 1 },
27169 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27170 #include <linux/sysctl.h>
27171
27172 static int min_read_thresh = 8, min_write_thresh;
27173 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27174 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27175 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27176 static char sysctl_bootid[16];
27177
27178 diff -urNp linux-2.6.32.42/drivers/char/rocket.c linux-2.6.32.42/drivers/char/rocket.c
27179 --- linux-2.6.32.42/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
27180 +++ linux-2.6.32.42/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
27181 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
27182 struct rocket_ports tmp;
27183 int board;
27184
27185 + pax_track_stack();
27186 +
27187 if (!retports)
27188 return -EFAULT;
27189 memset(&tmp, 0, sizeof (tmp));
27190 diff -urNp linux-2.6.32.42/drivers/char/sonypi.c linux-2.6.32.42/drivers/char/sonypi.c
27191 --- linux-2.6.32.42/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
27192 +++ linux-2.6.32.42/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
27193 @@ -55,6 +55,7 @@
27194 #include <asm/uaccess.h>
27195 #include <asm/io.h>
27196 #include <asm/system.h>
27197 +#include <asm/local.h>
27198
27199 #include <linux/sonypi.h>
27200
27201 @@ -491,7 +492,7 @@ static struct sonypi_device {
27202 spinlock_t fifo_lock;
27203 wait_queue_head_t fifo_proc_list;
27204 struct fasync_struct *fifo_async;
27205 - int open_count;
27206 + local_t open_count;
27207 int model;
27208 struct input_dev *input_jog_dev;
27209 struct input_dev *input_key_dev;
27210 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
27211 static int sonypi_misc_release(struct inode *inode, struct file *file)
27212 {
27213 mutex_lock(&sonypi_device.lock);
27214 - sonypi_device.open_count--;
27215 + local_dec(&sonypi_device.open_count);
27216 mutex_unlock(&sonypi_device.lock);
27217 return 0;
27218 }
27219 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
27220 lock_kernel();
27221 mutex_lock(&sonypi_device.lock);
27222 /* Flush input queue on first open */
27223 - if (!sonypi_device.open_count)
27224 + if (!local_read(&sonypi_device.open_count))
27225 kfifo_reset(sonypi_device.fifo);
27226 - sonypi_device.open_count++;
27227 + local_inc(&sonypi_device.open_count);
27228 mutex_unlock(&sonypi_device.lock);
27229 unlock_kernel();
27230 return 0;
27231 diff -urNp linux-2.6.32.42/drivers/char/stallion.c linux-2.6.32.42/drivers/char/stallion.c
27232 --- linux-2.6.32.42/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
27233 +++ linux-2.6.32.42/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
27234 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
27235 struct stlport stl_dummyport;
27236 struct stlport *portp;
27237
27238 + pax_track_stack();
27239 +
27240 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27241 return -EFAULT;
27242 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27243 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm_bios.c linux-2.6.32.42/drivers/char/tpm/tpm_bios.c
27244 --- linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27245 +++ linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27246 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27247 event = addr;
27248
27249 if ((event->event_type == 0 && event->event_size == 0) ||
27250 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27251 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27252 return NULL;
27253
27254 return addr;
27255 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27256 return NULL;
27257
27258 if ((event->event_type == 0 && event->event_size == 0) ||
27259 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27260 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27261 return NULL;
27262
27263 (*pos)++;
27264 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27265 int i;
27266
27267 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27268 - seq_putc(m, data[i]);
27269 + if (!seq_putc(m, data[i]))
27270 + return -EFAULT;
27271
27272 return 0;
27273 }
27274 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27275 log->bios_event_log_end = log->bios_event_log + len;
27276
27277 virt = acpi_os_map_memory(start, len);
27278 + if (!virt) {
27279 + kfree(log->bios_event_log);
27280 + log->bios_event_log = NULL;
27281 + return -EFAULT;
27282 + }
27283
27284 memcpy(log->bios_event_log, virt, len);
27285
27286 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm.c linux-2.6.32.42/drivers/char/tpm/tpm.c
27287 --- linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27288 +++ linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27289 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27290 chip->vendor.req_complete_val)
27291 goto out_recv;
27292
27293 - if ((status == chip->vendor.req_canceled)) {
27294 + if (status == chip->vendor.req_canceled) {
27295 dev_err(chip->dev, "Operation Canceled\n");
27296 rc = -ECANCELED;
27297 goto out;
27298 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27299
27300 struct tpm_chip *chip = dev_get_drvdata(dev);
27301
27302 + pax_track_stack();
27303 +
27304 tpm_cmd.header.in = tpm_readpubek_header;
27305 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27306 "attempting to read the PUBEK");
27307 diff -urNp linux-2.6.32.42/drivers/char/tty_io.c linux-2.6.32.42/drivers/char/tty_io.c
27308 --- linux-2.6.32.42/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27309 +++ linux-2.6.32.42/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27310 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27311 DEFINE_MUTEX(tty_mutex);
27312 EXPORT_SYMBOL(tty_mutex);
27313
27314 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27315 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27316 ssize_t redirected_tty_write(struct file *, const char __user *,
27317 size_t, loff_t *);
27318 -static unsigned int tty_poll(struct file *, poll_table *);
27319 static int tty_open(struct inode *, struct file *);
27320 -static int tty_release(struct inode *, struct file *);
27321 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27322 -#ifdef CONFIG_COMPAT
27323 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27324 - unsigned long arg);
27325 -#else
27326 -#define tty_compat_ioctl NULL
27327 -#endif
27328 -static int tty_fasync(int fd, struct file *filp, int on);
27329 static void release_tty(struct tty_struct *tty, int idx);
27330 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27331 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27332 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27333 * read calls may be outstanding in parallel.
27334 */
27335
27336 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27337 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27338 loff_t *ppos)
27339 {
27340 int i;
27341 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27342 return i;
27343 }
27344
27345 +EXPORT_SYMBOL(tty_read);
27346 +
27347 void tty_write_unlock(struct tty_struct *tty)
27348 {
27349 mutex_unlock(&tty->atomic_write_lock);
27350 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27351 * write method will not be invoked in parallel for each device.
27352 */
27353
27354 -static ssize_t tty_write(struct file *file, const char __user *buf,
27355 +ssize_t tty_write(struct file *file, const char __user *buf,
27356 size_t count, loff_t *ppos)
27357 {
27358 struct tty_struct *tty;
27359 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27360 return ret;
27361 }
27362
27363 +EXPORT_SYMBOL(tty_write);
27364 +
27365 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27366 size_t count, loff_t *ppos)
27367 {
27368 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27369 * Takes bkl. See tty_release_dev
27370 */
27371
27372 -static int tty_release(struct inode *inode, struct file *filp)
27373 +int tty_release(struct inode *inode, struct file *filp)
27374 {
27375 lock_kernel();
27376 tty_release_dev(filp);
27377 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27378 return 0;
27379 }
27380
27381 +EXPORT_SYMBOL(tty_release);
27382 +
27383 /**
27384 * tty_poll - check tty status
27385 * @filp: file being polled
27386 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27387 * may be re-entered freely by other callers.
27388 */
27389
27390 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27391 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27392 {
27393 struct tty_struct *tty;
27394 struct tty_ldisc *ld;
27395 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27396 return ret;
27397 }
27398
27399 -static int tty_fasync(int fd, struct file *filp, int on)
27400 +EXPORT_SYMBOL(tty_poll);
27401 +
27402 +int tty_fasync(int fd, struct file *filp, int on)
27403 {
27404 struct tty_struct *tty;
27405 unsigned long flags;
27406 @@ -1948,6 +1945,8 @@ out:
27407 return retval;
27408 }
27409
27410 +EXPORT_SYMBOL(tty_fasync);
27411 +
27412 /**
27413 * tiocsti - fake input character
27414 * @tty: tty to fake input into
27415 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27416 return retval;
27417 }
27418
27419 +EXPORT_SYMBOL(tty_ioctl);
27420 +
27421 #ifdef CONFIG_COMPAT
27422 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27423 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27424 unsigned long arg)
27425 {
27426 struct inode *inode = file->f_dentry->d_inode;
27427 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27428
27429 return retval;
27430 }
27431 +
27432 +EXPORT_SYMBOL(tty_compat_ioctl);
27433 #endif
27434
27435 /*
27436 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27437 }
27438 EXPORT_SYMBOL_GPL(get_current_tty);
27439
27440 -void tty_default_fops(struct file_operations *fops)
27441 -{
27442 - *fops = tty_fops;
27443 -}
27444 -
27445 /*
27446 * Initialize the console device. This is called *early*, so
27447 * we can't necessarily depend on lots of kernel help here.
27448 diff -urNp linux-2.6.32.42/drivers/char/tty_ldisc.c linux-2.6.32.42/drivers/char/tty_ldisc.c
27449 --- linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27450 +++ linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27451 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27452 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27453 struct tty_ldisc_ops *ldo = ld->ops;
27454
27455 - ldo->refcount--;
27456 + atomic_dec(&ldo->refcount);
27457 module_put(ldo->owner);
27458 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27459
27460 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27461 spin_lock_irqsave(&tty_ldisc_lock, flags);
27462 tty_ldiscs[disc] = new_ldisc;
27463 new_ldisc->num = disc;
27464 - new_ldisc->refcount = 0;
27465 + atomic_set(&new_ldisc->refcount, 0);
27466 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27467
27468 return ret;
27469 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27470 return -EINVAL;
27471
27472 spin_lock_irqsave(&tty_ldisc_lock, flags);
27473 - if (tty_ldiscs[disc]->refcount)
27474 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27475 ret = -EBUSY;
27476 else
27477 tty_ldiscs[disc] = NULL;
27478 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27479 if (ldops) {
27480 ret = ERR_PTR(-EAGAIN);
27481 if (try_module_get(ldops->owner)) {
27482 - ldops->refcount++;
27483 + atomic_inc(&ldops->refcount);
27484 ret = ldops;
27485 }
27486 }
27487 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27488 unsigned long flags;
27489
27490 spin_lock_irqsave(&tty_ldisc_lock, flags);
27491 - ldops->refcount--;
27492 + atomic_dec(&ldops->refcount);
27493 module_put(ldops->owner);
27494 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27495 }
27496 diff -urNp linux-2.6.32.42/drivers/char/virtio_console.c linux-2.6.32.42/drivers/char/virtio_console.c
27497 --- linux-2.6.32.42/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27498 +++ linux-2.6.32.42/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27499 @@ -44,6 +44,7 @@ static unsigned int in_len;
27500 static char *in, *inbuf;
27501
27502 /* The operations for our console. */
27503 +/* cannot be const */
27504 static struct hv_ops virtio_cons;
27505
27506 /* The hvc device */
27507 diff -urNp linux-2.6.32.42/drivers/char/vt.c linux-2.6.32.42/drivers/char/vt.c
27508 --- linux-2.6.32.42/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27509 +++ linux-2.6.32.42/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27510 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27511
27512 static void notify_write(struct vc_data *vc, unsigned int unicode)
27513 {
27514 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27515 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
27516 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27517 }
27518
27519 diff -urNp linux-2.6.32.42/drivers/char/vt_ioctl.c linux-2.6.32.42/drivers/char/vt_ioctl.c
27520 --- linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27521 +++ linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27522 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27523 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27524 return -EFAULT;
27525
27526 - if (!capable(CAP_SYS_TTY_CONFIG))
27527 - perm = 0;
27528 -
27529 switch (cmd) {
27530 case KDGKBENT:
27531 key_map = key_maps[s];
27532 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27533 val = (i ? K_HOLE : K_NOSUCHMAP);
27534 return put_user(val, &user_kbe->kb_value);
27535 case KDSKBENT:
27536 + if (!capable(CAP_SYS_TTY_CONFIG))
27537 + perm = 0;
27538 +
27539 if (!perm)
27540 return -EPERM;
27541 +
27542 if (!i && v == K_NOSUCHMAP) {
27543 /* deallocate map */
27544 key_map = key_maps[s];
27545 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27546 int i, j, k;
27547 int ret;
27548
27549 - if (!capable(CAP_SYS_TTY_CONFIG))
27550 - perm = 0;
27551 -
27552 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27553 if (!kbs) {
27554 ret = -ENOMEM;
27555 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27556 kfree(kbs);
27557 return ((p && *p) ? -EOVERFLOW : 0);
27558 case KDSKBSENT:
27559 + if (!capable(CAP_SYS_TTY_CONFIG))
27560 + perm = 0;
27561 +
27562 if (!perm) {
27563 ret = -EPERM;
27564 goto reterr;
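[Editorial note, not part of the patch: the vt_ioctl hunks move the CAP_SYS_TTY_CONFIG test so only the state-changing KDSKBENT/KDSKBSENT commands are gated, while read-only queries remain unprivileged, and the denial happens before anything is modified. A simplified sketch; has_priv() is a stand-in for the kernel's capable():]

#include <errno.h>
#include <stdbool.h>

enum cmd { CMD_GET, CMD_SET };

static bool has_priv(void)      /* stand-in for capable(CAP_...) */
{
        return false;
}

int handle_cmd(enum cmd c, int *state, int value)
{
        switch (c) {
        case CMD_GET:
                return *state;          /* read-only queries stay unprivileged */
        case CMD_SET:
                if (!has_priv())
                        return -EPERM;  /* refuse before touching any state */
                *state = value;
                return 0;
        }
        return -EINVAL;
}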
27565 diff -urNp linux-2.6.32.42/drivers/cpufreq/cpufreq.c linux-2.6.32.42/drivers/cpufreq/cpufreq.c
27566 --- linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
27567 +++ linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
27568 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27569 complete(&policy->kobj_unregister);
27570 }
27571
27572 -static struct sysfs_ops sysfs_ops = {
27573 +static const struct sysfs_ops sysfs_ops = {
27574 .show = show,
27575 .store = store,
27576 };
27577 diff -urNp linux-2.6.32.42/drivers/cpuidle/sysfs.c linux-2.6.32.42/drivers/cpuidle/sysfs.c
27578 --- linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27579 +++ linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27580 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27581 return ret;
27582 }
27583
27584 -static struct sysfs_ops cpuidle_sysfs_ops = {
27585 +static const struct sysfs_ops cpuidle_sysfs_ops = {
27586 .show = cpuidle_show,
27587 .store = cpuidle_store,
27588 };
27589 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27590 return ret;
27591 }
27592
27593 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
27594 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27595 .show = cpuidle_state_show,
27596 };
27597
27598 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27599 .release = cpuidle_state_sysfs_release,
27600 };
27601
27602 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27603 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27604 {
27605 kobject_put(&device->kobjs[i]->kobj);
27606 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27607 diff -urNp linux-2.6.32.42/drivers/crypto/hifn_795x.c linux-2.6.32.42/drivers/crypto/hifn_795x.c
27608 --- linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27609 +++ linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27610 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27611 0xCA, 0x34, 0x2B, 0x2E};
27612 struct scatterlist sg;
27613
27614 + pax_track_stack();
27615 +
27616 memset(src, 0, sizeof(src));
27617 memset(ctx.key, 0, sizeof(ctx.key));
27618
27619 diff -urNp linux-2.6.32.42/drivers/crypto/padlock-aes.c linux-2.6.32.42/drivers/crypto/padlock-aes.c
27620 --- linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27621 +++ linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27622 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27623 struct crypto_aes_ctx gen_aes;
27624 int cpu;
27625
27626 + pax_track_stack();
27627 +
27628 if (key_len % 8) {
27629 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27630 return -EINVAL;
27631 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.c linux-2.6.32.42/drivers/dma/ioat/dma.c
27632 --- linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
27633 +++ linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
27634 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
27635 return entry->show(&chan->common, page);
27636 }
27637
27638 -struct sysfs_ops ioat_sysfs_ops = {
27639 +const struct sysfs_ops ioat_sysfs_ops = {
27640 .show = ioat_attr_show,
27641 };
27642
27643 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.h linux-2.6.32.42/drivers/dma/ioat/dma.h
27644 --- linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
27645 +++ linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
27646 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
27647 unsigned long *phys_complete);
27648 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
27649 void ioat_kobject_del(struct ioatdma_device *device);
27650 -extern struct sysfs_ops ioat_sysfs_ops;
27651 +extern const struct sysfs_ops ioat_sysfs_ops;
27652 extern struct ioat_sysfs_entry ioat_version_attr;
27653 extern struct ioat_sysfs_entry ioat_cap_attr;
27654 #endif /* IOATDMA_H */
27655 diff -urNp linux-2.6.32.42/drivers/edac/edac_device_sysfs.c linux-2.6.32.42/drivers/edac/edac_device_sysfs.c
27656 --- linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27657 +++ linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27658 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
27659 }
27660
27661 /* edac_dev file operations for an 'ctl_info' */
27662 -static struct sysfs_ops device_ctl_info_ops = {
27663 +static const struct sysfs_ops device_ctl_info_ops = {
27664 .show = edac_dev_ctl_info_show,
27665 .store = edac_dev_ctl_info_store
27666 };
27667 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
27668 }
27669
27670 /* edac_dev file operations for an 'instance' */
27671 -static struct sysfs_ops device_instance_ops = {
27672 +static const struct sysfs_ops device_instance_ops = {
27673 .show = edac_dev_instance_show,
27674 .store = edac_dev_instance_store
27675 };
27676 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
27677 }
27678
27679 /* edac_dev file operations for a 'block' */
27680 -static struct sysfs_ops device_block_ops = {
27681 +static const struct sysfs_ops device_block_ops = {
27682 .show = edac_dev_block_show,
27683 .store = edac_dev_block_store
27684 };
27685 diff -urNp linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c
27686 --- linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27687 +++ linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27688 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
27689 return -EIO;
27690 }
27691
27692 -static struct sysfs_ops csrowfs_ops = {
27693 +static const struct sysfs_ops csrowfs_ops = {
27694 .show = csrowdev_show,
27695 .store = csrowdev_store
27696 };
27697 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
27698 }
27699
27700 /* Intermediate show/store table */
27701 -static struct sysfs_ops mci_ops = {
27702 +static const struct sysfs_ops mci_ops = {
27703 .show = mcidev_show,
27704 .store = mcidev_store
27705 };
27706 diff -urNp linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c
27707 --- linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27708 +++ linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
27709 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
27710 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27711 static int edac_pci_poll_msec = 1000; /* one second workq period */
27712
27713 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27714 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27715 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27716 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27717
27718 static struct kobject *edac_pci_top_main_kobj;
27719 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27720 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
27721 }
27722
27723 /* fs_ops table */
27724 -static struct sysfs_ops pci_instance_ops = {
27725 +static const struct sysfs_ops pci_instance_ops = {
27726 .show = edac_pci_instance_show,
27727 .store = edac_pci_instance_store
27728 };
27729 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
27730 return -EIO;
27731 }
27732
27733 -static struct sysfs_ops edac_pci_sysfs_ops = {
27734 +static const struct sysfs_ops edac_pci_sysfs_ops = {
27735 .show = edac_pci_dev_show,
27736 .store = edac_pci_dev_store
27737 };
27738 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
27739 edac_printk(KERN_CRIT, EDAC_PCI,
27740 "Signaled System Error on %s\n",
27741 pci_name(dev));
27742 - atomic_inc(&pci_nonparity_count);
27743 + atomic_inc_unchecked(&pci_nonparity_count);
27744 }
27745
27746 if (status & (PCI_STATUS_PARITY)) {
27747 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
27748 "Master Data Parity Error on %s\n",
27749 pci_name(dev));
27750
27751 - atomic_inc(&pci_parity_count);
27752 + atomic_inc_unchecked(&pci_parity_count);
27753 }
27754
27755 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27756 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
27757 "Detected Parity Error on %s\n",
27758 pci_name(dev));
27759
27760 - atomic_inc(&pci_parity_count);
27761 + atomic_inc_unchecked(&pci_parity_count);
27762 }
27763 }
27764
27765 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
27766 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27767 "Signaled System Error on %s\n",
27768 pci_name(dev));
27769 - atomic_inc(&pci_nonparity_count);
27770 + atomic_inc_unchecked(&pci_nonparity_count);
27771 }
27772
27773 if (status & (PCI_STATUS_PARITY)) {
27774 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
27775 "Master Data Parity Error on "
27776 "%s\n", pci_name(dev));
27777
27778 - atomic_inc(&pci_parity_count);
27779 + atomic_inc_unchecked(&pci_parity_count);
27780 }
27781
27782 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27783 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
27784 "Detected Parity Error on %s\n",
27785 pci_name(dev));
27786
27787 - atomic_inc(&pci_parity_count);
27788 + atomic_inc_unchecked(&pci_parity_count);
27789 }
27790 }
27791 }
27792 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
27793 if (!check_pci_errors)
27794 return;
27795
27796 - before_count = atomic_read(&pci_parity_count);
27797 + before_count = atomic_read_unchecked(&pci_parity_count);
27798
27799 /* scan all PCI devices looking for a Parity Error on devices and
27800 * bridges.
27801 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
27802 /* Only if operator has selected panic on PCI Error */
27803 if (edac_pci_get_panic_on_pe()) {
27804 /* If the count is different 'after' from 'before' */
27805 - if (before_count != atomic_read(&pci_parity_count))
27806 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27807 panic("EDAC: PCI Parity Error");
27808 }
27809 }
27810 diff -urNp linux-2.6.32.42/drivers/firewire/core-cdev.c linux-2.6.32.42/drivers/firewire/core-cdev.c
27811 --- linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
27812 +++ linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
27813 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
27814 int ret;
27815
27816 if ((request->channels == 0 && request->bandwidth == 0) ||
27817 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27818 - request->bandwidth < 0)
27819 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27820 return -EINVAL;
27821
27822 r = kmalloc(sizeof(*r), GFP_KERNEL);
27823 diff -urNp linux-2.6.32.42/drivers/firewire/core-transaction.c linux-2.6.32.42/drivers/firewire/core-transaction.c
27824 --- linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
27825 +++ linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
27826 @@ -36,6 +36,7 @@
27827 #include <linux/string.h>
27828 #include <linux/timer.h>
27829 #include <linux/types.h>
27830 +#include <linux/sched.h>
27831
27832 #include <asm/byteorder.h>
27833
27834 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
27835 struct transaction_callback_data d;
27836 struct fw_transaction t;
27837
27838 + pax_track_stack();
27839 +
27840 init_completion(&d.done);
27841 d.payload = payload;
27842 fw_send_request(card, &t, tcode, destination_id, generation, speed,
27843 diff -urNp linux-2.6.32.42/drivers/firmware/dmi_scan.c linux-2.6.32.42/drivers/firmware/dmi_scan.c
27844 --- linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
27845 +++ linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
27846 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
27847 }
27848 }
27849 else {
27850 - /*
27851 - * no iounmap() for that ioremap(); it would be a no-op, but
27852 - * it's so early in setup that sucker gets confused into doing
27853 - * what it shouldn't if we actually call it.
27854 - */
27855 p = dmi_ioremap(0xF0000, 0x10000);
27856 if (p == NULL)
27857 goto error;
27858 diff -urNp linux-2.6.32.42/drivers/firmware/edd.c linux-2.6.32.42/drivers/firmware/edd.c
27859 --- linux-2.6.32.42/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
27860 +++ linux-2.6.32.42/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
27861 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
27862 return ret;
27863 }
27864
27865 -static struct sysfs_ops edd_attr_ops = {
27866 +static const struct sysfs_ops edd_attr_ops = {
27867 .show = edd_attr_show,
27868 };
27869
27870 diff -urNp linux-2.6.32.42/drivers/firmware/efivars.c linux-2.6.32.42/drivers/firmware/efivars.c
27871 --- linux-2.6.32.42/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
27872 +++ linux-2.6.32.42/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
27873 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
27874 return ret;
27875 }
27876
27877 -static struct sysfs_ops efivar_attr_ops = {
27878 +static const struct sysfs_ops efivar_attr_ops = {
27879 .show = efivar_attr_show,
27880 .store = efivar_attr_store,
27881 };
27882 diff -urNp linux-2.6.32.42/drivers/firmware/iscsi_ibft.c linux-2.6.32.42/drivers/firmware/iscsi_ibft.c
27883 --- linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
27884 +++ linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
27885 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
27886 return ret;
27887 }
27888
27889 -static struct sysfs_ops ibft_attr_ops = {
27890 +static const struct sysfs_ops ibft_attr_ops = {
27891 .show = ibft_show_attribute,
27892 };
27893
27894 diff -urNp linux-2.6.32.42/drivers/firmware/memmap.c linux-2.6.32.42/drivers/firmware/memmap.c
27895 --- linux-2.6.32.42/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
27896 +++ linux-2.6.32.42/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
27897 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
27898 NULL
27899 };
27900
27901 -static struct sysfs_ops memmap_attr_ops = {
27902 +static const struct sysfs_ops memmap_attr_ops = {
27903 .show = memmap_attr_show,
27904 };
27905
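
Constifying sysfs_ops here and in edd.c, efivars.c and iscsi_ibft.c above lets the compiler place the function-pointer table in read-only memory, so an attacker with an arbitrary kernel write cannot redirect .show/.store at runtime. A minimal kernel-style sketch of the pattern, assuming <linux/sysfs.h> and <linux/kobject.h>:

/* Sketch only: assumes kernel sysfs headers. */
static ssize_t memmap_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf);

/* 'const' moves the ops table into .rodata instead of writable data. */
static const struct sysfs_ops memmap_attr_ops_sketch = {
	.show = memmap_attr_show,
};
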
27906 diff -urNp linux-2.6.32.42/drivers/gpio/vr41xx_giu.c linux-2.6.32.42/drivers/gpio/vr41xx_giu.c
27907 --- linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
27908 +++ linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
27909 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27910 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27911 maskl, pendl, maskh, pendh);
27912
27913 - atomic_inc(&irq_err_count);
27914 + atomic_inc_unchecked(&irq_err_count);
27915
27916 return -EINVAL;
27917 }
27918 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c
27919 --- linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
27920 +++ linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
27921 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
27922 struct drm_crtc *tmp;
27923 int crtc_mask = 1;
27924
27925 - WARN(!crtc, "checking null crtc?");
27926 + BUG_ON(!crtc);
27927
27928 dev = crtc->dev;
27929
27930 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
27931
27932 adjusted_mode = drm_mode_duplicate(dev, mode);
27933
27934 + pax_track_stack();
27935 +
27936 crtc->enabled = drm_helper_crtc_in_use(crtc);
27937
27938 if (!crtc->enabled)
27939 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_drv.c linux-2.6.32.42/drivers/gpu/drm/drm_drv.c
27940 --- linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
27941 +++ linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
27942 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
27943 char *kdata = NULL;
27944
27945 atomic_inc(&dev->ioctl_count);
27946 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27947 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27948 ++file_priv->ioctl_count;
27949
27950 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27951 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_fops.c linux-2.6.32.42/drivers/gpu/drm/drm_fops.c
27952 --- linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
27953 +++ linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
27954 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
27955 }
27956
27957 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27958 - atomic_set(&dev->counts[i], 0);
27959 + atomic_set_unchecked(&dev->counts[i], 0);
27960
27961 dev->sigdata.lock = NULL;
27962
27963 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
27964
27965 retcode = drm_open_helper(inode, filp, dev);
27966 if (!retcode) {
27967 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27968 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27969 spin_lock(&dev->count_lock);
27970 - if (!dev->open_count++) {
27971 + if (local_inc_return(&dev->open_count) == 1) {
27972 spin_unlock(&dev->count_lock);
27973 retcode = drm_setup(dev);
27974 goto out;
27975 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
27976
27977 lock_kernel();
27978
27979 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27980 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27981
27982 if (dev->driver->preclose)
27983 dev->driver->preclose(dev, file_priv);
27984 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
27985 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27986 task_pid_nr(current),
27987 (long)old_encode_dev(file_priv->minor->device),
27988 - dev->open_count);
27989 + local_read(&dev->open_count));
27990
27991 /* if the master has gone away we can't do anything with the lock */
27992 if (file_priv->minor->master)
27993 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
27994 * End inline drm_release
27995 */
27996
27997 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27998 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27999 spin_lock(&dev->count_lock);
28000 - if (!--dev->open_count) {
28001 + if (local_dec_and_test(&dev->open_count)) {
28002 if (atomic_read(&dev->ioctl_count)) {
28003 DRM_ERROR("Device busy: %d\n",
28004 atomic_read(&dev->ioctl_count));
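
In drm_fops.c, dev->open_count is turned from a plain int manipulated with `dev->open_count++` / `--dev->open_count` into a local_t driven by local_inc_return() and local_dec_and_test(), so the "first opener" and "last closer" decisions are made atomically rather than via a separate read-modify-write. A rough userspace analogue of that open/release pairing, using GCC builtins in place of the kernel's <asm/local.h>:

#include <stdio.h>

static long open_count;     /* stand-in for local_t dev->open_count */

static void drm_open_sketch(void)
{
	/* local_inc_return(): increment and test in one atomic step. */
	if (__atomic_add_fetch(&open_count, 1, __ATOMIC_SEQ_CST) == 1)
		printf("first opener: run one-time setup\n");
}

static void drm_release_sketch(void)
{
	/* local_dec_and_test(): true only for the last closer. */
	if (__atomic_sub_fetch(&open_count, 1, __ATOMIC_SEQ_CST) == 0)
		printf("last closer: tear the device down\n");
}

int main(void)
{
	drm_open_sketch();
	drm_release_sketch();
	return 0;
}
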
28005 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_gem.c linux-2.6.32.42/drivers/gpu/drm/drm_gem.c
28006 --- linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28007 +++ linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28008 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28009 spin_lock_init(&dev->object_name_lock);
28010 idr_init(&dev->object_name_idr);
28011 atomic_set(&dev->object_count, 0);
28012 - atomic_set(&dev->object_memory, 0);
28013 + atomic_set_unchecked(&dev->object_memory, 0);
28014 atomic_set(&dev->pin_count, 0);
28015 - atomic_set(&dev->pin_memory, 0);
28016 + atomic_set_unchecked(&dev->pin_memory, 0);
28017 atomic_set(&dev->gtt_count, 0);
28018 - atomic_set(&dev->gtt_memory, 0);
28019 + atomic_set_unchecked(&dev->gtt_memory, 0);
28020
28021 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28022 if (!mm) {
28023 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28024 goto fput;
28025 }
28026 atomic_inc(&dev->object_count);
28027 - atomic_add(obj->size, &dev->object_memory);
28028 + atomic_add_unchecked(obj->size, &dev->object_memory);
28029 return obj;
28030 fput:
28031 fput(obj->filp);
28032 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28033
28034 fput(obj->filp);
28035 atomic_dec(&dev->object_count);
28036 - atomic_sub(obj->size, &dev->object_memory);
28037 + atomic_sub_unchecked(obj->size, &dev->object_memory);
28038 kfree(obj);
28039 }
28040 EXPORT_SYMBOL(drm_gem_object_free);
28041 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_info.c linux-2.6.32.42/drivers/gpu/drm/drm_info.c
28042 --- linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28043 +++ linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28044 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28045 struct drm_local_map *map;
28046 struct drm_map_list *r_list;
28047
28048 - /* Hardcoded from _DRM_FRAME_BUFFER,
28049 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28050 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28051 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28052 + static const char * const types[] = {
28053 + [_DRM_FRAME_BUFFER] = "FB",
28054 + [_DRM_REGISTERS] = "REG",
28055 + [_DRM_SHM] = "SHM",
28056 + [_DRM_AGP] = "AGP",
28057 + [_DRM_SCATTER_GATHER] = "SG",
28058 + [_DRM_CONSISTENT] = "PCI",
28059 + [_DRM_GEM] = "GEM" };
28060 const char *type;
28061 int i;
28062
28063 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
28064 map = r_list->map;
28065 if (!map)
28066 continue;
28067 - if (map->type < 0 || map->type > 5)
28068 + if (map->type >= ARRAY_SIZE(types))
28069 type = "??";
28070 else
28071 type = types[map->type];
28072 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
28073 struct drm_device *dev = node->minor->dev;
28074
28075 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
28076 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
28077 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
28078 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
28079 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
28080 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
28081 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
28082 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
28083 seq_printf(m, "%d gtt total\n", dev->gtt_total);
28084 return 0;
28085 }
28086 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
28087 mutex_lock(&dev->struct_mutex);
28088 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
28089 atomic_read(&dev->vma_count),
28090 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28091 + NULL, 0);
28092 +#else
28093 high_memory, (u64)virt_to_phys(high_memory));
28094 +#endif
28095
28096 list_for_each_entry(pt, &dev->vmalist, head) {
28097 vma = pt->vma;
28098 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
28099 continue;
28100 seq_printf(m,
28101 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
28102 - pt->pid, vma->vm_start, vma->vm_end,
28103 + pt->pid,
28104 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28105 + 0, 0,
28106 +#else
28107 + vma->vm_start, vma->vm_end,
28108 +#endif
28109 vma->vm_flags & VM_READ ? 'r' : '-',
28110 vma->vm_flags & VM_WRITE ? 'w' : '-',
28111 vma->vm_flags & VM_EXEC ? 'x' : '-',
28112 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28113 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28114 vma->vm_flags & VM_IO ? 'i' : '-',
28115 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28116 + 0);
28117 +#else
28118 vma->vm_pgoff);
28119 +#endif
28120
28121 #if defined(__i386__)
28122 pgprot = pgprot_val(vma->vm_page_prot);
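
The drm_info.c change replaces a positional string table and a hard-coded `type > 5` bound with designated initializers indexed by the _DRM_* map types plus an ARRAY_SIZE() check, so a new map type such as _DRM_GEM can no longer index past the table (the HIDESYM hunks in the same file additionally blank out kernel addresses in the proc output). A standalone illustration of the table pattern, with made-up enum values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

static const char * const types[] = {
	[MAP_FB]  = "FB",
	[MAP_REG] = "REG",
	[MAP_SHM] = "SHM",
	[MAP_AGP] = "AGP",
	[MAP_SG]  = "SG",
	[MAP_PCI] = "PCI",
	[MAP_GEM] = "GEM",      /* new entries extend the bound automatically */
};

static const char *map_type_name(unsigned int t)
{
	/* Bound follows the table instead of a magic "> 5". */
	if (t >= ARRAY_SIZE(types) || !types[t])
		return "??";
	return types[t];
}

int main(void)
{
	printf("%s %s\n", map_type_name(MAP_GEM), map_type_name(42));
	return 0;
}
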
28123 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c
28124 --- linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28125 +++ linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28126 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
28127 stats->data[i].value =
28128 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28129 else
28130 - stats->data[i].value = atomic_read(&dev->counts[i]);
28131 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28132 stats->data[i].type = dev->types[i];
28133 }
28134
28135 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_lock.c linux-2.6.32.42/drivers/gpu/drm/drm_lock.c
28136 --- linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
28137 +++ linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
28138 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
28139 if (drm_lock_take(&master->lock, lock->context)) {
28140 master->lock.file_priv = file_priv;
28141 master->lock.lock_time = jiffies;
28142 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28143 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28144 break; /* Got lock */
28145 }
28146
28147 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
28148 return -EINVAL;
28149 }
28150
28151 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28152 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28153
28154 /* kernel_context_switch isn't used by any of the x86 drm
28155 * modules but is required by the Sparc driver.
28156 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c
28157 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
28158 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
28159 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
28160 dma->buflist[vertex->idx],
28161 vertex->discard, vertex->used);
28162
28163 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28164 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28165 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28166 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28167 sarea_priv->last_enqueue = dev_priv->counter - 1;
28168 sarea_priv->last_dispatch = (int)hw_status[5];
28169
28170 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
28171 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28172 mc->last_render);
28173
28174 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28175 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28176 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28177 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28178 sarea_priv->last_enqueue = dev_priv->counter - 1;
28179 sarea_priv->last_dispatch = (int)hw_status[5];
28180
28181 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h
28182 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
28183 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
28184 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28185 int page_flipping;
28186
28187 wait_queue_head_t irq_queue;
28188 - atomic_t irq_received;
28189 - atomic_t irq_emitted;
28190 + atomic_unchecked_t irq_received;
28191 + atomic_unchecked_t irq_emitted;
28192
28193 int front_offset;
28194 } drm_i810_private_t;
28195 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h
28196 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
28197 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
28198 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
28199 int page_flipping;
28200
28201 wait_queue_head_t irq_queue;
28202 - atomic_t irq_received;
28203 - atomic_t irq_emitted;
28204 + atomic_unchecked_t irq_received;
28205 + atomic_unchecked_t irq_emitted;
28206
28207 int use_mi_batchbuffer_start;
28208
28209 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c
28210 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
28211 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
28212 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
28213
28214 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
28215
28216 - atomic_inc(&dev_priv->irq_received);
28217 + atomic_inc_unchecked(&dev_priv->irq_received);
28218 wake_up_interruptible(&dev_priv->irq_queue);
28219
28220 return IRQ_HANDLED;
28221 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
28222
28223 DRM_DEBUG("%s\n", __func__);
28224
28225 - atomic_inc(&dev_priv->irq_emitted);
28226 + atomic_inc_unchecked(&dev_priv->irq_emitted);
28227
28228 BEGIN_LP_RING(2);
28229 OUT_RING(0);
28230 OUT_RING(GFX_OP_USER_INTERRUPT);
28231 ADVANCE_LP_RING();
28232
28233 - return atomic_read(&dev_priv->irq_emitted);
28234 + return atomic_read_unchecked(&dev_priv->irq_emitted);
28235 }
28236
28237 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
28238 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28239
28240 DRM_DEBUG("%s\n", __func__);
28241
28242 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28243 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28244 return 0;
28245
28246 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28247 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28248
28249 for (;;) {
28250 __set_current_state(TASK_INTERRUPTIBLE);
28251 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28252 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28253 break;
28254 if ((signed)(end - jiffies) <= 0) {
28255 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28256 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28257 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28258 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28259 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28260 - atomic_set(&dev_priv->irq_received, 0);
28261 - atomic_set(&dev_priv->irq_emitted, 0);
28262 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28263 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28264 init_waitqueue_head(&dev_priv->irq_queue);
28265 }
28266
28267 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c
28268 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28269 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28270 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28271 }
28272 }
28273
28274 -struct intel_dvo_dev_ops ch7017_ops = {
28275 +const struct intel_dvo_dev_ops ch7017_ops = {
28276 .init = ch7017_init,
28277 .detect = ch7017_detect,
28278 .mode_valid = ch7017_mode_valid,
28279 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c
28280 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28281 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28282 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28283 }
28284 }
28285
28286 -struct intel_dvo_dev_ops ch7xxx_ops = {
28287 +const struct intel_dvo_dev_ops ch7xxx_ops = {
28288 .init = ch7xxx_init,
28289 .detect = ch7xxx_detect,
28290 .mode_valid = ch7xxx_mode_valid,
28291 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h
28292 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28293 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28294 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28295 *
28296 * \return singly-linked list of modes or NULL if no modes found.
28297 */
28298 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28299 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28300
28301 /**
28302 * Clean up driver-specific bits of the output
28303 */
28304 - void (*destroy) (struct intel_dvo_device *dvo);
28305 + void (* const destroy) (struct intel_dvo_device *dvo);
28306
28307 /**
28308 * Debugging hook to dump device registers to log file
28309 */
28310 - void (*dump_regs)(struct intel_dvo_device *dvo);
28311 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28312 };
28313
28314 -extern struct intel_dvo_dev_ops sil164_ops;
28315 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28316 -extern struct intel_dvo_dev_ops ivch_ops;
28317 -extern struct intel_dvo_dev_ops tfp410_ops;
28318 -extern struct intel_dvo_dev_ops ch7017_ops;
28319 +extern const struct intel_dvo_dev_ops sil164_ops;
28320 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28321 +extern const struct intel_dvo_dev_ops ivch_ops;
28322 +extern const struct intel_dvo_dev_ops tfp410_ops;
28323 +extern const struct intel_dvo_dev_ops ch7017_ops;
28324
28325 #endif /* _INTEL_DVO_H */
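
Marking the intel_dvo_dev_ops members `* const` and the exported ops tables `const` has the same motivation as the sysfs_ops change: the function-pointer tables end up in read-only memory and the individual pointers cannot be reassigned after initialization. A compact standalone sketch of a const ops table in plain C:

#include <stdio.h>

struct dvo_ops {
	/* '* const' member: the pointer itself is immutable after init. */
	void (* const init)(void);
	void (* const destroy)(void);
};

static void my_init(void)    { puts("init"); }
static void my_destroy(void) { puts("destroy"); }

/* 'const' object: the whole table can live in .rodata. */
static const struct dvo_ops my_ops = {
	.init    = my_init,
	.destroy = my_destroy,
};

int main(void)
{
	my_ops.init();
	/* my_ops.init = my_destroy;  would not compile: member is const */
	my_ops.destroy();
	return 0;
}
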
28326 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c
28327 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28328 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28329 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28330 }
28331 }
28332
28333 -struct intel_dvo_dev_ops ivch_ops= {
28334 +const struct intel_dvo_dev_ops ivch_ops= {
28335 .init = ivch_init,
28336 .dpms = ivch_dpms,
28337 .save = ivch_save,
28338 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c
28339 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28340 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28341 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28342 }
28343 }
28344
28345 -struct intel_dvo_dev_ops sil164_ops = {
28346 +const struct intel_dvo_dev_ops sil164_ops = {
28347 .init = sil164_init,
28348 .detect = sil164_detect,
28349 .mode_valid = sil164_mode_valid,
28350 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c
28351 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28352 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28353 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28354 }
28355 }
28356
28357 -struct intel_dvo_dev_ops tfp410_ops = {
28358 +const struct intel_dvo_dev_ops tfp410_ops = {
28359 .init = tfp410_init,
28360 .detect = tfp410_detect,
28361 .mode_valid = tfp410_mode_valid,
28362 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c
28363 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28364 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28365 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28366 I915_READ(GTIMR));
28367 }
28368 seq_printf(m, "Interrupts received: %d\n",
28369 - atomic_read(&dev_priv->irq_received));
28370 + atomic_read_unchecked(&dev_priv->irq_received));
28371 if (dev_priv->hw_status_page != NULL) {
28372 seq_printf(m, "Current sequence: %d\n",
28373 i915_get_gem_seqno(dev));
28374 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c
28375 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28376 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28377 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28378 return i915_resume(dev);
28379 }
28380
28381 -static struct vm_operations_struct i915_gem_vm_ops = {
28382 +static const struct vm_operations_struct i915_gem_vm_ops = {
28383 .fault = i915_gem_fault,
28384 .open = drm_gem_vm_open,
28385 .close = drm_gem_vm_close,
28386 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h
28387 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28388 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28389 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28390 int page_flipping;
28391
28392 wait_queue_head_t irq_queue;
28393 - atomic_t irq_received;
28394 + atomic_unchecked_t irq_received;
28395 /** Protects user_irq_refcount and irq_mask_reg */
28396 spinlock_t user_irq_lock;
28397 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28398 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c
28399 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28400 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28401 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28402
28403 args->aper_size = dev->gtt_total;
28404 args->aper_available_size = (args->aper_size -
28405 - atomic_read(&dev->pin_memory));
28406 + atomic_read_unchecked(&dev->pin_memory));
28407
28408 return 0;
28409 }
28410 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28411 return -EINVAL;
28412 }
28413
28414 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28415 + drm_gem_object_unreference(obj);
28416 + return -EFAULT;
28417 + }
28418 +
28419 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28420 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28421 } else {
28422 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28423 return -EINVAL;
28424 }
28425
28426 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28427 + drm_gem_object_unreference(obj);
28428 + return -EFAULT;
28429 + }
28430 +
28431 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28432 * it would end up going through the fenced access, and we'll get
28433 * different detiling behavior between reading and writing.
28434 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28435
28436 if (obj_priv->gtt_space) {
28437 atomic_dec(&dev->gtt_count);
28438 - atomic_sub(obj->size, &dev->gtt_memory);
28439 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28440
28441 drm_mm_put_block(obj_priv->gtt_space);
28442 obj_priv->gtt_space = NULL;
28443 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28444 goto search_free;
28445 }
28446 atomic_inc(&dev->gtt_count);
28447 - atomic_add(obj->size, &dev->gtt_memory);
28448 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28449
28450 /* Assert that the object is not currently in any GPU domain. As it
28451 * wasn't in the GTT, there shouldn't be any way it could have been in
28452 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28453 "%d/%d gtt bytes\n",
28454 atomic_read(&dev->object_count),
28455 atomic_read(&dev->pin_count),
28456 - atomic_read(&dev->object_memory),
28457 - atomic_read(&dev->pin_memory),
28458 - atomic_read(&dev->gtt_memory),
28459 + atomic_read_unchecked(&dev->object_memory),
28460 + atomic_read_unchecked(&dev->pin_memory),
28461 + atomic_read_unchecked(&dev->gtt_memory),
28462 dev->gtt_total);
28463 }
28464 goto err;
28465 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28466 */
28467 if (obj_priv->pin_count == 1) {
28468 atomic_inc(&dev->pin_count);
28469 - atomic_add(obj->size, &dev->pin_memory);
28470 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28471 if (!obj_priv->active &&
28472 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28473 !list_empty(&obj_priv->list))
28474 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28475 list_move_tail(&obj_priv->list,
28476 &dev_priv->mm.inactive_list);
28477 atomic_dec(&dev->pin_count);
28478 - atomic_sub(obj->size, &dev->pin_memory);
28479 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28480 }
28481 i915_verify_inactive(dev, __FILE__, __LINE__);
28482 }
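
The i915 pread/pwrite hunks add an explicit access_ok() check on the user-supplied data_ptr/size pair before the GEM object is handed to the copy paths, so a bad range fails early with -EFAULT instead of relying on every downstream copy routine to validate it. A kernel-style sketch of the pattern, assuming the 2.6.32-era access_ok(VERIFY_*, ptr, len) signature and <linux/uaccess.h>; the helper name is hypothetical:

/* Sketch only: assumes kernel uaccess headers. */
static int check_user_range(unsigned long data_ptr, unsigned long size,
			    int writing_to_user)
{
	void __user *p = (void __user *)(uintptr_t)data_ptr;

	if (!access_ok(writing_to_user ? VERIFY_WRITE : VERIFY_READ, p, size))
		return -EFAULT;        /* reject before touching the object */
	return 0;
}
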
28483 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c
28484 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28485 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28486 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28487 int irq_received;
28488 int ret = IRQ_NONE;
28489
28490 - atomic_inc(&dev_priv->irq_received);
28491 + atomic_inc_unchecked(&dev_priv->irq_received);
28492
28493 if (IS_IGDNG(dev))
28494 return igdng_irq_handler(dev);
28495 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28496 {
28497 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28498
28499 - atomic_set(&dev_priv->irq_received, 0);
28500 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28501
28502 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28503 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28504 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h
28505 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28506 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28507 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28508 u32 clear_cmd;
28509 u32 maccess;
28510
28511 - atomic_t vbl_received; /**< Number of vblanks received. */
28512 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28513 wait_queue_head_t fence_queue;
28514 - atomic_t last_fence_retired;
28515 + atomic_unchecked_t last_fence_retired;
28516 u32 next_fence_to_post;
28517
28518 unsigned int fb_cpp;
28519 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c
28520 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28521 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28522 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28523 if (crtc != 0)
28524 return 0;
28525
28526 - return atomic_read(&dev_priv->vbl_received);
28527 + return atomic_read_unchecked(&dev_priv->vbl_received);
28528 }
28529
28530
28531 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28532 /* VBLANK interrupt */
28533 if (status & MGA_VLINEPEN) {
28534 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28535 - atomic_inc(&dev_priv->vbl_received);
28536 + atomic_inc_unchecked(&dev_priv->vbl_received);
28537 drm_handle_vblank(dev, 0);
28538 handled = 1;
28539 }
28540 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28541 MGA_WRITE(MGA_PRIMEND, prim_end);
28542 }
28543
28544 - atomic_inc(&dev_priv->last_fence_retired);
28545 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28546 DRM_WAKEUP(&dev_priv->fence_queue);
28547 handled = 1;
28548 }
28549 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28550 * using fences.
28551 */
28552 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28553 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28554 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28555 - *sequence) <= (1 << 23)));
28556
28557 *sequence = cur_fence;
28558 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c
28559 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28560 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28561 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28562
28563 /* GH: Simple idle check.
28564 */
28565 - atomic_set(&dev_priv->idle_count, 0);
28566 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28567
28568 /* We don't support anything other than bus-mastering ring mode,
28569 * but the ring can be in either AGP or PCI space for the ring
28570 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h
28571 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28572 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28573 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28574 int is_pci;
28575 unsigned long cce_buffers_offset;
28576
28577 - atomic_t idle_count;
28578 + atomic_unchecked_t idle_count;
28579
28580 int page_flipping;
28581 int current_page;
28582 u32 crtc_offset;
28583 u32 crtc_offset_cntl;
28584
28585 - atomic_t vbl_received;
28586 + atomic_unchecked_t vbl_received;
28587
28588 u32 color_fmt;
28589 unsigned int front_offset;
28590 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c
28591 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28592 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28593 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28594 if (crtc != 0)
28595 return 0;
28596
28597 - return atomic_read(&dev_priv->vbl_received);
28598 + return atomic_read_unchecked(&dev_priv->vbl_received);
28599 }
28600
28601 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28602 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28603 /* VBLANK interrupt */
28604 if (status & R128_CRTC_VBLANK_INT) {
28605 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28606 - atomic_inc(&dev_priv->vbl_received);
28607 + atomic_inc_unchecked(&dev_priv->vbl_received);
28608 drm_handle_vblank(dev, 0);
28609 return IRQ_HANDLED;
28610 }
28611 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c
28612 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28613 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28614 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28615
28616 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28617 {
28618 - if (atomic_read(&dev_priv->idle_count) == 0) {
28619 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28620 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28621 } else {
28622 - atomic_set(&dev_priv->idle_count, 0);
28623 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28624 }
28625 }
28626
28627 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c
28628 --- linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28629 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28630 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28631 char name[512];
28632 int i;
28633
28634 + pax_track_stack();
28635 +
28636 ctx->card = card;
28637 ctx->bios = bios;
28638
28639 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c
28640 --- linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
28641 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
28642 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
28643 regex_t mask_rex;
28644 regmatch_t match[4];
28645 char buf[1024];
28646 - size_t end;
28647 + long end;
28648 int len;
28649 int done = 0;
28650 int r;
28651 unsigned o;
28652 struct offset *offset;
28653 char last_reg_s[10];
28654 - int last_reg;
28655 + unsigned long last_reg;
28656
28657 if (regcomp
28658 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
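
The mkregtable.c hunk does not state why `end` becomes a signed long and `last_reg` an unsigned long, but the usual hazard with a size_t in this kind of parser is arithmetic that can go below zero: an unsigned counter wraps to a huge value instead of becoming negative and testable. A standalone illustration of that difference:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t end  = 0;
	long   send = 0;

	end--;          /* unsigned underflow: wraps to SIZE_MAX */
	send--;         /* signed: stays -1 and can be tested for */

	if (end > 100)
		printf("size_t end wrapped to %zu\n", end);
	if (send < 0)
		printf("long end is simply %ld\n", send);
	return 0;
}
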
28659 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c
28660 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
28661 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
28662 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
28663 bool linkb;
28664 struct radeon_i2c_bus_rec ddc_bus;
28665
28666 + pax_track_stack();
28667 +
28668 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28669
28670 if (data_offset == 0)
28671 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
28672 }
28673 }
28674
28675 -struct bios_connector {
28676 +static struct bios_connector {
28677 bool valid;
28678 uint16_t line_mux;
28679 uint16_t devices;
28680 int connector_type;
28681 struct radeon_i2c_bus_rec ddc_bus;
28682 -};
28683 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28684
28685 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
28686 drm_device
28687 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
28688 uint8_t dac;
28689 union atom_supported_devices *supported_devices;
28690 int i, j;
28691 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28692
28693 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28694
28695 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c
28696 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
28697 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
28698 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
28699
28700 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
28701 error = freq - current_freq;
28702 - error = error < 0 ? 0xffffffff : error;
28703 + error = (int32_t)error < 0 ? 0xffffffff : error;
28704 } else
28705 error = abs(current_freq - freq);
28706 vco_diff = abs(vco - best_vco);
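
The radeon_display.c fix works around an always-false comparison: `error` holds the result of an unsigned 32-bit subtraction, so `error < 0` can never be true, and casting to int32_t is what restores the intended "did freq - current_freq go negative" test. A standalone demonstration, assuming a 32-bit unsigned `error` as the surrounding code implies:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t freq = 100, current_freq = 250;
	uint32_t error = freq - current_freq;   /* wraps: huge positive value */

	/* 'error < 0' is always false for an unsigned type ... */
	printf("unsigned test: %d\n", error < 0);

	/* ... so the patch reinterprets the bits as signed to detect wrap. */
	if ((int32_t)error < 0)
		error = 0xffffffff;             /* treat as worst possible error */

	printf("error = 0x%08x\n", error);
	return 0;
}
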
28707 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h
28708 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
28709 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
28710 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
28711
28712 /* SW interrupt */
28713 wait_queue_head_t swi_queue;
28714 - atomic_t swi_emitted;
28715 + atomic_unchecked_t swi_emitted;
28716 int vblank_crtc;
28717 uint32_t irq_enable_reg;
28718 uint32_t r500_disp_irq_reg;
28719 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c
28720 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
28721 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
28722 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
28723 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28724 return 0;
28725 }
28726 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28727 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28728 if (!rdev->cp.ready) {
28729 /* FIXME: cp is not running assume everythings is done right
28730 * away
28731 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
28732 return r;
28733 }
28734 WREG32(rdev->fence_drv.scratch_reg, 0);
28735 - atomic_set(&rdev->fence_drv.seq, 0);
28736 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28737 INIT_LIST_HEAD(&rdev->fence_drv.created);
28738 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28739 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28740 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h
28741 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
28742 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
28743 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
28744 */
28745 struct radeon_fence_driver {
28746 uint32_t scratch_reg;
28747 - atomic_t seq;
28748 + atomic_unchecked_t seq;
28749 uint32_t last_seq;
28750 unsigned long count_timeout;
28751 wait_queue_head_t queue;
28752 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c
28753 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
28754 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
28755 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
28756 request = compat_alloc_user_space(sizeof(*request));
28757 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28758 || __put_user(req32.param, &request->param)
28759 - || __put_user((void __user *)(unsigned long)req32.value,
28760 + || __put_user((unsigned long)req32.value,
28761 &request->value))
28762 return -EFAULT;
28763
28764 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c
28765 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
28766 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
28767 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
28768 unsigned int ret;
28769 RING_LOCALS;
28770
28771 - atomic_inc(&dev_priv->swi_emitted);
28772 - ret = atomic_read(&dev_priv->swi_emitted);
28773 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28774 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28775
28776 BEGIN_RING(4);
28777 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28778 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
28779 drm_radeon_private_t *dev_priv =
28780 (drm_radeon_private_t *) dev->dev_private;
28781
28782 - atomic_set(&dev_priv->swi_emitted, 0);
28783 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28784 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28785
28786 dev->max_vblank_count = 0x001fffff;
28787 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c
28788 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
28789 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
28790 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
28791 {
28792 drm_radeon_private_t *dev_priv = dev->dev_private;
28793 drm_radeon_getparam_t *param = data;
28794 - int value;
28795 + int value = 0;
28796
28797 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28798
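
Initializing `value` in radeon_cp_getparam() is a defensive infoleak fix: if any branch of the parameter switch leaves the variable untouched before it is copied back to userspace, an uninitialized stack slot would leak kernel stack contents. The exact leaking path is not visible in this hunk; the sketch below only shows the generic pattern and the one-line fix, with illustrative names and a memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_user(): whatever is in 'src' reaches userspace. */
static void copy_to_user_sketch(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
}

static void getparam_sketch(int param, int *user_out)
{
	int value = 0;          /* the fix: no stale stack bytes can escape */

	switch (param) {
	case 1:
		value = 42;
		break;
	default:
		/* without the initializer, 'value' would be whatever
		 * happened to be left on the stack here */
		break;
	}
	copy_to_user_sketch(user_out, &value, sizeof(value));
}

int main(void)
{
	int out;
	getparam_sketch(7, &out);
	printf("returned %d\n", out);
	return 0;
}
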
28799 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c
28800 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
28801 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
28802 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
28803 DRM_INFO("radeon: ttm finalized\n");
28804 }
28805
28806 -static struct vm_operations_struct radeon_ttm_vm_ops;
28807 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
28808 -
28809 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
28810 -{
28811 - struct ttm_buffer_object *bo;
28812 - int r;
28813 -
28814 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
28815 - if (bo == NULL) {
28816 - return VM_FAULT_NOPAGE;
28817 - }
28818 - r = ttm_vm_ops->fault(vma, vmf);
28819 - return r;
28820 -}
28821 -
28822 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28823 {
28824 struct drm_file *file_priv;
28825 struct radeon_device *rdev;
28826 - int r;
28827
28828 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
28829 return drm_mmap(filp, vma);
28830 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
28831
28832 file_priv = (struct drm_file *)filp->private_data;
28833 rdev = file_priv->minor->dev->dev_private;
28834 - if (rdev == NULL) {
28835 + if (!rdev)
28836 return -EINVAL;
28837 - }
28838 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28839 - if (unlikely(r != 0)) {
28840 - return r;
28841 - }
28842 - if (unlikely(ttm_vm_ops == NULL)) {
28843 - ttm_vm_ops = vma->vm_ops;
28844 - radeon_ttm_vm_ops = *ttm_vm_ops;
28845 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28846 - }
28847 - vma->vm_ops = &radeon_ttm_vm_ops;
28848 - return 0;
28849 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28850 }
28851
28852
28853 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c
28854 --- linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
28855 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
28856 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
28857 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28858 rdev->pm.sideport_bandwidth.full)
28859 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28860 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
28861 + read_delay_latency.full = rfixed_const(800 * 1000);
28862 read_delay_latency.full = rfixed_div(read_delay_latency,
28863 rdev->pm.igp_sideport_mclk);
28864 + a.full = rfixed_const(370);
28865 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
28866 } else {
28867 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28868 rdev->pm.k8_bandwidth.full)
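
The rs690.c change splits `rfixed_const(370 * 800 * 1000)` because radeon's fixed-point format (assumed here to be the 20.12 layout in a 32-bit word) shifts the integer left by the fraction bits, and 296,000,000 << 12 does not fit in 32 bits; doing the divide first and folding the factor 370 in with a fixed-point multiply keeps every intermediate in range. A standalone sketch of the overflow, under that 20.12 assumption:

#include <stdint.h>
#include <stdio.h>

/* Assumed radeon-style 20.12 fixed point: value << 12 in a 32-bit word. */
static uint32_t rfixed_const_sketch(uint32_t v)
{
	return v << 12;
}

int main(void)
{
	/* 370 * 800 * 1000 = 296,000,000: shifting by 12 needs ~40 bits. */
	uint64_t wide   = (uint64_t)(370u * 800u * 1000u) << 12;
	uint32_t narrow = rfixed_const_sketch(370u * 800u * 1000u);

	printf("wanted %llu\n", (unsigned long long)wide);
	printf("got    %u (truncated to 32 bits)\n", narrow);

	/* Hence the patch applies the 370 factor as a separate fixed-point
	 * multiply after the divide, keeping intermediates inside 32 bits. */
	return 0;
}
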
28869 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c
28870 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
28871 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
28872 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
28873 NULL
28874 };
28875
28876 -static struct sysfs_ops ttm_bo_global_ops = {
28877 +static const struct sysfs_ops ttm_bo_global_ops = {
28878 .show = &ttm_bo_global_show
28879 };
28880
28881 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c
28882 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
28883 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
28884 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
28885 {
28886 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
28887 vma->vm_private_data;
28888 - struct ttm_bo_device *bdev = bo->bdev;
28889 + struct ttm_bo_device *bdev;
28890 unsigned long bus_base;
28891 unsigned long bus_offset;
28892 unsigned long bus_size;
28893 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
28894 unsigned long address = (unsigned long)vmf->virtual_address;
28895 int retval = VM_FAULT_NOPAGE;
28896
28897 + if (!bo)
28898 + return VM_FAULT_NOPAGE;
28899 + bdev = bo->bdev;
28900 +
28901 /*
28902 * Work around locking order reversal in fault / nopfn
28903 * between mmap_sem and bo_reserve: Perform a trylock operation
28904 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c
28905 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
28906 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
28907 @@ -36,7 +36,7 @@
28908 struct ttm_global_item {
28909 struct mutex mutex;
28910 void *object;
28911 - int refcount;
28912 + atomic_t refcount;
28913 };
28914
28915 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
28916 @@ -49,7 +49,7 @@ void ttm_global_init(void)
28917 struct ttm_global_item *item = &glob[i];
28918 mutex_init(&item->mutex);
28919 item->object = NULL;
28920 - item->refcount = 0;
28921 + atomic_set(&item->refcount, 0);
28922 }
28923 }
28924
28925 @@ -59,7 +59,7 @@ void ttm_global_release(void)
28926 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
28927 struct ttm_global_item *item = &glob[i];
28928 BUG_ON(item->object != NULL);
28929 - BUG_ON(item->refcount != 0);
28930 + BUG_ON(atomic_read(&item->refcount) != 0);
28931 }
28932 }
28933
28934 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
28935 void *object;
28936
28937 mutex_lock(&item->mutex);
28938 - if (item->refcount == 0) {
28939 + if (atomic_read(&item->refcount) == 0) {
28940 item->object = kzalloc(ref->size, GFP_KERNEL);
28941 if (unlikely(item->object == NULL)) {
28942 ret = -ENOMEM;
28943 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
28944 goto out_err;
28945
28946 }
28947 - ++item->refcount;
28948 + atomic_inc(&item->refcount);
28949 ref->object = item->object;
28950 object = item->object;
28951 mutex_unlock(&item->mutex);
28952 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
28953 struct ttm_global_item *item = &glob[ref->global_type];
28954
28955 mutex_lock(&item->mutex);
28956 - BUG_ON(item->refcount == 0);
28957 + BUG_ON(atomic_read(&item->refcount) == 0);
28958 BUG_ON(ref->object != item->object);
28959 - if (--item->refcount == 0) {
28960 + if (atomic_dec_and_test(&item->refcount)) {
28961 ref->release(ref);
28962 item->object = NULL;
28963 }
28964 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c
28965 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
28966 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
28967 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
28968 NULL
28969 };
28970
28971 -static struct sysfs_ops ttm_mem_zone_ops = {
28972 +static const struct sysfs_ops ttm_mem_zone_ops = {
28973 .show = &ttm_mem_zone_show,
28974 .store = &ttm_mem_zone_store
28975 };
28976 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h
28977 --- linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
28978 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
28979 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28980 typedef uint32_t maskarray_t[5];
28981
28982 typedef struct drm_via_irq {
28983 - atomic_t irq_received;
28984 + atomic_unchecked_t irq_received;
28985 uint32_t pending_mask;
28986 uint32_t enable_mask;
28987 wait_queue_head_t irq_queue;
28988 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28989 struct timeval last_vblank;
28990 int last_vblank_valid;
28991 unsigned usec_per_vblank;
28992 - atomic_t vbl_received;
28993 + atomic_unchecked_t vbl_received;
28994 drm_via_state_t hc_state;
28995 char pci_buf[VIA_PCI_BUF_SIZE];
28996 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28997 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c
28998 --- linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
28999 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29000 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29001 if (crtc != 0)
29002 return 0;
29003
29004 - return atomic_read(&dev_priv->vbl_received);
29005 + return atomic_read_unchecked(&dev_priv->vbl_received);
29006 }
29007
29008 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29009 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29010
29011 status = VIA_READ(VIA_REG_INTERRUPT);
29012 if (status & VIA_IRQ_VBLANK_PENDING) {
29013 - atomic_inc(&dev_priv->vbl_received);
29014 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29015 + atomic_inc_unchecked(&dev_priv->vbl_received);
29016 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29017 do_gettimeofday(&cur_vblank);
29018 if (dev_priv->last_vblank_valid) {
29019 dev_priv->usec_per_vblank =
29020 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29021 dev_priv->last_vblank = cur_vblank;
29022 dev_priv->last_vblank_valid = 1;
29023 }
29024 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29025 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29026 DRM_DEBUG("US per vblank is: %u\n",
29027 dev_priv->usec_per_vblank);
29028 }
29029 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29030
29031 for (i = 0; i < dev_priv->num_irqs; ++i) {
29032 if (status & cur_irq->pending_mask) {
29033 - atomic_inc(&cur_irq->irq_received);
29034 + atomic_inc_unchecked(&cur_irq->irq_received);
29035 DRM_WAKEUP(&cur_irq->irq_queue);
29036 handled = 1;
29037 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
29038 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
29039 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29040 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29041 masks[irq][4]));
29042 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29043 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29044 } else {
29045 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29046 (((cur_irq_sequence =
29047 - atomic_read(&cur_irq->irq_received)) -
29048 + atomic_read_unchecked(&cur_irq->irq_received)) -
29049 *sequence) <= (1 << 23)));
29050 }
29051 *sequence = cur_irq_sequence;
29052 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
29053 }
29054
29055 for (i = 0; i < dev_priv->num_irqs; ++i) {
29056 - atomic_set(&cur_irq->irq_received, 0);
29057 + atomic_set_unchecked(&cur_irq->irq_received, 0);
29058 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29059 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29060 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29061 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
29062 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29063 case VIA_IRQ_RELATIVE:
29064 irqwait->request.sequence +=
29065 - atomic_read(&cur_irq->irq_received);
29066 + atomic_read_unchecked(&cur_irq->irq_received);
29067 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29068 case VIA_IRQ_ABSOLUTE:
29069 break;
29070 diff -urNp linux-2.6.32.42/drivers/hid/hid-core.c linux-2.6.32.42/drivers/hid/hid-core.c
29071 --- linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
29072 +++ linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
29073 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
29074
29075 int hid_add_device(struct hid_device *hdev)
29076 {
29077 - static atomic_t id = ATOMIC_INIT(0);
29078 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29079 int ret;
29080
29081 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29082 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
29083 /* XXX hack, any other cleaner solution after the driver core
29084 * is converted to allow more than 20 bytes as the device name? */
29085 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29086 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29087 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29088
29089 ret = device_add(&hdev->dev);
29090 if (!ret)
29091 diff -urNp linux-2.6.32.42/drivers/hid/usbhid/hiddev.c linux-2.6.32.42/drivers/hid/usbhid/hiddev.c
29092 --- linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
29093 +++ linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
29094 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
29095 return put_user(HID_VERSION, (int __user *)arg);
29096
29097 case HIDIOCAPPLICATION:
29098 - if (arg < 0 || arg >= hid->maxapplication)
29099 + if (arg >= hid->maxapplication)
29100 return -EINVAL;
29101
29102 for (i = 0; i < hid->maxcollection; i++)
29103 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.c linux-2.6.32.42/drivers/hwmon/lis3lv02d.c
29104 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
29105 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
29106 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
29107 * the lid is closed. This leads to interrupts as soon as a little move
29108 * is done.
29109 */
29110 - atomic_inc(&lis3_dev.count);
29111 + atomic_inc_unchecked(&lis3_dev.count);
29112
29113 wake_up_interruptible(&lis3_dev.misc_wait);
29114 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29115 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
29116 if (test_and_set_bit(0, &lis3_dev.misc_opened))
29117 return -EBUSY; /* already open */
29118
29119 - atomic_set(&lis3_dev.count, 0);
29120 + atomic_set_unchecked(&lis3_dev.count, 0);
29121
29122 /*
29123 * The sensor can generate interrupts for free-fall and direction
29124 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
29125 add_wait_queue(&lis3_dev.misc_wait, &wait);
29126 while (true) {
29127 set_current_state(TASK_INTERRUPTIBLE);
29128 - data = atomic_xchg(&lis3_dev.count, 0);
29129 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29130 if (data)
29131 break;
29132
29133 @@ -244,7 +244,7 @@ out:
29134 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29135 {
29136 poll_wait(file, &lis3_dev.misc_wait, wait);
29137 - if (atomic_read(&lis3_dev.count))
29138 + if (atomic_read_unchecked(&lis3_dev.count))
29139 return POLLIN | POLLRDNORM;
29140 return 0;
29141 }
29142 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.h linux-2.6.32.42/drivers/hwmon/lis3lv02d.h
29143 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
29144 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
29145 @@ -201,7 +201,7 @@ struct lis3lv02d {
29146
29147 struct input_polled_dev *idev; /* input device */
29148 struct platform_device *pdev; /* platform device */
29149 - atomic_t count; /* interrupt count after last read */
29150 + atomic_unchecked_t count; /* interrupt count after last read */
29151 int xcalib; /* calibrated null value for x */
29152 int ycalib; /* calibrated null value for y */
29153 int zcalib; /* calibrated null value for z */
29154 diff -urNp linux-2.6.32.42/drivers/hwmon/sht15.c linux-2.6.32.42/drivers/hwmon/sht15.c
29155 --- linux-2.6.32.42/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
29156 +++ linux-2.6.32.42/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
29157 @@ -112,7 +112,7 @@ struct sht15_data {
29158 int supply_uV;
29159 int supply_uV_valid;
29160 struct work_struct update_supply_work;
29161 - atomic_t interrupt_handled;
29162 + atomic_unchecked_t interrupt_handled;
29163 };
29164
29165 /**
29166 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
29167 return ret;
29168
29169 gpio_direction_input(data->pdata->gpio_data);
29170 - atomic_set(&data->interrupt_handled, 0);
29171 + atomic_set_unchecked(&data->interrupt_handled, 0);
29172
29173 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29174 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29175 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29176 /* Only relevant if the interrupt hasn't occured. */
29177 - if (!atomic_read(&data->interrupt_handled))
29178 + if (!atomic_read_unchecked(&data->interrupt_handled))
29179 schedule_work(&data->read_work);
29180 }
29181 ret = wait_event_timeout(data->wait_queue,
29182 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
29183 struct sht15_data *data = d;
29184 /* First disable the interrupt */
29185 disable_irq_nosync(irq);
29186 - atomic_inc(&data->interrupt_handled);
29187 + atomic_inc_unchecked(&data->interrupt_handled);
29188 /* Then schedule a reading work struct */
29189 if (data->flag != SHT15_READING_NOTHING)
29190 schedule_work(&data->read_work);
29191 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
29192 here as could have gone low in meantime so verify
29193 it hasn't!
29194 */
29195 - atomic_set(&data->interrupt_handled, 0);
29196 + atomic_set_unchecked(&data->interrupt_handled, 0);
29197 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29198 /* If still not occured or another handler has been scheduled */
29199 if (gpio_get_value(data->pdata->gpio_data)
29200 - || atomic_read(&data->interrupt_handled))
29201 + || atomic_read_unchecked(&data->interrupt_handled))
29202 return;
29203 }
29204 /* Read the data back from the device */
29205 diff -urNp linux-2.6.32.42/drivers/hwmon/w83791d.c linux-2.6.32.42/drivers/hwmon/w83791d.c
29206 --- linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
29207 +++ linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
29208 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
29209 struct i2c_board_info *info);
29210 static int w83791d_remove(struct i2c_client *client);
29211
29212 -static int w83791d_read(struct i2c_client *client, u8 register);
29213 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
29214 +static int w83791d_read(struct i2c_client *client, u8 reg);
29215 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
29216 static struct w83791d_data *w83791d_update_device(struct device *dev);
29217
29218 #ifdef DEBUG
29219 diff -urNp linux-2.6.32.42/drivers/ide/ide-cd.c linux-2.6.32.42/drivers/ide/ide-cd.c
29220 --- linux-2.6.32.42/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
29221 +++ linux-2.6.32.42/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
29222 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
29223 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29224 if ((unsigned long)buf & alignment
29225 || blk_rq_bytes(rq) & q->dma_pad_mask
29226 - || object_is_on_stack(buf))
29227 + || object_starts_on_stack(buf))
29228 drive->dma = 0;
29229 }
29230 }
29231 diff -urNp linux-2.6.32.42/drivers/ide/ide-floppy.c linux-2.6.32.42/drivers/ide/ide-floppy.c
29232 --- linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
29233 +++ linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
29234 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
29235 u8 pc_buf[256], header_len, desc_cnt;
29236 int i, rc = 1, blocks, length;
29237
29238 + pax_track_stack();
29239 +
29240 ide_debug_log(IDE_DBG_FUNC, "enter");
29241
29242 drive->bios_cyl = 0;
29243 diff -urNp linux-2.6.32.42/drivers/ide/setup-pci.c linux-2.6.32.42/drivers/ide/setup-pci.c
29244 --- linux-2.6.32.42/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29245 +++ linux-2.6.32.42/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29246 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29247 int ret, i, n_ports = dev2 ? 4 : 2;
29248 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29249
29250 + pax_track_stack();
29251 +
29252 for (i = 0; i < n_ports / 2; i++) {
29253 ret = ide_setup_pci_controller(pdev[i], d, !i);
29254 if (ret < 0)
29255 diff -urNp linux-2.6.32.42/drivers/ieee1394/dv1394.c linux-2.6.32.42/drivers/ieee1394/dv1394.c
29256 --- linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29257 +++ linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29258 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29259 based upon DIF section and sequence
29260 */
29261
29262 -static void inline
29263 +static inline void
29264 frame_put_packet (struct frame *f, struct packet *p)
29265 {
29266 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29267 diff -urNp linux-2.6.32.42/drivers/ieee1394/hosts.c linux-2.6.32.42/drivers/ieee1394/hosts.c
29268 --- linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29269 +++ linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29270 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29271 }
29272
29273 static struct hpsb_host_driver dummy_driver = {
29274 + .name = "dummy",
29275 .transmit_packet = dummy_transmit_packet,
29276 .devctl = dummy_devctl,
29277 .isoctl = dummy_isoctl
29278 diff -urNp linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c
29279 --- linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29280 +++ linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29281 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29282 for (func = 0; func < 8; func++) {
29283 u32 class = read_pci_config(num,slot,func,
29284 PCI_CLASS_REVISION);
29285 - if ((class == 0xffffffff))
29286 + if (class == 0xffffffff)
29287 continue; /* No device at this func */
29288
29289 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29290 diff -urNp linux-2.6.32.42/drivers/ieee1394/ohci1394.c linux-2.6.32.42/drivers/ieee1394/ohci1394.c
29291 --- linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29292 +++ linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29293 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29294 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29295
29296 /* Module Parameters */
29297 -static int phys_dma = 1;
29298 +static int phys_dma;
29299 module_param(phys_dma, int, 0444);
29300 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29301 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29302
29303 static void dma_trm_tasklet(unsigned long data);
29304 static void dma_trm_reset(struct dma_trm_ctx *d);
29305 diff -urNp linux-2.6.32.42/drivers/ieee1394/sbp2.c linux-2.6.32.42/drivers/ieee1394/sbp2.c
29306 --- linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29307 +++ linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29308 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29309 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29310 MODULE_LICENSE("GPL");
29311
29312 -static int sbp2_module_init(void)
29313 +static int __init sbp2_module_init(void)
29314 {
29315 int ret;
29316
29317 diff -urNp linux-2.6.32.42/drivers/infiniband/core/cm.c linux-2.6.32.42/drivers/infiniband/core/cm.c
29318 --- linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29319 +++ linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29320 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29321
29322 struct cm_counter_group {
29323 struct kobject obj;
29324 - atomic_long_t counter[CM_ATTR_COUNT];
29325 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29326 };
29327
29328 struct cm_counter_attribute {
29329 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29330 struct ib_mad_send_buf *msg = NULL;
29331 int ret;
29332
29333 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29334 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29335 counter[CM_REQ_COUNTER]);
29336
29337 /* Quick state check to discard duplicate REQs. */
29338 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29339 if (!cm_id_priv)
29340 return;
29341
29342 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29343 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29344 counter[CM_REP_COUNTER]);
29345 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29346 if (ret)
29347 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29348 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29349 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29350 spin_unlock_irq(&cm_id_priv->lock);
29351 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29352 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29353 counter[CM_RTU_COUNTER]);
29354 goto out;
29355 }
29356 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29357 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29358 dreq_msg->local_comm_id);
29359 if (!cm_id_priv) {
29360 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29361 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29362 counter[CM_DREQ_COUNTER]);
29363 cm_issue_drep(work->port, work->mad_recv_wc);
29364 return -EINVAL;
29365 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29366 case IB_CM_MRA_REP_RCVD:
29367 break;
29368 case IB_CM_TIMEWAIT:
29369 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29370 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29371 counter[CM_DREQ_COUNTER]);
29372 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29373 goto unlock;
29374 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29375 cm_free_msg(msg);
29376 goto deref;
29377 case IB_CM_DREQ_RCVD:
29378 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29379 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29380 counter[CM_DREQ_COUNTER]);
29381 goto unlock;
29382 default:
29383 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29384 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29385 cm_id_priv->msg, timeout)) {
29386 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29387 - atomic_long_inc(&work->port->
29388 + atomic_long_inc_unchecked(&work->port->
29389 counter_group[CM_RECV_DUPLICATES].
29390 counter[CM_MRA_COUNTER]);
29391 goto out;
29392 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29393 break;
29394 case IB_CM_MRA_REQ_RCVD:
29395 case IB_CM_MRA_REP_RCVD:
29396 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29397 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29398 counter[CM_MRA_COUNTER]);
29399 /* fall through */
29400 default:
29401 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29402 case IB_CM_LAP_IDLE:
29403 break;
29404 case IB_CM_MRA_LAP_SENT:
29405 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29406 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29407 counter[CM_LAP_COUNTER]);
29408 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29409 goto unlock;
29410 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29411 cm_free_msg(msg);
29412 goto deref;
29413 case IB_CM_LAP_RCVD:
29414 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29415 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29416 counter[CM_LAP_COUNTER]);
29417 goto unlock;
29418 default:
29419 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29420 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29421 if (cur_cm_id_priv) {
29422 spin_unlock_irq(&cm.lock);
29423 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29424 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29425 counter[CM_SIDR_REQ_COUNTER]);
29426 goto out; /* Duplicate message. */
29427 }
29428 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29429 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29430 msg->retries = 1;
29431
29432 - atomic_long_add(1 + msg->retries,
29433 + atomic_long_add_unchecked(1 + msg->retries,
29434 &port->counter_group[CM_XMIT].counter[attr_index]);
29435 if (msg->retries)
29436 - atomic_long_add(msg->retries,
29437 + atomic_long_add_unchecked(msg->retries,
29438 &port->counter_group[CM_XMIT_RETRIES].
29439 counter[attr_index]);
29440
29441 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29442 }
29443
29444 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29445 - atomic_long_inc(&port->counter_group[CM_RECV].
29446 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29447 counter[attr_id - CM_ATTR_ID_OFFSET]);
29448
29449 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29450 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29451 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29452
29453 return sprintf(buf, "%ld\n",
29454 - atomic_long_read(&group->counter[cm_attr->index]));
29455 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29456 }
29457
29458 -static struct sysfs_ops cm_counter_ops = {
29459 +static const struct sysfs_ops cm_counter_ops = {
29460 .show = cm_show_counter
29461 };
29462
29463 diff -urNp linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c
29464 --- linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29465 +++ linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29466 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29467
29468 struct task_struct *thread;
29469
29470 - atomic_t req_ser;
29471 - atomic_t flush_ser;
29472 + atomic_unchecked_t req_ser;
29473 + atomic_unchecked_t flush_ser;
29474
29475 wait_queue_head_t force_wait;
29476 };
29477 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29478 struct ib_fmr_pool *pool = pool_ptr;
29479
29480 do {
29481 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29482 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29483 ib_fmr_batch_release(pool);
29484
29485 - atomic_inc(&pool->flush_ser);
29486 + atomic_inc_unchecked(&pool->flush_ser);
29487 wake_up_interruptible(&pool->force_wait);
29488
29489 if (pool->flush_function)
29490 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29491 }
29492
29493 set_current_state(TASK_INTERRUPTIBLE);
29494 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29495 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29496 !kthread_should_stop())
29497 schedule();
29498 __set_current_state(TASK_RUNNING);
29499 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29500 pool->dirty_watermark = params->dirty_watermark;
29501 pool->dirty_len = 0;
29502 spin_lock_init(&pool->pool_lock);
29503 - atomic_set(&pool->req_ser, 0);
29504 - atomic_set(&pool->flush_ser, 0);
29505 + atomic_set_unchecked(&pool->req_ser, 0);
29506 + atomic_set_unchecked(&pool->flush_ser, 0);
29507 init_waitqueue_head(&pool->force_wait);
29508
29509 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29510 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29511 }
29512 spin_unlock_irq(&pool->pool_lock);
29513
29514 - serial = atomic_inc_return(&pool->req_ser);
29515 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29516 wake_up_process(pool->thread);
29517
29518 if (wait_event_interruptible(pool->force_wait,
29519 - atomic_read(&pool->flush_ser) - serial >= 0))
29520 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29521 return -EINTR;
29522
29523 return 0;
29524 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29525 } else {
29526 list_add_tail(&fmr->list, &pool->dirty_list);
29527 if (++pool->dirty_len >= pool->dirty_watermark) {
29528 - atomic_inc(&pool->req_ser);
29529 + atomic_inc_unchecked(&pool->req_ser);
29530 wake_up_process(pool->thread);
29531 }
29532 }
29533 diff -urNp linux-2.6.32.42/drivers/infiniband/core/sysfs.c linux-2.6.32.42/drivers/infiniband/core/sysfs.c
29534 --- linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29535 +++ linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29536 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29537 return port_attr->show(p, port_attr, buf);
29538 }
29539
29540 -static struct sysfs_ops port_sysfs_ops = {
29541 +static const struct sysfs_ops port_sysfs_ops = {
29542 .show = port_attr_show
29543 };
29544
29545 diff -urNp linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c
29546 --- linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29547 +++ linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29548 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29549 dst->grh.sgid_index = src->grh.sgid_index;
29550 dst->grh.hop_limit = src->grh.hop_limit;
29551 dst->grh.traffic_class = src->grh.traffic_class;
29552 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29553 dst->dlid = src->dlid;
29554 dst->sl = src->sl;
29555 dst->src_path_bits = src->src_path_bits;
29556 dst->static_rate = src->static_rate;
29557 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29558 dst->port_num = src->port_num;
29559 + dst->reserved = 0;
29560 }
29561 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29562
29563 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29564 struct ib_qp_attr *src)
29565 {
29566 + dst->qp_state = src->qp_state;
29567 dst->cur_qp_state = src->cur_qp_state;
29568 dst->path_mtu = src->path_mtu;
29569 dst->path_mig_state = src->path_mig_state;
29570 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29571 dst->rnr_retry = src->rnr_retry;
29572 dst->alt_port_num = src->alt_port_num;
29573 dst->alt_timeout = src->alt_timeout;
29574 + memset(dst->reserved, 0, sizeof(dst->reserved));
29575 }
29576 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29577
29578 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c
29579 --- linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29580 +++ linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29581 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29582 struct infinipath_counters counters;
29583 struct ipath_devdata *dd;
29584
29585 + pax_track_stack();
29586 +
29587 dd = file->f_path.dentry->d_inode->i_private;
29588 dd->ipath_f_read_counters(dd, &counters);
29589
29590 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c
29591 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29592 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29593 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29594 LIST_HEAD(nes_adapter_list);
29595 static LIST_HEAD(nes_dev_list);
29596
29597 -atomic_t qps_destroyed;
29598 +atomic_unchecked_t qps_destroyed;
29599
29600 static unsigned int ee_flsh_adapter;
29601 static unsigned int sysfs_nonidx_addr;
29602 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29603 struct nes_adapter *nesadapter = nesdev->nesadapter;
29604 u32 qp_id;
29605
29606 - atomic_inc(&qps_destroyed);
29607 + atomic_inc_unchecked(&qps_destroyed);
29608
29609 /* Free the control structures */
29610
29611 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c
29612 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29613 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29614 @@ -69,11 +69,11 @@ u32 cm_packets_received;
29615 u32 cm_listens_created;
29616 u32 cm_listens_destroyed;
29617 u32 cm_backlog_drops;
29618 -atomic_t cm_loopbacks;
29619 -atomic_t cm_nodes_created;
29620 -atomic_t cm_nodes_destroyed;
29621 -atomic_t cm_accel_dropped_pkts;
29622 -atomic_t cm_resets_recvd;
29623 +atomic_unchecked_t cm_loopbacks;
29624 +atomic_unchecked_t cm_nodes_created;
29625 +atomic_unchecked_t cm_nodes_destroyed;
29626 +atomic_unchecked_t cm_accel_dropped_pkts;
29627 +atomic_unchecked_t cm_resets_recvd;
29628
29629 static inline int mini_cm_accelerated(struct nes_cm_core *,
29630 struct nes_cm_node *);
29631 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29632
29633 static struct nes_cm_core *g_cm_core;
29634
29635 -atomic_t cm_connects;
29636 -atomic_t cm_accepts;
29637 -atomic_t cm_disconnects;
29638 -atomic_t cm_closes;
29639 -atomic_t cm_connecteds;
29640 -atomic_t cm_connect_reqs;
29641 -atomic_t cm_rejects;
29642 +atomic_unchecked_t cm_connects;
29643 +atomic_unchecked_t cm_accepts;
29644 +atomic_unchecked_t cm_disconnects;
29645 +atomic_unchecked_t cm_closes;
29646 +atomic_unchecked_t cm_connecteds;
29647 +atomic_unchecked_t cm_connect_reqs;
29648 +atomic_unchecked_t cm_rejects;
29649
29650
29651 /**
29652 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
29653 cm_node->rem_mac);
29654
29655 add_hte_node(cm_core, cm_node);
29656 - atomic_inc(&cm_nodes_created);
29657 + atomic_inc_unchecked(&cm_nodes_created);
29658
29659 return cm_node;
29660 }
29661 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
29662 }
29663
29664 atomic_dec(&cm_core->node_cnt);
29665 - atomic_inc(&cm_nodes_destroyed);
29666 + atomic_inc_unchecked(&cm_nodes_destroyed);
29667 nesqp = cm_node->nesqp;
29668 if (nesqp) {
29669 nesqp->cm_node = NULL;
29670 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
29671
29672 static void drop_packet(struct sk_buff *skb)
29673 {
29674 - atomic_inc(&cm_accel_dropped_pkts);
29675 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29676 dev_kfree_skb_any(skb);
29677 }
29678
29679 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
29680
29681 int reset = 0; /* whether to send reset in case of err.. */
29682 int passive_state;
29683 - atomic_inc(&cm_resets_recvd);
29684 + atomic_inc_unchecked(&cm_resets_recvd);
29685 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29686 " refcnt=%d\n", cm_node, cm_node->state,
29687 atomic_read(&cm_node->ref_count));
29688 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
29689 rem_ref_cm_node(cm_node->cm_core, cm_node);
29690 return NULL;
29691 }
29692 - atomic_inc(&cm_loopbacks);
29693 + atomic_inc_unchecked(&cm_loopbacks);
29694 loopbackremotenode->loopbackpartner = cm_node;
29695 loopbackremotenode->tcp_cntxt.rcv_wscale =
29696 NES_CM_DEFAULT_RCV_WND_SCALE;
29697 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
29698 add_ref_cm_node(cm_node);
29699 } else if (cm_node->state == NES_CM_STATE_TSA) {
29700 rem_ref_cm_node(cm_core, cm_node);
29701 - atomic_inc(&cm_accel_dropped_pkts);
29702 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29703 dev_kfree_skb_any(skb);
29704 break;
29705 }
29706 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
29707
29708 if ((cm_id) && (cm_id->event_handler)) {
29709 if (issue_disconn) {
29710 - atomic_inc(&cm_disconnects);
29711 + atomic_inc_unchecked(&cm_disconnects);
29712 cm_event.event = IW_CM_EVENT_DISCONNECT;
29713 cm_event.status = disconn_status;
29714 cm_event.local_addr = cm_id->local_addr;
29715 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
29716 }
29717
29718 if (issue_close) {
29719 - atomic_inc(&cm_closes);
29720 + atomic_inc_unchecked(&cm_closes);
29721 nes_disconnect(nesqp, 1);
29722
29723 cm_id->provider_data = nesqp;
29724 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29725
29726 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29727 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29728 - atomic_inc(&cm_accepts);
29729 + atomic_inc_unchecked(&cm_accepts);
29730
29731 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29732 atomic_read(&nesvnic->netdev->refcnt));
29733 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29734
29735 struct nes_cm_core *cm_core;
29736
29737 - atomic_inc(&cm_rejects);
29738 + atomic_inc_unchecked(&cm_rejects);
29739 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29740 loopback = cm_node->loopbackpartner;
29741 cm_core = cm_node->cm_core;
29742 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29743 ntohl(cm_id->local_addr.sin_addr.s_addr),
29744 ntohs(cm_id->local_addr.sin_port));
29745
29746 - atomic_inc(&cm_connects);
29747 + atomic_inc_unchecked(&cm_connects);
29748 nesqp->active_conn = 1;
29749
29750 /* cache the cm_id in the qp */
29751 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
29752 if (nesqp->destroyed) {
29753 return;
29754 }
29755 - atomic_inc(&cm_connecteds);
29756 + atomic_inc_unchecked(&cm_connecteds);
29757 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29758 " local port 0x%04X. jiffies = %lu.\n",
29759 nesqp->hwqp.qp_id,
29760 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
29761
29762 ret = cm_id->event_handler(cm_id, &cm_event);
29763 cm_id->add_ref(cm_id);
29764 - atomic_inc(&cm_closes);
29765 + atomic_inc_unchecked(&cm_closes);
29766 cm_event.event = IW_CM_EVENT_CLOSE;
29767 cm_event.status = IW_CM_EVENT_STATUS_OK;
29768 cm_event.provider_data = cm_id->provider_data;
29769 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
29770 return;
29771 cm_id = cm_node->cm_id;
29772
29773 - atomic_inc(&cm_connect_reqs);
29774 + atomic_inc_unchecked(&cm_connect_reqs);
29775 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29776 cm_node, cm_id, jiffies);
29777
29778 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
29779 return;
29780 cm_id = cm_node->cm_id;
29781
29782 - atomic_inc(&cm_connect_reqs);
29783 + atomic_inc_unchecked(&cm_connect_reqs);
29784 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29785 cm_node, cm_id, jiffies);
29786
29787 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h
29788 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
29789 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
29790 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
29791 extern unsigned int wqm_quanta;
29792 extern struct list_head nes_adapter_list;
29793
29794 -extern atomic_t cm_connects;
29795 -extern atomic_t cm_accepts;
29796 -extern atomic_t cm_disconnects;
29797 -extern atomic_t cm_closes;
29798 -extern atomic_t cm_connecteds;
29799 -extern atomic_t cm_connect_reqs;
29800 -extern atomic_t cm_rejects;
29801 -extern atomic_t mod_qp_timouts;
29802 -extern atomic_t qps_created;
29803 -extern atomic_t qps_destroyed;
29804 -extern atomic_t sw_qps_destroyed;
29805 +extern atomic_unchecked_t cm_connects;
29806 +extern atomic_unchecked_t cm_accepts;
29807 +extern atomic_unchecked_t cm_disconnects;
29808 +extern atomic_unchecked_t cm_closes;
29809 +extern atomic_unchecked_t cm_connecteds;
29810 +extern atomic_unchecked_t cm_connect_reqs;
29811 +extern atomic_unchecked_t cm_rejects;
29812 +extern atomic_unchecked_t mod_qp_timouts;
29813 +extern atomic_unchecked_t qps_created;
29814 +extern atomic_unchecked_t qps_destroyed;
29815 +extern atomic_unchecked_t sw_qps_destroyed;
29816 extern u32 mh_detected;
29817 extern u32 mh_pauses_sent;
29818 extern u32 cm_packets_sent;
29819 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
29820 extern u32 cm_listens_created;
29821 extern u32 cm_listens_destroyed;
29822 extern u32 cm_backlog_drops;
29823 -extern atomic_t cm_loopbacks;
29824 -extern atomic_t cm_nodes_created;
29825 -extern atomic_t cm_nodes_destroyed;
29826 -extern atomic_t cm_accel_dropped_pkts;
29827 -extern atomic_t cm_resets_recvd;
29828 +extern atomic_unchecked_t cm_loopbacks;
29829 +extern atomic_unchecked_t cm_nodes_created;
29830 +extern atomic_unchecked_t cm_nodes_destroyed;
29831 +extern atomic_unchecked_t cm_accel_dropped_pkts;
29832 +extern atomic_unchecked_t cm_resets_recvd;
29833
29834 extern u32 int_mod_timer_init;
29835 extern u32 int_mod_cq_depth_256;
29836 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c
29837 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
29838 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
29839 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
29840 target_stat_values[++index] = mh_detected;
29841 target_stat_values[++index] = mh_pauses_sent;
29842 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29843 - target_stat_values[++index] = atomic_read(&cm_connects);
29844 - target_stat_values[++index] = atomic_read(&cm_accepts);
29845 - target_stat_values[++index] = atomic_read(&cm_disconnects);
29846 - target_stat_values[++index] = atomic_read(&cm_connecteds);
29847 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29848 - target_stat_values[++index] = atomic_read(&cm_rejects);
29849 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29850 - target_stat_values[++index] = atomic_read(&qps_created);
29851 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29852 - target_stat_values[++index] = atomic_read(&qps_destroyed);
29853 - target_stat_values[++index] = atomic_read(&cm_closes);
29854 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29855 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29856 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29857 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29858 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29859 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29860 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29861 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29862 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29863 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29864 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29865 target_stat_values[++index] = cm_packets_sent;
29866 target_stat_values[++index] = cm_packets_bounced;
29867 target_stat_values[++index] = cm_packets_created;
29868 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
29869 target_stat_values[++index] = cm_listens_created;
29870 target_stat_values[++index] = cm_listens_destroyed;
29871 target_stat_values[++index] = cm_backlog_drops;
29872 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
29873 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
29874 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29875 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29876 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29877 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29878 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29879 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29880 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29881 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29882 target_stat_values[++index] = int_mod_timer_init;
29883 target_stat_values[++index] = int_mod_cq_depth_1;
29884 target_stat_values[++index] = int_mod_cq_depth_4;
29885 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c
29886 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
29887 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
29888 @@ -45,9 +45,9 @@
29889
29890 #include <rdma/ib_umem.h>
29891
29892 -atomic_t mod_qp_timouts;
29893 -atomic_t qps_created;
29894 -atomic_t sw_qps_destroyed;
29895 +atomic_unchecked_t mod_qp_timouts;
29896 +atomic_unchecked_t qps_created;
29897 +atomic_unchecked_t sw_qps_destroyed;
29898
29899 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29900
29901 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
29902 if (init_attr->create_flags)
29903 return ERR_PTR(-EINVAL);
29904
29905 - atomic_inc(&qps_created);
29906 + atomic_inc_unchecked(&qps_created);
29907 switch (init_attr->qp_type) {
29908 case IB_QPT_RC:
29909 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29910 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
29911 struct iw_cm_event cm_event;
29912 int ret;
29913
29914 - atomic_inc(&sw_qps_destroyed);
29915 + atomic_inc_unchecked(&sw_qps_destroyed);
29916 nesqp->destroyed = 1;
29917
29918 /* Blow away the connection if it exists. */
29919 diff -urNp linux-2.6.32.42/drivers/input/gameport/gameport.c linux-2.6.32.42/drivers/input/gameport/gameport.c
29920 --- linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
29921 +++ linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
29922 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
29923 */
29924 static void gameport_init_port(struct gameport *gameport)
29925 {
29926 - static atomic_t gameport_no = ATOMIC_INIT(0);
29927 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29928
29929 __module_get(THIS_MODULE);
29930
29931 mutex_init(&gameport->drv_mutex);
29932 device_initialize(&gameport->dev);
29933 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
29934 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29935 gameport->dev.bus = &gameport_bus;
29936 gameport->dev.release = gameport_release_port;
29937 if (gameport->parent)
29938 diff -urNp linux-2.6.32.42/drivers/input/input.c linux-2.6.32.42/drivers/input/input.c
29939 --- linux-2.6.32.42/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
29940 +++ linux-2.6.32.42/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
29941 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
29942 */
29943 int input_register_device(struct input_dev *dev)
29944 {
29945 - static atomic_t input_no = ATOMIC_INIT(0);
29946 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29947 struct input_handler *handler;
29948 const char *path;
29949 int error;
29950 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
29951 dev->setkeycode = input_default_setkeycode;
29952
29953 dev_set_name(&dev->dev, "input%ld",
29954 - (unsigned long) atomic_inc_return(&input_no) - 1);
29955 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29956
29957 error = device_add(&dev->dev);
29958 if (error)
29959 diff -urNp linux-2.6.32.42/drivers/input/joystick/sidewinder.c linux-2.6.32.42/drivers/input/joystick/sidewinder.c
29960 --- linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
29961 +++ linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
29962 @@ -30,6 +30,7 @@
29963 #include <linux/kernel.h>
29964 #include <linux/module.h>
29965 #include <linux/slab.h>
29966 +#include <linux/sched.h>
29967 #include <linux/init.h>
29968 #include <linux/input.h>
29969 #include <linux/gameport.h>
29970 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29971 unsigned char buf[SW_LENGTH];
29972 int i;
29973
29974 + pax_track_stack();
29975 +
29976 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29977
29978 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29979 diff -urNp linux-2.6.32.42/drivers/input/joystick/xpad.c linux-2.6.32.42/drivers/input/joystick/xpad.c
29980 --- linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
29981 +++ linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
29982 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
29983
29984 static int xpad_led_probe(struct usb_xpad *xpad)
29985 {
29986 - static atomic_t led_seq = ATOMIC_INIT(0);
29987 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29988 long led_no;
29989 struct xpad_led *led;
29990 struct led_classdev *led_cdev;
29991 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
29992 if (!led)
29993 return -ENOMEM;
29994
29995 - led_no = (long)atomic_inc_return(&led_seq) - 1;
29996 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29997
29998 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29999 led->xpad = xpad;
30000 diff -urNp linux-2.6.32.42/drivers/input/serio/serio.c linux-2.6.32.42/drivers/input/serio/serio.c
30001 --- linux-2.6.32.42/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
30002 +++ linux-2.6.32.42/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
30003 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
30004 */
30005 static void serio_init_port(struct serio *serio)
30006 {
30007 - static atomic_t serio_no = ATOMIC_INIT(0);
30008 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30009
30010 __module_get(THIS_MODULE);
30011
30012 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
30013 mutex_init(&serio->drv_mutex);
30014 device_initialize(&serio->dev);
30015 dev_set_name(&serio->dev, "serio%ld",
30016 - (long)atomic_inc_return(&serio_no) - 1);
30017 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30018 serio->dev.bus = &serio_bus;
30019 serio->dev.release = serio_release_port;
30020 if (serio->parent) {
30021 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/common.c linux-2.6.32.42/drivers/isdn/gigaset/common.c
30022 --- linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
30023 +++ linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
30024 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
30025 cs->commands_pending = 0;
30026 cs->cur_at_seq = 0;
30027 cs->gotfwver = -1;
30028 - cs->open_count = 0;
30029 + local_set(&cs->open_count, 0);
30030 cs->dev = NULL;
30031 cs->tty = NULL;
30032 cs->tty_dev = NULL;
30033 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h
30034 --- linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
30035 +++ linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
30036 @@ -34,6 +34,7 @@
30037 #include <linux/tty_driver.h>
30038 #include <linux/list.h>
30039 #include <asm/atomic.h>
30040 +#include <asm/local.h>
30041
30042 #define GIG_VERSION {0,5,0,0}
30043 #define GIG_COMPAT {0,4,0,0}
30044 @@ -446,7 +447,7 @@ struct cardstate {
30045 spinlock_t cmdlock;
30046 unsigned curlen, cmdbytes;
30047
30048 - unsigned open_count;
30049 + local_t open_count;
30050 struct tty_struct *tty;
30051 struct tasklet_struct if_wake_tasklet;
30052 unsigned control_state;
30053 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/interface.c linux-2.6.32.42/drivers/isdn/gigaset/interface.c
30054 --- linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
30055 +++ linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
30056 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
30057 return -ERESTARTSYS; // FIXME -EINTR?
30058 tty->driver_data = cs;
30059
30060 - ++cs->open_count;
30061 -
30062 - if (cs->open_count == 1) {
30063 + if (local_inc_return(&cs->open_count) == 1) {
30064 spin_lock_irqsave(&cs->lock, flags);
30065 cs->tty = tty;
30066 spin_unlock_irqrestore(&cs->lock, flags);
30067 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
30068
30069 if (!cs->connected)
30070 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30071 - else if (!cs->open_count)
30072 + else if (!local_read(&cs->open_count))
30073 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30074 else {
30075 - if (!--cs->open_count) {
30076 + if (!local_dec_return(&cs->open_count)) {
30077 spin_lock_irqsave(&cs->lock, flags);
30078 cs->tty = NULL;
30079 spin_unlock_irqrestore(&cs->lock, flags);
30080 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
30081 if (!cs->connected) {
30082 gig_dbg(DEBUG_IF, "not connected");
30083 retval = -ENODEV;
30084 - } else if (!cs->open_count)
30085 + } else if (!local_read(&cs->open_count))
30086 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30087 else {
30088 retval = 0;
30089 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
30090 if (!cs->connected) {
30091 gig_dbg(DEBUG_IF, "not connected");
30092 retval = -ENODEV;
30093 - } else if (!cs->open_count)
30094 + } else if (!local_read(&cs->open_count))
30095 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30096 else if (cs->mstate != MS_LOCKED) {
30097 dev_warn(cs->dev, "can't write to unlocked device\n");
30098 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
30099 if (!cs->connected) {
30100 gig_dbg(DEBUG_IF, "not connected");
30101 retval = -ENODEV;
30102 - } else if (!cs->open_count)
30103 + } else if (!local_read(&cs->open_count))
30104 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30105 else if (cs->mstate != MS_LOCKED) {
30106 dev_warn(cs->dev, "can't write to unlocked device\n");
30107 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
30108
30109 if (!cs->connected)
30110 gig_dbg(DEBUG_IF, "not connected");
30111 - else if (!cs->open_count)
30112 + else if (!local_read(&cs->open_count))
30113 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30114 else if (cs->mstate != MS_LOCKED)
30115 dev_warn(cs->dev, "can't write to unlocked device\n");
30116 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
30117
30118 if (!cs->connected)
30119 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30120 - else if (!cs->open_count)
30121 + else if (!local_read(&cs->open_count))
30122 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30123 else {
30124 //FIXME
30125 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
30126
30127 if (!cs->connected)
30128 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30129 - else if (!cs->open_count)
30130 + else if (!local_read(&cs->open_count))
30131 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30132 else {
30133 //FIXME
30134 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
30135 goto out;
30136 }
30137
30138 - if (!cs->open_count) {
30139 + if (!local_read(&cs->open_count)) {
30140 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30141 goto out;
30142 }
30143 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c
30144 --- linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
30145 +++ linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
30146 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
30147 }
30148 if (left) {
30149 if (t4file->user) {
30150 - if (copy_from_user(buf, dp, left))
30151 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30152 return -EFAULT;
30153 } else {
30154 memcpy(buf, dp, left);
30155 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
30156 }
30157 if (left) {
30158 if (config->user) {
30159 - if (copy_from_user(buf, dp, left))
30160 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30161 return -EFAULT;
30162 } else {
30163 memcpy(buf, dp, left);
30164 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c
30165 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
30166 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
30167 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
30168 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30169 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30170
30171 + pax_track_stack();
30172
30173 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30174 {
30175 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c
30176 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
30177 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
30178 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30179 IDI_SYNC_REQ req;
30180 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30181
30182 + pax_track_stack();
30183 +
30184 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30185
30186 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30187 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c
30188 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
30189 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
30190 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
30191 IDI_SYNC_REQ req;
30192 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30193
30194 + pax_track_stack();
30195 +
30196 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30197
30198 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30199 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c
30200 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
30201 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
30202 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
30203 IDI_SYNC_REQ req;
30204 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30205
30206 + pax_track_stack();
30207 +
30208 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30209
30210 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30211 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c
30212 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
30213 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
30214 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
30215 IDI_SYNC_REQ req;
30216 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30217
30218 + pax_track_stack();
30219 +
30220 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30221
30222 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30223 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c
30224 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
30225 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
30226 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
30227 dword d;
30228 word w;
30229
30230 + pax_track_stack();
30231 +
30232 a = plci->adapter;
30233 Id = ((word)plci->Id<<8)|a->Id;
30234 PUT_WORD(&SS_Ind[4],0x0000);
30235 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
30236 word j, n, w;
30237 dword d;
30238
30239 + pax_track_stack();
30240 +
30241
30242 for(i=0;i<8;i++) bp_parms[i].length = 0;
30243 for(i=0;i<2;i++) global_config[i].length = 0;
30244 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30245 const byte llc3[] = {4,3,2,2,6,6,0};
30246 const byte header[] = {0,2,3,3,0,0,0};
30247
30248 + pax_track_stack();
30249 +
30250 for(i=0;i<8;i++) bp_parms[i].length = 0;
30251 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30252 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30253 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30254 word appl_number_group_type[MAX_APPL];
30255 PLCI *auxplci;
30256
30257 + pax_track_stack();
30258 +
30259 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30260
30261 if(!a->group_optimization_enabled)
30262 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c
30263 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30264 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30265 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30266 IDI_SYNC_REQ req;
30267 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30268
30269 + pax_track_stack();
30270 +
30271 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30272
30273 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30274 diff -urNp linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c
30275 --- linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30276 +++ linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30277 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30278 } iocpar;
30279 void __user *argp = (void __user *)arg;
30280
30281 + pax_track_stack();
30282 +
30283 #define name iocpar.name
30284 #define bname iocpar.bname
30285 #define iocts iocpar.iocts
30286 diff -urNp linux-2.6.32.42/drivers/isdn/icn/icn.c linux-2.6.32.42/drivers/isdn/icn/icn.c
30287 --- linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30288 +++ linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30289 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30290 if (count > len)
30291 count = len;
30292 if (user) {
30293 - if (copy_from_user(msg, buf, count))
30294 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30295 return -EFAULT;
30296 } else
30297 memcpy(msg, buf, count);
30298 diff -urNp linux-2.6.32.42/drivers/isdn/mISDN/socket.c linux-2.6.32.42/drivers/isdn/mISDN/socket.c
30299 --- linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30300 +++ linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30301 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30302 if (dev) {
30303 struct mISDN_devinfo di;
30304
30305 + memset(&di, 0, sizeof(di));
30306 di.id = dev->id;
30307 di.Dprotocols = dev->Dprotocols;
30308 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30309 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30310 if (dev) {
30311 struct mISDN_devinfo di;
30312
30313 + memset(&di, 0, sizeof(di));
30314 di.id = dev->id;
30315 di.Dprotocols = dev->Dprotocols;
30316 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30317 diff -urNp linux-2.6.32.42/drivers/isdn/sc/interrupt.c linux-2.6.32.42/drivers/isdn/sc/interrupt.c
30318 --- linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30319 +++ linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30320 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30321 }
30322 else if(callid>=0x0000 && callid<=0x7FFF)
30323 {
30324 + int len;
30325 +
30326 pr_debug("%s: Got Incoming Call\n",
30327 sc_adapter[card]->devicename);
30328 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30329 - strcpy(setup.eazmsn,
30330 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30331 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30332 + sizeof(setup.phone));
30333 + if (len >= sizeof(setup.phone))
30334 + continue;
30335 + len = strlcpy(setup.eazmsn,
30336 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30337 + sizeof(setup.eazmsn));
30338 + if (len >= sizeof(setup.eazmsn))
30339 + continue;
30340 setup.si1 = 7;
30341 setup.si2 = 0;
30342 setup.plan = 0;
30343 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30344 * Handle a GetMyNumber Rsp
30345 */
30346 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30347 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30348 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30349 + rcvmsg.msg_data.byte_array,
30350 + sizeof(rcvmsg.msg_data.byte_array));
30351 continue;
30352 }
30353
30354 diff -urNp linux-2.6.32.42/drivers/lguest/core.c linux-2.6.32.42/drivers/lguest/core.c
30355 --- linux-2.6.32.42/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30356 +++ linux-2.6.32.42/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30357 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30358 * it's worked so far. The end address needs +1 because __get_vm_area
30359 * allocates an extra guard page, so we need space for that.
30360 */
30361 +
30362 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30363 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30364 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30365 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30366 +#else
30367 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30368 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30369 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30370 +#endif
30371 +
30372 if (!switcher_vma) {
30373 err = -ENOMEM;
30374 printk("lguest: could not map switcher pages high\n");
30375 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30376 * Now the Switcher is mapped at the right address, we can't fail!
30377 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30378 */
30379 - memcpy(switcher_vma->addr, start_switcher_text,
30380 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30381 end_switcher_text - start_switcher_text);
30382
30383 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30384 diff -urNp linux-2.6.32.42/drivers/lguest/x86/core.c linux-2.6.32.42/drivers/lguest/x86/core.c
30385 --- linux-2.6.32.42/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30386 +++ linux-2.6.32.42/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30387 @@ -59,7 +59,7 @@ static struct {
30388 /* Offset from where switcher.S was compiled to where we've copied it */
30389 static unsigned long switcher_offset(void)
30390 {
30391 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30392 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30393 }
30394
30395 /* This cpu's struct lguest_pages. */
30396 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30397 * These copies are pretty cheap, so we do them unconditionally: */
30398 /* Save the current Host top-level page directory.
30399 */
30400 +
30401 +#ifdef CONFIG_PAX_PER_CPU_PGD
30402 + pages->state.host_cr3 = read_cr3();
30403 +#else
30404 pages->state.host_cr3 = __pa(current->mm->pgd);
30405 +#endif
30406 +
30407 /*
30408 * Set up the Guest's page tables to see this CPU's pages (and no
30409 * other CPU's pages).
30410 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30411 * compiled-in switcher code and the high-mapped copy we just made.
30412 */
30413 for (i = 0; i < IDT_ENTRIES; i++)
30414 - default_idt_entries[i] += switcher_offset();
30415 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30416
30417 /*
30418 * Set up the Switcher's per-cpu areas.
30419 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30420 * it will be undisturbed when we switch. To change %cs and jump we
30421 * need this structure to feed to Intel's "lcall" instruction.
30422 */
30423 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30424 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30425 lguest_entry.segment = LGUEST_CS;
30426
30427 /*
30428 diff -urNp linux-2.6.32.42/drivers/lguest/x86/switcher_32.S linux-2.6.32.42/drivers/lguest/x86/switcher_32.S
30429 --- linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30430 +++ linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30431 @@ -87,6 +87,7 @@
30432 #include <asm/page.h>
30433 #include <asm/segment.h>
30434 #include <asm/lguest.h>
30435 +#include <asm/processor-flags.h>
30436
30437 // We mark the start of the code to copy
30438 // It's placed in .text tho it's never run here
30439 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30440 // Changes type when we load it: damn Intel!
30441 // For after we switch over our page tables
30442 // That entry will be read-only: we'd crash.
30443 +
30444 +#ifdef CONFIG_PAX_KERNEXEC
30445 + mov %cr0, %edx
30446 + xor $X86_CR0_WP, %edx
30447 + mov %edx, %cr0
30448 +#endif
30449 +
30450 movl $(GDT_ENTRY_TSS*8), %edx
30451 ltr %dx
30452
30453 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30454 // Let's clear it again for our return.
30455 // The GDT descriptor of the Host
30456 // Points to the table after two "size" bytes
30457 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30458 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30459 // Clear "used" from type field (byte 5, bit 2)
30460 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30461 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30462 +
30463 +#ifdef CONFIG_PAX_KERNEXEC
30464 + mov %cr0, %eax
30465 + xor $X86_CR0_WP, %eax
30466 + mov %eax, %cr0
30467 +#endif
30468
30469 // Once our page table's switched, the Guest is live!
30470 // The Host fades as we run this final step.
30471 @@ -295,13 +309,12 @@ deliver_to_host:
30472 // I consulted gcc, and it gave
30473 // These instructions, which I gladly credit:
30474 leal (%edx,%ebx,8), %eax
30475 - movzwl (%eax),%edx
30476 - movl 4(%eax), %eax
30477 - xorw %ax, %ax
30478 - orl %eax, %edx
30479 + movl 4(%eax), %edx
30480 + movw (%eax), %dx
30481 // Now the address of the handler's in %edx
30482 // We call it now: its "iret" drops us home.
30483 - jmp *%edx
30484 + ljmp $__KERNEL_CS, $1f
30485 +1: jmp *%edx
30486
30487 // Every interrupt can come to us here
30488 // But we must truly tell each apart.
30489 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c
30490 --- linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30491 +++ linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30492 @@ -15,7 +15,7 @@
30493
30494 #define MAX_PMU_LEVEL 0xFF
30495
30496 -static struct backlight_ops pmu_backlight_data;
30497 +static const struct backlight_ops pmu_backlight_data;
30498 static DEFINE_SPINLOCK(pmu_backlight_lock);
30499 static int sleeping, uses_pmu_bl;
30500 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30501 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30502 return bd->props.brightness;
30503 }
30504
30505 -static struct backlight_ops pmu_backlight_data = {
30506 +static const struct backlight_ops pmu_backlight_data = {
30507 .get_brightness = pmu_backlight_get_brightness,
30508 .update_status = pmu_backlight_update_status,
30509
30510 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu.c linux-2.6.32.42/drivers/macintosh/via-pmu.c
30511 --- linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30512 +++ linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30513 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30514 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30515 }
30516
30517 -static struct platform_suspend_ops pmu_pm_ops = {
30518 +static const struct platform_suspend_ops pmu_pm_ops = {
30519 .enter = powerbook_sleep,
30520 .valid = pmu_sleep_valid,
30521 };
30522 diff -urNp linux-2.6.32.42/drivers/md/dm.c linux-2.6.32.42/drivers/md/dm.c
30523 --- linux-2.6.32.42/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30524 +++ linux-2.6.32.42/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30525 @@ -163,9 +163,9 @@ struct mapped_device {
30526 /*
30527 * Event handling.
30528 */
30529 - atomic_t event_nr;
30530 + atomic_unchecked_t event_nr;
30531 wait_queue_head_t eventq;
30532 - atomic_t uevent_seq;
30533 + atomic_unchecked_t uevent_seq;
30534 struct list_head uevent_list;
30535 spinlock_t uevent_lock; /* Protect access to uevent_list */
30536
30537 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30538 rwlock_init(&md->map_lock);
30539 atomic_set(&md->holders, 1);
30540 atomic_set(&md->open_count, 0);
30541 - atomic_set(&md->event_nr, 0);
30542 - atomic_set(&md->uevent_seq, 0);
30543 + atomic_set_unchecked(&md->event_nr, 0);
30544 + atomic_set_unchecked(&md->uevent_seq, 0);
30545 INIT_LIST_HEAD(&md->uevent_list);
30546 spin_lock_init(&md->uevent_lock);
30547
30548 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
30549
30550 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30551
30552 - atomic_inc(&md->event_nr);
30553 + atomic_inc_unchecked(&md->event_nr);
30554 wake_up(&md->eventq);
30555 }
30556
30557 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30558
30559 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30560 {
30561 - return atomic_add_return(1, &md->uevent_seq);
30562 + return atomic_add_return_unchecked(1, &md->uevent_seq);
30563 }
30564
30565 uint32_t dm_get_event_nr(struct mapped_device *md)
30566 {
30567 - return atomic_read(&md->event_nr);
30568 + return atomic_read_unchecked(&md->event_nr);
30569 }
30570
30571 int dm_wait_event(struct mapped_device *md, int event_nr)
30572 {
30573 return wait_event_interruptible(md->eventq,
30574 - (event_nr != atomic_read(&md->event_nr)));
30575 + (event_nr != atomic_read_unchecked(&md->event_nr)));
30576 }
30577
30578 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30579 diff -urNp linux-2.6.32.42/drivers/md/dm-ioctl.c linux-2.6.32.42/drivers/md/dm-ioctl.c
30580 --- linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30581 +++ linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30582 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30583 cmd == DM_LIST_VERSIONS_CMD)
30584 return 0;
30585
30586 - if ((cmd == DM_DEV_CREATE_CMD)) {
30587 + if (cmd == DM_DEV_CREATE_CMD) {
30588 if (!*param->name) {
30589 DMWARN("name not supplied when creating device");
30590 return -EINVAL;
30591 diff -urNp linux-2.6.32.42/drivers/md/dm-raid1.c linux-2.6.32.42/drivers/md/dm-raid1.c
30592 --- linux-2.6.32.42/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30593 +++ linux-2.6.32.42/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30594 @@ -41,7 +41,7 @@ enum dm_raid1_error {
30595
30596 struct mirror {
30597 struct mirror_set *ms;
30598 - atomic_t error_count;
30599 + atomic_unchecked_t error_count;
30600 unsigned long error_type;
30601 struct dm_dev *dev;
30602 sector_t offset;
30603 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30604 * simple way to tell if a device has encountered
30605 * errors.
30606 */
30607 - atomic_inc(&m->error_count);
30608 + atomic_inc_unchecked(&m->error_count);
30609
30610 if (test_and_set_bit(error_type, &m->error_type))
30611 return;
30612 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30613 }
30614
30615 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30616 - if (!atomic_read(&new->error_count)) {
30617 + if (!atomic_read_unchecked(&new->error_count)) {
30618 set_default_mirror(new);
30619 break;
30620 }
30621 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30622 struct mirror *m = get_default_mirror(ms);
30623
30624 do {
30625 - if (likely(!atomic_read(&m->error_count)))
30626 + if (likely(!atomic_read_unchecked(&m->error_count)))
30627 return m;
30628
30629 if (m-- == ms->mirror)
30630 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30631 {
30632 struct mirror *default_mirror = get_default_mirror(m->ms);
30633
30634 - return !atomic_read(&default_mirror->error_count);
30635 + return !atomic_read_unchecked(&default_mirror->error_count);
30636 }
30637
30638 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30639 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
30640 */
30641 if (likely(region_in_sync(ms, region, 1)))
30642 m = choose_mirror(ms, bio->bi_sector);
30643 - else if (m && atomic_read(&m->error_count))
30644 + else if (m && atomic_read_unchecked(&m->error_count))
30645 m = NULL;
30646
30647 if (likely(m))
30648 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
30649 }
30650
30651 ms->mirror[mirror].ms = ms;
30652 - atomic_set(&(ms->mirror[mirror].error_count), 0);
30653 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30654 ms->mirror[mirror].error_type = 0;
30655 ms->mirror[mirror].offset = offset;
30656
30657 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
30658 */
30659 static char device_status_char(struct mirror *m)
30660 {
30661 - if (!atomic_read(&(m->error_count)))
30662 + if (!atomic_read_unchecked(&(m->error_count)))
30663 return 'A';
30664
30665 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
30666 diff -urNp linux-2.6.32.42/drivers/md/dm-stripe.c linux-2.6.32.42/drivers/md/dm-stripe.c
30667 --- linux-2.6.32.42/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
30668 +++ linux-2.6.32.42/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
30669 @@ -20,7 +20,7 @@ struct stripe {
30670 struct dm_dev *dev;
30671 sector_t physical_start;
30672
30673 - atomic_t error_count;
30674 + atomic_unchecked_t error_count;
30675 };
30676
30677 struct stripe_c {
30678 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
30679 kfree(sc);
30680 return r;
30681 }
30682 - atomic_set(&(sc->stripe[i].error_count), 0);
30683 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30684 }
30685
30686 ti->private = sc;
30687 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
30688 DMEMIT("%d ", sc->stripes);
30689 for (i = 0; i < sc->stripes; i++) {
30690 DMEMIT("%s ", sc->stripe[i].dev->name);
30691 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30692 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30693 'D' : 'A';
30694 }
30695 buffer[i] = '\0';
30696 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
30697 */
30698 for (i = 0; i < sc->stripes; i++)
30699 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30700 - atomic_inc(&(sc->stripe[i].error_count));
30701 - if (atomic_read(&(sc->stripe[i].error_count)) <
30702 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
30703 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30704 DM_IO_ERROR_THRESHOLD)
30705 queue_work(kstriped, &sc->kstriped_ws);
30706 }
30707 diff -urNp linux-2.6.32.42/drivers/md/dm-sysfs.c linux-2.6.32.42/drivers/md/dm-sysfs.c
30708 --- linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
30709 +++ linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
30710 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
30711 NULL,
30712 };
30713
30714 -static struct sysfs_ops dm_sysfs_ops = {
30715 +static const struct sysfs_ops dm_sysfs_ops = {
30716 .show = dm_attr_show,
30717 };
30718
30719 diff -urNp linux-2.6.32.42/drivers/md/dm-table.c linux-2.6.32.42/drivers/md/dm-table.c
30720 --- linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
30721 +++ linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
30722 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
30723 if (!dev_size)
30724 return 0;
30725
30726 - if ((start >= dev_size) || (start + len > dev_size)) {
30727 + if ((start >= dev_size) || (len > dev_size - start)) {
30728 DMWARN("%s: %s too small for target: "
30729 "start=%llu, len=%llu, dev_size=%llu",
30730 dm_device_name(ti->table->md), bdevname(bdev, b),
30731 diff -urNp linux-2.6.32.42/drivers/md/md.c linux-2.6.32.42/drivers/md/md.c
30732 --- linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:55:34.000000000 -0400
30733 +++ linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:56:37.000000000 -0400
30734 @@ -153,10 +153,10 @@ static int start_readonly;
30735 * start build, activate spare
30736 */
30737 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30738 -static atomic_t md_event_count;
30739 +static atomic_unchecked_t md_event_count;
30740 void md_new_event(mddev_t *mddev)
30741 {
30742 - atomic_inc(&md_event_count);
30743 + atomic_inc_unchecked(&md_event_count);
30744 wake_up(&md_event_waiters);
30745 }
30746 EXPORT_SYMBOL_GPL(md_new_event);
30747 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30748 */
30749 static void md_new_event_inintr(mddev_t *mddev)
30750 {
30751 - atomic_inc(&md_event_count);
30752 + atomic_inc_unchecked(&md_event_count);
30753 wake_up(&md_event_waiters);
30754 }
30755
30756 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
30757
30758 rdev->preferred_minor = 0xffff;
30759 rdev->data_offset = le64_to_cpu(sb->data_offset);
30760 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30761 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30762
30763 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30764 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30765 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
30766 else
30767 sb->resync_offset = cpu_to_le64(0);
30768
30769 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30770 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30771
30772 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30773 sb->size = cpu_to_le64(mddev->dev_sectors);
30774 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30775 static ssize_t
30776 errors_show(mdk_rdev_t *rdev, char *page)
30777 {
30778 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30779 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30780 }
30781
30782 static ssize_t
30783 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30784 char *e;
30785 unsigned long n = simple_strtoul(buf, &e, 10);
30786 if (*buf && (*e == 0 || *e == '\n')) {
30787 - atomic_set(&rdev->corrected_errors, n);
30788 + atomic_set_unchecked(&rdev->corrected_errors, n);
30789 return len;
30790 }
30791 return -EINVAL;
30792 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
30793 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
30794 kfree(rdev);
30795 }
30796 -static struct sysfs_ops rdev_sysfs_ops = {
30797 +static const struct sysfs_ops rdev_sysfs_ops = {
30798 .show = rdev_attr_show,
30799 .store = rdev_attr_store,
30800 };
30801 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
30802 rdev->data_offset = 0;
30803 rdev->sb_events = 0;
30804 atomic_set(&rdev->nr_pending, 0);
30805 - atomic_set(&rdev->read_errors, 0);
30806 - atomic_set(&rdev->corrected_errors, 0);
30807 + atomic_set_unchecked(&rdev->read_errors, 0);
30808 + atomic_set_unchecked(&rdev->corrected_errors, 0);
30809
30810 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
30811 if (!size) {
30812 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
30813 kfree(mddev);
30814 }
30815
30816 -static struct sysfs_ops md_sysfs_ops = {
30817 +static const struct sysfs_ops md_sysfs_ops = {
30818 .show = md_attr_show,
30819 .store = md_attr_store,
30820 };
30821 @@ -4474,7 +4474,8 @@ out:
30822 err = 0;
30823 blk_integrity_unregister(disk);
30824 md_new_event(mddev);
30825 - sysfs_notify_dirent(mddev->sysfs_state);
30826 + if (mddev->sysfs_state)
30827 + sysfs_notify_dirent(mddev->sysfs_state);
30828 return err;
30829 }
30830
30831 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
30832
30833 spin_unlock(&pers_lock);
30834 seq_printf(seq, "\n");
30835 - mi->event = atomic_read(&md_event_count);
30836 + mi->event = atomic_read_unchecked(&md_event_count);
30837 return 0;
30838 }
30839 if (v == (void*)2) {
30840 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
30841 chunk_kb ? "KB" : "B");
30842 if (bitmap->file) {
30843 seq_printf(seq, ", file: ");
30844 - seq_path(seq, &bitmap->file->f_path, " \t\n");
30845 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30846 }
30847
30848 seq_printf(seq, "\n");
30849 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
30850 else {
30851 struct seq_file *p = file->private_data;
30852 p->private = mi;
30853 - mi->event = atomic_read(&md_event_count);
30854 + mi->event = atomic_read_unchecked(&md_event_count);
30855 }
30856 return error;
30857 }
30858 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
30859 /* always allow read */
30860 mask = POLLIN | POLLRDNORM;
30861
30862 - if (mi->event != atomic_read(&md_event_count))
30863 + if (mi->event != atomic_read_unchecked(&md_event_count))
30864 mask |= POLLERR | POLLPRI;
30865 return mask;
30866 }
30867 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
30868 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30869 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30870 (int)part_stat_read(&disk->part0, sectors[1]) -
30871 - atomic_read(&disk->sync_io);
30872 + atomic_read_unchecked(&disk->sync_io);
30873 /* sync IO will cause sync_io to increase before the disk_stats
30874 * as sync_io is counted when a request starts, and
30875 * disk_stats is counted when it completes.
30876 diff -urNp linux-2.6.32.42/drivers/md/md.h linux-2.6.32.42/drivers/md/md.h
30877 --- linux-2.6.32.42/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
30878 +++ linux-2.6.32.42/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
30879 @@ -94,10 +94,10 @@ struct mdk_rdev_s
30880 * only maintained for arrays that
30881 * support hot removal
30882 */
30883 - atomic_t read_errors; /* number of consecutive read errors that
30884 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
30885 * we have tried to ignore.
30886 */
30887 - atomic_t corrected_errors; /* number of corrected read errors,
30888 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30889 * for reporting to userspace and storing
30890 * in superblock.
30891 */
30892 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
30893
30894 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30895 {
30896 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30897 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30898 }
30899
30900 struct mdk_personality
30901 diff -urNp linux-2.6.32.42/drivers/md/raid10.c linux-2.6.32.42/drivers/md/raid10.c
30902 --- linux-2.6.32.42/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
30903 +++ linux-2.6.32.42/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
30904 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
30905 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
30906 set_bit(R10BIO_Uptodate, &r10_bio->state);
30907 else {
30908 - atomic_add(r10_bio->sectors,
30909 + atomic_add_unchecked(r10_bio->sectors,
30910 &conf->mirrors[d].rdev->corrected_errors);
30911 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
30912 md_error(r10_bio->mddev,
30913 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
30914 test_bit(In_sync, &rdev->flags)) {
30915 atomic_inc(&rdev->nr_pending);
30916 rcu_read_unlock();
30917 - atomic_add(s, &rdev->corrected_errors);
30918 + atomic_add_unchecked(s, &rdev->corrected_errors);
30919 if (sync_page_io(rdev->bdev,
30920 r10_bio->devs[sl].addr +
30921 sect + rdev->data_offset,
30922 diff -urNp linux-2.6.32.42/drivers/md/raid1.c linux-2.6.32.42/drivers/md/raid1.c
30923 --- linux-2.6.32.42/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
30924 +++ linux-2.6.32.42/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
30925 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
30926 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
30927 continue;
30928 rdev = conf->mirrors[d].rdev;
30929 - atomic_add(s, &rdev->corrected_errors);
30930 + atomic_add_unchecked(s, &rdev->corrected_errors);
30931 if (sync_page_io(rdev->bdev,
30932 sect + rdev->data_offset,
30933 s<<9,
30934 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
30935 /* Well, this device is dead */
30936 md_error(mddev, rdev);
30937 else {
30938 - atomic_add(s, &rdev->corrected_errors);
30939 + atomic_add_unchecked(s, &rdev->corrected_errors);
30940 printk(KERN_INFO
30941 "raid1:%s: read error corrected "
30942 "(%d sectors at %llu on %s)\n",
30943 diff -urNp linux-2.6.32.42/drivers/md/raid5.c linux-2.6.32.42/drivers/md/raid5.c
30944 --- linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
30945 +++ linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
30946 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
30947 bi->bi_next = NULL;
30948 if ((rw & WRITE) &&
30949 test_bit(R5_ReWrite, &sh->dev[i].flags))
30950 - atomic_add(STRIPE_SECTORS,
30951 + atomic_add_unchecked(STRIPE_SECTORS,
30952 &rdev->corrected_errors);
30953 generic_make_request(bi);
30954 } else {
30955 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
30956 clear_bit(R5_ReadError, &sh->dev[i].flags);
30957 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30958 }
30959 - if (atomic_read(&conf->disks[i].rdev->read_errors))
30960 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
30961 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30962 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30963 } else {
30964 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30965 int retry = 0;
30966 rdev = conf->disks[i].rdev;
30967
30968 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30969 - atomic_inc(&rdev->read_errors);
30970 + atomic_inc_unchecked(&rdev->read_errors);
30971 if (conf->mddev->degraded >= conf->max_degraded)
30972 printk_rl(KERN_WARNING
30973 "raid5:%s: read error not correctable "
30974 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
30975 (unsigned long long)(sh->sector
30976 + rdev->data_offset),
30977 bdn);
30978 - else if (atomic_read(&rdev->read_errors)
30979 + else if (atomic_read_unchecked(&rdev->read_errors)
30980 > conf->max_nr_stripes)
30981 printk(KERN_WARNING
30982 "raid5:%s: Too many read errors, failing device %s.\n",
30983 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
30984 sector_t r_sector;
30985 struct stripe_head sh2;
30986
30987 + pax_track_stack();
30988
30989 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30990 stripe = new_sector;
30991 diff -urNp linux-2.6.32.42/drivers/media/common/saa7146_hlp.c linux-2.6.32.42/drivers/media/common/saa7146_hlp.c
30992 --- linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
30993 +++ linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
30994 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
30995
30996 int x[32], y[32], w[32], h[32];
30997
30998 + pax_track_stack();
30999 +
31000 /* clear out memory */
31001 memset(&line_list[0], 0x00, sizeof(u32)*32);
31002 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31003 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31004 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
31005 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
31006 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
31007 u8 buf[HOST_LINK_BUF_SIZE];
31008 int i;
31009
31010 + pax_track_stack();
31011 +
31012 dprintk("%s\n", __func__);
31013
31014 /* check if we have space for a link buf in the rx_buffer */
31015 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
31016 unsigned long timeout;
31017 int written;
31018
31019 + pax_track_stack();
31020 +
31021 dprintk("%s\n", __func__);
31022
31023 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31024 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c
31025 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
31026 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
31027 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
31028 const struct dvb_device *template, void *priv, int type)
31029 {
31030 struct dvb_device *dvbdev;
31031 + /* cannot be const */
31032 struct file_operations *dvbdevfops;
31033 struct device *clsdev;
31034 int minor;
31035 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c
31036 --- linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
31037 +++ linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
31038 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
31039
31040 u8 buf[260];
31041
31042 + pax_track_stack();
31043 +
31044 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31045 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
31046
31047 diff -urNp linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c
31048 --- linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
31049 +++ linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
31050 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
31051 u8 tudata[585];
31052 int i;
31053
31054 + pax_track_stack();
31055 +
31056 dprintk("Firmware is %zd bytes\n",fw->size);
31057
31058 /* Get eprom data */
31059 diff -urNp linux-2.6.32.42/drivers/media/radio/radio-cadet.c linux-2.6.32.42/drivers/media/radio/radio-cadet.c
31060 --- linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
31061 +++ linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
31062 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
31063 while (i < count && dev->rdsin != dev->rdsout)
31064 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
31065
31066 - if (copy_to_user(data, readbuf, i))
31067 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
31068 return -EFAULT;
31069 return i;
31070 }
31071 diff -urNp linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c
31072 --- linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
31073 +++ linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
31074 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
31075
31076 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
31077
31078 -static atomic_t cx18_instance = ATOMIC_INIT(0);
31079 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
31080
31081 /* Parameter declarations */
31082 static int cardtype[CX18_MAX_CARDS];
31083 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
31084 struct i2c_client c;
31085 u8 eedata[256];
31086
31087 + pax_track_stack();
31088 +
31089 memset(&c, 0, sizeof(c));
31090 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31091 c.adapter = &cx->i2c_adap[0];
31092 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
31093 struct cx18 *cx;
31094
31095 /* FIXME - module parameter arrays constrain max instances */
31096 - i = atomic_inc_return(&cx18_instance) - 1;
31097 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
31098 if (i >= CX18_MAX_CARDS) {
31099 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
31100 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
31101 diff -urNp linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c
31102 --- linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
31103 +++ linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
31104 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
31105 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
31106
31107 /* ivtv instance counter */
31108 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
31109 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
31110
31111 /* Parameter declarations */
31112 static int cardtype[IVTV_MAX_CARDS];
31113 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.c linux-2.6.32.42/drivers/media/video/omap24xxcam.c
31114 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
31115 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
31116 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
31117 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
31118
31119 do_gettimeofday(&vb->ts);
31120 - vb->field_count = atomic_add_return(2, &fh->field_count);
31121 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
31122 if (csr & csr_error) {
31123 vb->state = VIDEOBUF_ERROR;
31124 if (!atomic_read(&fh->cam->in_reset)) {
31125 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.h linux-2.6.32.42/drivers/media/video/omap24xxcam.h
31126 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
31127 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
31128 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
31129 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
31130 struct videobuf_queue vbq;
31131 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
31132 - atomic_t field_count; /* field counter for videobuf_buffer */
31133 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
31134 /* accessing cam here doesn't need serialisation: it's constant */
31135 struct omap24xxcam_device *cam;
31136 };
31137 diff -urNp linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31138 --- linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
31139 +++ linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
31140 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
31141 u8 *eeprom;
31142 struct tveeprom tvdata;
31143
31144 + pax_track_stack();
31145 +
31146 memset(&tvdata,0,sizeof(tvdata));
31147
31148 eeprom = pvr2_eeprom_fetch(hdw);
31149 diff -urNp linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c
31150 --- linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
31151 +++ linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
31152 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
31153 unsigned char localPAT[256];
31154 unsigned char localPMT[256];
31155
31156 + pax_track_stack();
31157 +
31158 /* Set video format - must be done first as it resets other settings */
31159 set_reg8(client, 0x41, h->video_format);
31160
31161 diff -urNp linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c
31162 --- linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
31163 +++ linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
31164 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
31165 wait_queue_head_t *q = 0;
31166 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31167
31168 + pax_track_stack();
31169 +
31170 /* While any outstand message on the bus exists... */
31171 do {
31172
31173 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
31174 u8 tmp[512];
31175 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31176
31177 + pax_track_stack();
31178 +
31179 while (loop) {
31180
31181 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
31182 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c
31183 --- linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
31184 +++ linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
31185 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
31186 int error;
31187
31188 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31189 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31190 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31191
31192 cam->input = input_dev = input_allocate_device();
31193 if (!input_dev) {
31194 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c
31195 --- linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
31196 +++ linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
31197 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
31198 int error;
31199
31200 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31201 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31202 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31203
31204 cam->input = input_dev = input_allocate_device();
31205 if (!input_dev) {
31206 diff -urNp linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c
31207 --- linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
31208 +++ linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
31209 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
31210 unsigned char rv, gv, bv;
31211 static unsigned char *Y, *U, *V;
31212
31213 + pax_track_stack();
31214 +
31215 frame = usbvision->curFrame;
31216 imageSize = frame->frmwidth * frame->frmheight;
31217 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31218 diff -urNp linux-2.6.32.42/drivers/media/video/v4l2-device.c linux-2.6.32.42/drivers/media/video/v4l2-device.c
31219 --- linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
31220 +++ linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
31221 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
31222 EXPORT_SYMBOL_GPL(v4l2_device_register);
31223
31224 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
31225 - atomic_t *instance)
31226 + atomic_unchecked_t *instance)
31227 {
31228 - int num = atomic_inc_return(instance) - 1;
31229 + int num = atomic_inc_return_unchecked(instance) - 1;
31230 int len = strlen(basename);
31231
31232 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
31233 diff -urNp linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c
31234 --- linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
31235 +++ linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
31236 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
31237 {
31238 struct videobuf_queue q;
31239
31240 + pax_track_stack();
31241 +
31242 /* Required to make generic handler to call __videobuf_alloc */
31243 q.int_ops = &sg_ops;
31244
31245 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptbase.c linux-2.6.32.42/drivers/message/fusion/mptbase.c
31246 --- linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31247 +++ linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31248 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31249 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31250 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31251
31252 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31253 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31254 + NULL, NULL);
31255 +#else
31256 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31257 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31258 +#endif
31259 +
31260 /*
31261 * Rounding UP to nearest 4-kB boundary here...
31262 */
31263 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptsas.c linux-2.6.32.42/drivers/message/fusion/mptsas.c
31264 --- linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31265 +++ linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31266 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31267 return 0;
31268 }
31269
31270 +static inline void
31271 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31272 +{
31273 + if (phy_info->port_details) {
31274 + phy_info->port_details->rphy = rphy;
31275 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31276 + ioc->name, rphy));
31277 + }
31278 +
31279 + if (rphy) {
31280 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31281 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31282 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31283 + ioc->name, rphy, rphy->dev.release));
31284 + }
31285 +}
31286 +
31287 /* no mutex */
31288 static void
31289 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31290 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31291 return NULL;
31292 }
31293
31294 -static inline void
31295 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31296 -{
31297 - if (phy_info->port_details) {
31298 - phy_info->port_details->rphy = rphy;
31299 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31300 - ioc->name, rphy));
31301 - }
31302 -
31303 - if (rphy) {
31304 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31305 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31306 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31307 - ioc->name, rphy, rphy->dev.release));
31308 - }
31309 -}
31310 -
31311 static inline struct sas_port *
31312 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31313 {
31314 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptscsih.c linux-2.6.32.42/drivers/message/fusion/mptscsih.c
31315 --- linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31316 +++ linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31317 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31318
31319 h = shost_priv(SChost);
31320
31321 - if (h) {
31322 - if (h->info_kbuf == NULL)
31323 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31324 - return h->info_kbuf;
31325 - h->info_kbuf[0] = '\0';
31326 + if (!h)
31327 + return NULL;
31328
31329 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31330 - h->info_kbuf[size-1] = '\0';
31331 - }
31332 + if (h->info_kbuf == NULL)
31333 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31334 + return h->info_kbuf;
31335 + h->info_kbuf[0] = '\0';
31336 +
31337 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31338 + h->info_kbuf[size-1] = '\0';
31339
31340 return h->info_kbuf;
31341 }
31342 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_config.c linux-2.6.32.42/drivers/message/i2o/i2o_config.c
31343 --- linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31344 +++ linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31345 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31346 struct i2o_message *msg;
31347 unsigned int iop;
31348
31349 + pax_track_stack();
31350 +
31351 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31352 return -EFAULT;
31353
31354 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_proc.c linux-2.6.32.42/drivers/message/i2o/i2o_proc.c
31355 --- linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31356 +++ linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31357 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31358 "Array Controller Device"
31359 };
31360
31361 -static char *chtostr(u8 * chars, int n)
31362 -{
31363 - char tmp[256];
31364 - tmp[0] = 0;
31365 - return strncat(tmp, (char *)chars, n);
31366 -}
31367 -
31368 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31369 char *group)
31370 {
31371 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31372
31373 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31374 seq_printf(seq, "%-#8x", ddm_table.module_id);
31375 - seq_printf(seq, "%-29s",
31376 - chtostr(ddm_table.module_name_version, 28));
31377 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31378 seq_printf(seq, "%9d ", ddm_table.data_size);
31379 seq_printf(seq, "%8d", ddm_table.code_size);
31380
31381 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31382
31383 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31384 seq_printf(seq, "%-#8x", dst->module_id);
31385 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31386 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31387 + seq_printf(seq, "%-.28s", dst->module_name_version);
31388 + seq_printf(seq, "%-.8s", dst->date);
31389 seq_printf(seq, "%8d ", dst->module_size);
31390 seq_printf(seq, "%8d ", dst->mpb_size);
31391 seq_printf(seq, "0x%04x", dst->module_flags);
31392 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31393 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31394 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31395 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31396 - seq_printf(seq, "Vendor info : %s\n",
31397 - chtostr((u8 *) (work32 + 2), 16));
31398 - seq_printf(seq, "Product info : %s\n",
31399 - chtostr((u8 *) (work32 + 6), 16));
31400 - seq_printf(seq, "Description : %s\n",
31401 - chtostr((u8 *) (work32 + 10), 16));
31402 - seq_printf(seq, "Product rev. : %s\n",
31403 - chtostr((u8 *) (work32 + 14), 8));
31404 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31405 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31406 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31407 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31408
31409 seq_printf(seq, "Serial number : ");
31410 print_serial_number(seq, (u8 *) (work32 + 16),
31411 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31412 }
31413
31414 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31415 - seq_printf(seq, "Module name : %s\n",
31416 - chtostr(result.module_name, 24));
31417 - seq_printf(seq, "Module revision : %s\n",
31418 - chtostr(result.module_rev, 8));
31419 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31420 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31421
31422 seq_printf(seq, "Serial number : ");
31423 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31424 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31425 return 0;
31426 }
31427
31428 - seq_printf(seq, "Device name : %s\n",
31429 - chtostr(result.device_name, 64));
31430 - seq_printf(seq, "Service name : %s\n",
31431 - chtostr(result.service_name, 64));
31432 - seq_printf(seq, "Physical name : %s\n",
31433 - chtostr(result.physical_location, 64));
31434 - seq_printf(seq, "Instance number : %s\n",
31435 - chtostr(result.instance_number, 4));
31436 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31437 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31438 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31439 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31440
31441 return 0;
31442 }
31443 diff -urNp linux-2.6.32.42/drivers/message/i2o/iop.c linux-2.6.32.42/drivers/message/i2o/iop.c
31444 --- linux-2.6.32.42/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31445 +++ linux-2.6.32.42/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31446 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31447
31448 spin_lock_irqsave(&c->context_list_lock, flags);
31449
31450 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31451 - atomic_inc(&c->context_list_counter);
31452 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31453 + atomic_inc_unchecked(&c->context_list_counter);
31454
31455 - entry->context = atomic_read(&c->context_list_counter);
31456 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31457
31458 list_add(&entry->list, &c->context_list);
31459
31460 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31461
31462 #if BITS_PER_LONG == 64
31463 spin_lock_init(&c->context_list_lock);
31464 - atomic_set(&c->context_list_counter, 0);
31465 + atomic_set_unchecked(&c->context_list_counter, 0);
31466 INIT_LIST_HEAD(&c->context_list);
31467 #endif
31468
31469 diff -urNp linux-2.6.32.42/drivers/mfd/wm8350-i2c.c linux-2.6.32.42/drivers/mfd/wm8350-i2c.c
31470 --- linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31471 +++ linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31472 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31473 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31474 int ret;
31475
31476 + pax_track_stack();
31477 +
31478 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31479 return -EINVAL;
31480
31481 diff -urNp linux-2.6.32.42/drivers/misc/kgdbts.c linux-2.6.32.42/drivers/misc/kgdbts.c
31482 --- linux-2.6.32.42/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31483 +++ linux-2.6.32.42/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31484 @@ -118,7 +118,7 @@
31485 } while (0)
31486 #define MAX_CONFIG_LEN 40
31487
31488 -static struct kgdb_io kgdbts_io_ops;
31489 +static const struct kgdb_io kgdbts_io_ops;
31490 static char get_buf[BUFMAX];
31491 static int get_buf_cnt;
31492 static char put_buf[BUFMAX];
31493 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31494 module_put(THIS_MODULE);
31495 }
31496
31497 -static struct kgdb_io kgdbts_io_ops = {
31498 +static const struct kgdb_io kgdbts_io_ops = {
31499 .name = "kgdbts",
31500 .read_char = kgdbts_get_char,
31501 .write_char = kgdbts_put_char,
31502 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c
31503 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31504 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31505 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31506
31507 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31508 {
31509 - atomic_long_inc(&mcs_op_statistics[op].count);
31510 - atomic_long_add(clks, &mcs_op_statistics[op].total);
31511 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31512 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31513 if (mcs_op_statistics[op].max < clks)
31514 mcs_op_statistics[op].max = clks;
31515 }
31516 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c
31517 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31518 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31519 @@ -32,9 +32,9 @@
31520
31521 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31522
31523 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31524 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31525 {
31526 - unsigned long val = atomic_long_read(v);
31527 + unsigned long val = atomic_long_read_unchecked(v);
31528
31529 if (val)
31530 seq_printf(s, "%16lu %s\n", val, id);
31531 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31532 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31533
31534 for (op = 0; op < mcsop_last; op++) {
31535 - count = atomic_long_read(&mcs_op_statistics[op].count);
31536 - total = atomic_long_read(&mcs_op_statistics[op].total);
31537 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31538 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31539 max = mcs_op_statistics[op].max;
31540 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31541 count ? total / count : 0, max);
31542 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h
31543 --- linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31544 +++ linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31545 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31546 * GRU statistics.
31547 */
31548 struct gru_stats_s {
31549 - atomic_long_t vdata_alloc;
31550 - atomic_long_t vdata_free;
31551 - atomic_long_t gts_alloc;
31552 - atomic_long_t gts_free;
31553 - atomic_long_t vdata_double_alloc;
31554 - atomic_long_t gts_double_allocate;
31555 - atomic_long_t assign_context;
31556 - atomic_long_t assign_context_failed;
31557 - atomic_long_t free_context;
31558 - atomic_long_t load_user_context;
31559 - atomic_long_t load_kernel_context;
31560 - atomic_long_t lock_kernel_context;
31561 - atomic_long_t unlock_kernel_context;
31562 - atomic_long_t steal_user_context;
31563 - atomic_long_t steal_kernel_context;
31564 - atomic_long_t steal_context_failed;
31565 - atomic_long_t nopfn;
31566 - atomic_long_t break_cow;
31567 - atomic_long_t asid_new;
31568 - atomic_long_t asid_next;
31569 - atomic_long_t asid_wrap;
31570 - atomic_long_t asid_reuse;
31571 - atomic_long_t intr;
31572 - atomic_long_t intr_mm_lock_failed;
31573 - atomic_long_t call_os;
31574 - atomic_long_t call_os_offnode_reference;
31575 - atomic_long_t call_os_check_for_bug;
31576 - atomic_long_t call_os_wait_queue;
31577 - atomic_long_t user_flush_tlb;
31578 - atomic_long_t user_unload_context;
31579 - atomic_long_t user_exception;
31580 - atomic_long_t set_context_option;
31581 - atomic_long_t migrate_check;
31582 - atomic_long_t migrated_retarget;
31583 - atomic_long_t migrated_unload;
31584 - atomic_long_t migrated_unload_delay;
31585 - atomic_long_t migrated_nopfn_retarget;
31586 - atomic_long_t migrated_nopfn_unload;
31587 - atomic_long_t tlb_dropin;
31588 - atomic_long_t tlb_dropin_fail_no_asid;
31589 - atomic_long_t tlb_dropin_fail_upm;
31590 - atomic_long_t tlb_dropin_fail_invalid;
31591 - atomic_long_t tlb_dropin_fail_range_active;
31592 - atomic_long_t tlb_dropin_fail_idle;
31593 - atomic_long_t tlb_dropin_fail_fmm;
31594 - atomic_long_t tlb_dropin_fail_no_exception;
31595 - atomic_long_t tlb_dropin_fail_no_exception_war;
31596 - atomic_long_t tfh_stale_on_fault;
31597 - atomic_long_t mmu_invalidate_range;
31598 - atomic_long_t mmu_invalidate_page;
31599 - atomic_long_t mmu_clear_flush_young;
31600 - atomic_long_t flush_tlb;
31601 - atomic_long_t flush_tlb_gru;
31602 - atomic_long_t flush_tlb_gru_tgh;
31603 - atomic_long_t flush_tlb_gru_zero_asid;
31604 -
31605 - atomic_long_t copy_gpa;
31606 -
31607 - atomic_long_t mesq_receive;
31608 - atomic_long_t mesq_receive_none;
31609 - atomic_long_t mesq_send;
31610 - atomic_long_t mesq_send_failed;
31611 - atomic_long_t mesq_noop;
31612 - atomic_long_t mesq_send_unexpected_error;
31613 - atomic_long_t mesq_send_lb_overflow;
31614 - atomic_long_t mesq_send_qlimit_reached;
31615 - atomic_long_t mesq_send_amo_nacked;
31616 - atomic_long_t mesq_send_put_nacked;
31617 - atomic_long_t mesq_qf_not_full;
31618 - atomic_long_t mesq_qf_locked;
31619 - atomic_long_t mesq_qf_noop_not_full;
31620 - atomic_long_t mesq_qf_switch_head_failed;
31621 - atomic_long_t mesq_qf_unexpected_error;
31622 - atomic_long_t mesq_noop_unexpected_error;
31623 - atomic_long_t mesq_noop_lb_overflow;
31624 - atomic_long_t mesq_noop_qlimit_reached;
31625 - atomic_long_t mesq_noop_amo_nacked;
31626 - atomic_long_t mesq_noop_put_nacked;
31627 + atomic_long_unchecked_t vdata_alloc;
31628 + atomic_long_unchecked_t vdata_free;
31629 + atomic_long_unchecked_t gts_alloc;
31630 + atomic_long_unchecked_t gts_free;
31631 + atomic_long_unchecked_t vdata_double_alloc;
31632 + atomic_long_unchecked_t gts_double_allocate;
31633 + atomic_long_unchecked_t assign_context;
31634 + atomic_long_unchecked_t assign_context_failed;
31635 + atomic_long_unchecked_t free_context;
31636 + atomic_long_unchecked_t load_user_context;
31637 + atomic_long_unchecked_t load_kernel_context;
31638 + atomic_long_unchecked_t lock_kernel_context;
31639 + atomic_long_unchecked_t unlock_kernel_context;
31640 + atomic_long_unchecked_t steal_user_context;
31641 + atomic_long_unchecked_t steal_kernel_context;
31642 + atomic_long_unchecked_t steal_context_failed;
31643 + atomic_long_unchecked_t nopfn;
31644 + atomic_long_unchecked_t break_cow;
31645 + atomic_long_unchecked_t asid_new;
31646 + atomic_long_unchecked_t asid_next;
31647 + atomic_long_unchecked_t asid_wrap;
31648 + atomic_long_unchecked_t asid_reuse;
31649 + atomic_long_unchecked_t intr;
31650 + atomic_long_unchecked_t intr_mm_lock_failed;
31651 + atomic_long_unchecked_t call_os;
31652 + atomic_long_unchecked_t call_os_offnode_reference;
31653 + atomic_long_unchecked_t call_os_check_for_bug;
31654 + atomic_long_unchecked_t call_os_wait_queue;
31655 + atomic_long_unchecked_t user_flush_tlb;
31656 + atomic_long_unchecked_t user_unload_context;
31657 + atomic_long_unchecked_t user_exception;
31658 + atomic_long_unchecked_t set_context_option;
31659 + atomic_long_unchecked_t migrate_check;
31660 + atomic_long_unchecked_t migrated_retarget;
31661 + atomic_long_unchecked_t migrated_unload;
31662 + atomic_long_unchecked_t migrated_unload_delay;
31663 + atomic_long_unchecked_t migrated_nopfn_retarget;
31664 + atomic_long_unchecked_t migrated_nopfn_unload;
31665 + atomic_long_unchecked_t tlb_dropin;
31666 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31667 + atomic_long_unchecked_t tlb_dropin_fail_upm;
31668 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
31669 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
31670 + atomic_long_unchecked_t tlb_dropin_fail_idle;
31671 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
31672 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31673 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
31674 + atomic_long_unchecked_t tfh_stale_on_fault;
31675 + atomic_long_unchecked_t mmu_invalidate_range;
31676 + atomic_long_unchecked_t mmu_invalidate_page;
31677 + atomic_long_unchecked_t mmu_clear_flush_young;
31678 + atomic_long_unchecked_t flush_tlb;
31679 + atomic_long_unchecked_t flush_tlb_gru;
31680 + atomic_long_unchecked_t flush_tlb_gru_tgh;
31681 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31682 +
31683 + atomic_long_unchecked_t copy_gpa;
31684 +
31685 + atomic_long_unchecked_t mesq_receive;
31686 + atomic_long_unchecked_t mesq_receive_none;
31687 + atomic_long_unchecked_t mesq_send;
31688 + atomic_long_unchecked_t mesq_send_failed;
31689 + atomic_long_unchecked_t mesq_noop;
31690 + atomic_long_unchecked_t mesq_send_unexpected_error;
31691 + atomic_long_unchecked_t mesq_send_lb_overflow;
31692 + atomic_long_unchecked_t mesq_send_qlimit_reached;
31693 + atomic_long_unchecked_t mesq_send_amo_nacked;
31694 + atomic_long_unchecked_t mesq_send_put_nacked;
31695 + atomic_long_unchecked_t mesq_qf_not_full;
31696 + atomic_long_unchecked_t mesq_qf_locked;
31697 + atomic_long_unchecked_t mesq_qf_noop_not_full;
31698 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
31699 + atomic_long_unchecked_t mesq_qf_unexpected_error;
31700 + atomic_long_unchecked_t mesq_noop_unexpected_error;
31701 + atomic_long_unchecked_t mesq_noop_lb_overflow;
31702 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
31703 + atomic_long_unchecked_t mesq_noop_amo_nacked;
31704 + atomic_long_unchecked_t mesq_noop_put_nacked;
31705
31706 };
31707
31708 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
31709 cchop_deallocate, tghop_invalidate, mcsop_last};
31710
31711 struct mcs_op_statistic {
31712 - atomic_long_t count;
31713 - atomic_long_t total;
31714 + atomic_long_unchecked_t count;
31715 + atomic_long_unchecked_t total;
31716 unsigned long max;
31717 };
31718
31719 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
31720
31721 #define STAT(id) do { \
31722 if (gru_options & OPT_STATS) \
31723 - atomic_long_inc(&gru_stats.id); \
31724 + atomic_long_inc_unchecked(&gru_stats.id); \
31725 } while (0)
31726
31727 #ifdef CONFIG_SGI_GRU_DEBUG
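
The GRU hunks above convert pure statistics counters from atomic_long_t to atomic_long_unchecked_t and switch the STAT() macro to atomic_long_inc_unchecked(). The apparent intent is to keep the "unchecked" counters out of the overflow-detection applied to reference counts, since a wrapping statistics counter is harmless. The actual atomic_long_unchecked_t definition lives elsewhere in this patch and is not shown here; the userspace sketch below only illustrates the type-separation idea with C11 atomics, using made-up names, and its saturating "checked" increment is a stand-in, not what the kernel-side protection actually does.

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative only: distinct wrapper types so a "checked" counter cannot be
     * bumped with the "unchecked" helper by accident, and vice versa. */
    typedef struct { atomic_long value; } counter_checked_t;
    typedef struct { atomic_long value; } counter_unchecked_t;

    static void counter_inc_checked(counter_checked_t *c)
    {
        long old = atomic_fetch_add(&c->value, 1);
        if (old == LONG_MAX)                 /* stand-in reaction to overflow */
            atomic_store(&c->value, LONG_MAX);
    }

    static void counter_inc_unchecked(counter_unchecked_t *c)
    {
        atomic_fetch_add(&c->value, 1);      /* statistics: wrapping is acceptable */
    }

    int main(void)
    {
        counter_unchecked_t mesq_receive = { 0 };
        counter_checked_t   refs = { 0 };

        counter_inc_unchecked(&mesq_receive);
        counter_inc_checked(&refs);
        printf("mesq_receive = %ld, refs = %ld\n",
               atomic_load(&mesq_receive.value), atomic_load(&refs.value));
        return 0;
    }

Because the two wrappers are distinct struct types, passing a checked counter to the unchecked helper is a compile error, which is the same property the type split in the hunk buys the kernel code.
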
31728 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c
31729 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
31730 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
31731 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
31732 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31733 unsigned long timeo = jiffies + HZ;
31734
31735 + pax_track_stack();
31736 +
31737 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31738 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31739 goto sleep;
31740 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
31741 unsigned long initial_adr;
31742 int initial_len = len;
31743
31744 + pax_track_stack();
31745 +
31746 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31747 adr += chip->start;
31748 initial_adr = adr;
31749 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
31750 int retries = 3;
31751 int ret;
31752
31753 + pax_track_stack();
31754 +
31755 adr += chip->start;
31756
31757 retry:
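
The cfi_cmdset_0001.c hunks (and many that follow) insert a pax_track_stack() call at the top of functions that place large objects on the kernel stack. Its definition is not part of this excerpt; it is a grsecurity/PaX helper tied to the patch's stack-usage instrumentation. The sketch below is only a loose userspace analogue of "record how deep the stack gets in functions with big frames", assuming a downward-growing stack; track_stack(), lowest_sp and function_with_big_frame() are invented names, not anything from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Rough analogue: remember the lowest stack address observed so far. */
    static uintptr_t lowest_sp = UINTPTR_MAX;

    #define track_stack() do {                      \
            char probe;                             \
            uintptr_t sp = (uintptr_t)&probe;       \
            if (sp < lowest_sp)                     \
                lowest_sp = sp;                     \
        } while (0)

    static void function_with_big_frame(void)
    {
        char buf[8192];          /* large on-stack buffer, like the MTD hunks */
        track_stack();
        buf[0] = 0;
        (void)buf;
    }

    int main(void)
    {
        track_stack();
        function_with_big_frame();
        printf("deepest stack point seen: %#lx\n", (unsigned long)lowest_sp);
        return 0;
    }
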
31758 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c
31759 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
31760 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
31761 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31762 unsigned long cmd_addr;
31763 struct cfi_private *cfi = map->fldrv_priv;
31764
31765 + pax_track_stack();
31766 +
31767 adr += chip->start;
31768
31769 /* Ensure cmd read/writes are aligned. */
31770 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
31771 DECLARE_WAITQUEUE(wait, current);
31772 int wbufsize, z;
31773
31774 + pax_track_stack();
31775 +
31776 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31777 if (adr & (map_bankwidth(map)-1))
31778 return -EINVAL;
31779 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
31780 DECLARE_WAITQUEUE(wait, current);
31781 int ret = 0;
31782
31783 + pax_track_stack();
31784 +
31785 adr += chip->start;
31786
31787 /* Let's determine this according to the interleave only once */
31788 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
31789 unsigned long timeo = jiffies + HZ;
31790 DECLARE_WAITQUEUE(wait, current);
31791
31792 + pax_track_stack();
31793 +
31794 adr += chip->start;
31795
31796 /* Let's determine this according to the interleave only once */
31797 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
31798 unsigned long timeo = jiffies + HZ;
31799 DECLARE_WAITQUEUE(wait, current);
31800
31801 + pax_track_stack();
31802 +
31803 adr += chip->start;
31804
31805 /* Let's determine this according to the interleave only once */
31806 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2000.c linux-2.6.32.42/drivers/mtd/devices/doc2000.c
31807 --- linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
31808 +++ linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
31809 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31810
31811 /* The ECC will not be calculated correctly if less than 512 is written */
31812 /* DBB-
31813 - if (len != 0x200 && eccbuf)
31814 + if (len != 0x200)
31815 printk(KERN_WARNING
31816 "ECC needs a full sector write (adr: %lx size %lx)\n",
31817 (long) to, (long) len);
31818 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2001.c linux-2.6.32.42/drivers/mtd/devices/doc2001.c
31819 --- linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
31820 +++ linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
31821 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31822 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31823
31824 /* Don't allow read past end of device */
31825 - if (from >= this->totlen)
31826 + if (from >= this->totlen || !len)
31827 return -EINVAL;
31828
31829 /* Don't allow a single read to cross a 512-byte block boundary */
31830 diff -urNp linux-2.6.32.42/drivers/mtd/ftl.c linux-2.6.32.42/drivers/mtd/ftl.c
31831 --- linux-2.6.32.42/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
31832 +++ linux-2.6.32.42/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
31833 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31834 loff_t offset;
31835 uint16_t srcunitswap = cpu_to_le16(srcunit);
31836
31837 + pax_track_stack();
31838 +
31839 eun = &part->EUNInfo[srcunit];
31840 xfer = &part->XferInfo[xferunit];
31841 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31842 diff -urNp linux-2.6.32.42/drivers/mtd/inftlcore.c linux-2.6.32.42/drivers/mtd/inftlcore.c
31843 --- linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
31844 +++ linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
31845 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
31846 struct inftl_oob oob;
31847 size_t retlen;
31848
31849 + pax_track_stack();
31850 +
31851 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31852 "pending=%d)\n", inftl, thisVUC, pendingblock);
31853
31854 diff -urNp linux-2.6.32.42/drivers/mtd/inftlmount.c linux-2.6.32.42/drivers/mtd/inftlmount.c
31855 --- linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
31856 +++ linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
31857 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
31858 struct INFTLPartition *ip;
31859 size_t retlen;
31860
31861 + pax_track_stack();
31862 +
31863 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31864
31865 /*
31866 diff -urNp linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c
31867 --- linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
31868 +++ linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
31869 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31870 {
31871 map_word pfow_val[4];
31872
31873 + pax_track_stack();
31874 +
31875 /* Check identification string */
31876 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31877 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31878 diff -urNp linux-2.6.32.42/drivers/mtd/mtdchar.c linux-2.6.32.42/drivers/mtd/mtdchar.c
31879 --- linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
31880 +++ linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
31881 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
31882 u_long size;
31883 struct mtd_info_user info;
31884
31885 + pax_track_stack();
31886 +
31887 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31888
31889 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31890 diff -urNp linux-2.6.32.42/drivers/mtd/nftlcore.c linux-2.6.32.42/drivers/mtd/nftlcore.c
31891 --- linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
31892 +++ linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
31893 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
31894 int inplace = 1;
31895 size_t retlen;
31896
31897 + pax_track_stack();
31898 +
31899 memset(BlockMap, 0xff, sizeof(BlockMap));
31900 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31901
31902 diff -urNp linux-2.6.32.42/drivers/mtd/nftlmount.c linux-2.6.32.42/drivers/mtd/nftlmount.c
31903 --- linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
31904 +++ linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
31905 @@ -23,6 +23,7 @@
31906 #include <asm/errno.h>
31907 #include <linux/delay.h>
31908 #include <linux/slab.h>
31909 +#include <linux/sched.h>
31910 #include <linux/mtd/mtd.h>
31911 #include <linux/mtd/nand.h>
31912 #include <linux/mtd/nftl.h>
31913 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
31914 struct mtd_info *mtd = nftl->mbd.mtd;
31915 unsigned int i;
31916
31917 + pax_track_stack();
31918 +
31919 /* Assume logical EraseSize == physical erasesize for starting the scan.
31920 We'll sort it out later if we find a MediaHeader which says otherwise */
31921 /* Actually, we won't. The new DiskOnChip driver has already scanned
31922 diff -urNp linux-2.6.32.42/drivers/mtd/ubi/build.c linux-2.6.32.42/drivers/mtd/ubi/build.c
31923 --- linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
31924 +++ linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
31925 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
31926 static int __init bytes_str_to_int(const char *str)
31927 {
31928 char *endp;
31929 - unsigned long result;
31930 + unsigned long result, scale = 1;
31931
31932 result = simple_strtoul(str, &endp, 0);
31933 if (str == endp || result >= INT_MAX) {
31934 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
31935
31936 switch (*endp) {
31937 case 'G':
31938 - result *= 1024;
31939 + scale *= 1024;
31940 case 'M':
31941 - result *= 1024;
31942 + scale *= 1024;
31943 case 'K':
31944 - result *= 1024;
31945 + scale *= 1024;
31946 if (endp[1] == 'i' && endp[2] == 'B')
31947 endp += 2;
31948 case '\0':
31949 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
31950 return -EINVAL;
31951 }
31952
31953 - return result;
31954 + if ((intoverflow_t)result*scale >= INT_MAX) {
31955 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31956 + str);
31957 + return -EINVAL;
31958 + }
31959 +
31960 + return result*scale;
31961 }
31962
31963 /**
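
The ubi/build.c hunk changes bytes_str_to_int() so the K/M/G multiplier accumulates in a separate scale variable and the product is range-checked in a wider type (the patch's intoverflow_t, defined elsewhere in the patch) before it is returned, instead of multiplying result in place where an overflow would go unnoticed. A standalone, simplified sketch of the same guard, using unsigned long long for the widening and dropping the "KiB"-suffix handling of the original:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "<number>[K|M|G]" into bytes, rejecting values that do not fit in
     * an int. Mirrors the shape of the UBI fix above; names are illustrative. */
    static int bytes_str_to_int(const char *str)
    {
        char *endp;
        unsigned long result;
        unsigned long scale = 1;

        errno = 0;
        result = strtoul(str, &endp, 0);
        if (endp == str || errno || result >= INT_MAX)
            return -EINVAL;

        switch (*endp) {
        case 'G': scale *= 1024;   /* fall through */
        case 'M': scale *= 1024;   /* fall through */
        case 'K': scale *= 1024;   /* fall through */
        case '\0':
            break;
        default:
            return -EINVAL;
        }

        /* Do the range check in a wider type before committing to the multiply. */
        if ((unsigned long long)result * scale >= INT_MAX)
            return -EINVAL;

        return (int)(result * scale);
    }

    int main(void)
    {
        printf("%d\n", bytes_str_to_int("4K"));        /* 4096 */
        printf("%d\n", bytes_str_to_int("1048576G"));  /* rejected: -EINVAL */
        return 0;
    }
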
31964 diff -urNp linux-2.6.32.42/drivers/net/bnx2.c linux-2.6.32.42/drivers/net/bnx2.c
31965 --- linux-2.6.32.42/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
31966 +++ linux-2.6.32.42/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
31967 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31968 int rc = 0;
31969 u32 magic, csum;
31970
31971 + pax_track_stack();
31972 +
31973 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31974 goto test_nvram_done;
31975
31976 diff -urNp linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c
31977 --- linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
31978 +++ linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
31979 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
31980 int i, addr, ret;
31981 struct t3_vpd vpd;
31982
31983 + pax_track_stack();
31984 +
31985 /*
31986 * Card information is normally at VPD_BASE but some early cards had
31987 * it at 0.
31988 diff -urNp linux-2.6.32.42/drivers/net/e1000e/82571.c linux-2.6.32.42/drivers/net/e1000e/82571.c
31989 --- linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
31990 +++ linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
31991 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
31992 {
31993 struct e1000_hw *hw = &adapter->hw;
31994 struct e1000_mac_info *mac = &hw->mac;
31995 + /* cannot be const */
31996 struct e1000_mac_operations *func = &mac->ops;
31997 u32 swsm = 0;
31998 u32 swsm2 = 0;
31999 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
32000 temp = er32(ICRXDMTC);
32001 }
32002
32003 -static struct e1000_mac_operations e82571_mac_ops = {
32004 +static const struct e1000_mac_operations e82571_mac_ops = {
32005 /* .check_mng_mode: mac type dependent */
32006 /* .check_for_link: media type dependent */
32007 .id_led_init = e1000e_id_led_init,
32008 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
32009 .setup_led = e1000e_setup_led_generic,
32010 };
32011
32012 -static struct e1000_phy_operations e82_phy_ops_igp = {
32013 +static const struct e1000_phy_operations e82_phy_ops_igp = {
32014 .acquire_phy = e1000_get_hw_semaphore_82571,
32015 .check_reset_block = e1000e_check_reset_block_generic,
32016 .commit_phy = NULL,
32017 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
32018 .cfg_on_link_up = NULL,
32019 };
32020
32021 -static struct e1000_phy_operations e82_phy_ops_m88 = {
32022 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
32023 .acquire_phy = e1000_get_hw_semaphore_82571,
32024 .check_reset_block = e1000e_check_reset_block_generic,
32025 .commit_phy = e1000e_phy_sw_reset,
32026 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
32027 .cfg_on_link_up = NULL,
32028 };
32029
32030 -static struct e1000_phy_operations e82_phy_ops_bm = {
32031 +static const struct e1000_phy_operations e82_phy_ops_bm = {
32032 .acquire_phy = e1000_get_hw_semaphore_82571,
32033 .check_reset_block = e1000e_check_reset_block_generic,
32034 .commit_phy = e1000e_phy_sw_reset,
32035 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
32036 .cfg_on_link_up = NULL,
32037 };
32038
32039 -static struct e1000_nvm_operations e82571_nvm_ops = {
32040 +static const struct e1000_nvm_operations e82571_nvm_ops = {
32041 .acquire_nvm = e1000_acquire_nvm_82571,
32042 .read_nvm = e1000e_read_nvm_eerd,
32043 .release_nvm = e1000_release_nvm_82571,
32044 diff -urNp linux-2.6.32.42/drivers/net/e1000e/e1000.h linux-2.6.32.42/drivers/net/e1000e/e1000.h
32045 --- linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
32046 +++ linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
32047 @@ -375,9 +375,9 @@ struct e1000_info {
32048 u32 pba;
32049 u32 max_hw_frame_size;
32050 s32 (*get_variants)(struct e1000_adapter *);
32051 - struct e1000_mac_operations *mac_ops;
32052 - struct e1000_phy_operations *phy_ops;
32053 - struct e1000_nvm_operations *nvm_ops;
32054 + const struct e1000_mac_operations *mac_ops;
32055 + const struct e1000_phy_operations *phy_ops;
32056 + const struct e1000_nvm_operations *nvm_ops;
32057 };
32058
32059 /* hardware capability, feature, and workaround flags */
32060 diff -urNp linux-2.6.32.42/drivers/net/e1000e/es2lan.c linux-2.6.32.42/drivers/net/e1000e/es2lan.c
32061 --- linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
32062 +++ linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
32063 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
32064 {
32065 struct e1000_hw *hw = &adapter->hw;
32066 struct e1000_mac_info *mac = &hw->mac;
32067 + /* cannot be const */
32068 struct e1000_mac_operations *func = &mac->ops;
32069
32070 /* Set media type */
32071 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
32072 temp = er32(ICRXDMTC);
32073 }
32074
32075 -static struct e1000_mac_operations es2_mac_ops = {
32076 +static const struct e1000_mac_operations es2_mac_ops = {
32077 .id_led_init = e1000e_id_led_init,
32078 .check_mng_mode = e1000e_check_mng_mode_generic,
32079 /* check_for_link dependent on media type */
32080 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
32081 .setup_led = e1000e_setup_led_generic,
32082 };
32083
32084 -static struct e1000_phy_operations es2_phy_ops = {
32085 +static const struct e1000_phy_operations es2_phy_ops = {
32086 .acquire_phy = e1000_acquire_phy_80003es2lan,
32087 .check_reset_block = e1000e_check_reset_block_generic,
32088 .commit_phy = e1000e_phy_sw_reset,
32089 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
32090 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
32091 };
32092
32093 -static struct e1000_nvm_operations es2_nvm_ops = {
32094 +static const struct e1000_nvm_operations es2_nvm_ops = {
32095 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
32096 .read_nvm = e1000e_read_nvm_eerd,
32097 .release_nvm = e1000_release_nvm_80003es2lan,
32098 diff -urNp linux-2.6.32.42/drivers/net/e1000e/hw.h linux-2.6.32.42/drivers/net/e1000e/hw.h
32099 --- linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
32100 +++ linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
32101 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
32102
32103 /* Function pointers for the PHY. */
32104 struct e1000_phy_operations {
32105 - s32 (*acquire_phy)(struct e1000_hw *);
32106 - s32 (*check_polarity)(struct e1000_hw *);
32107 - s32 (*check_reset_block)(struct e1000_hw *);
32108 - s32 (*commit_phy)(struct e1000_hw *);
32109 - s32 (*force_speed_duplex)(struct e1000_hw *);
32110 - s32 (*get_cfg_done)(struct e1000_hw *hw);
32111 - s32 (*get_cable_length)(struct e1000_hw *);
32112 - s32 (*get_phy_info)(struct e1000_hw *);
32113 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
32114 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32115 - void (*release_phy)(struct e1000_hw *);
32116 - s32 (*reset_phy)(struct e1000_hw *);
32117 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
32118 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32119 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
32120 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32121 - s32 (*cfg_on_link_up)(struct e1000_hw *);
32122 + s32 (* acquire_phy)(struct e1000_hw *);
32123 + s32 (* check_polarity)(struct e1000_hw *);
32124 + s32 (* check_reset_block)(struct e1000_hw *);
32125 + s32 (* commit_phy)(struct e1000_hw *);
32126 + s32 (* force_speed_duplex)(struct e1000_hw *);
32127 + s32 (* get_cfg_done)(struct e1000_hw *hw);
32128 + s32 (* get_cable_length)(struct e1000_hw *);
32129 + s32 (* get_phy_info)(struct e1000_hw *);
32130 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
32131 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32132 + void (* release_phy)(struct e1000_hw *);
32133 + s32 (* reset_phy)(struct e1000_hw *);
32134 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
32135 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
32136 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
32137 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32138 + s32 (* cfg_on_link_up)(struct e1000_hw *);
32139 };
32140
32141 /* Function pointers for the NVM. */
32142 struct e1000_nvm_operations {
32143 - s32 (*acquire_nvm)(struct e1000_hw *);
32144 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32145 - void (*release_nvm)(struct e1000_hw *);
32146 - s32 (*update_nvm)(struct e1000_hw *);
32147 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
32148 - s32 (*validate_nvm)(struct e1000_hw *);
32149 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32150 + s32 (* const acquire_nvm)(struct e1000_hw *);
32151 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32152 + void (* const release_nvm)(struct e1000_hw *);
32153 + s32 (* const update_nvm)(struct e1000_hw *);
32154 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
32155 + s32 (* const validate_nvm)(struct e1000_hw *);
32156 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32157 };
32158
32159 struct e1000_mac_info {
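
The e1000e hunks above mark the driver's mac/phy/nvm operations tables, and the pointers that reference them, as const so the function-pointer tables can live in read-only data and cannot be retargeted at runtime. A small standalone C sketch of that pattern; the struct and function names here are invented for illustration, not the driver's:

    #include <stdio.h>

    struct nvm_ops {
        int (*read)(unsigned offset);
        int (*write)(unsigned offset, int value);
    };

    static int dummy_read(unsigned offset)             { return (int)offset; }
    static int dummy_write(unsigned offset, int value) { (void)offset; return value; }

    /* 'static const' places the table in read-only data; callers can go through
     * it, but the function pointers cannot be overwritten after initialization. */
    static const struct nvm_ops example_nvm_ops = {
        .read  = dummy_read,
        .write = dummy_write,
    };

    struct device_info {
        const struct nvm_ops *nvm_ops;   /* matches the const-ified pointer above */
    };

    int main(void)
    {
        struct device_info dev = { .nvm_ops = &example_nvm_ops };
        printf("read(7) = %d\n", dev.nvm_ops->read(7));
        return 0;
    }
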
32160 diff -urNp linux-2.6.32.42/drivers/net/e1000e/ich8lan.c linux-2.6.32.42/drivers/net/e1000e/ich8lan.c
32161 --- linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
32162 +++ linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
32163 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
32164 }
32165 }
32166
32167 -static struct e1000_mac_operations ich8_mac_ops = {
32168 +static const struct e1000_mac_operations ich8_mac_ops = {
32169 .id_led_init = e1000e_id_led_init,
32170 .check_mng_mode = e1000_check_mng_mode_ich8lan,
32171 .check_for_link = e1000_check_for_copper_link_ich8lan,
32172 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
32173 /* id_led_init dependent on mac type */
32174 };
32175
32176 -static struct e1000_phy_operations ich8_phy_ops = {
32177 +static const struct e1000_phy_operations ich8_phy_ops = {
32178 .acquire_phy = e1000_acquire_swflag_ich8lan,
32179 .check_reset_block = e1000_check_reset_block_ich8lan,
32180 .commit_phy = NULL,
32181 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
32182 .write_phy_reg = e1000e_write_phy_reg_igp,
32183 };
32184
32185 -static struct e1000_nvm_operations ich8_nvm_ops = {
32186 +static const struct e1000_nvm_operations ich8_nvm_ops = {
32187 .acquire_nvm = e1000_acquire_nvm_ich8lan,
32188 .read_nvm = e1000_read_nvm_ich8lan,
32189 .release_nvm = e1000_release_nvm_ich8lan,
32190 diff -urNp linux-2.6.32.42/drivers/net/hamradio/6pack.c linux-2.6.32.42/drivers/net/hamradio/6pack.c
32191 --- linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
32192 +++ linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
32193 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
32194 unsigned char buf[512];
32195 int count1;
32196
32197 + pax_track_stack();
32198 +
32199 if (!count)
32200 return;
32201
32202 diff -urNp linux-2.6.32.42/drivers/net/ibmveth.c linux-2.6.32.42/drivers/net/ibmveth.c
32203 --- linux-2.6.32.42/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
32204 +++ linux-2.6.32.42/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
32205 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
32206 NULL,
32207 };
32208
32209 -static struct sysfs_ops veth_pool_ops = {
32210 +static const struct sysfs_ops veth_pool_ops = {
32211 .show = veth_pool_show,
32212 .store = veth_pool_store,
32213 };
32214 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_82575.c linux-2.6.32.42/drivers/net/igb/e1000_82575.c
32215 --- linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
32216 +++ linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
32217 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
32218 wr32(E1000_VT_CTL, vt_ctl);
32219 }
32220
32221 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
32222 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
32223 .reset_hw = igb_reset_hw_82575,
32224 .init_hw = igb_init_hw_82575,
32225 .check_for_link = igb_check_for_link_82575,
32226 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
32227 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
32228 };
32229
32230 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
32231 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
32232 .acquire = igb_acquire_phy_82575,
32233 .get_cfg_done = igb_get_cfg_done_82575,
32234 .release = igb_release_phy_82575,
32235 };
32236
32237 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32238 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32239 .acquire = igb_acquire_nvm_82575,
32240 .read = igb_read_nvm_eerd,
32241 .release = igb_release_nvm_82575,
32242 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_hw.h linux-2.6.32.42/drivers/net/igb/e1000_hw.h
32243 --- linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32244 +++ linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32245 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
32246 };
32247
32248 struct e1000_nvm_operations {
32249 - s32 (*acquire)(struct e1000_hw *);
32250 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32251 - void (*release)(struct e1000_hw *);
32252 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32253 + s32 (* const acquire)(struct e1000_hw *);
32254 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32255 + void (* const release)(struct e1000_hw *);
32256 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32257 };
32258
32259 struct e1000_info {
32260 s32 (*get_invariants)(struct e1000_hw *);
32261 - struct e1000_mac_operations *mac_ops;
32262 - struct e1000_phy_operations *phy_ops;
32263 - struct e1000_nvm_operations *nvm_ops;
32264 + const struct e1000_mac_operations *mac_ops;
32265 + const struct e1000_phy_operations *phy_ops;
32266 + const struct e1000_nvm_operations *nvm_ops;
32267 };
32268
32269 extern const struct e1000_info e1000_82575_info;
32270 diff -urNp linux-2.6.32.42/drivers/net/iseries_veth.c linux-2.6.32.42/drivers/net/iseries_veth.c
32271 --- linux-2.6.32.42/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32272 +++ linux-2.6.32.42/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32273 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32274 NULL
32275 };
32276
32277 -static struct sysfs_ops veth_cnx_sysfs_ops = {
32278 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
32279 .show = veth_cnx_attribute_show
32280 };
32281
32282 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32283 NULL
32284 };
32285
32286 -static struct sysfs_ops veth_port_sysfs_ops = {
32287 +static const struct sysfs_ops veth_port_sysfs_ops = {
32288 .show = veth_port_attribute_show
32289 };
32290
32291 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c
32292 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32293 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32294 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32295 u32 rctl;
32296 int i;
32297
32298 + pax_track_stack();
32299 +
32300 /* Check for Promiscuous and All Multicast modes */
32301
32302 rctl = IXGB_READ_REG(hw, RCTL);
32303 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c
32304 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32305 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32306 @@ -260,6 +260,9 @@ void __devinit
32307 ixgb_check_options(struct ixgb_adapter *adapter)
32308 {
32309 int bd = adapter->bd_number;
32310 +
32311 + pax_track_stack();
32312 +
32313 if (bd >= IXGB_MAX_NIC) {
32314 printk(KERN_NOTICE
32315 "Warning: no configuration for board #%i\n", bd);
32316 diff -urNp linux-2.6.32.42/drivers/net/mlx4/main.c linux-2.6.32.42/drivers/net/mlx4/main.c
32317 --- linux-2.6.32.42/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32318 +++ linux-2.6.32.42/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32319 @@ -38,6 +38,7 @@
32320 #include <linux/errno.h>
32321 #include <linux/pci.h>
32322 #include <linux/dma-mapping.h>
32323 +#include <linux/sched.h>
32324
32325 #include <linux/mlx4/device.h>
32326 #include <linux/mlx4/doorbell.h>
32327 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32328 u64 icm_size;
32329 int err;
32330
32331 + pax_track_stack();
32332 +
32333 err = mlx4_QUERY_FW(dev);
32334 if (err) {
32335 if (err == -EACCES)
32336 diff -urNp linux-2.6.32.42/drivers/net/niu.c linux-2.6.32.42/drivers/net/niu.c
32337 --- linux-2.6.32.42/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32338 +++ linux-2.6.32.42/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32339 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32340 int i, num_irqs, err;
32341 u8 first_ldg;
32342
32343 + pax_track_stack();
32344 +
32345 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32346 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32347 ldg_num_map[i] = first_ldg + i;
32348 diff -urNp linux-2.6.32.42/drivers/net/pcnet32.c linux-2.6.32.42/drivers/net/pcnet32.c
32349 --- linux-2.6.32.42/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32350 +++ linux-2.6.32.42/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32351 @@ -79,7 +79,7 @@ static int cards_found;
32352 /*
32353 * VLB I/O addresses
32354 */
32355 -static unsigned int pcnet32_portlist[] __initdata =
32356 +static unsigned int pcnet32_portlist[] __devinitdata =
32357 { 0x300, 0x320, 0x340, 0x360, 0 };
32358
32359 static int pcnet32_debug = 0;
32360 diff -urNp linux-2.6.32.42/drivers/net/tg3.h linux-2.6.32.42/drivers/net/tg3.h
32361 --- linux-2.6.32.42/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32362 +++ linux-2.6.32.42/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32363 @@ -95,6 +95,7 @@
32364 #define CHIPREV_ID_5750_A0 0x4000
32365 #define CHIPREV_ID_5750_A1 0x4001
32366 #define CHIPREV_ID_5750_A3 0x4003
32367 +#define CHIPREV_ID_5750_C1 0x4201
32368 #define CHIPREV_ID_5750_C2 0x4202
32369 #define CHIPREV_ID_5752_A0_HW 0x5000
32370 #define CHIPREV_ID_5752_A0 0x6000
32371 diff -urNp linux-2.6.32.42/drivers/net/tulip/de2104x.c linux-2.6.32.42/drivers/net/tulip/de2104x.c
32372 --- linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32373 +++ linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32374 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32375 struct de_srom_info_leaf *il;
32376 void *bufp;
32377
32378 + pax_track_stack();
32379 +
32380 /* download entire eeprom */
32381 for (i = 0; i < DE_EEPROM_WORDS; i++)
32382 ((__le16 *)ee_data)[i] =
32383 diff -urNp linux-2.6.32.42/drivers/net/tulip/de4x5.c linux-2.6.32.42/drivers/net/tulip/de4x5.c
32384 --- linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32385 +++ linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32386 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32387 for (i=0; i<ETH_ALEN; i++) {
32388 tmp.addr[i] = dev->dev_addr[i];
32389 }
32390 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32391 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32392 break;
32393
32394 case DE4X5_SET_HWADDR: /* Set the hardware address */
32395 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32396 spin_lock_irqsave(&lp->lock, flags);
32397 memcpy(&statbuf, &lp->pktStats, ioc->len);
32398 spin_unlock_irqrestore(&lp->lock, flags);
32399 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32400 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32401 return -EFAULT;
32402 break;
32403 }
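
The two de4x5.c hunks add an upper bound (ioc->len > sizeof tmp.addr / sizeof statbuf) before the copy_to_user() calls, so a user-supplied length can no longer copy past the end of the fixed-size kernel buffer and leak adjacent kernel memory. The sketch below shows the same clamp-before-copy shape in userspace, with memcpy standing in for copy_to_user and all names (copy_out, get_hwaddr) invented for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for copy_to_user(): returns 0 on success, nonzero on failure. */
    static int copy_out(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;
    }

    static int get_hwaddr(void *user_buf, size_t user_len)
    {
        unsigned char addr[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

        /* Reject lengths larger than the kernel-side buffer before copying,
         * the same shape as the check added in the hunk above. */
        if (user_len > sizeof addr || copy_out(user_buf, addr, user_len))
            return -EFAULT;
        return 0;
    }

    int main(void)
    {
        unsigned char out[64];
        printf("len=6  -> %d\n", get_hwaddr(out, 6));    /* ok */
        printf("len=64 -> %d\n", get_hwaddr(out, 64));   /* rejected */
        return 0;
    }
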
32404 diff -urNp linux-2.6.32.42/drivers/net/usb/hso.c linux-2.6.32.42/drivers/net/usb/hso.c
32405 --- linux-2.6.32.42/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32406 +++ linux-2.6.32.42/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32407 @@ -71,7 +71,7 @@
32408 #include <asm/byteorder.h>
32409 #include <linux/serial_core.h>
32410 #include <linux/serial.h>
32411 -
32412 +#include <asm/local.h>
32413
32414 #define DRIVER_VERSION "1.2"
32415 #define MOD_AUTHOR "Option Wireless"
32416 @@ -258,7 +258,7 @@ struct hso_serial {
32417
32418 /* from usb_serial_port */
32419 struct tty_struct *tty;
32420 - int open_count;
32421 + local_t open_count;
32422 spinlock_t serial_lock;
32423
32424 int (*write_data) (struct hso_serial *serial);
32425 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32426 struct urb *urb;
32427
32428 urb = serial->rx_urb[0];
32429 - if (serial->open_count > 0) {
32430 + if (local_read(&serial->open_count) > 0) {
32431 count = put_rxbuf_data(urb, serial);
32432 if (count == -1)
32433 return;
32434 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32435 DUMP1(urb->transfer_buffer, urb->actual_length);
32436
32437 /* Anyone listening? */
32438 - if (serial->open_count == 0)
32439 + if (local_read(&serial->open_count) == 0)
32440 return;
32441
32442 if (status == 0) {
32443 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32444 spin_unlock_irq(&serial->serial_lock);
32445
32446 /* check for port already opened, if not set the termios */
32447 - serial->open_count++;
32448 - if (serial->open_count == 1) {
32449 + if (local_inc_return(&serial->open_count) == 1) {
32450 tty->low_latency = 1;
32451 serial->rx_state = RX_IDLE;
32452 /* Force default termio settings */
32453 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32454 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32455 if (result) {
32456 hso_stop_serial_device(serial->parent);
32457 - serial->open_count--;
32458 + local_dec(&serial->open_count);
32459 kref_put(&serial->parent->ref, hso_serial_ref_free);
32460 }
32461 } else {
32462 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32463
32464 /* reset the rts and dtr */
32465 /* do the actual close */
32466 - serial->open_count--;
32467 + local_dec(&serial->open_count);
32468
32469 - if (serial->open_count <= 0) {
32470 - serial->open_count = 0;
32471 + if (local_read(&serial->open_count) <= 0) {
32472 + local_set(&serial->open_count, 0);
32473 spin_lock_irq(&serial->serial_lock);
32474 if (serial->tty == tty) {
32475 serial->tty->driver_data = NULL;
32476 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32477
32478 /* the actual setup */
32479 spin_lock_irqsave(&serial->serial_lock, flags);
32480 - if (serial->open_count)
32481 + if (local_read(&serial->open_count))
32482 _hso_serial_set_termios(tty, old);
32483 else
32484 tty->termios = old;
32485 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32486 /* Start all serial ports */
32487 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32488 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32489 - if (dev2ser(serial_table[i])->open_count) {
32490 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32491 result =
32492 hso_start_serial_device(serial_table[i], GFP_NOIO);
32493 hso_kick_transmit(dev2ser(serial_table[i]));
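
The hso.c hunks replace the plain int open_count with a local_t and use local_inc_return()/local_dec()/local_read() so the open/close accounting is done through atomic primitives rather than bare increments. local_t and its helpers come from the kernel's asm/local.h (included by the hunk) and have no userspace equivalent, so the sketch below uses C11 atomics only to show the same "first opener initializes, last closer tears down" shape; serial_open/serial_close are illustrative names.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int open_count;   /* userspace stand-in for the local_t counter */

    static void serial_open(void)
    {
        /* First opener (count becomes 1) does the one-time setup, matching
         * 'if (local_inc_return(&serial->open_count) == 1)' in the hunk. */
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
            printf("first open: initialize port\n");
        else
            printf("subsequent open\n");
    }

    static void serial_close(void)
    {
        if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
            atomic_store(&open_count, 0);
            printf("last close: tear down port\n");
        }
    }

    int main(void)
    {
        serial_open();
        serial_open();
        serial_close();
        serial_close();
        return 0;
    }
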
32494 diff -urNp linux-2.6.32.42/drivers/net/vxge/vxge-main.c linux-2.6.32.42/drivers/net/vxge/vxge-main.c
32495 --- linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32496 +++ linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32497 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32498 struct sk_buff *completed[NR_SKB_COMPLETED];
32499 int more;
32500
32501 + pax_track_stack();
32502 +
32503 do {
32504 more = 0;
32505 skb_ptr = completed;
32506 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32507 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32508 int index;
32509
32510 + pax_track_stack();
32511 +
32512 /*
32513 * Filling
32514 * - itable with bucket numbers
32515 diff -urNp linux-2.6.32.42/drivers/net/wan/cycx_x25.c linux-2.6.32.42/drivers/net/wan/cycx_x25.c
32516 --- linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32517 +++ linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32518 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32519 unsigned char hex[1024],
32520 * phex = hex;
32521
32522 + pax_track_stack();
32523 +
32524 if (len >= (sizeof(hex) / 2))
32525 len = (sizeof(hex) / 2) - 1;
32526
32527 diff -urNp linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c
32528 --- linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32529 +++ linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32530 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32531 int do_autopm = 1;
32532 DECLARE_COMPLETION_ONSTACK(notif_completion);
32533
32534 + pax_track_stack();
32535 +
32536 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32537 i2400m, ack, ack_size);
32538 BUG_ON(_ack == i2400m->bm_ack_buf);
32539 diff -urNp linux-2.6.32.42/drivers/net/wireless/airo.c linux-2.6.32.42/drivers/net/wireless/airo.c
32540 --- linux-2.6.32.42/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32541 +++ linux-2.6.32.42/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32542 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32543 BSSListElement * loop_net;
32544 BSSListElement * tmp_net;
32545
32546 + pax_track_stack();
32547 +
32548 /* Blow away current list of scan results */
32549 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32550 list_move_tail (&loop_net->list, &ai->network_free_list);
32551 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32552 WepKeyRid wkr;
32553 int rc;
32554
32555 + pax_track_stack();
32556 +
32557 memset( &mySsid, 0, sizeof( mySsid ) );
32558 kfree (ai->flash);
32559 ai->flash = NULL;
32560 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32561 __le32 *vals = stats.vals;
32562 int len;
32563
32564 + pax_track_stack();
32565 +
32566 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32567 return -ENOMEM;
32568 data = (struct proc_data *)file->private_data;
32569 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32570 /* If doLoseSync is not 1, we won't do a Lose Sync */
32571 int doLoseSync = -1;
32572
32573 + pax_track_stack();
32574 +
32575 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32576 return -ENOMEM;
32577 data = (struct proc_data *)file->private_data;
32578 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32579 int i;
32580 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32581
32582 + pax_track_stack();
32583 +
32584 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32585 if (!qual)
32586 return -ENOMEM;
32587 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32588 CapabilityRid cap_rid;
32589 __le32 *vals = stats_rid.vals;
32590
32591 + pax_track_stack();
32592 +
32593 /* Get stats out of the card */
32594 clear_bit(JOB_WSTATS, &local->jobs);
32595 if (local->power.event) {
32596 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c
32597 --- linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32598 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32599 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32600 unsigned int v;
32601 u64 tsf;
32602
32603 + pax_track_stack();
32604 +
32605 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32606 len += snprintf(buf+len, sizeof(buf)-len,
32607 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32608 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32609 unsigned int len = 0;
32610 unsigned int i;
32611
32612 + pax_track_stack();
32613 +
32614 len += snprintf(buf+len, sizeof(buf)-len,
32615 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32616
32617 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c
32618 --- linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32619 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32620 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32621 char buf[512];
32622 unsigned int len = 0;
32623
32624 + pax_track_stack();
32625 +
32626 len += snprintf(buf + len, sizeof(buf) - len,
32627 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32628 len += snprintf(buf + len, sizeof(buf) - len,
32629 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32630 int i;
32631 u8 addr[ETH_ALEN];
32632
32633 + pax_track_stack();
32634 +
32635 len += snprintf(buf + len, sizeof(buf) - len,
32636 "primary: %s (%s chan=%d ht=%d)\n",
32637 wiphy_name(sc->pri_wiphy->hw->wiphy),
32638 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c
32639 --- linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32640 +++ linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32641 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
32642 struct b43_debugfs_fops {
32643 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
32644 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
32645 - struct file_operations fops;
32646 + const struct file_operations fops;
32647 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
32648 size_t file_struct_offset;
32649 };
32650 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c
32651 --- linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32652 +++ linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32653 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
32654 struct b43legacy_debugfs_fops {
32655 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
32656 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
32657 - struct file_operations fops;
32658 + const struct file_operations fops;
32659 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
32660 size_t file_struct_offset;
32661 /* Take wl->irq_lock before calling read/write? */
32662 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c
32663 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
32664 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
32665 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
32666 int err;
32667 DECLARE_SSID_BUF(ssid);
32668
32669 + pax_track_stack();
32670 +
32671 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32672
32673 if (ssid_len)
32674 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
32675 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32676 int err;
32677
32678 + pax_track_stack();
32679 +
32680 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32681 idx, keylen, len);
32682
32683 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c
32684 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
32685 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
32686 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
32687 unsigned long flags;
32688 DECLARE_SSID_BUF(ssid);
32689
32690 + pax_track_stack();
32691 +
32692 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32693 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32694 print_ssid(ssid, info_element->data, info_element->len),
32695 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c
32696 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
32697 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
32698 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
32699 },
32700 };
32701
32702 -static struct iwl_ops iwl1000_ops = {
32703 +static const struct iwl_ops iwl1000_ops = {
32704 .ucode = &iwl5000_ucode,
32705 .lib = &iwl1000_lib,
32706 .hcmd = &iwl5000_hcmd,
32707 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c
32708 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
32709 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
32710 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
32711 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
32712 };
32713
32714 -static struct iwl_ops iwl3945_ops = {
32715 +static const struct iwl_ops iwl3945_ops = {
32716 .ucode = &iwl3945_ucode,
32717 .lib = &iwl3945_lib,
32718 .hcmd = &iwl3945_hcmd,
32719 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c
32720 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
32721 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
32722 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
32723 },
32724 };
32725
32726 -static struct iwl_ops iwl4965_ops = {
32727 +static const struct iwl_ops iwl4965_ops = {
32728 .ucode = &iwl4965_ucode,
32729 .lib = &iwl4965_lib,
32730 .hcmd = &iwl4965_hcmd,
32731 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c
32732 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
32733 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
32734 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
32735 },
32736 };
32737
32738 -struct iwl_ops iwl5000_ops = {
32739 +const struct iwl_ops iwl5000_ops = {
32740 .ucode = &iwl5000_ucode,
32741 .lib = &iwl5000_lib,
32742 .hcmd = &iwl5000_hcmd,
32743 .utils = &iwl5000_hcmd_utils,
32744 };
32745
32746 -static struct iwl_ops iwl5150_ops = {
32747 +static const struct iwl_ops iwl5150_ops = {
32748 .ucode = &iwl5000_ucode,
32749 .lib = &iwl5150_lib,
32750 .hcmd = &iwl5000_hcmd,
32751 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c
32752 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
32753 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
32754 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
32755 .calc_rssi = iwl5000_calc_rssi,
32756 };
32757
32758 -static struct iwl_ops iwl6000_ops = {
32759 +static const struct iwl_ops iwl6000_ops = {
32760 .ucode = &iwl5000_ucode,
32761 .lib = &iwl6000_lib,
32762 .hcmd = &iwl5000_hcmd,
32763 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32764 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
32765 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
32766 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
32767 u8 active_index = 0;
32768 s32 tpt = 0;
32769
32770 + pax_track_stack();
32771 +
32772 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32773
32774 if (!ieee80211_is_data(hdr->frame_control) ||
32775 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
32776 u8 valid_tx_ant = 0;
32777 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32778
32779 + pax_track_stack();
32780 +
32781 /* Override starting rate (index 0) if needed for debug purposes */
32782 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32783
32784 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32785 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
32786 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
32787 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
32788 int pos = 0;
32789 const size_t bufsz = sizeof(buf);
32790
32791 + pax_track_stack();
32792 +
32793 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32794 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32795 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
32796 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32797 const size_t bufsz = sizeof(buf);
32798 ssize_t ret;
32799
32800 + pax_track_stack();
32801 +
32802 for (i = 0; i < AC_NUM; i++) {
32803 pos += scnprintf(buf + pos, bufsz - pos,
32804 "\tcw_min\tcw_max\taifsn\ttxop\n");
32805 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h
32806 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
32807 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
32808 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
32809 #endif
32810
32811 #else
32812 -#define IWL_DEBUG(__priv, level, fmt, args...)
32813 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32814 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32815 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32816 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32817 void *p, u32 len)
32818 {}
32819 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h
32820 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
32821 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
32822 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
32823
32824 /* shared structures from iwl-5000.c */
32825 extern struct iwl_mod_params iwl50_mod_params;
32826 -extern struct iwl_ops iwl5000_ops;
32827 +extern const struct iwl_ops iwl5000_ops;
32828 extern struct iwl_ucode_ops iwl5000_ucode;
32829 extern struct iwl_lib_ops iwl5000_lib;
32830 extern struct iwl_hcmd_ops iwl5000_hcmd;
32831 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c
32832 --- linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32833 +++ linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
32834 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32835 int buf_len = 512;
32836 size_t len = 0;
32837
32838 + pax_track_stack();
32839 +
32840 if (*ppos != 0)
32841 return 0;
32842 if (count < sizeof(buf))
32843 diff -urNp linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c
32844 --- linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32845 +++ linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32846 @@ -708,7 +708,7 @@ out_unlock:
32847 struct lbs_debugfs_files {
32848 const char *name;
32849 int perm;
32850 - struct file_operations fops;
32851 + const struct file_operations fops;
32852 };
32853
32854 static const struct lbs_debugfs_files debugfs_files[] = {
32855 diff -urNp linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c
32856 --- linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
32857 +++ linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
32858 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
32859
32860 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
32861
32862 - if (rts_threshold < 0 || rts_threshold > 2347)
32863 + if (rts_threshold > 2347)
32864 rts_threshold = 2347;
32865
32866 tmp = cpu_to_le32(rts_threshold);
32867 diff -urNp linux-2.6.32.42/drivers/oprofile/buffer_sync.c linux-2.6.32.42/drivers/oprofile/buffer_sync.c
32868 --- linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
32869 +++ linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
32870 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
32871 if (cookie == NO_COOKIE)
32872 offset = pc;
32873 if (cookie == INVALID_COOKIE) {
32874 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32875 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32876 offset = pc;
32877 }
32878 if (cookie != last_cookie) {
32879 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
32880 /* add userspace sample */
32881
32882 if (!mm) {
32883 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
32884 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32885 return 0;
32886 }
32887
32888 cookie = lookup_dcookie(mm, s->eip, &offset);
32889
32890 if (cookie == INVALID_COOKIE) {
32891 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32892 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32893 return 0;
32894 }
32895
32896 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
32897 /* ignore backtraces if failed to add a sample */
32898 if (state == sb_bt_start) {
32899 state = sb_bt_ignore;
32900 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32901 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32902 }
32903 }
32904 release_mm(mm);
32905 diff -urNp linux-2.6.32.42/drivers/oprofile/event_buffer.c linux-2.6.32.42/drivers/oprofile/event_buffer.c
32906 --- linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
32907 +++ linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
32908 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32909 }
32910
32911 if (buffer_pos == buffer_size) {
32912 - atomic_inc(&oprofile_stats.event_lost_overflow);
32913 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32914 return;
32915 }
32916
32917 diff -urNp linux-2.6.32.42/drivers/oprofile/oprof.c linux-2.6.32.42/drivers/oprofile/oprof.c
32918 --- linux-2.6.32.42/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
32919 +++ linux-2.6.32.42/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
32920 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32921 if (oprofile_ops.switch_events())
32922 return;
32923
32924 - atomic_inc(&oprofile_stats.multiplex_counter);
32925 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32926 start_switch_worker();
32927 }
32928
32929 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofilefs.c linux-2.6.32.42/drivers/oprofile/oprofilefs.c
32930 --- linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
32931 +++ linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
32932 @@ -187,7 +187,7 @@ static const struct file_operations atom
32933
32934
32935 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32936 - char const *name, atomic_t *val)
32937 + char const *name, atomic_unchecked_t *val)
32938 {
32939 struct dentry *d = __oprofilefs_create_file(sb, root, name,
32940 &atomic_ro_fops, 0444);
32941 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.c linux-2.6.32.42/drivers/oprofile/oprofile_stats.c
32942 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
32943 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
32944 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32945 cpu_buf->sample_invalid_eip = 0;
32946 }
32947
32948 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32949 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32950 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
32951 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32952 - atomic_set(&oprofile_stats.multiplex_counter, 0);
32953 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32954 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32955 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32956 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32957 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32958 }
32959
32960
32961 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.h linux-2.6.32.42/drivers/oprofile/oprofile_stats.h
32962 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
32963 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
32964 @@ -13,11 +13,11 @@
32965 #include <asm/atomic.h>
32966
32967 struct oprofile_stat_struct {
32968 - atomic_t sample_lost_no_mm;
32969 - atomic_t sample_lost_no_mapping;
32970 - atomic_t bt_lost_no_mapping;
32971 - atomic_t event_lost_overflow;
32972 - atomic_t multiplex_counter;
32973 + atomic_unchecked_t sample_lost_no_mm;
32974 + atomic_unchecked_t sample_lost_no_mapping;
32975 + atomic_unchecked_t bt_lost_no_mapping;
32976 + atomic_unchecked_t event_lost_overflow;
32977 + atomic_unchecked_t multiplex_counter;
32978 };
32979
32980 extern struct oprofile_stat_struct oprofile_stats;
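
The oprofile hunks above show a pattern repeated throughout this patch: counters that exist purely for statistics are switched from atomic_t to atomic_unchecked_t, a variant defined elsewhere in this patch whose operations appear to be exempt from the overflow checking PaX adds to reference counters, since wrapping a lost-sample counter is harmless. A minimal userspace sketch of the idea follows; the *_sketch names and the use of GCC's __atomic builtins are illustrative stand-ins, not the kernel API.

#include <stdio.h>

typedef struct { volatile long counter; } atomic_unchecked_sketch_t;

static void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
	/* plain wrapping increment; a "checked" counter would detect overflow */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static long atomic_read_unchecked_sketch(const atomic_unchecked_sketch_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_sketch_t sample_lost_no_mapping = { 0 };

	atomic_inc_unchecked_sketch(&sample_lost_no_mapping);
	printf("lost samples: %ld\n",
	       atomic_read_unchecked_sketch(&sample_lost_no_mapping));
	return 0;
}
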
32981 diff -urNp linux-2.6.32.42/drivers/parisc/pdc_stable.c linux-2.6.32.42/drivers/parisc/pdc_stable.c
32982 --- linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
32983 +++ linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
32984 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
32985 return ret;
32986 }
32987
32988 -static struct sysfs_ops pdcspath_attr_ops = {
32989 +static const struct sysfs_ops pdcspath_attr_ops = {
32990 .show = pdcspath_attr_show,
32991 .store = pdcspath_attr_store,
32992 };
32993 diff -urNp linux-2.6.32.42/drivers/parport/procfs.c linux-2.6.32.42/drivers/parport/procfs.c
32994 --- linux-2.6.32.42/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
32995 +++ linux-2.6.32.42/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
32996 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32997
32998 *ppos += len;
32999
33000 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33001 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33002 }
33003
33004 #ifdef CONFIG_PARPORT_1284
33005 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33006
33007 *ppos += len;
33008
33009 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33010 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33011 }
33012 #endif /* IEEE1284.3 support. */
33013
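
The two parport hunks above add a length check in front of copy_to_user() so that a len larger than the on-stack buffer is rejected with -EFAULT instead of being copied out. A standalone sketch of the same guard, with memcpy() standing in for copy_to_user() and all names illustrative:

#include <stdio.h>
#include <string.h>

/* returns 0 on success, -1 (standing in for -EFAULT) on a bad length */
static int copy_out(char *result, const char *buffer, size_t buffer_size, size_t len)
{
	if (len > buffer_size)          /* mirrors: len > sizeof buffer */
		return -1;
	memcpy(result, buffer, len);    /* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	char buffer[20] = "parport0: ok";
	char result[20];

	if (copy_out(result, buffer, sizeof(buffer), 13) == 0)
		printf("%s\n", result);
	if (copy_out(result, buffer, sizeof(buffer), 4096) != 0)
		printf("oversized request rejected\n");
	return 0;
}
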
33014 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c
33015 --- linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
33016 +++ linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
33017 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
33018 }
33019
33020
33021 -static struct acpi_dock_ops acpiphp_dock_ops = {
33022 +static const struct acpi_dock_ops acpiphp_dock_ops = {
33023 .handler = handle_hotplug_event_func,
33024 };
33025
33026 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c
33027 --- linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
33028 +++ linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
33029 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33030
33031 void compaq_nvram_init (void __iomem *rom_start)
33032 {
33033 +
33034 +#ifndef CONFIG_PAX_KERNEXEC
33035 if (rom_start) {
33036 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33037 }
33038 +#endif
33039 +
33040 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33041
33042 /* initialize our int15 lock */
33043 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/fakephp.c linux-2.6.32.42/drivers/pci/hotplug/fakephp.c
33044 --- linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
33045 +++ linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
33046 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
33047 }
33048
33049 static struct kobj_type legacy_ktype = {
33050 - .sysfs_ops = &(struct sysfs_ops){
33051 + .sysfs_ops = &(const struct sysfs_ops){
33052 .store = legacy_store, .show = legacy_show
33053 },
33054 .release = &legacy_release,
33055 diff -urNp linux-2.6.32.42/drivers/pci/intel-iommu.c linux-2.6.32.42/drivers/pci/intel-iommu.c
33056 --- linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
33057 +++ linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
33058 @@ -2643,7 +2643,7 @@ error:
33059 return 0;
33060 }
33061
33062 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
33063 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
33064 unsigned long offset, size_t size,
33065 enum dma_data_direction dir,
33066 struct dma_attrs *attrs)
33067 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
33068 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
33069 }
33070
33071 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33072 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33073 size_t size, enum dma_data_direction dir,
33074 struct dma_attrs *attrs)
33075 {
33076 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
33077 }
33078 }
33079
33080 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
33081 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
33082 dma_addr_t *dma_handle, gfp_t flags)
33083 {
33084 void *vaddr;
33085 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
33086 return NULL;
33087 }
33088
33089 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33090 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33091 dma_addr_t dma_handle)
33092 {
33093 int order;
33094 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
33095 free_pages((unsigned long)vaddr, order);
33096 }
33097
33098 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33099 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33100 int nelems, enum dma_data_direction dir,
33101 struct dma_attrs *attrs)
33102 {
33103 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
33104 return nelems;
33105 }
33106
33107 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33108 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33109 enum dma_data_direction dir, struct dma_attrs *attrs)
33110 {
33111 int i;
33112 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
33113 return nelems;
33114 }
33115
33116 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33117 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33118 {
33119 return !dma_addr;
33120 }
33121
33122 -struct dma_map_ops intel_dma_ops = {
33123 +const struct dma_map_ops intel_dma_ops = {
33124 .alloc_coherent = intel_alloc_coherent,
33125 .free_coherent = intel_free_coherent,
33126 .map_sg = intel_map_sg,
33127 diff -urNp linux-2.6.32.42/drivers/pci/pcie/aspm.c linux-2.6.32.42/drivers/pci/pcie/aspm.c
33128 --- linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
33129 +++ linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
33130 @@ -27,9 +27,9 @@
33131 #define MODULE_PARAM_PREFIX "pcie_aspm."
33132
33133 /* Note: those are not register definitions */
33134 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33135 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33136 -#define ASPM_STATE_L1 (4) /* L1 state */
33137 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33138 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33139 +#define ASPM_STATE_L1 (4U) /* L1 state */
33140 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33141 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33142
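
The ASPM hunk above adds a U suffix to the state masks so every expression built from them stays unsigned. One practical consequence, sketched below: the complement of a plain int mask sign-extends when widened, while the complement of the unsigned mask zero-extends. The macro names here are illustrative.

#include <stdio.h>

#define STATE_L1_SIGNED   (4)    /* like the old definition  */
#define STATE_L1_UNSIGNED (4U)   /* like the patched version */

int main(void)
{
	/* ~(int)4 is -5 and sign-extends; ~4U stays a 32-bit unsigned value */
	unsigned long long wide_signed   = ~STATE_L1_SIGNED;
	unsigned long long wide_unsigned = ~STATE_L1_UNSIGNED;

	printf("signed mask widened:   %#llx\n", wide_signed);
	printf("unsigned mask widened: %#llx\n", wide_unsigned);
	return 0;
}
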
33143 diff -urNp linux-2.6.32.42/drivers/pci/probe.c linux-2.6.32.42/drivers/pci/probe.c
33144 --- linux-2.6.32.42/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
33145 +++ linux-2.6.32.42/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
33146 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
33147 return ret;
33148 }
33149
33150 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
33151 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
33152 struct device_attribute *attr,
33153 char *buf)
33154 {
33155 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
33156 }
33157
33158 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
33159 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
33160 struct device_attribute *attr,
33161 char *buf)
33162 {
33163 diff -urNp linux-2.6.32.42/drivers/pci/proc.c linux-2.6.32.42/drivers/pci/proc.c
33164 --- linux-2.6.32.42/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
33165 +++ linux-2.6.32.42/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
33166 @@ -480,7 +480,16 @@ static const struct file_operations proc
33167 static int __init pci_proc_init(void)
33168 {
33169 struct pci_dev *dev = NULL;
33170 +
33171 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33172 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33173 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33174 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33175 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33176 +#endif
33177 +#else
33178 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33179 +#endif
33180 proc_create("devices", 0, proc_bus_pci_dir,
33181 &proc_bus_pci_dev_operations);
33182 proc_initialized = 1;
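
With GRKERNSEC_PROC enabled, the hunk above creates /proc/bus/pci via proc_mkdir_mode() so the directory is listable only by root (and, in the USERGROUP case, by one chosen group) instead of being world-readable. A userspace analog with POSIX mkdir(); the path is made up for the example.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	/* S_IRUSR | S_IXUSR == 0500: only the owner may list or enter it */
	if (mkdir("/tmp/restricted-demo", S_IRUSR | S_IXUSR) != 0)
		perror("mkdir");
	return 0;
}
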
33183 diff -urNp linux-2.6.32.42/drivers/pci/slot.c linux-2.6.32.42/drivers/pci/slot.c
33184 --- linux-2.6.32.42/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
33185 +++ linux-2.6.32.42/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
33186 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
33187 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
33188 }
33189
33190 -static struct sysfs_ops pci_slot_sysfs_ops = {
33191 +static const struct sysfs_ops pci_slot_sysfs_ops = {
33192 .show = pci_slot_attr_show,
33193 .store = pci_slot_attr_store,
33194 };
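
The sysfs_ops, backlight_ops, dma_map_ops and ata_port_operations changes in this stretch all apply one pattern: operation tables that are only ever initialised statically are declared const, which lets the compiler place them in read-only memory so their function pointers cannot be redirected at run time. A self-contained sketch with a made-up ops structure:

#include <stdio.h>

struct demo_ops {
	void (*show)(void);
	void (*store)(void);
};

static void demo_show(void)  { puts("show"); }
static void demo_store(void) { puts("store"); }

/* lands in .rodata; "demo.show = evil" would now be rejected at compile time */
static const struct demo_ops demo = {
	.show  = demo_show,
	.store = demo_store,
};

int main(void)
{
	demo.show();
	demo.store();
	return 0;
}
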
33195 diff -urNp linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c
33196 --- linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
33197 +++ linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
33198 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
33199 return -EFAULT;
33200 }
33201 }
33202 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33203 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33204 if (!buf)
33205 return -ENOMEM;
33206
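
The pcmcia hunk above swaps kmalloc() for kzalloc() for a buffer that is later handed back to userspace; zero-initialising it means any fields the ioctl path does not fill in cannot leak stale heap contents. A userspace sketch of the difference, using calloc() in the role of kzalloc(); the bytes printed for the malloc'd buffer are simply whatever happened to be on the heap.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t sz = 64;
	unsigned char *dirty = malloc(sz);      /* like kmalloc: not cleared */
	unsigned char *clean = calloc(1, sz);   /* like kzalloc: zeroed      */

	if (!dirty || !clean)
		return 1;

	memcpy(dirty, "reply", 5);              /* only part of each buffer  */
	memcpy(clean, "reply", 5);              /* is actually filled in     */

	printf("byte 40: uncleared=%02x cleared=%02x\n", dirty[40], clean[40]);
	free(dirty);
	free(clean);
	return 0;
}
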
33207 diff -urNp linux-2.6.32.42/drivers/platform/x86/acer-wmi.c linux-2.6.32.42/drivers/platform/x86/acer-wmi.c
33208 --- linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
33209 +++ linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
33210 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
33211 return 0;
33212 }
33213
33214 -static struct backlight_ops acer_bl_ops = {
33215 +static const struct backlight_ops acer_bl_ops = {
33216 .get_brightness = read_brightness,
33217 .update_status = update_bl_status,
33218 };
33219 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus_acpi.c linux-2.6.32.42/drivers/platform/x86/asus_acpi.c
33220 --- linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
33221 +++ linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
33222 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
33223 return 0;
33224 }
33225
33226 -static struct backlight_ops asus_backlight_data = {
33227 +static const struct backlight_ops asus_backlight_data = {
33228 .get_brightness = read_brightness,
33229 .update_status = set_brightness_status,
33230 };
33231 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus-laptop.c linux-2.6.32.42/drivers/platform/x86/asus-laptop.c
33232 --- linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
33233 +++ linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
33234 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
33235 */
33236 static int read_brightness(struct backlight_device *bd);
33237 static int update_bl_status(struct backlight_device *bd);
33238 -static struct backlight_ops asusbl_ops = {
33239 +static const struct backlight_ops asusbl_ops = {
33240 .get_brightness = read_brightness,
33241 .update_status = update_bl_status,
33242 };
33243 diff -urNp linux-2.6.32.42/drivers/platform/x86/compal-laptop.c linux-2.6.32.42/drivers/platform/x86/compal-laptop.c
33244 --- linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33245 +++ linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33246 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33247 return set_lcd_level(b->props.brightness);
33248 }
33249
33250 -static struct backlight_ops compalbl_ops = {
33251 +static const struct backlight_ops compalbl_ops = {
33252 .get_brightness = bl_get_brightness,
33253 .update_status = bl_update_status,
33254 };
33255 diff -urNp linux-2.6.32.42/drivers/platform/x86/dell-laptop.c linux-2.6.32.42/drivers/platform/x86/dell-laptop.c
33256 --- linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33257 +++ linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33258 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33259 return buffer.output[1];
33260 }
33261
33262 -static struct backlight_ops dell_ops = {
33263 +static const struct backlight_ops dell_ops = {
33264 .get_brightness = dell_get_intensity,
33265 .update_status = dell_send_intensity,
33266 };
33267 diff -urNp linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c
33268 --- linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33269 +++ linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33270 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33271 */
33272 static int read_brightness(struct backlight_device *bd);
33273 static int update_bl_status(struct backlight_device *bd);
33274 -static struct backlight_ops eeepcbl_ops = {
33275 +static const struct backlight_ops eeepcbl_ops = {
33276 .get_brightness = read_brightness,
33277 .update_status = update_bl_status,
33278 };
33279 diff -urNp linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c
33280 --- linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33281 +++ linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33282 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33283 return ret;
33284 }
33285
33286 -static struct backlight_ops fujitsubl_ops = {
33287 +static const struct backlight_ops fujitsubl_ops = {
33288 .get_brightness = bl_get_brightness,
33289 .update_status = bl_update_status,
33290 };
33291 diff -urNp linux-2.6.32.42/drivers/platform/x86/msi-laptop.c linux-2.6.32.42/drivers/platform/x86/msi-laptop.c
33292 --- linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33293 +++ linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33294 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33295 return set_lcd_level(b->props.brightness);
33296 }
33297
33298 -static struct backlight_ops msibl_ops = {
33299 +static const struct backlight_ops msibl_ops = {
33300 .get_brightness = bl_get_brightness,
33301 .update_status = bl_update_status,
33302 };
33303 diff -urNp linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c
33304 --- linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33305 +++ linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33306 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33307 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33308 }
33309
33310 -static struct backlight_ops pcc_backlight_ops = {
33311 +static const struct backlight_ops pcc_backlight_ops = {
33312 .get_brightness = bl_get,
33313 .update_status = bl_set_status,
33314 };
33315 diff -urNp linux-2.6.32.42/drivers/platform/x86/sony-laptop.c linux-2.6.32.42/drivers/platform/x86/sony-laptop.c
33316 --- linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33317 +++ linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33318 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33319 }
33320
33321 static struct backlight_device *sony_backlight_device;
33322 -static struct backlight_ops sony_backlight_ops = {
33323 +static const struct backlight_ops sony_backlight_ops = {
33324 .update_status = sony_backlight_update_status,
33325 .get_brightness = sony_backlight_get_brightness,
33326 };
33327 diff -urNp linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c
33328 --- linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33329 +++ linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33330 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33331 BACKLIGHT_UPDATE_HOTKEY);
33332 }
33333
33334 -static struct backlight_ops ibm_backlight_data = {
33335 +static const struct backlight_ops ibm_backlight_data = {
33336 .get_brightness = brightness_get,
33337 .update_status = brightness_update_status,
33338 };
33339 diff -urNp linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c
33340 --- linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33341 +++ linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33342 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33343 return AE_OK;
33344 }
33345
33346 -static struct backlight_ops toshiba_backlight_data = {
33347 +static const struct backlight_ops toshiba_backlight_data = {
33348 .get_brightness = get_lcd,
33349 .update_status = set_lcd_status,
33350 };
33351 diff -urNp linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c
33352 --- linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33353 +++ linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33354 @@ -60,7 +60,7 @@ do { \
33355 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33356 } while(0)
33357
33358 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33359 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33360 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33361
33362 /*
33363 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33364
33365 cpu = get_cpu();
33366 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33367 +
33368 + pax_open_kernel();
33369 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33370 + pax_close_kernel();
33371
33372 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33373 spin_lock_irqsave(&pnp_bios_lock, flags);
33374 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33375 :"memory");
33376 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33377
33378 + pax_open_kernel();
33379 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33380 + pax_close_kernel();
33381 +
33382 put_cpu();
33383
33384 /* If we get here and this is set then the PnP BIOS faulted on us. */
33385 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33386 return status;
33387 }
33388
33389 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33390 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33391 {
33392 int i;
33393
33394 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33395 pnp_bios_callpoint.offset = header->fields.pm16offset;
33396 pnp_bios_callpoint.segment = PNP_CS16;
33397
33398 + pax_open_kernel();
33399 +
33400 for_each_possible_cpu(i) {
33401 struct desc_struct *gdt = get_cpu_gdt_table(i);
33402 if (!gdt)
33403 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33404 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33405 (unsigned long)__va(header->fields.pm16dseg));
33406 }
33407 +
33408 + pax_close_kernel();
33409 }
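
The bioscalls.c hunks wrap the GDT updates in pax_open_kernel()/pax_close_kernel(), the PaX helpers that make otherwise read-only kernel data briefly writable for a legitimate modification and then lock it down again. The userspace sketch below mimics that discipline with mprotect(); the open_window()/close_window() names and the fixed 4096-byte page size are assumptions of the example, not the PaX interface.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *descr_page;

static void open_window(void)  { mprotect(descr_page, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(descr_page, 4096, PROT_READ); }

int main(void)
{
	descr_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (descr_page == MAP_FAILED)
		return 1;

	strcpy(descr_page, "original descriptor");
	mprotect(descr_page, 4096, PROT_READ);  /* normally read-only */

	open_window();                          /* writable only while patching */
	strcpy(descr_page, "patched descriptor");
	close_window();

	printf("%s\n", descr_page);
	return 0;
}
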
33410 diff -urNp linux-2.6.32.42/drivers/pnp/resource.c linux-2.6.32.42/drivers/pnp/resource.c
33411 --- linux-2.6.32.42/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33412 +++ linux-2.6.32.42/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33413 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33414 return 1;
33415
33416 /* check if the resource is valid */
33417 - if (*irq < 0 || *irq > 15)
33418 + if (*irq > 15)
33419 return 0;
33420
33421 /* check if the resource is reserved */
33422 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33423 return 1;
33424
33425 /* check if the resource is valid */
33426 - if (*dma < 0 || *dma == 4 || *dma > 7)
33427 + if (*dma == 4 || *dma > 7)
33428 return 0;
33429
33430 /* check if the resource is reserved */
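
The resource.c hunks drop the *irq < 0 and *dma < 0 halves of the range checks, presumably because the values being tested are unsigned, so a less-than-zero comparison can never be true and only earns a compiler warning. A short demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long irq = 9;

	if (irq < 0)            /* always false for an unsigned type (gcc warns) */
		puts("unreachable");
	if (irq > 15)
		puts("out of range");
	else
		puts("valid ISA irq number");
	return 0;
}
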
33431 diff -urNp linux-2.6.32.42/drivers/rtc/rtc-dev.c linux-2.6.32.42/drivers/rtc/rtc-dev.c
33432 --- linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33433 +++ linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33434 @@ -14,6 +14,7 @@
33435 #include <linux/module.h>
33436 #include <linux/rtc.h>
33437 #include <linux/sched.h>
33438 +#include <linux/grsecurity.h>
33439 #include "rtc-core.h"
33440
33441 static dev_t rtc_devt;
33442 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33443 if (copy_from_user(&tm, uarg, sizeof(tm)))
33444 return -EFAULT;
33445
33446 + gr_log_timechange();
33447 +
33448 return rtc_set_time(rtc, &tm);
33449
33450 case RTC_PIE_ON:
33451 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.c linux-2.6.32.42/drivers/s390/cio/qdio_perf.c
33452 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33453 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33454 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33455 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33456 {
33457 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33458 - (long)atomic_long_read(&perf_stats.qdio_int));
33459 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33460 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33461 - (long)atomic_long_read(&perf_stats.pci_int));
33462 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33463 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33464 - (long)atomic_long_read(&perf_stats.thin_int));
33465 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33466 seq_printf(m, "\n");
33467 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33468 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33469 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33470 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33471 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33472 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33473 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33474 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33475 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33476 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33477 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33478 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33479 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33480 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33481 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33482 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33483 seq_printf(m, "\n");
33484 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33485 - (long)atomic_long_read(&perf_stats.siga_in));
33486 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33487 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33488 - (long)atomic_long_read(&perf_stats.siga_out));
33489 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33490 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33491 - (long)atomic_long_read(&perf_stats.siga_sync));
33492 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33493 seq_printf(m, "\n");
33494 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33495 - (long)atomic_long_read(&perf_stats.inbound_handler));
33496 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33497 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33498 - (long)atomic_long_read(&perf_stats.outbound_handler));
33499 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33500 seq_printf(m, "\n");
33501 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33502 - (long)atomic_long_read(&perf_stats.fast_requeue));
33503 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33504 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33505 - (long)atomic_long_read(&perf_stats.outbound_target_full));
33506 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33507 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33508 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33509 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33510 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33511 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
33512 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33513 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33514 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33515 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33516 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33517 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33518 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33519 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33520 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33521 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33522 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33523 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33524 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33525 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33526 seq_printf(m, "\n");
33527 return 0;
33528 }
33529 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.h linux-2.6.32.42/drivers/s390/cio/qdio_perf.h
33530 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33531 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33532 @@ -13,46 +13,46 @@
33533
33534 struct qdio_perf_stats {
33535 /* interrupt handler calls */
33536 - atomic_long_t qdio_int;
33537 - atomic_long_t pci_int;
33538 - atomic_long_t thin_int;
33539 + atomic_long_unchecked_t qdio_int;
33540 + atomic_long_unchecked_t pci_int;
33541 + atomic_long_unchecked_t thin_int;
33542
33543 /* tasklet runs */
33544 - atomic_long_t tasklet_inbound;
33545 - atomic_long_t tasklet_outbound;
33546 - atomic_long_t tasklet_thinint;
33547 - atomic_long_t tasklet_thinint_loop;
33548 - atomic_long_t thinint_inbound;
33549 - atomic_long_t thinint_inbound_loop;
33550 - atomic_long_t thinint_inbound_loop2;
33551 + atomic_long_unchecked_t tasklet_inbound;
33552 + atomic_long_unchecked_t tasklet_outbound;
33553 + atomic_long_unchecked_t tasklet_thinint;
33554 + atomic_long_unchecked_t tasklet_thinint_loop;
33555 + atomic_long_unchecked_t thinint_inbound;
33556 + atomic_long_unchecked_t thinint_inbound_loop;
33557 + atomic_long_unchecked_t thinint_inbound_loop2;
33558
33559 /* signal adapter calls */
33560 - atomic_long_t siga_out;
33561 - atomic_long_t siga_in;
33562 - atomic_long_t siga_sync;
33563 + atomic_long_unchecked_t siga_out;
33564 + atomic_long_unchecked_t siga_in;
33565 + atomic_long_unchecked_t siga_sync;
33566
33567 /* misc */
33568 - atomic_long_t inbound_handler;
33569 - atomic_long_t outbound_handler;
33570 - atomic_long_t fast_requeue;
33571 - atomic_long_t outbound_target_full;
33572 + atomic_long_unchecked_t inbound_handler;
33573 + atomic_long_unchecked_t outbound_handler;
33574 + atomic_long_unchecked_t fast_requeue;
33575 + atomic_long_unchecked_t outbound_target_full;
33576
33577 /* for debugging */
33578 - atomic_long_t debug_tl_out_timer;
33579 - atomic_long_t debug_stop_polling;
33580 - atomic_long_t debug_eqbs_all;
33581 - atomic_long_t debug_eqbs_incomplete;
33582 - atomic_long_t debug_sqbs_all;
33583 - atomic_long_t debug_sqbs_incomplete;
33584 + atomic_long_unchecked_t debug_tl_out_timer;
33585 + atomic_long_unchecked_t debug_stop_polling;
33586 + atomic_long_unchecked_t debug_eqbs_all;
33587 + atomic_long_unchecked_t debug_eqbs_incomplete;
33588 + atomic_long_unchecked_t debug_sqbs_all;
33589 + atomic_long_unchecked_t debug_sqbs_incomplete;
33590 };
33591
33592 extern struct qdio_perf_stats perf_stats;
33593 extern int qdio_performance_stats;
33594
33595 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
33596 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33597 {
33598 if (qdio_performance_stats)
33599 - atomic_long_inc(count);
33600 + atomic_long_inc_unchecked(count);
33601 }
33602
33603 int qdio_setup_perf_stats(void);
33604 diff -urNp linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c
33605 --- linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33606 +++ linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33607 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33608 u32 actual_fibsize64, actual_fibsize = 0;
33609 int i;
33610
33611 + pax_track_stack();
33612
33613 if (dev->in_reset) {
33614 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33615 diff -urNp linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c
33616 --- linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33617 +++ linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33618 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33619 flash_error_table[i].reason);
33620 }
33621
33622 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33623 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33624 asd_show_update_bios, asd_store_update_bios);
33625
33626 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
33627 diff -urNp linux-2.6.32.42/drivers/scsi/BusLogic.c linux-2.6.32.42/drivers/scsi/BusLogic.c
33628 --- linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33629 +++ linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33630 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33631 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33632 *PrototypeHostAdapter)
33633 {
33634 + pax_track_stack();
33635 +
33636 /*
33637 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33638 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33639 diff -urNp linux-2.6.32.42/drivers/scsi/dpt_i2o.c linux-2.6.32.42/drivers/scsi/dpt_i2o.c
33640 --- linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
33641 +++ linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
33642 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33643 dma_addr_t addr;
33644 ulong flags = 0;
33645
33646 + pax_track_stack();
33647 +
33648 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33649 // get user msg size in u32s
33650 if(get_user(size, &user_msg[0])){
33651 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33652 s32 rcode;
33653 dma_addr_t addr;
33654
33655 + pax_track_stack();
33656 +
33657 memset(msg, 0 , sizeof(msg));
33658 len = scsi_bufflen(cmd);
33659 direction = 0x00000000;
33660 diff -urNp linux-2.6.32.42/drivers/scsi/eata.c linux-2.6.32.42/drivers/scsi/eata.c
33661 --- linux-2.6.32.42/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
33662 +++ linux-2.6.32.42/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
33663 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33664 struct hostdata *ha;
33665 char name[16];
33666
33667 + pax_track_stack();
33668 +
33669 sprintf(name, "%s%d", driver_name, j);
33670
33671 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33672 diff -urNp linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c
33673 --- linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
33674 +++ linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
33675 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
33676 size_t rlen;
33677 size_t dlen;
33678
33679 + pax_track_stack();
33680 +
33681 fiph = (struct fip_header *)skb->data;
33682 sub = fiph->fip_subcode;
33683 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
33684 diff -urNp linux-2.6.32.42/drivers/scsi/gdth.c linux-2.6.32.42/drivers/scsi/gdth.c
33685 --- linux-2.6.32.42/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
33686 +++ linux-2.6.32.42/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
33687 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
33688 ulong flags;
33689 gdth_ha_str *ha;
33690
33691 + pax_track_stack();
33692 +
33693 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33694 return -EFAULT;
33695 ha = gdth_find_ha(ldrv.ionode);
33696 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
33697 gdth_ha_str *ha;
33698 int rval;
33699
33700 + pax_track_stack();
33701 +
33702 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33703 res.number >= MAX_HDRIVES)
33704 return -EFAULT;
33705 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
33706 gdth_ha_str *ha;
33707 int rval;
33708
33709 + pax_track_stack();
33710 +
33711 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33712 return -EFAULT;
33713 ha = gdth_find_ha(gen.ionode);
33714 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
33715 int i;
33716 gdth_cmd_str gdtcmd;
33717 char cmnd[MAX_COMMAND_SIZE];
33718 +
33719 + pax_track_stack();
33720 +
33721 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33722
33723 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33724 diff -urNp linux-2.6.32.42/drivers/scsi/gdth_proc.c linux-2.6.32.42/drivers/scsi/gdth_proc.c
33725 --- linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
33726 +++ linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
33727 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
33728 ulong64 paddr;
33729
33730 char cmnd[MAX_COMMAND_SIZE];
33731 +
33732 + pax_track_stack();
33733 +
33734 memset(cmnd, 0xff, 12);
33735 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33736
33737 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
33738 gdth_hget_str *phg;
33739 char cmnd[MAX_COMMAND_SIZE];
33740
33741 + pax_track_stack();
33742 +
33743 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33744 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33745 if (!gdtcmd || !estr)
33746 diff -urNp linux-2.6.32.42/drivers/scsi/hosts.c linux-2.6.32.42/drivers/scsi/hosts.c
33747 --- linux-2.6.32.42/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
33748 +++ linux-2.6.32.42/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
33749 @@ -40,7 +40,7 @@
33750 #include "scsi_logging.h"
33751
33752
33753 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33754 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33755
33756
33757 static void scsi_host_cls_release(struct device *dev)
33758 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33759 * subtract one because we increment first then return, but we need to
33760 * know what the next host number was before increment
33761 */
33762 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33763 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33764 shost->dma_channel = 0xff;
33765
33766 /* These three are default values which can be overridden */
33767 diff -urNp linux-2.6.32.42/drivers/scsi/ipr.c linux-2.6.32.42/drivers/scsi/ipr.c
33768 --- linux-2.6.32.42/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
33769 +++ linux-2.6.32.42/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
33770 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
33771 return true;
33772 }
33773
33774 -static struct ata_port_operations ipr_sata_ops = {
33775 +static const struct ata_port_operations ipr_sata_ops = {
33776 .phy_reset = ipr_ata_phy_reset,
33777 .hardreset = ipr_sata_reset,
33778 .post_internal_cmd = ipr_ata_post_internal,
33779 diff -urNp linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c
33780 --- linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
33781 +++ linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
33782 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
33783 * all together if not used XXX
33784 */
33785 struct {
33786 - atomic_t no_free_exch;
33787 - atomic_t no_free_exch_xid;
33788 - atomic_t xid_not_found;
33789 - atomic_t xid_busy;
33790 - atomic_t seq_not_found;
33791 - atomic_t non_bls_resp;
33792 + atomic_unchecked_t no_free_exch;
33793 + atomic_unchecked_t no_free_exch_xid;
33794 + atomic_unchecked_t xid_not_found;
33795 + atomic_unchecked_t xid_busy;
33796 + atomic_unchecked_t seq_not_found;
33797 + atomic_unchecked_t non_bls_resp;
33798 } stats;
33799 };
33800 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
33801 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
33802 /* allocate memory for exchange */
33803 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33804 if (!ep) {
33805 - atomic_inc(&mp->stats.no_free_exch);
33806 + atomic_inc_unchecked(&mp->stats.no_free_exch);
33807 goto out;
33808 }
33809 memset(ep, 0, sizeof(*ep));
33810 @@ -557,7 +557,7 @@ out:
33811 return ep;
33812 err:
33813 spin_unlock_bh(&pool->lock);
33814 - atomic_inc(&mp->stats.no_free_exch_xid);
33815 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33816 mempool_free(ep, mp->ep_pool);
33817 return NULL;
33818 }
33819 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33820 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33821 ep = fc_exch_find(mp, xid);
33822 if (!ep) {
33823 - atomic_inc(&mp->stats.xid_not_found);
33824 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33825 reject = FC_RJT_OX_ID;
33826 goto out;
33827 }
33828 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33829 ep = fc_exch_find(mp, xid);
33830 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33831 if (ep) {
33832 - atomic_inc(&mp->stats.xid_busy);
33833 + atomic_inc_unchecked(&mp->stats.xid_busy);
33834 reject = FC_RJT_RX_ID;
33835 goto rel;
33836 }
33837 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33838 }
33839 xid = ep->xid; /* get our XID */
33840 } else if (!ep) {
33841 - atomic_inc(&mp->stats.xid_not_found);
33842 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33843 reject = FC_RJT_RX_ID; /* XID not found */
33844 goto out;
33845 }
33846 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33847 } else {
33848 sp = &ep->seq;
33849 if (sp->id != fh->fh_seq_id) {
33850 - atomic_inc(&mp->stats.seq_not_found);
33851 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33852 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33853 goto rel;
33854 }
33855 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
33856
33857 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33858 if (!ep) {
33859 - atomic_inc(&mp->stats.xid_not_found);
33860 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33861 goto out;
33862 }
33863 if (ep->esb_stat & ESB_ST_COMPLETE) {
33864 - atomic_inc(&mp->stats.xid_not_found);
33865 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33866 goto out;
33867 }
33868 if (ep->rxid == FC_XID_UNKNOWN)
33869 ep->rxid = ntohs(fh->fh_rx_id);
33870 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33871 - atomic_inc(&mp->stats.xid_not_found);
33872 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33873 goto rel;
33874 }
33875 if (ep->did != ntoh24(fh->fh_s_id) &&
33876 ep->did != FC_FID_FLOGI) {
33877 - atomic_inc(&mp->stats.xid_not_found);
33878 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33879 goto rel;
33880 }
33881 sof = fr_sof(fp);
33882 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
33883 } else {
33884 sp = &ep->seq;
33885 if (sp->id != fh->fh_seq_id) {
33886 - atomic_inc(&mp->stats.seq_not_found);
33887 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33888 goto rel;
33889 }
33890 }
33891 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
33892 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33893
33894 if (!sp)
33895 - atomic_inc(&mp->stats.xid_not_found);
33896 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33897 else
33898 - atomic_inc(&mp->stats.non_bls_resp);
33899 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
33900
33901 fc_frame_free(fp);
33902 }
33903 diff -urNp linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c
33904 --- linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
33905 +++ linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
33906 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
33907 }
33908 }
33909
33910 -static struct ata_port_operations sas_sata_ops = {
33911 +static const struct ata_port_operations sas_sata_ops = {
33912 .phy_reset = sas_ata_phy_reset,
33913 .post_internal_cmd = sas_ata_post_internal,
33914 .qc_defer = ata_std_qc_defer,
33915 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c
33916 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
33917 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
33918 @@ -124,7 +124,7 @@ struct lpfc_debug {
33919 int len;
33920 };
33921
33922 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33923 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33924 static unsigned long lpfc_debugfs_start_time = 0L;
33925
33926 /**
33927 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33928 lpfc_debugfs_enable = 0;
33929
33930 len = 0;
33931 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33932 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33933 (lpfc_debugfs_max_disc_trc - 1);
33934 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33935 dtp = vport->disc_trc + i;
33936 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33937 lpfc_debugfs_enable = 0;
33938
33939 len = 0;
33940 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33941 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33942 (lpfc_debugfs_max_slow_ring_trc - 1);
33943 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33944 dtp = phba->slow_ring_trc + i;
33945 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33946 uint32_t *ptr;
33947 char buffer[1024];
33948
33949 + pax_track_stack();
33950 +
33951 off = 0;
33952 spin_lock_irq(&phba->hbalock);
33953
33954 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33955 !vport || !vport->disc_trc)
33956 return;
33957
33958 - index = atomic_inc_return(&vport->disc_trc_cnt) &
33959 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33960 (lpfc_debugfs_max_disc_trc - 1);
33961 dtp = vport->disc_trc + index;
33962 dtp->fmt = fmt;
33963 dtp->data1 = data1;
33964 dtp->data2 = data2;
33965 dtp->data3 = data3;
33966 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33967 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33968 dtp->jif = jiffies;
33969 #endif
33970 return;
33971 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33972 !phba || !phba->slow_ring_trc)
33973 return;
33974
33975 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33976 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33977 (lpfc_debugfs_max_slow_ring_trc - 1);
33978 dtp = phba->slow_ring_trc + index;
33979 dtp->fmt = fmt;
33980 dtp->data1 = data1;
33981 dtp->data2 = data2;
33982 dtp->data3 = data3;
33983 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33984 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33985 dtp->jif = jiffies;
33986 #endif
33987 return;
33988 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33989 "slow_ring buffer\n");
33990 goto debug_failed;
33991 }
33992 - atomic_set(&phba->slow_ring_trc_cnt, 0);
33993 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33994 memset(phba->slow_ring_trc, 0,
33995 (sizeof(struct lpfc_debugfs_trc) *
33996 lpfc_debugfs_max_slow_ring_trc));
33997 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33998 "buffer\n");
33999 goto debug_failed;
34000 }
34001 - atomic_set(&vport->disc_trc_cnt, 0);
34002 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34003
34004 snprintf(name, sizeof(name), "discovery_trace");
34005 vport->debug_disc_trc =
34006 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h
34007 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
34008 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
34009 @@ -400,7 +400,7 @@ struct lpfc_vport {
34010 struct dentry *debug_nodelist;
34011 struct dentry *vport_debugfs_root;
34012 struct lpfc_debugfs_trc *disc_trc;
34013 - atomic_t disc_trc_cnt;
34014 + atomic_unchecked_t disc_trc_cnt;
34015 #endif
34016 uint8_t stat_data_enabled;
34017 uint8_t stat_data_blocked;
34018 @@ -725,8 +725,8 @@ struct lpfc_hba {
34019 struct timer_list fabric_block_timer;
34020 unsigned long bit_flags;
34021 #define FABRIC_COMANDS_BLOCKED 0
34022 - atomic_t num_rsrc_err;
34023 - atomic_t num_cmd_success;
34024 + atomic_unchecked_t num_rsrc_err;
34025 + atomic_unchecked_t num_cmd_success;
34026 unsigned long last_rsrc_error_time;
34027 unsigned long last_ramp_down_time;
34028 unsigned long last_ramp_up_time;
34029 @@ -740,7 +740,7 @@ struct lpfc_hba {
34030 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34031 struct dentry *debug_slow_ring_trc;
34032 struct lpfc_debugfs_trc *slow_ring_trc;
34033 - atomic_t slow_ring_trc_cnt;
34034 + atomic_unchecked_t slow_ring_trc_cnt;
34035 #endif
34036
34037 /* Used for deferred freeing of ELS data buffers */
34038 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c
34039 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
34040 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
34041 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34042 uint32_t evt_posted;
34043
34044 spin_lock_irqsave(&phba->hbalock, flags);
34045 - atomic_inc(&phba->num_rsrc_err);
34046 + atomic_inc_unchecked(&phba->num_rsrc_err);
34047 phba->last_rsrc_error_time = jiffies;
34048
34049 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34050 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34051 unsigned long flags;
34052 struct lpfc_hba *phba = vport->phba;
34053 uint32_t evt_posted;
34054 - atomic_inc(&phba->num_cmd_success);
34055 + atomic_inc_unchecked(&phba->num_cmd_success);
34056
34057 if (vport->cfg_lun_queue_depth <= queue_depth)
34058 return;
34059 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34060 int i;
34061 struct lpfc_rport_data *rdata;
34062
34063 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34064 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34065 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34066 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34067
34068 vports = lpfc_create_vport_work_array(phba);
34069 if (vports != NULL)
34070 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34071 }
34072 }
34073 lpfc_destroy_vport_work_array(phba, vports);
34074 - atomic_set(&phba->num_rsrc_err, 0);
34075 - atomic_set(&phba->num_cmd_success, 0);
34076 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34077 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34078 }
34079
34080 /**
34081 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34082 }
34083 }
34084 lpfc_destroy_vport_work_array(phba, vports);
34085 - atomic_set(&phba->num_rsrc_err, 0);
34086 - atomic_set(&phba->num_cmd_success, 0);
34087 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34088 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34089 }
34090
34091 /**
34092 diff -urNp linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c
34093 --- linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
34094 +++ linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
34095 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34096 int rval;
34097 int i;
34098
34099 + pax_track_stack();
34100 +
34101 // Allocate memory for the base list of scb for management module.
34102 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34103
34104 diff -urNp linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c
34105 --- linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
34106 +++ linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
34107 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
34108 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34109 int ret;
34110
34111 + pax_track_stack();
34112 +
34113 or = osd_start_request(od, GFP_KERNEL);
34114 if (!or)
34115 return -ENOMEM;
34116 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.c linux-2.6.32.42/drivers/scsi/pmcraid.c
34117 --- linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
34118 +++ linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
34119 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
34120 res->scsi_dev = scsi_dev;
34121 scsi_dev->hostdata = res;
34122 res->change_detected = 0;
34123 - atomic_set(&res->read_failures, 0);
34124 - atomic_set(&res->write_failures, 0);
34125 + atomic_set_unchecked(&res->read_failures, 0);
34126 + atomic_set_unchecked(&res->write_failures, 0);
34127 rc = 0;
34128 }
34129 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34130 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
34131
34132 /* If this was a SCSI read/write command keep count of errors */
34133 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34134 - atomic_inc(&res->read_failures);
34135 + atomic_inc_unchecked(&res->read_failures);
34136 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34137 - atomic_inc(&res->write_failures);
34138 + atomic_inc_unchecked(&res->write_failures);
34139
34140 if (!RES_IS_GSCSI(res->cfg_entry) &&
34141 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34142 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
34143
34144 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34145 /* add resources only after host is added into system */
34146 - if (!atomic_read(&pinstance->expose_resources))
34147 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34148 return;
34149
34150 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
34151 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
34152 init_waitqueue_head(&pinstance->reset_wait_q);
34153
34154 atomic_set(&pinstance->outstanding_cmds, 0);
34155 - atomic_set(&pinstance->expose_resources, 0);
34156 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34157
34158 INIT_LIST_HEAD(&pinstance->free_res_q);
34159 INIT_LIST_HEAD(&pinstance->used_res_q);
34160 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
34161 /* Schedule worker thread to handle CCN and take care of adding and
34162 * removing devices to OS
34163 */
34164 - atomic_set(&pinstance->expose_resources, 1);
34165 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34166 schedule_work(&pinstance->worker_q);
34167 return rc;
34168
34169 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.h linux-2.6.32.42/drivers/scsi/pmcraid.h
34170 --- linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
34171 +++ linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
34172 @@ -690,7 +690,7 @@ struct pmcraid_instance {
34173 atomic_t outstanding_cmds;
34174
34175 /* should add/delete resources to mid-layer now ?*/
34176 - atomic_t expose_resources;
34177 + atomic_unchecked_t expose_resources;
34178
34179 /* Tasklet to handle deferred processing */
34180 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
34181 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
34182 struct list_head queue; /* link to "to be exposed" resources */
34183 struct pmcraid_config_table_entry cfg_entry;
34184 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34185 - atomic_t read_failures; /* count of failed READ commands */
34186 - atomic_t write_failures; /* count of failed WRITE commands */
34187 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34188 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34189
34190 /* To indicate add/delete/modify during CCN */
34191 u8 change_detected;
34192 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h
34193 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
34194 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
34195 @@ -240,7 +240,7 @@ struct ddb_entry {
34196 atomic_t retry_relogin_timer; /* Min Time between relogins
34197 * (4000 only) */
34198 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34199 - atomic_t relogin_retry_count; /* Num of times relogin has been
34200 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34201 * retried */
34202
34203 uint16_t port;
34204 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c
34205 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
34206 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
34207 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
34208 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
34209 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34210 atomic_set(&ddb_entry->relogin_timer, 0);
34211 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34212 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34213 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34214 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34215 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34216 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
34217 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34218 atomic_set(&ddb_entry->port_down_timer,
34219 ha->port_down_retry_count);
34220 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34221 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34222 atomic_set(&ddb_entry->relogin_timer, 0);
34223 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34224 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
34225 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c
34226 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
34227 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
34228 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
34229 ddb_entry->fw_ddb_device_state ==
34230 DDB_DS_SESSION_FAILED) {
34231 /* Reset retry relogin timer */
34232 - atomic_inc(&ddb_entry->relogin_retry_count);
34233 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34234 DEBUG2(printk("scsi%ld: index[%d] relogin"
34235 " timed out-retrying"
34236 " relogin (%d)\n",
34237 ha->host_no,
34238 ddb_entry->fw_ddb_index,
34239 - atomic_read(&ddb_entry->
34240 + atomic_read_unchecked(&ddb_entry->
34241 relogin_retry_count))
34242 );
34243 start_dpc++;
34244 diff -urNp linux-2.6.32.42/drivers/scsi/scsi.c linux-2.6.32.42/drivers/scsi/scsi.c
34245 --- linux-2.6.32.42/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34246 +++ linux-2.6.32.42/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34247 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34248 unsigned long timeout;
34249 int rtn = 0;
34250
34251 - atomic_inc(&cmd->device->iorequest_cnt);
34252 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34253
34254 /* check if the device is still usable */
34255 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34256 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_debug.c linux-2.6.32.42/drivers/scsi/scsi_debug.c
34257 --- linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34258 +++ linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34259 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34260 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34261 unsigned char *cmd = (unsigned char *)scp->cmnd;
34262
34263 + pax_track_stack();
34264 +
34265 if ((errsts = check_readiness(scp, 1, devip)))
34266 return errsts;
34267 memset(arr, 0, sizeof(arr));
34268 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34269 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34270 unsigned char *cmd = (unsigned char *)scp->cmnd;
34271
34272 + pax_track_stack();
34273 +
34274 if ((errsts = check_readiness(scp, 1, devip)))
34275 return errsts;
34276 memset(arr, 0, sizeof(arr));
34277 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_lib.c linux-2.6.32.42/drivers/scsi/scsi_lib.c
34278 --- linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34279 +++ linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34280 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34281
34282 scsi_init_cmd_errh(cmd);
34283 cmd->result = DID_NO_CONNECT << 16;
34284 - atomic_inc(&cmd->device->iorequest_cnt);
34285 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34286
34287 /*
34288 * SCSI request completion path will do scsi_device_unbusy(),
34289 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34290 */
34291 cmd->serial_number = 0;
34292
34293 - atomic_inc(&cmd->device->iodone_cnt);
34294 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34295 if (cmd->result)
34296 - atomic_inc(&cmd->device->ioerr_cnt);
34297 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34298
34299 disposition = scsi_decide_disposition(cmd);
34300 if (disposition != SUCCESS &&
34301 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_sysfs.c linux-2.6.32.42/drivers/scsi/scsi_sysfs.c
34302 --- linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
34303 +++ linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
34304 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
34305 char *buf) \
34306 { \
34307 struct scsi_device *sdev = to_scsi_device(dev); \
34308 - unsigned long long count = atomic_read(&sdev->field); \
34309 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34310 return snprintf(buf, 20, "0x%llx\n", count); \
34311 } \
34312 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34313 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c
34314 --- linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34315 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34316 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34317 * Netlink Infrastructure
34318 */
34319
34320 -static atomic_t fc_event_seq;
34321 +static atomic_unchecked_t fc_event_seq;
34322
34323 /**
34324 * fc_get_event_number - Obtain the next sequential FC event number
34325 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34326 u32
34327 fc_get_event_number(void)
34328 {
34329 - return atomic_add_return(1, &fc_event_seq);
34330 + return atomic_add_return_unchecked(1, &fc_event_seq);
34331 }
34332 EXPORT_SYMBOL(fc_get_event_number);
34333
34334 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34335 {
34336 int error;
34337
34338 - atomic_set(&fc_event_seq, 0);
34339 + atomic_set_unchecked(&fc_event_seq, 0);
34340
34341 error = transport_class_register(&fc_host_class);
34342 if (error)
34343 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c
34344 --- linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34345 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34346 @@ -81,7 +81,7 @@ struct iscsi_internal {
34347 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34348 };
34349
34350 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34351 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34352 static struct workqueue_struct *iscsi_eh_timer_workq;
34353
34354 /*
34355 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34356 int err;
34357
34358 ihost = shost->shost_data;
34359 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34360 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34361
34362 if (id == ISCSI_MAX_TARGET) {
34363 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34364 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34365 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34366 ISCSI_TRANSPORT_VERSION);
34367
34368 - atomic_set(&iscsi_session_nr, 0);
34369 + atomic_set_unchecked(&iscsi_session_nr, 0);
34370
34371 err = class_register(&iscsi_transport_class);
34372 if (err)
34373 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c
34374 --- linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34375 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34376 @@ -33,7 +33,7 @@
34377 #include "scsi_transport_srp_internal.h"
34378
34379 struct srp_host_attrs {
34380 - atomic_t next_port_id;
34381 + atomic_unchecked_t next_port_id;
34382 };
34383 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34384
34385 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34386 struct Scsi_Host *shost = dev_to_shost(dev);
34387 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34388
34389 - atomic_set(&srp_host->next_port_id, 0);
34390 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34391 return 0;
34392 }
34393
34394 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34395 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34396 rport->roles = ids->roles;
34397
34398 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34399 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34400 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34401
34402 transport_setup_device(&rport->dev);
34403 diff -urNp linux-2.6.32.42/drivers/scsi/sg.c linux-2.6.32.42/drivers/scsi/sg.c
34404 --- linux-2.6.32.42/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34405 +++ linux-2.6.32.42/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34406 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34407 const struct file_operations * fops;
34408 };
34409
34410 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34411 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34412 {"allow_dio", &adio_fops},
34413 {"debug", &debug_fops},
34414 {"def_reserved_size", &dressz_fops},
34415 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34416 {
34417 int k, mask;
34418 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34419 - struct sg_proc_leaf * leaf;
34420 + const struct sg_proc_leaf * leaf;
34421
34422 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34423 if (!sg_proc_sgp)
34424 diff -urNp linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c
34425 --- linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34426 +++ linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34427 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34428 int do_iounmap = 0;
34429 int do_disable_device = 1;
34430
34431 + pax_track_stack();
34432 +
34433 memset(&sym_dev, 0, sizeof(sym_dev));
34434 memset(&nvram, 0, sizeof(nvram));
34435 sym_dev.pdev = pdev;
34436 diff -urNp linux-2.6.32.42/drivers/serial/kgdboc.c linux-2.6.32.42/drivers/serial/kgdboc.c
34437 --- linux-2.6.32.42/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34438 +++ linux-2.6.32.42/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34439 @@ -18,7 +18,7 @@
34440
34441 #define MAX_CONFIG_LEN 40
34442
34443 -static struct kgdb_io kgdboc_io_ops;
34444 +static const struct kgdb_io kgdboc_io_ops;
34445
34446 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34447 static int configured = -1;
34448 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34449 module_put(THIS_MODULE);
34450 }
34451
34452 -static struct kgdb_io kgdboc_io_ops = {
34453 +static const struct kgdb_io kgdboc_io_ops = {
34454 .name = "kgdboc",
34455 .read_char = kgdboc_get_char,
34456 .write_char = kgdboc_put_char,
34457 diff -urNp linux-2.6.32.42/drivers/spi/spi.c linux-2.6.32.42/drivers/spi/spi.c
34458 --- linux-2.6.32.42/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34459 +++ linux-2.6.32.42/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34460 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34461 EXPORT_SYMBOL_GPL(spi_sync);
34462
34463 /* portable code must never pass more than 32 bytes */
34464 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34465 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34466
34467 static u8 *buf;
34468
34469 diff -urNp linux-2.6.32.42/drivers/staging/android/binder.c linux-2.6.32.42/drivers/staging/android/binder.c
34470 --- linux-2.6.32.42/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34471 +++ linux-2.6.32.42/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34472 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34473 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34474 }
34475
34476 -static struct vm_operations_struct binder_vm_ops = {
34477 +static const struct vm_operations_struct binder_vm_ops = {
34478 .open = binder_vma_open,
34479 .close = binder_vma_close,
34480 };
34481 diff -urNp linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c
34482 --- linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34483 +++ linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34484 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34485 return VM_FAULT_NOPAGE;
34486 }
34487
34488 -static struct vm_operations_struct b3dfg_vm_ops = {
34489 +static const struct vm_operations_struct b3dfg_vm_ops = {
34490 .fault = b3dfg_vma_fault,
34491 };
34492
34493 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34494 return r;
34495 }
34496
34497 -static struct file_operations b3dfg_fops = {
34498 +static const struct file_operations b3dfg_fops = {
34499 .owner = THIS_MODULE,
34500 .open = b3dfg_open,
34501 .release = b3dfg_release,
34502 diff -urNp linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c
34503 --- linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34504 +++ linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34505 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34506 mutex_unlock(&dev->mutex);
34507 }
34508
34509 -static struct vm_operations_struct comedi_vm_ops = {
34510 +static const struct vm_operations_struct comedi_vm_ops = {
34511 .close = comedi_unmap,
34512 };
34513
34514 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c
34515 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34516 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34517 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34518 static dev_t adsp_devno;
34519 static struct class *adsp_class;
34520
34521 -static struct file_operations adsp_fops = {
34522 +static const struct file_operations adsp_fops = {
34523 .owner = THIS_MODULE,
34524 .open = adsp_open,
34525 .unlocked_ioctl = adsp_ioctl,
34526 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c
34527 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34528 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34529 @@ -1022,7 +1022,7 @@ done:
34530 return rc;
34531 }
34532
34533 -static struct file_operations audio_aac_fops = {
34534 +static const struct file_operations audio_aac_fops = {
34535 .owner = THIS_MODULE,
34536 .open = audio_open,
34537 .release = audio_release,
34538 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c
34539 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34540 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34541 @@ -833,7 +833,7 @@ done:
34542 return rc;
34543 }
34544
34545 -static struct file_operations audio_amrnb_fops = {
34546 +static const struct file_operations audio_amrnb_fops = {
34547 .owner = THIS_MODULE,
34548 .open = audamrnb_open,
34549 .release = audamrnb_release,
34550 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c
34551 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34552 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34553 @@ -805,7 +805,7 @@ dma_fail:
34554 return rc;
34555 }
34556
34557 -static struct file_operations audio_evrc_fops = {
34558 +static const struct file_operations audio_evrc_fops = {
34559 .owner = THIS_MODULE,
34560 .open = audevrc_open,
34561 .release = audevrc_release,
34562 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c
34563 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34564 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34565 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34566 return 0;
34567 }
34568
34569 -static struct file_operations audio_fops = {
34570 +static const struct file_operations audio_fops = {
34571 .owner = THIS_MODULE,
34572 .open = audio_in_open,
34573 .release = audio_in_release,
34574 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
34575 .unlocked_ioctl = audio_in_ioctl,
34576 };
34577
34578 -static struct file_operations audpre_fops = {
34579 +static const struct file_operations audpre_fops = {
34580 .owner = THIS_MODULE,
34581 .open = audpre_open,
34582 .unlocked_ioctl = audpre_ioctl,
34583 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c
34584 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34585 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34586 @@ -941,7 +941,7 @@ done:
34587 return rc;
34588 }
34589
34590 -static struct file_operations audio_mp3_fops = {
34591 +static const struct file_operations audio_mp3_fops = {
34592 .owner = THIS_MODULE,
34593 .open = audio_open,
34594 .release = audio_release,
34595 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c
34596 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34597 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34598 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34599 return 0;
34600 }
34601
34602 -static struct file_operations audio_fops = {
34603 +static const struct file_operations audio_fops = {
34604 .owner = THIS_MODULE,
34605 .open = audio_open,
34606 .release = audio_release,
34607 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
34608 .unlocked_ioctl = audio_ioctl,
34609 };
34610
34611 -static struct file_operations audpp_fops = {
34612 +static const struct file_operations audpp_fops = {
34613 .owner = THIS_MODULE,
34614 .open = audpp_open,
34615 .unlocked_ioctl = audpp_ioctl,
34616 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c
34617 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34618 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34619 @@ -816,7 +816,7 @@ err:
34620 return rc;
34621 }
34622
34623 -static struct file_operations audio_qcelp_fops = {
34624 +static const struct file_operations audio_qcelp_fops = {
34625 .owner = THIS_MODULE,
34626 .open = audqcelp_open,
34627 .release = audqcelp_release,
34628 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c
34629 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34630 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34631 @@ -242,7 +242,7 @@ err:
34632 return rc;
34633 }
34634
34635 -static struct file_operations snd_fops = {
34636 +static const struct file_operations snd_fops = {
34637 .owner = THIS_MODULE,
34638 .open = snd_open,
34639 .release = snd_release,
34640 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c
34641 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
34642 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
34643 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
34644 return 0;
34645 }
34646
34647 -static struct file_operations qmi_fops = {
34648 +static const struct file_operations qmi_fops = {
34649 .owner = THIS_MODULE,
34650 .read = qmi_read,
34651 .write = qmi_write,
34652 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c
34653 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
34654 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
34655 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
34656 return rc;
34657 }
34658
34659 -static struct file_operations rpcrouter_server_fops = {
34660 +static const struct file_operations rpcrouter_server_fops = {
34661 .owner = THIS_MODULE,
34662 .open = rpcrouter_open,
34663 .release = rpcrouter_release,
34664 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
34665 .unlocked_ioctl = rpcrouter_ioctl,
34666 };
34667
34668 -static struct file_operations rpcrouter_router_fops = {
34669 +static const struct file_operations rpcrouter_router_fops = {
34670 .owner = THIS_MODULE,
34671 .open = rpcrouter_open,
34672 .release = rpcrouter_release,
34673 diff -urNp linux-2.6.32.42/drivers/staging/dst/dcore.c linux-2.6.32.42/drivers/staging/dst/dcore.c
34674 --- linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
34675 +++ linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
34676 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
34677 return 0;
34678 }
34679
34680 -static struct block_device_operations dst_blk_ops = {
34681 +static const struct block_device_operations dst_blk_ops = {
34682 .open = dst_bdev_open,
34683 .release = dst_bdev_release,
34684 .owner = THIS_MODULE,
34685 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
34686 n->size = ctl->size;
34687
34688 atomic_set(&n->refcnt, 1);
34689 - atomic_long_set(&n->gen, 0);
34690 + atomic_long_set_unchecked(&n->gen, 0);
34691 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
34692
34693 err = dst_node_sysfs_init(n);
34694 diff -urNp linux-2.6.32.42/drivers/staging/dst/trans.c linux-2.6.32.42/drivers/staging/dst/trans.c
34695 --- linux-2.6.32.42/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
34696 +++ linux-2.6.32.42/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
34697 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
34698 t->error = 0;
34699 t->retries = 0;
34700 atomic_set(&t->refcnt, 1);
34701 - t->gen = atomic_long_inc_return(&n->gen);
34702 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
34703
34704 t->enc = bio_data_dir(bio);
34705 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
34706 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c
34707 --- linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
34708 +++ linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
34709 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
34710 struct net_device_stats *stats = &etdev->net_stats;
34711
34712 if (pMpTcb->Flags & fMP_DEST_BROAD)
34713 - atomic_inc(&etdev->Stats.brdcstxmt);
34714 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34715 else if (pMpTcb->Flags & fMP_DEST_MULTI)
34716 - atomic_inc(&etdev->Stats.multixmt);
34717 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34718 else
34719 - atomic_inc(&etdev->Stats.unixmt);
34720 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34721
34722 if (pMpTcb->Packet) {
34723 stats->tx_bytes += pMpTcb->Packet->len;
34724 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h
34725 --- linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
34726 +++ linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
34727 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
34728 * operations
34729 */
34730 u32 unircv; /* # multicast packets received */
34731 - atomic_t unixmt; /* # multicast packets for Tx */
34732 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34733 u32 multircv; /* # multicast packets received */
34734 - atomic_t multixmt; /* # multicast packets for Tx */
34735 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34736 u32 brdcstrcv; /* # broadcast packets received */
34737 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34738 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34739 u32 norcvbuf; /* # Rx packets discarded */
34740 u32 noxmtbuf; /* # Tx packets discarded */
34741
34742 diff -urNp linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c
34743 --- linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
34744 +++ linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
34745 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
34746 return 0;
34747 }
34748
34749 -static struct vm_operations_struct go7007_vm_ops = {
34750 +static const struct vm_operations_struct go7007_vm_ops = {
34751 .open = go7007_vm_open,
34752 .close = go7007_vm_close,
34753 .fault = go7007_vm_fault,
34754 diff -urNp linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c
34755 --- linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
34756 +++ linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
34757 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
34758 /* The one and only one */
34759 static struct blkvsc_driver_context g_blkvsc_drv;
34760
34761 -static struct block_device_operations block_ops = {
34762 +static const struct block_device_operations block_ops = {
34763 .owner = THIS_MODULE,
34764 .open = blkvsc_open,
34765 .release = blkvsc_release,
34766 diff -urNp linux-2.6.32.42/drivers/staging/hv/Channel.c linux-2.6.32.42/drivers/staging/hv/Channel.c
34767 --- linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
34768 +++ linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
34769 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
34770
34771 DPRINT_ENTER(VMBUS);
34772
34773 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
34774 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
34775 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
34776 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
34777
34778 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
34779 ASSERT(msgInfo != NULL);
34780 diff -urNp linux-2.6.32.42/drivers/staging/hv/Hv.c linux-2.6.32.42/drivers/staging/hv/Hv.c
34781 --- linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
34782 +++ linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
34783 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
34784 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
34785 u32 outputAddressHi = outputAddress >> 32;
34786 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
34787 - volatile void *hypercallPage = gHvContext.HypercallPage;
34788 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
34789
34790 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
34791 Control, Input, Output);
34792 diff -urNp linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c
34793 --- linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
34794 +++ linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
34795 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
34796 to_device_context(root_device_obj);
34797 struct device_context *child_device_ctx =
34798 to_device_context(child_device_obj);
34799 - static atomic_t device_num = ATOMIC_INIT(0);
34800 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34801
34802 DPRINT_ENTER(VMBUS_DRV);
34803
34804 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
34805
34806 /* Set the device name. Otherwise, device_register() will fail. */
34807 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
34808 - atomic_inc_return(&device_num));
34809 + atomic_inc_return_unchecked(&device_num));
34810
34811 /* The new device belongs to this bus */
34812 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
34813 diff -urNp linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h
34814 --- linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
34815 +++ linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
34816 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
34817 struct VMBUS_CONNECTION {
34818 enum VMBUS_CONNECT_STATE ConnectState;
34819
34820 - atomic_t NextGpadlHandle;
34821 + atomic_unchecked_t NextGpadlHandle;
34822
34823 /*
34824 * Represents channel interrupts. Each bit position represents a
34825 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet.c linux-2.6.32.42/drivers/staging/octeon/ethernet.c
34826 --- linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
34827 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
34828 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
34829 * since the RX tasklet also increments it.
34830 */
34831 #ifdef CONFIG_64BIT
34832 - atomic64_add(rx_status.dropped_packets,
34833 - (atomic64_t *)&priv->stats.rx_dropped);
34834 + atomic64_add_unchecked(rx_status.dropped_packets,
34835 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34836 #else
34837 - atomic_add(rx_status.dropped_packets,
34838 - (atomic_t *)&priv->stats.rx_dropped);
34839 + atomic_add_unchecked(rx_status.dropped_packets,
34840 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34841 #endif
34842 }
34843
34844 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c
34845 --- linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
34846 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
34847 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
34848 /* Increment RX stats for virtual ports */
34849 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34850 #ifdef CONFIG_64BIT
34851 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34852 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34853 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34854 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34855 #else
34856 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34857 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34858 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34859 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34860 #endif
34861 }
34862 netif_receive_skb(skb);
34863 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
34864 dev->name);
34865 */
34866 #ifdef CONFIG_64BIT
34867 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34868 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
34869 #else
34870 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34871 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
34872 #endif
34873 dev_kfree_skb_irq(skb);
34874 }
34875 diff -urNp linux-2.6.32.42/drivers/staging/panel/panel.c linux-2.6.32.42/drivers/staging/panel/panel.c
34876 --- linux-2.6.32.42/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
34877 +++ linux-2.6.32.42/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
34878 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
34879 return 0;
34880 }
34881
34882 -static struct file_operations lcd_fops = {
34883 +static const struct file_operations lcd_fops = {
34884 .write = lcd_write,
34885 .open = lcd_open,
34886 .release = lcd_release,
34887 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
34888 return 0;
34889 }
34890
34891 -static struct file_operations keypad_fops = {
34892 +static const struct file_operations keypad_fops = {
34893 .read = keypad_read, /* read */
34894 .open = keypad_open, /* open */
34895 .release = keypad_release, /* close */
34896 diff -urNp linux-2.6.32.42/drivers/staging/phison/phison.c linux-2.6.32.42/drivers/staging/phison/phison.c
34897 --- linux-2.6.32.42/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
34898 +++ linux-2.6.32.42/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
34899 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
34900 ATA_BMDMA_SHT(DRV_NAME),
34901 };
34902
34903 -static struct ata_port_operations phison_ops = {
34904 +static const struct ata_port_operations phison_ops = {
34905 .inherits = &ata_bmdma_port_ops,
34906 .prereset = phison_pre_reset,
34907 };
34908 diff -urNp linux-2.6.32.42/drivers/staging/poch/poch.c linux-2.6.32.42/drivers/staging/poch/poch.c
34909 --- linux-2.6.32.42/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
34910 +++ linux-2.6.32.42/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
34911 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
34912 return 0;
34913 }
34914
34915 -static struct file_operations poch_fops = {
34916 +static const struct file_operations poch_fops = {
34917 .owner = THIS_MODULE,
34918 .open = poch_open,
34919 .release = poch_release,
34920 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/inode.c linux-2.6.32.42/drivers/staging/pohmelfs/inode.c
34921 --- linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
34922 +++ linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
34923 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
34924 mutex_init(&psb->mcache_lock);
34925 psb->mcache_root = RB_ROOT;
34926 psb->mcache_timeout = msecs_to_jiffies(5000);
34927 - atomic_long_set(&psb->mcache_gen, 0);
34928 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
34929
34930 psb->trans_max_pages = 100;
34931
34932 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
34933 INIT_LIST_HEAD(&psb->crypto_ready_list);
34934 INIT_LIST_HEAD(&psb->crypto_active_list);
34935
34936 - atomic_set(&psb->trans_gen, 1);
34937 + atomic_set_unchecked(&psb->trans_gen, 1);
34938 atomic_long_set(&psb->total_inodes, 0);
34939
34940 mutex_init(&psb->state_lock);
34941 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c
34942 --- linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
34943 +++ linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
34944 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34945 m->data = data;
34946 m->start = start;
34947 m->size = size;
34948 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
34949 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34950
34951 mutex_lock(&psb->mcache_lock);
34952 err = pohmelfs_mcache_insert(psb, m);
34953 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h
34954 --- linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
34955 +++ linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
34956 @@ -570,14 +570,14 @@ struct pohmelfs_config;
34957 struct pohmelfs_sb {
34958 struct rb_root mcache_root;
34959 struct mutex mcache_lock;
34960 - atomic_long_t mcache_gen;
34961 + atomic_long_unchecked_t mcache_gen;
34962 unsigned long mcache_timeout;
34963
34964 unsigned int idx;
34965
34966 unsigned int trans_retries;
34967
34968 - atomic_t trans_gen;
34969 + atomic_unchecked_t trans_gen;
34970
34971 unsigned int crypto_attached_size;
34972 unsigned int crypto_align_size;
34973 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/trans.c linux-2.6.32.42/drivers/staging/pohmelfs/trans.c
34974 --- linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
34975 +++ linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
34976 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34977 int err;
34978 struct netfs_cmd *cmd = t->iovec.iov_base;
34979
34980 - t->gen = atomic_inc_return(&psb->trans_gen);
34981 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34982
34983 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34984 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34985 diff -urNp linux-2.6.32.42/drivers/staging/sep/sep_driver.c linux-2.6.32.42/drivers/staging/sep/sep_driver.c
34986 --- linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
34987 +++ linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
34988 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
34989 static dev_t sep_devno;
34990
34991 /* the files operations structure of the driver */
34992 -static struct file_operations sep_file_operations = {
34993 +static const struct file_operations sep_file_operations = {
34994 .owner = THIS_MODULE,
34995 .ioctl = sep_ioctl,
34996 .poll = sep_poll,
34997 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci.h linux-2.6.32.42/drivers/staging/usbip/vhci.h
34998 --- linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
34999 +++ linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
35000 @@ -92,7 +92,7 @@ struct vhci_hcd {
35001 unsigned resuming:1;
35002 unsigned long re_timeout;
35003
35004 - atomic_t seqnum;
35005 + atomic_unchecked_t seqnum;
35006
35007 /*
35008 * NOTE:
35009 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c
35010 --- linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
35011 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
35012 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
35013 return;
35014 }
35015
35016 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35017 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35018 if (priv->seqnum == 0xffff)
35019 usbip_uinfo("seqnum max\n");
35020
35021 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
35022 return -ENOMEM;
35023 }
35024
35025 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35026 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35027 if (unlink->seqnum == 0xffff)
35028 usbip_uinfo("seqnum max\n");
35029
35030 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
35031 vdev->rhport = rhport;
35032 }
35033
35034 - atomic_set(&vhci->seqnum, 0);
35035 + atomic_set_unchecked(&vhci->seqnum, 0);
35036 spin_lock_init(&vhci->lock);
35037
35038
35039 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c
35040 --- linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
35041 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
35042 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
35043 usbip_uerr("cannot find a urb of seqnum %u\n",
35044 pdu->base.seqnum);
35045 usbip_uinfo("max seqnum %d\n",
35046 - atomic_read(&the_controller->seqnum));
35047 + atomic_read_unchecked(&the_controller->seqnum));
35048 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35049 return;
35050 }
35051 diff -urNp linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c
35052 --- linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
35053 +++ linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
35054 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
35055 static int __init vme_user_probe(struct device *, int, int);
35056 static int __exit vme_user_remove(struct device *, int, int);
35057
35058 -static struct file_operations vme_user_fops = {
35059 +static const struct file_operations vme_user_fops = {
35060 .open = vme_user_open,
35061 .release = vme_user_release,
35062 .read = vme_user_read,
35063 diff -urNp linux-2.6.32.42/drivers/telephony/ixj.c linux-2.6.32.42/drivers/telephony/ixj.c
35064 --- linux-2.6.32.42/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
35065 +++ linux-2.6.32.42/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
35066 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35067 bool mContinue;
35068 char *pIn, *pOut;
35069
35070 + pax_track_stack();
35071 +
35072 if (!SCI_Prepare(j))
35073 return 0;
35074
35075 diff -urNp linux-2.6.32.42/drivers/uio/uio.c linux-2.6.32.42/drivers/uio/uio.c
35076 --- linux-2.6.32.42/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
35077 +++ linux-2.6.32.42/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
35078 @@ -23,6 +23,7 @@
35079 #include <linux/string.h>
35080 #include <linux/kobject.h>
35081 #include <linux/uio_driver.h>
35082 +#include <asm/local.h>
35083
35084 #define UIO_MAX_DEVICES 255
35085
35086 @@ -30,10 +31,10 @@ struct uio_device {
35087 struct module *owner;
35088 struct device *dev;
35089 int minor;
35090 - atomic_t event;
35091 + atomic_unchecked_t event;
35092 struct fasync_struct *async_queue;
35093 wait_queue_head_t wait;
35094 - int vma_count;
35095 + local_t vma_count;
35096 struct uio_info *info;
35097 struct kobject *map_dir;
35098 struct kobject *portio_dir;
35099 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
35100 return entry->show(mem, buf);
35101 }
35102
35103 -static struct sysfs_ops map_sysfs_ops = {
35104 +static const struct sysfs_ops map_sysfs_ops = {
35105 .show = map_type_show,
35106 };
35107
35108 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
35109 return entry->show(port, buf);
35110 }
35111
35112 -static struct sysfs_ops portio_sysfs_ops = {
35113 +static const struct sysfs_ops portio_sysfs_ops = {
35114 .show = portio_type_show,
35115 };
35116
35117 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
35118 struct uio_device *idev = dev_get_drvdata(dev);
35119 if (idev)
35120 return sprintf(buf, "%u\n",
35121 - (unsigned int)atomic_read(&idev->event));
35122 + (unsigned int)atomic_read_unchecked(&idev->event));
35123 else
35124 return -ENODEV;
35125 }
35126 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
35127 {
35128 struct uio_device *idev = info->uio_dev;
35129
35130 - atomic_inc(&idev->event);
35131 + atomic_inc_unchecked(&idev->event);
35132 wake_up_interruptible(&idev->wait);
35133 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35134 }
35135 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
35136 }
35137
35138 listener->dev = idev;
35139 - listener->event_count = atomic_read(&idev->event);
35140 + listener->event_count = atomic_read_unchecked(&idev->event);
35141 filep->private_data = listener;
35142
35143 if (idev->info->open) {
35144 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
35145 return -EIO;
35146
35147 poll_wait(filep, &idev->wait, wait);
35148 - if (listener->event_count != atomic_read(&idev->event))
35149 + if (listener->event_count != atomic_read_unchecked(&idev->event))
35150 return POLLIN | POLLRDNORM;
35151 return 0;
35152 }
35153 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
35154 do {
35155 set_current_state(TASK_INTERRUPTIBLE);
35156
35157 - event_count = atomic_read(&idev->event);
35158 + event_count = atomic_read_unchecked(&idev->event);
35159 if (event_count != listener->event_count) {
35160 if (copy_to_user(buf, &event_count, count))
35161 retval = -EFAULT;
35162 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
35163 static void uio_vma_open(struct vm_area_struct *vma)
35164 {
35165 struct uio_device *idev = vma->vm_private_data;
35166 - idev->vma_count++;
35167 + local_inc(&idev->vma_count);
35168 }
35169
35170 static void uio_vma_close(struct vm_area_struct *vma)
35171 {
35172 struct uio_device *idev = vma->vm_private_data;
35173 - idev->vma_count--;
35174 + local_dec(&idev->vma_count);
35175 }
35176
35177 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35178 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
35179 idev->owner = owner;
35180 idev->info = info;
35181 init_waitqueue_head(&idev->wait);
35182 - atomic_set(&idev->event, 0);
35183 + atomic_set_unchecked(&idev->event, 0);
35184
35185 ret = uio_get_minor(idev);
35186 if (ret)
35187 diff -urNp linux-2.6.32.42/drivers/usb/atm/usbatm.c linux-2.6.32.42/drivers/usb/atm/usbatm.c
35188 --- linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
35189 +++ linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
35190 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
35191 if (printk_ratelimit())
35192 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35193 __func__, vpi, vci);
35194 - atomic_inc(&vcc->stats->rx_err);
35195 + atomic_inc_unchecked(&vcc->stats->rx_err);
35196 return;
35197 }
35198
35199 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
35200 if (length > ATM_MAX_AAL5_PDU) {
35201 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35202 __func__, length, vcc);
35203 - atomic_inc(&vcc->stats->rx_err);
35204 + atomic_inc_unchecked(&vcc->stats->rx_err);
35205 goto out;
35206 }
35207
35208 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
35209 if (sarb->len < pdu_length) {
35210 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35211 __func__, pdu_length, sarb->len, vcc);
35212 - atomic_inc(&vcc->stats->rx_err);
35213 + atomic_inc_unchecked(&vcc->stats->rx_err);
35214 goto out;
35215 }
35216
35217 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35218 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35219 __func__, vcc);
35220 - atomic_inc(&vcc->stats->rx_err);
35221 + atomic_inc_unchecked(&vcc->stats->rx_err);
35222 goto out;
35223 }
35224
35225 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
35226 if (printk_ratelimit())
35227 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35228 __func__, length);
35229 - atomic_inc(&vcc->stats->rx_drop);
35230 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35231 goto out;
35232 }
35233
35234 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
35235
35236 vcc->push(vcc, skb);
35237
35238 - atomic_inc(&vcc->stats->rx);
35239 + atomic_inc_unchecked(&vcc->stats->rx);
35240 out:
35241 skb_trim(sarb, 0);
35242 }
35243 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35244 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35245
35246 usbatm_pop(vcc, skb);
35247 - atomic_inc(&vcc->stats->tx);
35248 + atomic_inc_unchecked(&vcc->stats->tx);
35249
35250 skb = skb_dequeue(&instance->sndqueue);
35251 }
35252 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35253 if (!left--)
35254 return sprintf(page,
35255 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35256 - atomic_read(&atm_dev->stats.aal5.tx),
35257 - atomic_read(&atm_dev->stats.aal5.tx_err),
35258 - atomic_read(&atm_dev->stats.aal5.rx),
35259 - atomic_read(&atm_dev->stats.aal5.rx_err),
35260 - atomic_read(&atm_dev->stats.aal5.rx_drop));
35261 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35262 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35263 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35264 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35265 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35266
35267 if (!left--) {
35268 if (instance->disconnected)
35269 diff -urNp linux-2.6.32.42/drivers/usb/class/cdc-wdm.c linux-2.6.32.42/drivers/usb/class/cdc-wdm.c
35270 --- linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35271 +++ linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35272 @@ -314,7 +314,7 @@ static ssize_t wdm_write
35273 if (r < 0)
35274 goto outnp;
35275
35276 - if (!file->f_flags && O_NONBLOCK)
35277 + if (!(file->f_flags & O_NONBLOCK))
35278 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35279 &desc->flags));
35280 else
35281 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.c linux-2.6.32.42/drivers/usb/core/hcd.c
35282 --- linux-2.6.32.42/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35283 +++ linux-2.6.32.42/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35284 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35285
35286 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35287
35288 -struct usb_mon_operations *mon_ops;
35289 +const struct usb_mon_operations *mon_ops;
35290
35291 /*
35292 * The registration is unlocked.
35293 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35294 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35295 */
35296
35297 -int usb_mon_register (struct usb_mon_operations *ops)
35298 +int usb_mon_register (const struct usb_mon_operations *ops)
35299 {
35300
35301 if (mon_ops)
35302 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.h linux-2.6.32.42/drivers/usb/core/hcd.h
35303 --- linux-2.6.32.42/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35304 +++ linux-2.6.32.42/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35305 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35306 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35307
35308 struct usb_mon_operations {
35309 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35310 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35311 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35312 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35313 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35314 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35315 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35316 };
35317
35318 -extern struct usb_mon_operations *mon_ops;
35319 +extern const struct usb_mon_operations *mon_ops;
35320
35321 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35322 {
35323 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35324 (*mon_ops->urb_complete)(bus, urb, status);
35325 }
35326
35327 -int usb_mon_register(struct usb_mon_operations *ops);
35328 +int usb_mon_register(const struct usb_mon_operations *ops);
35329 void usb_mon_deregister(void);
35330
35331 #else
35332 diff -urNp linux-2.6.32.42/drivers/usb/core/message.c linux-2.6.32.42/drivers/usb/core/message.c
35333 --- linux-2.6.32.42/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35334 +++ linux-2.6.32.42/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35335 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35336 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35337 if (buf) {
35338 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35339 - if (len > 0) {
35340 - smallbuf = kmalloc(++len, GFP_NOIO);
35341 + if (len++ > 0) {
35342 + smallbuf = kmalloc(len, GFP_NOIO);
35343 if (!smallbuf)
35344 return buf;
35345 memcpy(smallbuf, buf, len);
35346 diff -urNp linux-2.6.32.42/drivers/usb/misc/appledisplay.c linux-2.6.32.42/drivers/usb/misc/appledisplay.c
35347 --- linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35348 +++ linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35349 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35350 return pdata->msgdata[1];
35351 }
35352
35353 -static struct backlight_ops appledisplay_bl_data = {
35354 +static const struct backlight_ops appledisplay_bl_data = {
35355 .get_brightness = appledisplay_bl_get_brightness,
35356 .update_status = appledisplay_bl_update_status,
35357 };
35358 diff -urNp linux-2.6.32.42/drivers/usb/mon/mon_main.c linux-2.6.32.42/drivers/usb/mon/mon_main.c
35359 --- linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35360 +++ linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35361 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35362 /*
35363 * Ops
35364 */
35365 -static struct usb_mon_operations mon_ops_0 = {
35366 +static const struct usb_mon_operations mon_ops_0 = {
35367 .urb_submit = mon_submit,
35368 .urb_submit_error = mon_submit_error,
35369 .urb_complete = mon_complete,
35370 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h
35371 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35372 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35373 @@ -192,7 +192,7 @@ struct wahc {
35374 struct list_head xfer_delayed_list;
35375 spinlock_t xfer_list_lock;
35376 struct work_struct xfer_work;
35377 - atomic_t xfer_id_count;
35378 + atomic_unchecked_t xfer_id_count;
35379 };
35380
35381
35382 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35383 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35384 spin_lock_init(&wa->xfer_list_lock);
35385 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35386 - atomic_set(&wa->xfer_id_count, 1);
35387 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35388 }
35389
35390 /**
35391 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c
35392 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35393 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35394 @@ -293,7 +293,7 @@ out:
35395 */
35396 static void wa_xfer_id_init(struct wa_xfer *xfer)
35397 {
35398 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35399 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35400 }
35401
35402 /*
35403 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/messages.c linux-2.6.32.42/drivers/uwb/wlp/messages.c
35404 --- linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35405 +++ linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35406 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35407 size_t len = skb->len;
35408 size_t used;
35409 ssize_t result;
35410 - struct wlp_nonce enonce, rnonce;
35411 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35412 enum wlp_assc_error assc_err;
35413 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35414 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35415 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/sysfs.c linux-2.6.32.42/drivers/uwb/wlp/sysfs.c
35416 --- linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35417 +++ linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35418 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35419 return ret;
35420 }
35421
35422 -static
35423 -struct sysfs_ops wss_sysfs_ops = {
35424 +static const struct sysfs_ops wss_sysfs_ops = {
35425 .show = wlp_wss_attr_show,
35426 .store = wlp_wss_attr_store,
35427 };
35428 diff -urNp linux-2.6.32.42/drivers/video/atmel_lcdfb.c linux-2.6.32.42/drivers/video/atmel_lcdfb.c
35429 --- linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35430 +++ linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35431 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35432 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35433 }
35434
35435 -static struct backlight_ops atmel_lcdc_bl_ops = {
35436 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35437 .update_status = atmel_bl_update_status,
35438 .get_brightness = atmel_bl_get_brightness,
35439 };
35440 diff -urNp linux-2.6.32.42/drivers/video/aty/aty128fb.c linux-2.6.32.42/drivers/video/aty/aty128fb.c
35441 --- linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35442 +++ linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35443 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35444 return bd->props.brightness;
35445 }
35446
35447 -static struct backlight_ops aty128_bl_data = {
35448 +static const struct backlight_ops aty128_bl_data = {
35449 .get_brightness = aty128_bl_get_brightness,
35450 .update_status = aty128_bl_update_status,
35451 };
35452 diff -urNp linux-2.6.32.42/drivers/video/aty/atyfb_base.c linux-2.6.32.42/drivers/video/aty/atyfb_base.c
35453 --- linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35454 +++ linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35455 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35456 return bd->props.brightness;
35457 }
35458
35459 -static struct backlight_ops aty_bl_data = {
35460 +static const struct backlight_ops aty_bl_data = {
35461 .get_brightness = aty_bl_get_brightness,
35462 .update_status = aty_bl_update_status,
35463 };
35464 diff -urNp linux-2.6.32.42/drivers/video/aty/radeon_backlight.c linux-2.6.32.42/drivers/video/aty/radeon_backlight.c
35465 --- linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35466 +++ linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35467 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35468 return bd->props.brightness;
35469 }
35470
35471 -static struct backlight_ops radeon_bl_data = {
35472 +static const struct backlight_ops radeon_bl_data = {
35473 .get_brightness = radeon_bl_get_brightness,
35474 .update_status = radeon_bl_update_status,
35475 };
35476 diff -urNp linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c
35477 --- linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35478 +++ linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35479 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35480 return error ? data->current_brightness : reg_val;
35481 }
35482
35483 -static struct backlight_ops adp5520_bl_ops = {
35484 +static const struct backlight_ops adp5520_bl_ops = {
35485 .update_status = adp5520_bl_update_status,
35486 .get_brightness = adp5520_bl_get_brightness,
35487 };
35488 diff -urNp linux-2.6.32.42/drivers/video/backlight/adx_bl.c linux-2.6.32.42/drivers/video/backlight/adx_bl.c
35489 --- linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35490 +++ linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35491 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35492 return 1;
35493 }
35494
35495 -static struct backlight_ops adx_backlight_ops = {
35496 +static const struct backlight_ops adx_backlight_ops = {
35497 .options = 0,
35498 .update_status = adx_backlight_update_status,
35499 .get_brightness = adx_backlight_get_brightness,
35500 diff -urNp linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c
35501 --- linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35502 +++ linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35503 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35504 return pwm_channel_enable(&pwmbl->pwmc);
35505 }
35506
35507 -static struct backlight_ops atmel_pwm_bl_ops = {
35508 +static const struct backlight_ops atmel_pwm_bl_ops = {
35509 .get_brightness = atmel_pwm_bl_get_intensity,
35510 .update_status = atmel_pwm_bl_set_intensity,
35511 };
35512 diff -urNp linux-2.6.32.42/drivers/video/backlight/backlight.c linux-2.6.32.42/drivers/video/backlight/backlight.c
35513 --- linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35514 +++ linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35515 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35516 * ERR_PTR() or a pointer to the newly allocated device.
35517 */
35518 struct backlight_device *backlight_device_register(const char *name,
35519 - struct device *parent, void *devdata, struct backlight_ops *ops)
35520 + struct device *parent, void *devdata, const struct backlight_ops *ops)
35521 {
35522 struct backlight_device *new_bd;
35523 int rc;
35524 diff -urNp linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c
35525 --- linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35526 +++ linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35527 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35528 }
35529 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35530
35531 -static struct backlight_ops corgi_bl_ops = {
35532 +static const struct backlight_ops corgi_bl_ops = {
35533 .get_brightness = corgi_bl_get_intensity,
35534 .update_status = corgi_bl_update_status,
35535 };
35536 diff -urNp linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c
35537 --- linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35538 +++ linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35539 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35540 return intensity;
35541 }
35542
35543 -static struct backlight_ops cr_backlight_ops = {
35544 +static const struct backlight_ops cr_backlight_ops = {
35545 .get_brightness = cr_backlight_get_intensity,
35546 .update_status = cr_backlight_set_intensity,
35547 };
35548 diff -urNp linux-2.6.32.42/drivers/video/backlight/da903x_bl.c linux-2.6.32.42/drivers/video/backlight/da903x_bl.c
35549 --- linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35550 +++ linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35551 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35552 return data->current_brightness;
35553 }
35554
35555 -static struct backlight_ops da903x_backlight_ops = {
35556 +static const struct backlight_ops da903x_backlight_ops = {
35557 .update_status = da903x_backlight_update_status,
35558 .get_brightness = da903x_backlight_get_brightness,
35559 };
35560 diff -urNp linux-2.6.32.42/drivers/video/backlight/generic_bl.c linux-2.6.32.42/drivers/video/backlight/generic_bl.c
35561 --- linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35562 +++ linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35563 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35564 }
35565 EXPORT_SYMBOL(corgibl_limit_intensity);
35566
35567 -static struct backlight_ops genericbl_ops = {
35568 +static const struct backlight_ops genericbl_ops = {
35569 .options = BL_CORE_SUSPENDRESUME,
35570 .get_brightness = genericbl_get_intensity,
35571 .update_status = genericbl_send_intensity,
35572 diff -urNp linux-2.6.32.42/drivers/video/backlight/hp680_bl.c linux-2.6.32.42/drivers/video/backlight/hp680_bl.c
35573 --- linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35574 +++ linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35575 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35576 return current_intensity;
35577 }
35578
35579 -static struct backlight_ops hp680bl_ops = {
35580 +static const struct backlight_ops hp680bl_ops = {
35581 .get_brightness = hp680bl_get_intensity,
35582 .update_status = hp680bl_set_intensity,
35583 };
35584 diff -urNp linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c
35585 --- linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35586 +++ linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35587 @@ -93,7 +93,7 @@ out:
35588 return ret;
35589 }
35590
35591 -static struct backlight_ops jornada_bl_ops = {
35592 +static const struct backlight_ops jornada_bl_ops = {
35593 .get_brightness = jornada_bl_get_brightness,
35594 .update_status = jornada_bl_update_status,
35595 .options = BL_CORE_SUSPENDRESUME,
35596 diff -urNp linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c
35597 --- linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35598 +++ linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35599 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35600 return kb3886bl_intensity;
35601 }
35602
35603 -static struct backlight_ops kb3886bl_ops = {
35604 +static const struct backlight_ops kb3886bl_ops = {
35605 .get_brightness = kb3886bl_get_intensity,
35606 .update_status = kb3886bl_send_intensity,
35607 };
35608 diff -urNp linux-2.6.32.42/drivers/video/backlight/locomolcd.c linux-2.6.32.42/drivers/video/backlight/locomolcd.c
35609 --- linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35610 +++ linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35611 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35612 return current_intensity;
35613 }
35614
35615 -static struct backlight_ops locomobl_data = {
35616 +static const struct backlight_ops locomobl_data = {
35617 .get_brightness = locomolcd_get_intensity,
35618 .update_status = locomolcd_set_intensity,
35619 };
35620 diff -urNp linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c
35621 --- linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35622 +++ linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35623 @@ -33,7 +33,7 @@ struct dmi_match_data {
35624 unsigned long iostart;
35625 unsigned long iolen;
35626 /* Backlight operations structure. */
35627 - struct backlight_ops backlight_ops;
35628 + const struct backlight_ops backlight_ops;
35629 };
35630
35631 /* Module parameters. */
35632 diff -urNp linux-2.6.32.42/drivers/video/backlight/omap1_bl.c linux-2.6.32.42/drivers/video/backlight/omap1_bl.c
35633 --- linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
35634 +++ linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
35635 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
35636 return bl->current_intensity;
35637 }
35638
35639 -static struct backlight_ops omapbl_ops = {
35640 +static const struct backlight_ops omapbl_ops = {
35641 .get_brightness = omapbl_get_intensity,
35642 .update_status = omapbl_update_status,
35643 };
35644 diff -urNp linux-2.6.32.42/drivers/video/backlight/progear_bl.c linux-2.6.32.42/drivers/video/backlight/progear_bl.c
35645 --- linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
35646 +++ linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
35647 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
35648 return intensity - HW_LEVEL_MIN;
35649 }
35650
35651 -static struct backlight_ops progearbl_ops = {
35652 +static const struct backlight_ops progearbl_ops = {
35653 .get_brightness = progearbl_get_intensity,
35654 .update_status = progearbl_set_intensity,
35655 };
35656 diff -urNp linux-2.6.32.42/drivers/video/backlight/pwm_bl.c linux-2.6.32.42/drivers/video/backlight/pwm_bl.c
35657 --- linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
35658 +++ linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
35659 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
35660 return bl->props.brightness;
35661 }
35662
35663 -static struct backlight_ops pwm_backlight_ops = {
35664 +static const struct backlight_ops pwm_backlight_ops = {
35665 .update_status = pwm_backlight_update_status,
35666 .get_brightness = pwm_backlight_get_brightness,
35667 };
35668 diff -urNp linux-2.6.32.42/drivers/video/backlight/tosa_bl.c linux-2.6.32.42/drivers/video/backlight/tosa_bl.c
35669 --- linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
35670 +++ linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
35671 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
35672 return props->brightness;
35673 }
35674
35675 -static struct backlight_ops bl_ops = {
35676 +static const struct backlight_ops bl_ops = {
35677 .get_brightness = tosa_bl_get_brightness,
35678 .update_status = tosa_bl_update_status,
35679 };
35680 diff -urNp linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c
35681 --- linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
35682 +++ linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
35683 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
35684 return data->current_brightness;
35685 }
35686
35687 -static struct backlight_ops wm831x_backlight_ops = {
35688 +static const struct backlight_ops wm831x_backlight_ops = {
35689 .options = BL_CORE_SUSPENDRESUME,
35690 .update_status = wm831x_backlight_update_status,
35691 .get_brightness = wm831x_backlight_get_brightness,
35692 diff -urNp linux-2.6.32.42/drivers/video/bf54x-lq043fb.c linux-2.6.32.42/drivers/video/bf54x-lq043fb.c
35693 --- linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
35694 +++ linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
35695 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
35696 return 0;
35697 }
35698
35699 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35700 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35701 .get_brightness = bl_get_brightness,
35702 };
35703
35704 diff -urNp linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c
35705 --- linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
35706 +++ linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
35707 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
35708 return 0;
35709 }
35710
35711 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35712 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35713 .get_brightness = bl_get_brightness,
35714 };
35715
35716 diff -urNp linux-2.6.32.42/drivers/video/fbcmap.c linux-2.6.32.42/drivers/video/fbcmap.c
35717 --- linux-2.6.32.42/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
35718 +++ linux-2.6.32.42/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
35719 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35720 rc = -ENODEV;
35721 goto out;
35722 }
35723 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35724 - !info->fbops->fb_setcmap)) {
35725 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35726 rc = -EINVAL;
35727 goto out1;
35728 }
35729 diff -urNp linux-2.6.32.42/drivers/video/fbmem.c linux-2.6.32.42/drivers/video/fbmem.c
35730 --- linux-2.6.32.42/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
35731 +++ linux-2.6.32.42/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
35732 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
35733 image->dx += image->width + 8;
35734 }
35735 } else if (rotate == FB_ROTATE_UD) {
35736 - for (x = 0; x < num && image->dx >= 0; x++) {
35737 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35738 info->fbops->fb_imageblit(info, image);
35739 image->dx -= image->width + 8;
35740 }
35741 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
35742 image->dy += image->height + 8;
35743 }
35744 } else if (rotate == FB_ROTATE_CCW) {
35745 - for (x = 0; x < num && image->dy >= 0; x++) {
35746 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35747 info->fbops->fb_imageblit(info, image);
35748 image->dy -= image->height + 8;
35749 }
35750 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
35751 int flags = info->flags;
35752 int ret = 0;
35753
35754 + pax_track_stack();
35755 +
35756 if (var->activate & FB_ACTIVATE_INV_MODE) {
35757 struct fb_videomode mode1, mode2;
35758
35759 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
35760 void __user *argp = (void __user *)arg;
35761 long ret = 0;
35762
35763 + pax_track_stack();
35764 +
35765 switch (cmd) {
35766 case FBIOGET_VSCREENINFO:
35767 if (!lock_fb_info(info))
35768 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
35769 return -EFAULT;
35770 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35771 return -EINVAL;
35772 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35773 + if (con2fb.framebuffer >= FB_MAX)
35774 return -EINVAL;
35775 if (!registered_fb[con2fb.framebuffer])
35776 request_module("fb%d", con2fb.framebuffer);
35777 diff -urNp linux-2.6.32.42/drivers/video/i810/i810_accel.c linux-2.6.32.42/drivers/video/i810/i810_accel.c
35778 --- linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
35779 +++ linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
35780 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35781 }
35782 }
35783 printk("ringbuffer lockup!!!\n");
35784 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35785 i810_report_error(mmio);
35786 par->dev_flags |= LOCKUP;
35787 info->pixmap.scan_align = 1;
35788 diff -urNp linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c
35789 --- linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
35790 +++ linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
35791 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
35792 return bd->props.brightness;
35793 }
35794
35795 -static struct backlight_ops nvidia_bl_ops = {
35796 +static const struct backlight_ops nvidia_bl_ops = {
35797 .get_brightness = nvidia_bl_get_brightness,
35798 .update_status = nvidia_bl_update_status,
35799 };
35800 diff -urNp linux-2.6.32.42/drivers/video/riva/fbdev.c linux-2.6.32.42/drivers/video/riva/fbdev.c
35801 --- linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
35802 +++ linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
35803 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
35804 return bd->props.brightness;
35805 }
35806
35807 -static struct backlight_ops riva_bl_ops = {
35808 +static const struct backlight_ops riva_bl_ops = {
35809 .get_brightness = riva_bl_get_brightness,
35810 .update_status = riva_bl_update_status,
35811 };
35812 diff -urNp linux-2.6.32.42/drivers/video/uvesafb.c linux-2.6.32.42/drivers/video/uvesafb.c
35813 --- linux-2.6.32.42/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
35814 +++ linux-2.6.32.42/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
35815 @@ -18,6 +18,7 @@
35816 #include <linux/fb.h>
35817 #include <linux/io.h>
35818 #include <linux/mutex.h>
35819 +#include <linux/moduleloader.h>
35820 #include <video/edid.h>
35821 #include <video/uvesafb.h>
35822 #ifdef CONFIG_X86
35823 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
35824 NULL,
35825 };
35826
35827 - return call_usermodehelper(v86d_path, argv, envp, 1);
35828 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
35829 }
35830
35831 /*
35832 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
35833 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
35834 par->pmi_setpal = par->ypan = 0;
35835 } else {
35836 +
35837 +#ifdef CONFIG_PAX_KERNEXEC
35838 +#ifdef CONFIG_MODULES
35839 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
35840 +#endif
35841 + if (!par->pmi_code) {
35842 + par->pmi_setpal = par->ypan = 0;
35843 + return 0;
35844 + }
35845 +#endif
35846 +
35847 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
35848 + task->t.regs.edi);
35849 +
35850 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35851 + pax_open_kernel();
35852 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
35853 + pax_close_kernel();
35854 +
35855 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
35856 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
35857 +#else
35858 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
35859 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
35860 +#endif
35861 +
35862 printk(KERN_INFO "uvesafb: protected mode interface info at "
35863 "%04x:%04x\n",
35864 (u16)task->t.regs.es, (u16)task->t.regs.edi);
35865 @@ -1799,6 +1822,11 @@ out:
35866 if (par->vbe_modes)
35867 kfree(par->vbe_modes);
35868
35869 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35870 + if (par->pmi_code)
35871 + module_free_exec(NULL, par->pmi_code);
35872 +#endif
35873 +
35874 framebuffer_release(info);
35875 return err;
35876 }
35877 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
35878 kfree(par->vbe_state_orig);
35879 if (par->vbe_state_saved)
35880 kfree(par->vbe_state_saved);
35881 +
35882 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35883 + if (par->pmi_code)
35884 + module_free_exec(NULL, par->pmi_code);
35885 +#endif
35886 +
35887 }
35888
35889 framebuffer_release(info);
35890 diff -urNp linux-2.6.32.42/drivers/video/vesafb.c linux-2.6.32.42/drivers/video/vesafb.c
35891 --- linux-2.6.32.42/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
35892 +++ linux-2.6.32.42/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
35893 @@ -9,6 +9,7 @@
35894 */
35895
35896 #include <linux/module.h>
35897 +#include <linux/moduleloader.h>
35898 #include <linux/kernel.h>
35899 #include <linux/errno.h>
35900 #include <linux/string.h>
35901 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
35902 static int vram_total __initdata; /* Set total amount of memory */
35903 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
35904 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
35905 -static void (*pmi_start)(void) __read_mostly;
35906 -static void (*pmi_pal) (void) __read_mostly;
35907 +static void (*pmi_start)(void) __read_only;
35908 +static void (*pmi_pal) (void) __read_only;
35909 static int depth __read_mostly;
35910 static int vga_compat __read_mostly;
35911 /* --------------------------------------------------------------------- */
35912 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
35913 unsigned int size_vmode;
35914 unsigned int size_remap;
35915 unsigned int size_total;
35916 + void *pmi_code = NULL;
35917
35918 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
35919 return -ENODEV;
35920 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
35921 size_remap = size_total;
35922 vesafb_fix.smem_len = size_remap;
35923
35924 -#ifndef __i386__
35925 - screen_info.vesapm_seg = 0;
35926 -#endif
35927 -
35928 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
35929 printk(KERN_WARNING
35930 "vesafb: cannot reserve video memory at 0x%lx\n",
35931 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
35932 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
35933 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
35934
35935 +#ifdef __i386__
35936 +
35937 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35938 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
35939 + if (!pmi_code)
35940 +#elif !defined(CONFIG_PAX_KERNEXEC)
35941 + if (0)
35942 +#endif
35943 +
35944 +#endif
35945 + screen_info.vesapm_seg = 0;
35946 +
35947 if (screen_info.vesapm_seg) {
35948 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
35949 - screen_info.vesapm_seg,screen_info.vesapm_off);
35950 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
35951 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
35952 }
35953
35954 if (screen_info.vesapm_seg < 0xc000)
35955 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
35956
35957 if (ypan || pmi_setpal) {
35958 unsigned short *pmi_base;
35959 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35960 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
35961 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
35962 +
35963 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35964 +
35965 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35966 + pax_open_kernel();
35967 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
35968 +#else
35969 + pmi_code = pmi_base;
35970 +#endif
35971 +
35972 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
35973 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
35974 +
35975 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35976 + pmi_start = ktva_ktla(pmi_start);
35977 + pmi_pal = ktva_ktla(pmi_pal);
35978 + pax_close_kernel();
35979 +#endif
35980 +
35981 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
35982 if (pmi_base[3]) {
35983 printk(KERN_INFO "vesafb: pmi: ports = ");
35984 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
35985 info->node, info->fix.id);
35986 return 0;
35987 err:
35988 +
35989 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35990 + module_free_exec(NULL, pmi_code);
35991 +#endif
35992 +
35993 if (info->screen_base)
35994 iounmap(info->screen_base);
35995 framebuffer_release(info);
35996 diff -urNp linux-2.6.32.42/drivers/xen/sys-hypervisor.c linux-2.6.32.42/drivers/xen/sys-hypervisor.c
35997 --- linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
35998 +++ linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
35999 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
36000 return 0;
36001 }
36002
36003 -static struct sysfs_ops hyp_sysfs_ops = {
36004 +static const struct sysfs_ops hyp_sysfs_ops = {
36005 .show = hyp_sysfs_show,
36006 .store = hyp_sysfs_store,
36007 };
36008 diff -urNp linux-2.6.32.42/fs/9p/vfs_inode.c linux-2.6.32.42/fs/9p/vfs_inode.c
36009 --- linux-2.6.32.42/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
36010 +++ linux-2.6.32.42/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
36011 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
36012 static void
36013 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36014 {
36015 - char *s = nd_get_link(nd);
36016 + const char *s = nd_get_link(nd);
36017
36018 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36019 IS_ERR(s) ? "<error>" : s);
36020 diff -urNp linux-2.6.32.42/fs/aio.c linux-2.6.32.42/fs/aio.c
36021 --- linux-2.6.32.42/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
36022 +++ linux-2.6.32.42/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
36023 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
36024 size += sizeof(struct io_event) * nr_events;
36025 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36026
36027 - if (nr_pages < 0)
36028 + if (nr_pages <= 0)
36029 return -EINVAL;
36030
36031 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36032 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
36033 struct aio_timeout to;
36034 int retry = 0;
36035
36036 + pax_track_stack();
36037 +
36038 /* needed to zero any padding within an entry (there shouldn't be
36039 * any, but C is fun!
36040 */
36041 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
36042 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
36043 {
36044 ssize_t ret;
36045 + struct iovec iovstack;
36046
36047 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
36048 kiocb->ki_nbytes, 1,
36049 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
36050 + &iovstack, &kiocb->ki_iovec);
36051 if (ret < 0)
36052 goto out;
36053
36054 + if (kiocb->ki_iovec == &iovstack) {
36055 + kiocb->ki_inline_vec = iovstack;
36056 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
36057 + }
36058 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36059 kiocb->ki_cur_seg = 0;
36060 /* ki_nbytes/left now reflect bytes instead of segs */
36061 diff -urNp linux-2.6.32.42/fs/attr.c linux-2.6.32.42/fs/attr.c
36062 --- linux-2.6.32.42/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
36063 +++ linux-2.6.32.42/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
36064 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
36065 unsigned long limit;
36066
36067 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
36068 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36069 if (limit != RLIM_INFINITY && offset > limit)
36070 goto out_sig;
36071 if (offset > inode->i_sb->s_maxbytes)
36072 diff -urNp linux-2.6.32.42/fs/autofs/root.c linux-2.6.32.42/fs/autofs/root.c
36073 --- linux-2.6.32.42/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
36074 +++ linux-2.6.32.42/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
36075 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
36076 set_bit(n,sbi->symlink_bitmap);
36077 sl = &sbi->symlink[n];
36078 sl->len = strlen(symname);
36079 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
36080 + slsize = sl->len+1;
36081 + sl->data = kmalloc(slsize, GFP_KERNEL);
36082 if (!sl->data) {
36083 clear_bit(n,sbi->symlink_bitmap);
36084 unlock_kernel();
36085 diff -urNp linux-2.6.32.42/fs/autofs4/symlink.c linux-2.6.32.42/fs/autofs4/symlink.c
36086 --- linux-2.6.32.42/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
36087 +++ linux-2.6.32.42/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
36088 @@ -15,7 +15,7 @@
36089 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
36090 {
36091 struct autofs_info *ino = autofs4_dentry_ino(dentry);
36092 - nd_set_link(nd, (char *)ino->u.symlink);
36093 + nd_set_link(nd, ino->u.symlink);
36094 return NULL;
36095 }
36096
36097 diff -urNp linux-2.6.32.42/fs/befs/linuxvfs.c linux-2.6.32.42/fs/befs/linuxvfs.c
36098 --- linux-2.6.32.42/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
36099 +++ linux-2.6.32.42/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
36100 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
36101 {
36102 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36103 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36104 - char *link = nd_get_link(nd);
36105 + const char *link = nd_get_link(nd);
36106 if (!IS_ERR(link))
36107 kfree(link);
36108 }
36109 diff -urNp linux-2.6.32.42/fs/binfmt_aout.c linux-2.6.32.42/fs/binfmt_aout.c
36110 --- linux-2.6.32.42/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
36111 +++ linux-2.6.32.42/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
36112 @@ -16,6 +16,7 @@
36113 #include <linux/string.h>
36114 #include <linux/fs.h>
36115 #include <linux/file.h>
36116 +#include <linux/security.h>
36117 #include <linux/stat.h>
36118 #include <linux/fcntl.h>
36119 #include <linux/ptrace.h>
36120 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
36121 #endif
36122 # define START_STACK(u) (u.start_stack)
36123
36124 + memset(&dump, 0, sizeof(dump));
36125 +
36126 fs = get_fs();
36127 set_fs(KERNEL_DS);
36128 has_dumped = 1;
36129 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
36130
36131 /* If the size of the dump file exceeds the rlimit, then see what would happen
36132 if we wrote the stack, but not the data area. */
36133 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36134 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
36135 dump.u_dsize = 0;
36136
36137 /* Make sure we have enough room to write the stack and data areas. */
36138 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36139 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
36140 dump.u_ssize = 0;
36141
36142 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
36143 dump_size = dump.u_ssize << PAGE_SHIFT;
36144 DUMP_WRITE(dump_start,dump_size);
36145 }
36146 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
36147 - set_fs(KERNEL_DS);
36148 - DUMP_WRITE(current,sizeof(*current));
36149 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
36150 end_coredump:
36151 set_fs(fs);
36152 return has_dumped;
36153 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
36154 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
36155 if (rlim >= RLIM_INFINITY)
36156 rlim = ~0;
36157 +
36158 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36159 if (ex.a_data + ex.a_bss > rlim)
36160 return -ENOMEM;
36161
36162 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
36163 install_exec_creds(bprm);
36164 current->flags &= ~PF_FORKNOEXEC;
36165
36166 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36167 + current->mm->pax_flags = 0UL;
36168 +#endif
36169 +
36170 +#ifdef CONFIG_PAX_PAGEEXEC
36171 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36172 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36173 +
36174 +#ifdef CONFIG_PAX_EMUTRAMP
36175 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36176 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36177 +#endif
36178 +
36179 +#ifdef CONFIG_PAX_MPROTECT
36180 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36181 + current->mm->pax_flags |= MF_PAX_MPROTECT;
36182 +#endif
36183 +
36184 + }
36185 +#endif
36186 +
36187 if (N_MAGIC(ex) == OMAGIC) {
36188 unsigned long text_addr, map_size;
36189 loff_t pos;
36190 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
36191
36192 down_write(&current->mm->mmap_sem);
36193 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36194 - PROT_READ | PROT_WRITE | PROT_EXEC,
36195 + PROT_READ | PROT_WRITE,
36196 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36197 fd_offset + ex.a_text);
36198 up_write(&current->mm->mmap_sem);
36199 diff -urNp linux-2.6.32.42/fs/binfmt_elf.c linux-2.6.32.42/fs/binfmt_elf.c
36200 --- linux-2.6.32.42/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
36201 +++ linux-2.6.32.42/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
36202 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
36203 #define elf_core_dump NULL
36204 #endif
36205
36206 +#ifdef CONFIG_PAX_MPROTECT
36207 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36208 +#endif
36209 +
36210 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36211 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36212 #else
36213 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
36214 .load_binary = load_elf_binary,
36215 .load_shlib = load_elf_library,
36216 .core_dump = elf_core_dump,
36217 +
36218 +#ifdef CONFIG_PAX_MPROTECT
36219 + .handle_mprotect= elf_handle_mprotect,
36220 +#endif
36221 +
36222 .min_coredump = ELF_EXEC_PAGESIZE,
36223 .hasvdso = 1
36224 };
36225 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36226
36227 static int set_brk(unsigned long start, unsigned long end)
36228 {
36229 + unsigned long e = end;
36230 +
36231 start = ELF_PAGEALIGN(start);
36232 end = ELF_PAGEALIGN(end);
36233 if (end > start) {
36234 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36235 if (BAD_ADDR(addr))
36236 return addr;
36237 }
36238 - current->mm->start_brk = current->mm->brk = end;
36239 + current->mm->start_brk = current->mm->brk = e;
36240 return 0;
36241 }
36242
36243 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36244 elf_addr_t __user *u_rand_bytes;
36245 const char *k_platform = ELF_PLATFORM;
36246 const char *k_base_platform = ELF_BASE_PLATFORM;
36247 - unsigned char k_rand_bytes[16];
36248 + u32 k_rand_bytes[4];
36249 int items;
36250 elf_addr_t *elf_info;
36251 int ei_index = 0;
36252 const struct cred *cred = current_cred();
36253 struct vm_area_struct *vma;
36254 + unsigned long saved_auxv[AT_VECTOR_SIZE];
36255 +
36256 + pax_track_stack();
36257
36258 /*
36259 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36260 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36261 * Generate 16 random bytes for userspace PRNG seeding.
36262 */
36263 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36264 - u_rand_bytes = (elf_addr_t __user *)
36265 - STACK_ALLOC(p, sizeof(k_rand_bytes));
36266 + srandom32(k_rand_bytes[0] ^ random32());
36267 + srandom32(k_rand_bytes[1] ^ random32());
36268 + srandom32(k_rand_bytes[2] ^ random32());
36269 + srandom32(k_rand_bytes[3] ^ random32());
36270 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
36271 + u_rand_bytes = (elf_addr_t __user *) p;
36272 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36273 return -EFAULT;
36274
36275 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36276 return -EFAULT;
36277 current->mm->env_end = p;
36278
36279 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36280 +
36281 /* Put the elf_info on the stack in the right place. */
36282 sp = (elf_addr_t __user *)envp + 1;
36283 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36284 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36285 return -EFAULT;
36286 return 0;
36287 }
36288 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36289 {
36290 struct elf_phdr *elf_phdata;
36291 struct elf_phdr *eppnt;
36292 - unsigned long load_addr = 0;
36293 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36294 int load_addr_set = 0;
36295 unsigned long last_bss = 0, elf_bss = 0;
36296 - unsigned long error = ~0UL;
36297 + unsigned long error = -EINVAL;
36298 unsigned long total_size;
36299 int retval, i, size;
36300
36301 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36302 goto out_close;
36303 }
36304
36305 +#ifdef CONFIG_PAX_SEGMEXEC
36306 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36307 + pax_task_size = SEGMEXEC_TASK_SIZE;
36308 +#endif
36309 +
36310 eppnt = elf_phdata;
36311 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36312 if (eppnt->p_type == PT_LOAD) {
36313 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36314 k = load_addr + eppnt->p_vaddr;
36315 if (BAD_ADDR(k) ||
36316 eppnt->p_filesz > eppnt->p_memsz ||
36317 - eppnt->p_memsz > TASK_SIZE ||
36318 - TASK_SIZE - eppnt->p_memsz < k) {
36319 + eppnt->p_memsz > pax_task_size ||
36320 + pax_task_size - eppnt->p_memsz < k) {
36321 error = -ENOMEM;
36322 goto out_close;
36323 }
36324 @@ -532,6 +557,194 @@ out:
36325 return error;
36326 }
36327
36328 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36329 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36330 +{
36331 + unsigned long pax_flags = 0UL;
36332 +
36333 +#ifdef CONFIG_PAX_PAGEEXEC
36334 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36335 + pax_flags |= MF_PAX_PAGEEXEC;
36336 +#endif
36337 +
36338 +#ifdef CONFIG_PAX_SEGMEXEC
36339 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36340 + pax_flags |= MF_PAX_SEGMEXEC;
36341 +#endif
36342 +
36343 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36344 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36345 + if (nx_enabled)
36346 + pax_flags &= ~MF_PAX_SEGMEXEC;
36347 + else
36348 + pax_flags &= ~MF_PAX_PAGEEXEC;
36349 + }
36350 +#endif
36351 +
36352 +#ifdef CONFIG_PAX_EMUTRAMP
36353 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36354 + pax_flags |= MF_PAX_EMUTRAMP;
36355 +#endif
36356 +
36357 +#ifdef CONFIG_PAX_MPROTECT
36358 + if (elf_phdata->p_flags & PF_MPROTECT)
36359 + pax_flags |= MF_PAX_MPROTECT;
36360 +#endif
36361 +
36362 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36363 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36364 + pax_flags |= MF_PAX_RANDMMAP;
36365 +#endif
36366 +
36367 + return pax_flags;
36368 +}
36369 +#endif
36370 +
36371 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36372 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36373 +{
36374 + unsigned long pax_flags = 0UL;
36375 +
36376 +#ifdef CONFIG_PAX_PAGEEXEC
36377 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36378 + pax_flags |= MF_PAX_PAGEEXEC;
36379 +#endif
36380 +
36381 +#ifdef CONFIG_PAX_SEGMEXEC
36382 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36383 + pax_flags |= MF_PAX_SEGMEXEC;
36384 +#endif
36385 +
36386 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36387 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36388 + if (nx_enabled)
36389 + pax_flags &= ~MF_PAX_SEGMEXEC;
36390 + else
36391 + pax_flags &= ~MF_PAX_PAGEEXEC;
36392 + }
36393 +#endif
36394 +
36395 +#ifdef CONFIG_PAX_EMUTRAMP
36396 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36397 + pax_flags |= MF_PAX_EMUTRAMP;
36398 +#endif
36399 +
36400 +#ifdef CONFIG_PAX_MPROTECT
36401 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36402 + pax_flags |= MF_PAX_MPROTECT;
36403 +#endif
36404 +
36405 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36406 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36407 + pax_flags |= MF_PAX_RANDMMAP;
36408 +#endif
36409 +
36410 + return pax_flags;
36411 +}
36412 +#endif
36413 +
36414 +#ifdef CONFIG_PAX_EI_PAX
36415 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36416 +{
36417 + unsigned long pax_flags = 0UL;
36418 +
36419 +#ifdef CONFIG_PAX_PAGEEXEC
36420 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36421 + pax_flags |= MF_PAX_PAGEEXEC;
36422 +#endif
36423 +
36424 +#ifdef CONFIG_PAX_SEGMEXEC
36425 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36426 + pax_flags |= MF_PAX_SEGMEXEC;
36427 +#endif
36428 +
36429 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36430 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36431 + if (nx_enabled)
36432 + pax_flags &= ~MF_PAX_SEGMEXEC;
36433 + else
36434 + pax_flags &= ~MF_PAX_PAGEEXEC;
36435 + }
36436 +#endif
36437 +
36438 +#ifdef CONFIG_PAX_EMUTRAMP
36439 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36440 + pax_flags |= MF_PAX_EMUTRAMP;
36441 +#endif
36442 +
36443 +#ifdef CONFIG_PAX_MPROTECT
36444 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36445 + pax_flags |= MF_PAX_MPROTECT;
36446 +#endif
36447 +
36448 +#ifdef CONFIG_PAX_ASLR
36449 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36450 + pax_flags |= MF_PAX_RANDMMAP;
36451 +#endif
36452 +
36453 + return pax_flags;
36454 +}
36455 +#endif
36456 +
36457 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36458 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36459 +{
36460 + unsigned long pax_flags = 0UL;
36461 +
36462 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36463 + unsigned long i;
36464 + int found_flags = 0;
36465 +#endif
36466 +
36467 +#ifdef CONFIG_PAX_EI_PAX
36468 + pax_flags = pax_parse_ei_pax(elf_ex);
36469 +#endif
36470 +
36471 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36472 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36473 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36474 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36475 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36476 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36477 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36478 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36479 + return -EINVAL;
36480 +
36481 +#ifdef CONFIG_PAX_SOFTMODE
36482 + if (pax_softmode)
36483 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36484 + else
36485 +#endif
36486 +
36487 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36488 + found_flags = 1;
36489 + break;
36490 + }
36491 +#endif
36492 +
36493 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36494 + if (found_flags == 0) {
36495 + struct elf_phdr phdr;
36496 + memset(&phdr, 0, sizeof(phdr));
36497 + phdr.p_flags = PF_NOEMUTRAMP;
36498 +#ifdef CONFIG_PAX_SOFTMODE
36499 + if (pax_softmode)
36500 + pax_flags = pax_parse_softmode(&phdr);
36501 + else
36502 +#endif
36503 + pax_flags = pax_parse_hardmode(&phdr);
36504 + }
36505 +#endif
36506 +
36507 +
36508 + if (0 > pax_check_flags(&pax_flags))
36509 + return -EINVAL;
36510 +
36511 + current->mm->pax_flags = pax_flags;
36512 + return 0;
36513 +}
36514 +#endif
36515 +
36516 /*
36517 * These are the functions used to load ELF style executables and shared
36518 * libraries. There is no binary dependent code anywhere else.
36519 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36520 {
36521 unsigned int random_variable = 0;
36522
36523 +#ifdef CONFIG_PAX_RANDUSTACK
36524 + if (randomize_va_space)
36525 + return stack_top - current->mm->delta_stack;
36526 +#endif
36527 +
36528 if ((current->flags & PF_RANDOMIZE) &&
36529 !(current->personality & ADDR_NO_RANDOMIZE)) {
36530 random_variable = get_random_int() & STACK_RND_MASK;
36531 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36532 unsigned long load_addr = 0, load_bias = 0;
36533 int load_addr_set = 0;
36534 char * elf_interpreter = NULL;
36535 - unsigned long error;
36536 + unsigned long error = 0;
36537 struct elf_phdr *elf_ppnt, *elf_phdata;
36538 unsigned long elf_bss, elf_brk;
36539 int retval, i;
36540 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36541 unsigned long start_code, end_code, start_data, end_data;
36542 unsigned long reloc_func_desc = 0;
36543 int executable_stack = EXSTACK_DEFAULT;
36544 - unsigned long def_flags = 0;
36545 struct {
36546 struct elfhdr elf_ex;
36547 struct elfhdr interp_elf_ex;
36548 } *loc;
36549 + unsigned long pax_task_size = TASK_SIZE;
36550
36551 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36552 if (!loc) {
36553 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36554
36555 /* OK, This is the point of no return */
36556 current->flags &= ~PF_FORKNOEXEC;
36557 - current->mm->def_flags = def_flags;
36558 +
36559 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36560 + current->mm->pax_flags = 0UL;
36561 +#endif
36562 +
36563 +#ifdef CONFIG_PAX_DLRESOLVE
36564 + current->mm->call_dl_resolve = 0UL;
36565 +#endif
36566 +
36567 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36568 + current->mm->call_syscall = 0UL;
36569 +#endif
36570 +
36571 +#ifdef CONFIG_PAX_ASLR
36572 + current->mm->delta_mmap = 0UL;
36573 + current->mm->delta_stack = 0UL;
36574 +#endif
36575 +
36576 + current->mm->def_flags = 0;
36577 +
36578 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36579 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36580 + send_sig(SIGKILL, current, 0);
36581 + goto out_free_dentry;
36582 + }
36583 +#endif
36584 +
36585 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36586 + pax_set_initial_flags(bprm);
36587 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36588 + if (pax_set_initial_flags_func)
36589 + (pax_set_initial_flags_func)(bprm);
36590 +#endif
36591 +
36592 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36593 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36594 + current->mm->context.user_cs_limit = PAGE_SIZE;
36595 + current->mm->def_flags |= VM_PAGEEXEC;
36596 + }
36597 +#endif
36598 +
36599 +#ifdef CONFIG_PAX_SEGMEXEC
36600 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36601 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36602 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36603 + pax_task_size = SEGMEXEC_TASK_SIZE;
36604 + }
36605 +#endif
36606 +
36607 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36608 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36609 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36610 + put_cpu();
36611 + }
36612 +#endif
36613
36614 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36615 may depend on the personality. */
36616 SET_PERSONALITY(loc->elf_ex);
36617 +
36618 +#ifdef CONFIG_PAX_ASLR
36619 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36620 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36621 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36622 + }
36623 +#endif
36624 +
36625 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36626 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36627 + executable_stack = EXSTACK_DISABLE_X;
36628 + current->personality &= ~READ_IMPLIES_EXEC;
36629 + } else
36630 +#endif
36631 +
36632 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
36633 current->personality |= READ_IMPLIES_EXEC;
36634
36635 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
36636 #else
36637 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
36638 #endif
36639 +
36640 +#ifdef CONFIG_PAX_RANDMMAP
36641 + /* PaX: randomize base address at the default exe base if requested */
36642 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
36643 +#ifdef CONFIG_SPARC64
36644 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
36645 +#else
36646 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
36647 +#endif
36648 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
36649 + elf_flags |= MAP_FIXED;
36650 + }
36651 +#endif
36652 +
36653 }
36654
36655 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
36656 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
36657 * allowed task size. Note that p_filesz must always be
36658 * <= p_memsz so it is only necessary to check p_memsz.
36659 */
36660 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36661 - elf_ppnt->p_memsz > TASK_SIZE ||
36662 - TASK_SIZE - elf_ppnt->p_memsz < k) {
36663 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36664 + elf_ppnt->p_memsz > pax_task_size ||
36665 + pax_task_size - elf_ppnt->p_memsz < k) {
36666 /* set_brk can never work. Avoid overflows. */
36667 send_sig(SIGKILL, current, 0);
36668 retval = -EINVAL;
36669 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
36670 start_data += load_bias;
36671 end_data += load_bias;
36672
36673 +#ifdef CONFIG_PAX_RANDMMAP
36674 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
36675 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
36676 +#endif
36677 +
36678 /* Calling set_brk effectively mmaps the pages that we need
36679 * for the bss and break sections. We must do this before
36680 * mapping in the interpreter, to make sure it doesn't wind
36681 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
36682 goto out_free_dentry;
36683 }
36684 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
36685 - send_sig(SIGSEGV, current, 0);
36686 - retval = -EFAULT; /* Nobody gets to see this, but.. */
36687 - goto out_free_dentry;
36688 + /*
36689 + * This bss-zeroing can fail if the ELF
36690 + * file specifies odd protections. So
36691 + * we don't check the return value
36692 + */
36693 }
36694
36695 if (elf_interpreter) {
36696 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
36697 unsigned long n = off;
36698 if (n > PAGE_SIZE)
36699 n = PAGE_SIZE;
36700 - if (!dump_write(file, buf, n))
36701 + if (!dump_write(file, buf, n)) {
36702 + free_page((unsigned long)buf);
36703 return 0;
36704 + }
36705 off -= n;
36706 }
36707 free_page((unsigned long)buf);
36708 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
36709 * Decide what to dump of a segment, part, all or none.
36710 */
36711 static unsigned long vma_dump_size(struct vm_area_struct *vma,
36712 - unsigned long mm_flags)
36713 + unsigned long mm_flags, long signr)
36714 {
36715 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
36716
36717 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
36718 if (vma->vm_file == NULL)
36719 return 0;
36720
36721 - if (FILTER(MAPPED_PRIVATE))
36722 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
36723 goto whole;
36724
36725 /*
36726 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
36727 #undef DUMP_WRITE
36728
36729 #define DUMP_WRITE(addr, nr) \
36730 + do { \
36731 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
36732 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
36733 - goto end_coredump;
36734 + goto end_coredump; \
36735 + } while (0);
36736
36737 static void fill_elf_header(struct elfhdr *elf, int segs,
36738 u16 machine, u32 flags, u8 osabi)
36739 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
36740 {
36741 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
36742 int i = 0;
36743 - do
36744 + do {
36745 i += 2;
36746 - while (auxv[i - 2] != AT_NULL);
36747 + } while (auxv[i - 2] != AT_NULL);
36748 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
36749 }
36750
36751 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
36752 phdr.p_offset = offset;
36753 phdr.p_vaddr = vma->vm_start;
36754 phdr.p_paddr = 0;
36755 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
36756 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
36757 phdr.p_memsz = vma->vm_end - vma->vm_start;
36758 offset += phdr.p_filesz;
36759 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
36760 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
36761 unsigned long addr;
36762 unsigned long end;
36763
36764 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
36765 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
36766
36767 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
36768 struct page *page;
36769 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
36770 page = get_dump_page(addr);
36771 if (page) {
36772 void *kaddr = kmap(page);
36773 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
36774 stop = ((size += PAGE_SIZE) > limit) ||
36775 !dump_write(file, kaddr, PAGE_SIZE);
36776 kunmap(page);
36777 @@ -2042,6 +2356,97 @@ out:
36778
36779 #endif /* USE_ELF_CORE_DUMP */
36780
36781 +#ifdef CONFIG_PAX_MPROTECT
36782 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
36783 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
36784 + * we'll remove VM_MAYWRITE for good on RELRO segments.
36785 + *
36786 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
36787 + * basis because we want to allow the common case and not the special ones.
36788 + */
36789 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
36790 +{
36791 + struct elfhdr elf_h;
36792 + struct elf_phdr elf_p;
36793 + unsigned long i;
36794 + unsigned long oldflags;
36795 + bool is_textrel_rw, is_textrel_rx, is_relro;
36796 +
36797 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
36798 + return;
36799 +
36800 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
36801 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
36802 +
36803 +#ifdef CONFIG_PAX_ELFRELOCS
36804 + /* possible TEXTREL */
36805 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
36806 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
36807 +#else
36808 + is_textrel_rw = false;
36809 + is_textrel_rx = false;
36810 +#endif
36811 +
36812 + /* possible RELRO */
36813 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
36814 +
36815 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
36816 + return;
36817 +
36818 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
36819 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
36820 +
36821 +#ifdef CONFIG_PAX_ETEXECRELOCS
36822 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36823 +#else
36824 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
36825 +#endif
36826 +
36827 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36828 + !elf_check_arch(&elf_h) ||
36829 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
36830 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
36831 + return;
36832 +
36833 + for (i = 0UL; i < elf_h.e_phnum; i++) {
36834 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
36835 + return;
36836 + switch (elf_p.p_type) {
36837 + case PT_DYNAMIC:
36838 + if (!is_textrel_rw && !is_textrel_rx)
36839 + continue;
36840 + i = 0UL;
36841 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
36842 + elf_dyn dyn;
36843 +
36844 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
36845 + return;
36846 + if (dyn.d_tag == DT_NULL)
36847 + return;
36848 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
36849 + gr_log_textrel(vma);
36850 + if (is_textrel_rw)
36851 + vma->vm_flags |= VM_MAYWRITE;
36852 + else
36853 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
36854 + vma->vm_flags &= ~VM_MAYWRITE;
36855 + return;
36856 + }
36857 + i++;
36858 + }
36859 + return;
36860 +
36861 + case PT_GNU_RELRO:
36862 + if (!is_relro)
36863 + continue;
36864 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
36865 + vma->vm_flags &= ~VM_MAYWRITE;
36866 + return;
36867 + }
36868 + }
36869 +}
36870 +#endif
36871 +
36872 static int __init init_elf_binfmt(void)
36873 {
36874 return register_binfmt(&elf_format);
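
The elf_handle_mprotect() function added above decides whether to re-grant or permanently drop VM_MAYWRITE by re-reading the backing ELF image: it looks for DT_TEXTREL (or DT_FLAGS with DF_TEXTREL) inside PT_DYNAMIC, and for a PT_GNU_RELRO segment matching the vma. The same lookup can be reproduced from userspace; the following is a minimal sketch for 64-bit ELF files using the standard <elf.h> definitions. It is an illustration of the scan the patch performs, not kernel code, and the file name and program name are placeholders.

/* textrel_relro_scan.c: report DT_TEXTREL/DF_TEXTREL and PT_GNU_RELRO
 * in a 64-bit ELF file, mirroring the checks elf_handle_mprotect()
 * performs against the mapped file. Illustrative userspace sketch. */
#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
                return 1;
        }
        FILE *f = fopen(argv[1], "rb");
        if (!f) { perror("fopen"); return 1; }

        Elf64_Ehdr eh;
        if (fread(&eh, sizeof(eh), 1, f) != 1 ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
            eh.e_ident[EI_CLASS] != ELFCLASS64) {
                fprintf(stderr, "not a 64-bit ELF file\n");
                return 1;
        }

        int textrel = 0, relro = 0;
        for (unsigned i = 0; i < eh.e_phnum; i++) {
                Elf64_Phdr ph;
                if (fseek(f, eh.e_phoff + (long)i * eh.e_phentsize, SEEK_SET) ||
                    fread(&ph, sizeof(ph), 1, f) != 1)
                        break;
                if (ph.p_type == PT_GNU_RELRO)
                        relro = 1;              /* candidate for dropping VM_MAYWRITE */
                if (ph.p_type != PT_DYNAMIC)
                        continue;
                /* walk the dynamic entries the same way the patch does */
                for (Elf64_Xword off = 0; off + sizeof(Elf64_Dyn) <= ph.p_filesz;
                     off += sizeof(Elf64_Dyn)) {
                        Elf64_Dyn dyn;
                        if (fseek(f, ph.p_offset + off, SEEK_SET) ||
                            fread(&dyn, sizeof(dyn), 1, f) != 1)
                                break;
                        if (dyn.d_tag == DT_NULL)
                                break;
                        if (dyn.d_tag == DT_TEXTREL ||
                            (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
                                textrel = 1;    /* text relocations present */
                }
        }
        fclose(f);
        printf("%s: TEXTREL=%s GNU_RELRO=%s\n", argv[1],
               textrel ? "yes" : "no", relro ? "yes" : "no");
        return 0;
}
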
36875 diff -urNp linux-2.6.32.42/fs/binfmt_flat.c linux-2.6.32.42/fs/binfmt_flat.c
36876 --- linux-2.6.32.42/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
36877 +++ linux-2.6.32.42/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
36878 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
36879 realdatastart = (unsigned long) -ENOMEM;
36880 printk("Unable to allocate RAM for process data, errno %d\n",
36881 (int)-realdatastart);
36882 + down_write(&current->mm->mmap_sem);
36883 do_munmap(current->mm, textpos, text_len);
36884 + up_write(&current->mm->mmap_sem);
36885 ret = realdatastart;
36886 goto err;
36887 }
36888 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
36889 }
36890 if (IS_ERR_VALUE(result)) {
36891 printk("Unable to read data+bss, errno %d\n", (int)-result);
36892 + down_write(&current->mm->mmap_sem);
36893 do_munmap(current->mm, textpos, text_len);
36894 do_munmap(current->mm, realdatastart, data_len + extra);
36895 + up_write(&current->mm->mmap_sem);
36896 ret = result;
36897 goto err;
36898 }
36899 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
36900 }
36901 if (IS_ERR_VALUE(result)) {
36902 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
36903 + down_write(&current->mm->mmap_sem);
36904 do_munmap(current->mm, textpos, text_len + data_len + extra +
36905 MAX_SHARED_LIBS * sizeof(unsigned long));
36906 + up_write(&current->mm->mmap_sem);
36907 ret = result;
36908 goto err;
36909 }
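
The three binfmt_flat hunks above all correct the same omission: do_munmap() mutates the process address space and must run with current->mm->mmap_sem held for writing, which the flat loader's error paths skipped. The general pattern (take the writer side of a reader/writer lock around any mutation, including cleanup on failure) can be mirrored in plain userspace C; the sketch below uses POSIX rwlocks and invented names purely as an analogy, not the kernel API.

/* rwlock_unmap_sketch.c: analogy for the binfmt_flat fix - the "unmap"
 * mutation must run under the writer lock, even on error paths.
 * All names here are made up for illustration. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int nr_regions = 2;                      /* stand-in for the vma list */

static void unmap_region(int idx)
{
        pthread_rwlock_wrlock(&map_lock);       /* like down_write(&mm->mmap_sem) */
        nr_regions--;
        printf("unmapped region %d, %d left\n", idx, nr_regions);
        pthread_rwlock_unlock(&map_lock);       /* like up_write() */
}

static int load_image(void)
{
        int failed = 1;                         /* pretend allocation failed part-way */
        if (failed) {
                unmap_region(0);                /* cleanup still takes the lock */
                return -1;
        }
        return 0;
}

int main(void)
{
        return load_image() ? 1 : 0;
}
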
36910 diff -urNp linux-2.6.32.42/fs/bio.c linux-2.6.32.42/fs/bio.c
36911 --- linux-2.6.32.42/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
36912 +++ linux-2.6.32.42/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
36913 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
36914
36915 i = 0;
36916 while (i < bio_slab_nr) {
36917 - struct bio_slab *bslab = &bio_slabs[i];
36918 + bslab = &bio_slabs[i];
36919
36920 if (!bslab->slab && entry == -1)
36921 entry = i;
36922 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
36923 const int read = bio_data_dir(bio) == READ;
36924 struct bio_map_data *bmd = bio->bi_private;
36925 int i;
36926 - char *p = bmd->sgvecs[0].iov_base;
36927 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
36928
36929 __bio_for_each_segment(bvec, bio, i, 0) {
36930 char *addr = page_address(bvec->bv_page);
36931 diff -urNp linux-2.6.32.42/fs/block_dev.c linux-2.6.32.42/fs/block_dev.c
36932 --- linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
36933 +++ linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
36934 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
36935 else if (bdev->bd_contains == bdev)
36936 res = 0; /* is a whole device which isn't held */
36937
36938 - else if (bdev->bd_contains->bd_holder == bd_claim)
36939 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
36940 res = 0; /* is a partition of a device that is being partitioned */
36941 else if (bdev->bd_contains->bd_holder != NULL)
36942 res = -EBUSY; /* is a partition of a held device */
36943 diff -urNp linux-2.6.32.42/fs/btrfs/ctree.c linux-2.6.32.42/fs/btrfs/ctree.c
36944 --- linux-2.6.32.42/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
36945 +++ linux-2.6.32.42/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
36946 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
36947 free_extent_buffer(buf);
36948 add_root_to_dirty_list(root);
36949 } else {
36950 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
36951 - parent_start = parent->start;
36952 - else
36953 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
36954 + if (parent)
36955 + parent_start = parent->start;
36956 + else
36957 + parent_start = 0;
36958 + } else
36959 parent_start = 0;
36960
36961 WARN_ON(trans->transid != btrfs_header_generation(parent));
36962 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
36963
36964 ret = 0;
36965 if (slot == 0) {
36966 - struct btrfs_disk_key disk_key;
36967 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
36968 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
36969 }
36970 diff -urNp linux-2.6.32.42/fs/btrfs/disk-io.c linux-2.6.32.42/fs/btrfs/disk-io.c
36971 --- linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
36972 +++ linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
36973 @@ -39,7 +39,7 @@
36974 #include "tree-log.h"
36975 #include "free-space-cache.h"
36976
36977 -static struct extent_io_ops btree_extent_io_ops;
36978 +static const struct extent_io_ops btree_extent_io_ops;
36979 static void end_workqueue_fn(struct btrfs_work *work);
36980 static void free_fs_root(struct btrfs_root *root);
36981
36982 @@ -2607,7 +2607,7 @@ out:
36983 return 0;
36984 }
36985
36986 -static struct extent_io_ops btree_extent_io_ops = {
36987 +static const struct extent_io_ops btree_extent_io_ops = {
36988 .write_cache_pages_lock_hook = btree_lock_page_hook,
36989 .readpage_end_io_hook = btree_readpage_end_io_hook,
36990 .submit_bio_hook = btree_submit_bio_hook,
36991 diff -urNp linux-2.6.32.42/fs/btrfs/extent_io.h linux-2.6.32.42/fs/btrfs/extent_io.h
36992 --- linux-2.6.32.42/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
36993 +++ linux-2.6.32.42/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
36994 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
36995 struct bio *bio, int mirror_num,
36996 unsigned long bio_flags);
36997 struct extent_io_ops {
36998 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
36999 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
37000 u64 start, u64 end, int *page_started,
37001 unsigned long *nr_written);
37002 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
37003 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
37004 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
37005 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
37006 extent_submit_bio_hook_t *submit_bio_hook;
37007 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
37008 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
37009 size_t size, struct bio *bio,
37010 unsigned long bio_flags);
37011 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
37012 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
37013 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
37014 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
37015 u64 start, u64 end,
37016 struct extent_state *state);
37017 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
37018 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
37019 u64 start, u64 end,
37020 struct extent_state *state);
37021 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37022 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37023 struct extent_state *state);
37024 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37025 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37026 struct extent_state *state, int uptodate);
37027 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
37028 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
37029 unsigned long old, unsigned long bits);
37030 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
37031 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
37032 unsigned long bits);
37033 - int (*merge_extent_hook)(struct inode *inode,
37034 + int (* const merge_extent_hook)(struct inode *inode,
37035 struct extent_state *new,
37036 struct extent_state *other);
37037 - int (*split_extent_hook)(struct inode *inode,
37038 + int (* const split_extent_hook)(struct inode *inode,
37039 struct extent_state *orig, u64 split);
37040 - int (*write_cache_pages_lock_hook)(struct page *page);
37041 + int (* const write_cache_pages_lock_hook)(struct page *page);
37042 };
37043
37044 struct extent_io_tree {
37045 @@ -88,7 +88,7 @@ struct extent_io_tree {
37046 u64 dirty_bytes;
37047 spinlock_t lock;
37048 spinlock_t buffer_lock;
37049 - struct extent_io_ops *ops;
37050 + const struct extent_io_ops *ops;
37051 };
37052
37053 struct extent_state {
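
The extent_io.h change above (with the matching btrfs disk-io.c, inode.c and sysfs.c hunks) is part of the patch's broader constification effort: once every member of an ops structure is a const function pointer and the instances themselves are declared const, the tables end up in read-only memory, so an arbitrary-write primitive cannot redirect the hooks. Below is a minimal standalone illustration of the same idiom, with hypothetical names.

/* const_ops_sketch.c: an ops table made fully const lands in .rodata,
 * so its function pointers cannot be overwritten at run time.
 * Hypothetical names, for illustration only. */
#include <stdio.h>

struct io_ops {
        int (* const readpage)(int page);
        int (* const writepage)(int page);
};

static int my_readpage(int page)  { printf("read %d\n", page);  return 0; }
static int my_writepage(int page) { printf("write %d\n", page); return 0; }

/* both the members and the instance are const */
static const struct io_ops my_ops = {
        .readpage  = my_readpage,
        .writepage = my_writepage,
};

int main(void)
{
        my_ops.readpage(1);
        my_ops.writepage(2);
        /* my_ops.readpage = my_writepage;  -- would not compile:
         * assignment of read-only member */
        return 0;
}
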
37054 diff -urNp linux-2.6.32.42/fs/btrfs/extent-tree.c linux-2.6.32.42/fs/btrfs/extent-tree.c
37055 --- linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
37056 +++ linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
37057 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
37058 u64 group_start = group->key.objectid;
37059 new_extents = kmalloc(sizeof(*new_extents),
37060 GFP_NOFS);
37061 + if (!new_extents) {
37062 + ret = -ENOMEM;
37063 + goto out;
37064 + }
37065 nr_extents = 1;
37066 ret = get_new_locations(reloc_inode,
37067 extent_key,
37068 diff -urNp linux-2.6.32.42/fs/btrfs/free-space-cache.c linux-2.6.32.42/fs/btrfs/free-space-cache.c
37069 --- linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
37070 +++ linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
37071 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
37072
37073 while(1) {
37074 if (entry->bytes < bytes || entry->offset < min_start) {
37075 - struct rb_node *node;
37076 -
37077 node = rb_next(&entry->offset_index);
37078 if (!node)
37079 break;
37080 @@ -1226,7 +1224,7 @@ again:
37081 */
37082 while (entry->bitmap || found_bitmap ||
37083 (!entry->bitmap && entry->bytes < min_bytes)) {
37084 - struct rb_node *node = rb_next(&entry->offset_index);
37085 + node = rb_next(&entry->offset_index);
37086
37087 if (entry->bitmap && entry->bytes > bytes + empty_size) {
37088 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
37089 diff -urNp linux-2.6.32.42/fs/btrfs/inode.c linux-2.6.32.42/fs/btrfs/inode.c
37090 --- linux-2.6.32.42/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37091 +++ linux-2.6.32.42/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
37092 @@ -63,7 +63,7 @@ static const struct inode_operations btr
37093 static const struct address_space_operations btrfs_aops;
37094 static const struct address_space_operations btrfs_symlink_aops;
37095 static const struct file_operations btrfs_dir_file_operations;
37096 -static struct extent_io_ops btrfs_extent_io_ops;
37097 +static const struct extent_io_ops btrfs_extent_io_ops;
37098
37099 static struct kmem_cache *btrfs_inode_cachep;
37100 struct kmem_cache *btrfs_trans_handle_cachep;
37101 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
37102 1, 0, NULL, GFP_NOFS);
37103 while (start < end) {
37104 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
37105 + BUG_ON(!async_cow);
37106 async_cow->inode = inode;
37107 async_cow->root = root;
37108 async_cow->locked_page = locked_page;
37109 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
37110 inline_size = btrfs_file_extent_inline_item_len(leaf,
37111 btrfs_item_nr(leaf, path->slots[0]));
37112 tmp = kmalloc(inline_size, GFP_NOFS);
37113 + if (!tmp)
37114 + return -ENOMEM;
37115 ptr = btrfs_file_extent_inline_start(item);
37116
37117 read_extent_buffer(leaf, tmp, ptr, inline_size);
37118 @@ -5410,7 +5413,7 @@ fail:
37119 return -ENOMEM;
37120 }
37121
37122 -static int btrfs_getattr(struct vfsmount *mnt,
37123 +int btrfs_getattr(struct vfsmount *mnt,
37124 struct dentry *dentry, struct kstat *stat)
37125 {
37126 struct inode *inode = dentry->d_inode;
37127 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
37128 return 0;
37129 }
37130
37131 +EXPORT_SYMBOL(btrfs_getattr);
37132 +
37133 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
37134 +{
37135 + return BTRFS_I(inode)->root->anon_super.s_dev;
37136 +}
37137 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37138 +
37139 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
37140 struct inode *new_dir, struct dentry *new_dentry)
37141 {
37142 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
37143 .fsync = btrfs_sync_file,
37144 };
37145
37146 -static struct extent_io_ops btrfs_extent_io_ops = {
37147 +static const struct extent_io_ops btrfs_extent_io_ops = {
37148 .fill_delalloc = run_delalloc_range,
37149 .submit_bio_hook = btrfs_submit_bio_hook,
37150 .merge_bio_hook = btrfs_merge_bio_hook,
37151 diff -urNp linux-2.6.32.42/fs/btrfs/relocation.c linux-2.6.32.42/fs/btrfs/relocation.c
37152 --- linux-2.6.32.42/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
37153 +++ linux-2.6.32.42/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
37154 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
37155 }
37156 spin_unlock(&rc->reloc_root_tree.lock);
37157
37158 - BUG_ON((struct btrfs_root *)node->data != root);
37159 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
37160
37161 if (!del) {
37162 spin_lock(&rc->reloc_root_tree.lock);
37163 diff -urNp linux-2.6.32.42/fs/btrfs/sysfs.c linux-2.6.32.42/fs/btrfs/sysfs.c
37164 --- linux-2.6.32.42/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37165 +++ linux-2.6.32.42/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37166 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
37167 complete(&root->kobj_unregister);
37168 }
37169
37170 -static struct sysfs_ops btrfs_super_attr_ops = {
37171 +static const struct sysfs_ops btrfs_super_attr_ops = {
37172 .show = btrfs_super_attr_show,
37173 .store = btrfs_super_attr_store,
37174 };
37175
37176 -static struct sysfs_ops btrfs_root_attr_ops = {
37177 +static const struct sysfs_ops btrfs_root_attr_ops = {
37178 .show = btrfs_root_attr_show,
37179 .store = btrfs_root_attr_store,
37180 };
37181 diff -urNp linux-2.6.32.42/fs/buffer.c linux-2.6.32.42/fs/buffer.c
37182 --- linux-2.6.32.42/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
37183 +++ linux-2.6.32.42/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
37184 @@ -25,6 +25,7 @@
37185 #include <linux/percpu.h>
37186 #include <linux/slab.h>
37187 #include <linux/capability.h>
37188 +#include <linux/security.h>
37189 #include <linux/blkdev.h>
37190 #include <linux/file.h>
37191 #include <linux/quotaops.h>
37192 diff -urNp linux-2.6.32.42/fs/cachefiles/bind.c linux-2.6.32.42/fs/cachefiles/bind.c
37193 --- linux-2.6.32.42/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
37194 +++ linux-2.6.32.42/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
37195 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37196 args);
37197
37198 /* start by checking things over */
37199 - ASSERT(cache->fstop_percent >= 0 &&
37200 - cache->fstop_percent < cache->fcull_percent &&
37201 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
37202 cache->fcull_percent < cache->frun_percent &&
37203 cache->frun_percent < 100);
37204
37205 - ASSERT(cache->bstop_percent >= 0 &&
37206 - cache->bstop_percent < cache->bcull_percent &&
37207 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
37208 cache->bcull_percent < cache->brun_percent &&
37209 cache->brun_percent < 100);
37210
37211 diff -urNp linux-2.6.32.42/fs/cachefiles/daemon.c linux-2.6.32.42/fs/cachefiles/daemon.c
37212 --- linux-2.6.32.42/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
37213 +++ linux-2.6.32.42/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
37214 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
37215 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37216 return -EIO;
37217
37218 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
37219 + if (datalen > PAGE_SIZE - 1)
37220 return -EOPNOTSUPP;
37221
37222 /* drag the command string into the kernel so we can parse it */
37223 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
37224 if (args[0] != '%' || args[1] != '\0')
37225 return -EINVAL;
37226
37227 - if (fstop < 0 || fstop >= cache->fcull_percent)
37228 + if (fstop >= cache->fcull_percent)
37229 return cachefiles_daemon_range_error(cache, args);
37230
37231 cache->fstop_percent = fstop;
37232 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
37233 if (args[0] != '%' || args[1] != '\0')
37234 return -EINVAL;
37235
37236 - if (bstop < 0 || bstop >= cache->bcull_percent)
37237 + if (bstop >= cache->bcull_percent)
37238 return cachefiles_daemon_range_error(cache, args);
37239
37240 cache->bstop_percent = bstop;
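
The cachefiles/bind.c and cachefiles/daemon.c hunks above drop the `< 0` / `>= 0` tests on values whose types are unsigned (datalen and the percentage fields): such comparisons can never fail and only produce compiler warnings. A tiny generic example of the dead comparison, not taken from the patch:

/* unsigned_cmp.c: "x < 0" on an unsigned type is always false, which
 * is why the removed cachefiles checks were dead code. Building with
 * -Wextra (which enables -Wtype-limits) reports the comparison. */
#include <stdio.h>

int main(void)
{
        unsigned int datalen = 0;
        if (datalen < 0)                /* always false for unsigned types */
                puts("never printed");
        else
                puts("the comparison is dead code");
        return 0;
}
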
37241 diff -urNp linux-2.6.32.42/fs/cachefiles/internal.h linux-2.6.32.42/fs/cachefiles/internal.h
37242 --- linux-2.6.32.42/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37243 +++ linux-2.6.32.42/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37244 @@ -56,7 +56,7 @@ struct cachefiles_cache {
37245 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37246 struct rb_root active_nodes; /* active nodes (can't be culled) */
37247 rwlock_t active_lock; /* lock for active_nodes */
37248 - atomic_t gravecounter; /* graveyard uniquifier */
37249 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37250 unsigned frun_percent; /* when to stop culling (% files) */
37251 unsigned fcull_percent; /* when to start culling (% files) */
37252 unsigned fstop_percent; /* when to stop allocating (% files) */
37253 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37254 * proc.c
37255 */
37256 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37257 -extern atomic_t cachefiles_lookup_histogram[HZ];
37258 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37259 -extern atomic_t cachefiles_create_histogram[HZ];
37260 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37261 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37262 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37263
37264 extern int __init cachefiles_proc_init(void);
37265 extern void cachefiles_proc_cleanup(void);
37266 static inline
37267 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37268 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37269 {
37270 unsigned long jif = jiffies - start_jif;
37271 if (jif >= HZ)
37272 jif = HZ - 1;
37273 - atomic_inc(&histogram[jif]);
37274 + atomic_inc_unchecked(&histogram[jif]);
37275 }
37276
37277 #else
37278 diff -urNp linux-2.6.32.42/fs/cachefiles/namei.c linux-2.6.32.42/fs/cachefiles/namei.c
37279 --- linux-2.6.32.42/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37280 +++ linux-2.6.32.42/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37281 @@ -250,7 +250,7 @@ try_again:
37282 /* first step is to make up a grave dentry in the graveyard */
37283 sprintf(nbuffer, "%08x%08x",
37284 (uint32_t) get_seconds(),
37285 - (uint32_t) atomic_inc_return(&cache->gravecounter));
37286 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37287
37288 /* do the multiway lock magic */
37289 trap = lock_rename(cache->graveyard, dir);
37290 diff -urNp linux-2.6.32.42/fs/cachefiles/proc.c linux-2.6.32.42/fs/cachefiles/proc.c
37291 --- linux-2.6.32.42/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37292 +++ linux-2.6.32.42/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37293 @@ -14,9 +14,9 @@
37294 #include <linux/seq_file.h>
37295 #include "internal.h"
37296
37297 -atomic_t cachefiles_lookup_histogram[HZ];
37298 -atomic_t cachefiles_mkdir_histogram[HZ];
37299 -atomic_t cachefiles_create_histogram[HZ];
37300 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37301 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37302 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37303
37304 /*
37305 * display the latency histogram
37306 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37307 return 0;
37308 default:
37309 index = (unsigned long) v - 3;
37310 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37311 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37312 - z = atomic_read(&cachefiles_create_histogram[index]);
37313 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37314 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37315 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37316 if (x == 0 && y == 0 && z == 0)
37317 return 0;
37318
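
atomic_unchecked_t is the PaX escape hatch from REFCOUNT overflow detection: counters that are pure statistics (the cachefiles histograms and graveyard uniquifier above, and the CIFS counters further down) are allowed to wrap silently, while ordinary atomic_t arithmetic is instrumented to trap on overflow. The policy, though not the kernel implementation, can be sketched in C11 with invented helper names:

/* unchecked_vs_checked.c: statistics counters may wrap (unchecked),
 * reference counts must not (checked). Helper names are invented
 * for illustration; this is an analogy, not the PaX mechanism. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* unchecked: wraparound is harmless, e.g. a histogram bucket */
static void stat_inc(atomic_int *v)
{
        atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
}

/* checked: refuse to let a reference count wrap back to a small value */
static void ref_inc(atomic_int *v)
{
        int old = atomic_fetch_add(v, 1);
        if (old == INT_MAX) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
}

int main(void)
{
        atomic_int lookups = INT_MAX;   /* about to wrap: nobody cares */
        atomic_int refs    = 0;

        stat_inc(&lookups);
        ref_inc(&refs);
        printf("lookups=%d refs=%d\n", atomic_load(&lookups),
               atomic_load(&refs));
        return 0;
}
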
37319 diff -urNp linux-2.6.32.42/fs/cachefiles/rdwr.c linux-2.6.32.42/fs/cachefiles/rdwr.c
37320 --- linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37321 +++ linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37322 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37323 old_fs = get_fs();
37324 set_fs(KERNEL_DS);
37325 ret = file->f_op->write(
37326 - file, (const void __user *) data, len, &pos);
37327 + file, (__force const void __user *) data, len, &pos);
37328 set_fs(old_fs);
37329 kunmap(page);
37330 if (ret != len)
37331 diff -urNp linux-2.6.32.42/fs/cifs/cifs_debug.c linux-2.6.32.42/fs/cifs/cifs_debug.c
37332 --- linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37333 +++ linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37334 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37335 tcon = list_entry(tmp3,
37336 struct cifsTconInfo,
37337 tcon_list);
37338 - atomic_set(&tcon->num_smbs_sent, 0);
37339 - atomic_set(&tcon->num_writes, 0);
37340 - atomic_set(&tcon->num_reads, 0);
37341 - atomic_set(&tcon->num_oplock_brks, 0);
37342 - atomic_set(&tcon->num_opens, 0);
37343 - atomic_set(&tcon->num_posixopens, 0);
37344 - atomic_set(&tcon->num_posixmkdirs, 0);
37345 - atomic_set(&tcon->num_closes, 0);
37346 - atomic_set(&tcon->num_deletes, 0);
37347 - atomic_set(&tcon->num_mkdirs, 0);
37348 - atomic_set(&tcon->num_rmdirs, 0);
37349 - atomic_set(&tcon->num_renames, 0);
37350 - atomic_set(&tcon->num_t2renames, 0);
37351 - atomic_set(&tcon->num_ffirst, 0);
37352 - atomic_set(&tcon->num_fnext, 0);
37353 - atomic_set(&tcon->num_fclose, 0);
37354 - atomic_set(&tcon->num_hardlinks, 0);
37355 - atomic_set(&tcon->num_symlinks, 0);
37356 - atomic_set(&tcon->num_locks, 0);
37357 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37358 + atomic_set_unchecked(&tcon->num_writes, 0);
37359 + atomic_set_unchecked(&tcon->num_reads, 0);
37360 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37361 + atomic_set_unchecked(&tcon->num_opens, 0);
37362 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37363 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37364 + atomic_set_unchecked(&tcon->num_closes, 0);
37365 + atomic_set_unchecked(&tcon->num_deletes, 0);
37366 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37367 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37368 + atomic_set_unchecked(&tcon->num_renames, 0);
37369 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37370 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37371 + atomic_set_unchecked(&tcon->num_fnext, 0);
37372 + atomic_set_unchecked(&tcon->num_fclose, 0);
37373 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37374 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37375 + atomic_set_unchecked(&tcon->num_locks, 0);
37376 }
37377 }
37378 }
37379 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37380 if (tcon->need_reconnect)
37381 seq_puts(m, "\tDISCONNECTED ");
37382 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37383 - atomic_read(&tcon->num_smbs_sent),
37384 - atomic_read(&tcon->num_oplock_brks));
37385 + atomic_read_unchecked(&tcon->num_smbs_sent),
37386 + atomic_read_unchecked(&tcon->num_oplock_brks));
37387 seq_printf(m, "\nReads: %d Bytes: %lld",
37388 - atomic_read(&tcon->num_reads),
37389 + atomic_read_unchecked(&tcon->num_reads),
37390 (long long)(tcon->bytes_read));
37391 seq_printf(m, "\nWrites: %d Bytes: %lld",
37392 - atomic_read(&tcon->num_writes),
37393 + atomic_read_unchecked(&tcon->num_writes),
37394 (long long)(tcon->bytes_written));
37395 seq_printf(m, "\nFlushes: %d",
37396 - atomic_read(&tcon->num_flushes));
37397 + atomic_read_unchecked(&tcon->num_flushes));
37398 seq_printf(m, "\nLocks: %d HardLinks: %d "
37399 "Symlinks: %d",
37400 - atomic_read(&tcon->num_locks),
37401 - atomic_read(&tcon->num_hardlinks),
37402 - atomic_read(&tcon->num_symlinks));
37403 + atomic_read_unchecked(&tcon->num_locks),
37404 + atomic_read_unchecked(&tcon->num_hardlinks),
37405 + atomic_read_unchecked(&tcon->num_symlinks));
37406 seq_printf(m, "\nOpens: %d Closes: %d "
37407 "Deletes: %d",
37408 - atomic_read(&tcon->num_opens),
37409 - atomic_read(&tcon->num_closes),
37410 - atomic_read(&tcon->num_deletes));
37411 + atomic_read_unchecked(&tcon->num_opens),
37412 + atomic_read_unchecked(&tcon->num_closes),
37413 + atomic_read_unchecked(&tcon->num_deletes));
37414 seq_printf(m, "\nPosix Opens: %d "
37415 "Posix Mkdirs: %d",
37416 - atomic_read(&tcon->num_posixopens),
37417 - atomic_read(&tcon->num_posixmkdirs));
37418 + atomic_read_unchecked(&tcon->num_posixopens),
37419 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37420 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37421 - atomic_read(&tcon->num_mkdirs),
37422 - atomic_read(&tcon->num_rmdirs));
37423 + atomic_read_unchecked(&tcon->num_mkdirs),
37424 + atomic_read_unchecked(&tcon->num_rmdirs));
37425 seq_printf(m, "\nRenames: %d T2 Renames %d",
37426 - atomic_read(&tcon->num_renames),
37427 - atomic_read(&tcon->num_t2renames));
37428 + atomic_read_unchecked(&tcon->num_renames),
37429 + atomic_read_unchecked(&tcon->num_t2renames));
37430 seq_printf(m, "\nFindFirst: %d FNext %d "
37431 "FClose %d",
37432 - atomic_read(&tcon->num_ffirst),
37433 - atomic_read(&tcon->num_fnext),
37434 - atomic_read(&tcon->num_fclose));
37435 + atomic_read_unchecked(&tcon->num_ffirst),
37436 + atomic_read_unchecked(&tcon->num_fnext),
37437 + atomic_read_unchecked(&tcon->num_fclose));
37438 }
37439 }
37440 }
37441 diff -urNp linux-2.6.32.42/fs/cifs/cifsglob.h linux-2.6.32.42/fs/cifs/cifsglob.h
37442 --- linux-2.6.32.42/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37443 +++ linux-2.6.32.42/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37444 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37445 __u16 Flags; /* optional support bits */
37446 enum statusEnum tidStatus;
37447 #ifdef CONFIG_CIFS_STATS
37448 - atomic_t num_smbs_sent;
37449 - atomic_t num_writes;
37450 - atomic_t num_reads;
37451 - atomic_t num_flushes;
37452 - atomic_t num_oplock_brks;
37453 - atomic_t num_opens;
37454 - atomic_t num_closes;
37455 - atomic_t num_deletes;
37456 - atomic_t num_mkdirs;
37457 - atomic_t num_posixopens;
37458 - atomic_t num_posixmkdirs;
37459 - atomic_t num_rmdirs;
37460 - atomic_t num_renames;
37461 - atomic_t num_t2renames;
37462 - atomic_t num_ffirst;
37463 - atomic_t num_fnext;
37464 - atomic_t num_fclose;
37465 - atomic_t num_hardlinks;
37466 - atomic_t num_symlinks;
37467 - atomic_t num_locks;
37468 - atomic_t num_acl_get;
37469 - atomic_t num_acl_set;
37470 + atomic_unchecked_t num_smbs_sent;
37471 + atomic_unchecked_t num_writes;
37472 + atomic_unchecked_t num_reads;
37473 + atomic_unchecked_t num_flushes;
37474 + atomic_unchecked_t num_oplock_brks;
37475 + atomic_unchecked_t num_opens;
37476 + atomic_unchecked_t num_closes;
37477 + atomic_unchecked_t num_deletes;
37478 + atomic_unchecked_t num_mkdirs;
37479 + atomic_unchecked_t num_posixopens;
37480 + atomic_unchecked_t num_posixmkdirs;
37481 + atomic_unchecked_t num_rmdirs;
37482 + atomic_unchecked_t num_renames;
37483 + atomic_unchecked_t num_t2renames;
37484 + atomic_unchecked_t num_ffirst;
37485 + atomic_unchecked_t num_fnext;
37486 + atomic_unchecked_t num_fclose;
37487 + atomic_unchecked_t num_hardlinks;
37488 + atomic_unchecked_t num_symlinks;
37489 + atomic_unchecked_t num_locks;
37490 + atomic_unchecked_t num_acl_get;
37491 + atomic_unchecked_t num_acl_set;
37492 #ifdef CONFIG_CIFS_STATS2
37493 unsigned long long time_writes;
37494 unsigned long long time_reads;
37495 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37496 }
37497
37498 #ifdef CONFIG_CIFS_STATS
37499 -#define cifs_stats_inc atomic_inc
37500 +#define cifs_stats_inc atomic_inc_unchecked
37501
37502 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37503 unsigned int bytes)
37504 diff -urNp linux-2.6.32.42/fs/cifs/link.c linux-2.6.32.42/fs/cifs/link.c
37505 --- linux-2.6.32.42/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37506 +++ linux-2.6.32.42/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37507 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37508
37509 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37510 {
37511 - char *p = nd_get_link(nd);
37512 + const char *p = nd_get_link(nd);
37513 if (!IS_ERR(p))
37514 kfree(p);
37515 }
37516 diff -urNp linux-2.6.32.42/fs/coda/cache.c linux-2.6.32.42/fs/coda/cache.c
37517 --- linux-2.6.32.42/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37518 +++ linux-2.6.32.42/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37519 @@ -24,14 +24,14 @@
37520 #include <linux/coda_fs_i.h>
37521 #include <linux/coda_cache.h>
37522
37523 -static atomic_t permission_epoch = ATOMIC_INIT(0);
37524 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37525
37526 /* replace or extend an acl cache hit */
37527 void coda_cache_enter(struct inode *inode, int mask)
37528 {
37529 struct coda_inode_info *cii = ITOC(inode);
37530
37531 - cii->c_cached_epoch = atomic_read(&permission_epoch);
37532 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37533 if (cii->c_uid != current_fsuid()) {
37534 cii->c_uid = current_fsuid();
37535 cii->c_cached_perm = mask;
37536 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37537 void coda_cache_clear_inode(struct inode *inode)
37538 {
37539 struct coda_inode_info *cii = ITOC(inode);
37540 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37541 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37542 }
37543
37544 /* remove all acl caches */
37545 void coda_cache_clear_all(struct super_block *sb)
37546 {
37547 - atomic_inc(&permission_epoch);
37548 + atomic_inc_unchecked(&permission_epoch);
37549 }
37550
37551
37552 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37553
37554 hit = (mask & cii->c_cached_perm) == mask &&
37555 cii->c_uid == current_fsuid() &&
37556 - cii->c_cached_epoch == atomic_read(&permission_epoch);
37557 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37558
37559 return hit;
37560 }
37561 diff -urNp linux-2.6.32.42/fs/compat_binfmt_elf.c linux-2.6.32.42/fs/compat_binfmt_elf.c
37562 --- linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37563 +++ linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37564 @@ -29,10 +29,12 @@
37565 #undef elfhdr
37566 #undef elf_phdr
37567 #undef elf_note
37568 +#undef elf_dyn
37569 #undef elf_addr_t
37570 #define elfhdr elf32_hdr
37571 #define elf_phdr elf32_phdr
37572 #define elf_note elf32_note
37573 +#define elf_dyn Elf32_Dyn
37574 #define elf_addr_t Elf32_Addr
37575
37576 /*
37577 diff -urNp linux-2.6.32.42/fs/compat.c linux-2.6.32.42/fs/compat.c
37578 --- linux-2.6.32.42/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37579 +++ linux-2.6.32.42/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37580 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37581
37582 struct compat_readdir_callback {
37583 struct compat_old_linux_dirent __user *dirent;
37584 + struct file * file;
37585 int result;
37586 };
37587
37588 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37589 buf->result = -EOVERFLOW;
37590 return -EOVERFLOW;
37591 }
37592 +
37593 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37594 + return 0;
37595 +
37596 buf->result++;
37597 dirent = buf->dirent;
37598 if (!access_ok(VERIFY_WRITE, dirent,
37599 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37600
37601 buf.result = 0;
37602 buf.dirent = dirent;
37603 + buf.file = file;
37604
37605 error = vfs_readdir(file, compat_fillonedir, &buf);
37606 if (buf.result)
37607 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
37608 struct compat_getdents_callback {
37609 struct compat_linux_dirent __user *current_dir;
37610 struct compat_linux_dirent __user *previous;
37611 + struct file * file;
37612 int count;
37613 int error;
37614 };
37615 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37616 buf->error = -EOVERFLOW;
37617 return -EOVERFLOW;
37618 }
37619 +
37620 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37621 + return 0;
37622 +
37623 dirent = buf->previous;
37624 if (dirent) {
37625 if (__put_user(offset, &dirent->d_off))
37626 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37627 buf.previous = NULL;
37628 buf.count = count;
37629 buf.error = 0;
37630 + buf.file = file;
37631
37632 error = vfs_readdir(file, compat_filldir, &buf);
37633 if (error >= 0)
37634 @@ -987,6 +999,7 @@ out:
37635 struct compat_getdents_callback64 {
37636 struct linux_dirent64 __user *current_dir;
37637 struct linux_dirent64 __user *previous;
37638 + struct file * file;
37639 int count;
37640 int error;
37641 };
37642 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
37643 buf->error = -EINVAL; /* only used if we fail.. */
37644 if (reclen > buf->count)
37645 return -EINVAL;
37646 +
37647 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37648 + return 0;
37649 +
37650 dirent = buf->previous;
37651
37652 if (dirent) {
37653 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
37654 buf.previous = NULL;
37655 buf.count = count;
37656 buf.error = 0;
37657 + buf.file = file;
37658
37659 error = vfs_readdir(file, compat_filldir64, &buf);
37660 if (error >= 0)
37661 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
37662 * verify all the pointers
37663 */
37664 ret = -EINVAL;
37665 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
37666 + if (nr_segs > UIO_MAXIOV)
37667 goto out;
37668 if (!file->f_op)
37669 goto out;
37670 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
37671 compat_uptr_t __user *envp,
37672 struct pt_regs * regs)
37673 {
37674 +#ifdef CONFIG_GRKERNSEC
37675 + struct file *old_exec_file;
37676 + struct acl_subject_label *old_acl;
37677 + struct rlimit old_rlim[RLIM_NLIMITS];
37678 +#endif
37679 struct linux_binprm *bprm;
37680 struct file *file;
37681 struct files_struct *displaced;
37682 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
37683 bprm->filename = filename;
37684 bprm->interp = filename;
37685
37686 + if (gr_process_user_ban()) {
37687 + retval = -EPERM;
37688 + goto out_file;
37689 + }
37690 +
37691 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37692 + retval = -EAGAIN;
37693 + if (gr_handle_nproc())
37694 + goto out_file;
37695 + retval = -EACCES;
37696 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
37697 + goto out_file;
37698 +
37699 retval = bprm_mm_init(bprm);
37700 if (retval)
37701 goto out_file;
37702 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
37703 if (retval < 0)
37704 goto out;
37705
37706 + if (!gr_tpe_allow(file)) {
37707 + retval = -EACCES;
37708 + goto out;
37709 + }
37710 +
37711 + if (gr_check_crash_exec(file)) {
37712 + retval = -EACCES;
37713 + goto out;
37714 + }
37715 +
37716 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37717 +
37718 + gr_handle_exec_args_compat(bprm, argv);
37719 +
37720 +#ifdef CONFIG_GRKERNSEC
37721 + old_acl = current->acl;
37722 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37723 + old_exec_file = current->exec_file;
37724 + get_file(file);
37725 + current->exec_file = file;
37726 +#endif
37727 +
37728 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37729 + bprm->unsafe & LSM_UNSAFE_SHARE);
37730 + if (retval < 0)
37731 + goto out_fail;
37732 +
37733 retval = search_binary_handler(bprm, regs);
37734 if (retval < 0)
37735 - goto out;
37736 + goto out_fail;
37737 +#ifdef CONFIG_GRKERNSEC
37738 + if (old_exec_file)
37739 + fput(old_exec_file);
37740 +#endif
37741
37742 /* execve succeeded */
37743 current->fs->in_exec = 0;
37744 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
37745 put_files_struct(displaced);
37746 return retval;
37747
37748 +out_fail:
37749 +#ifdef CONFIG_GRKERNSEC
37750 + current->acl = old_acl;
37751 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37752 + fput(current->exec_file);
37753 + current->exec_file = old_exec_file;
37754 +#endif
37755 +
37756 out:
37757 if (bprm->mm) {
37758 acct_arg_size(bprm, 0);
37759 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
37760 struct fdtable *fdt;
37761 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
37762
37763 + pax_track_stack();
37764 +
37765 if (n < 0)
37766 goto out_nofds;
37767
37768 diff -urNp linux-2.6.32.42/fs/compat_ioctl.c linux-2.6.32.42/fs/compat_ioctl.c
37769 --- linux-2.6.32.42/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
37770 +++ linux-2.6.32.42/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
37771 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
37772 up = (struct compat_video_spu_palette __user *) arg;
37773 err = get_user(palp, &up->palette);
37774 err |= get_user(length, &up->length);
37775 + if (err)
37776 + return -EFAULT;
37777
37778 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
37779 err = put_user(compat_ptr(palp), &up_native->palette);
37780 diff -urNp linux-2.6.32.42/fs/configfs/dir.c linux-2.6.32.42/fs/configfs/dir.c
37781 --- linux-2.6.32.42/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
37782 +++ linux-2.6.32.42/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
37783 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
37784 }
37785 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
37786 struct configfs_dirent *next;
37787 - const char * name;
37788 + const unsigned char * name;
37789 + char d_name[sizeof(next->s_dentry->d_iname)];
37790 int len;
37791
37792 next = list_entry(p, struct configfs_dirent,
37793 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
37794 continue;
37795
37796 name = configfs_get_name(next);
37797 - len = strlen(name);
37798 + if (next->s_dentry && name == next->s_dentry->d_iname) {
37799 + len = next->s_dentry->d_name.len;
37800 + memcpy(d_name, name, len);
37801 + name = d_name;
37802 + } else
37803 + len = strlen(name);
37804 if (next->s_dentry)
37805 ino = next->s_dentry->d_inode->i_ino;
37806 else
37807 diff -urNp linux-2.6.32.42/fs/dcache.c linux-2.6.32.42/fs/dcache.c
37808 --- linux-2.6.32.42/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
37809 +++ linux-2.6.32.42/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
37810 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
37811
37812 static struct kmem_cache *dentry_cache __read_mostly;
37813
37814 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
37815 -
37816 /*
37817 * This is the single most critical data structure when it comes
37818 * to the dcache: the hashtable for lookups. Somebody should try
37819 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
37820 mempages -= reserve;
37821
37822 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
37823 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
37824 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
37825
37826 dcache_init();
37827 inode_init();
37828 diff -urNp linux-2.6.32.42/fs/dlm/lockspace.c linux-2.6.32.42/fs/dlm/lockspace.c
37829 --- linux-2.6.32.42/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
37830 +++ linux-2.6.32.42/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
37831 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
37832 kfree(ls);
37833 }
37834
37835 -static struct sysfs_ops dlm_attr_ops = {
37836 +static const struct sysfs_ops dlm_attr_ops = {
37837 .show = dlm_attr_show,
37838 .store = dlm_attr_store,
37839 };
37840 diff -urNp linux-2.6.32.42/fs/ecryptfs/inode.c linux-2.6.32.42/fs/ecryptfs/inode.c
37841 --- linux-2.6.32.42/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37842 +++ linux-2.6.32.42/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
37843 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
37844 old_fs = get_fs();
37845 set_fs(get_ds());
37846 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
37847 - (char __user *)lower_buf,
37848 + (__force char __user *)lower_buf,
37849 lower_bufsiz);
37850 set_fs(old_fs);
37851 if (rc < 0)
37852 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
37853 }
37854 old_fs = get_fs();
37855 set_fs(get_ds());
37856 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
37857 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
37858 set_fs(old_fs);
37859 if (rc < 0)
37860 goto out_free;
37861 diff -urNp linux-2.6.32.42/fs/exec.c linux-2.6.32.42/fs/exec.c
37862 --- linux-2.6.32.42/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
37863 +++ linux-2.6.32.42/fs/exec.c 2011-06-25 12:56:37.000000000 -0400
37864 @@ -56,12 +56,24 @@
37865 #include <linux/fsnotify.h>
37866 #include <linux/fs_struct.h>
37867 #include <linux/pipe_fs_i.h>
37868 +#include <linux/random.h>
37869 +#include <linux/seq_file.h>
37870 +
37871 +#ifdef CONFIG_PAX_REFCOUNT
37872 +#include <linux/kallsyms.h>
37873 +#include <linux/kdebug.h>
37874 +#endif
37875
37876 #include <asm/uaccess.h>
37877 #include <asm/mmu_context.h>
37878 #include <asm/tlb.h>
37879 #include "internal.h"
37880
37881 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
37882 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
37883 +EXPORT_SYMBOL(pax_set_initial_flags_func);
37884 +#endif
37885 +
37886 int core_uses_pid;
37887 char core_pattern[CORENAME_MAX_SIZE] = "core";
37888 unsigned int core_pipe_limit;
37889 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
37890 goto out;
37891
37892 file = do_filp_open(AT_FDCWD, tmp,
37893 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37894 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37895 MAY_READ | MAY_EXEC | MAY_OPEN);
37896 putname(tmp);
37897 error = PTR_ERR(file);
37898 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
37899 int write)
37900 {
37901 struct page *page;
37902 - int ret;
37903
37904 -#ifdef CONFIG_STACK_GROWSUP
37905 - if (write) {
37906 - ret = expand_stack_downwards(bprm->vma, pos);
37907 - if (ret < 0)
37908 - return NULL;
37909 - }
37910 -#endif
37911 - ret = get_user_pages(current, bprm->mm, pos,
37912 - 1, write, 1, &page, NULL);
37913 - if (ret <= 0)
37914 + if (0 > expand_stack_downwards(bprm->vma, pos))
37915 + return NULL;
37916 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
37917 return NULL;
37918
37919 if (write) {
37920 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
37921 vma->vm_end = STACK_TOP_MAX;
37922 vma->vm_start = vma->vm_end - PAGE_SIZE;
37923 vma->vm_flags = VM_STACK_FLAGS;
37924 +
37925 +#ifdef CONFIG_PAX_SEGMEXEC
37926 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
37927 +#endif
37928 +
37929 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
37930
37931 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
37932 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
37933 mm->stack_vm = mm->total_vm = 1;
37934 up_write(&mm->mmap_sem);
37935 bprm->p = vma->vm_end - sizeof(void *);
37936 +
37937 +#ifdef CONFIG_PAX_RANDUSTACK
37938 + if (randomize_va_space)
37939 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
37940 +#endif
37941 +
37942 return 0;
37943 err:
37944 up_write(&mm->mmap_sem);
37945 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
37946 int r;
37947 mm_segment_t oldfs = get_fs();
37948 set_fs(KERNEL_DS);
37949 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
37950 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
37951 set_fs(oldfs);
37952 return r;
37953 }
37954 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
37955 unsigned long new_end = old_end - shift;
37956 struct mmu_gather *tlb;
37957
37958 - BUG_ON(new_start > new_end);
37959 + if (new_start >= new_end || new_start < mmap_min_addr)
37960 + return -ENOMEM;
37961
37962 /*
37963 * ensure there are no vmas between where we want to go
37964 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
37965 if (vma != find_vma(mm, new_start))
37966 return -EFAULT;
37967
37968 +#ifdef CONFIG_PAX_SEGMEXEC
37969 + BUG_ON(pax_find_mirror_vma(vma));
37970 +#endif
37971 +
37972 /*
37973 * cover the whole range: [new_start, old_end)
37974 */
37975 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
37976 stack_top = arch_align_stack(stack_top);
37977 stack_top = PAGE_ALIGN(stack_top);
37978
37979 - if (unlikely(stack_top < mmap_min_addr) ||
37980 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
37981 - return -ENOMEM;
37982 -
37983 stack_shift = vma->vm_end - stack_top;
37984
37985 bprm->p -= stack_shift;
37986 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
37987 bprm->exec -= stack_shift;
37988
37989 down_write(&mm->mmap_sem);
37990 +
37991 + /* Move stack pages down in memory. */
37992 + if (stack_shift) {
37993 + ret = shift_arg_pages(vma, stack_shift);
37994 + if (ret)
37995 + goto out_unlock;
37996 + }
37997 +
37998 vm_flags = VM_STACK_FLAGS;
37999
38000 /*
38001 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
38002 vm_flags &= ~VM_EXEC;
38003 vm_flags |= mm->def_flags;
38004
38005 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38006 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38007 + vm_flags &= ~VM_EXEC;
38008 +
38009 +#ifdef CONFIG_PAX_MPROTECT
38010 + if (mm->pax_flags & MF_PAX_MPROTECT)
38011 + vm_flags &= ~VM_MAYEXEC;
38012 +#endif
38013 +
38014 + }
38015 +#endif
38016 +
38017 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
38018 vm_flags);
38019 if (ret)
38020 goto out_unlock;
38021 BUG_ON(prev != vma);
38022
38023 - /* Move stack pages down in memory. */
38024 - if (stack_shift) {
38025 - ret = shift_arg_pages(vma, stack_shift);
38026 - if (ret)
38027 - goto out_unlock;
38028 - }
38029 -
38030 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
38031 stack_size = vma->vm_end - vma->vm_start;
38032 /*
38033 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
38034 int err;
38035
38036 file = do_filp_open(AT_FDCWD, name,
38037 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38038 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38039 MAY_EXEC | MAY_OPEN);
38040 if (IS_ERR(file))
38041 goto out;
38042 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
38043 old_fs = get_fs();
38044 set_fs(get_ds());
38045 /* The cast to a user pointer is valid due to the set_fs() */
38046 - result = vfs_read(file, (void __user *)addr, count, &pos);
38047 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
38048 set_fs(old_fs);
38049 return result;
38050 }
38051 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
38052 }
38053 rcu_read_unlock();
38054
38055 - if (p->fs->users > n_fs) {
38056 + if (atomic_read(&p->fs->users) > n_fs) {
38057 bprm->unsafe |= LSM_UNSAFE_SHARE;
38058 } else {
38059 res = -EAGAIN;
38060 @@ -1347,6 +1376,11 @@ int do_execve(char * filename,
38061 char __user *__user *envp,
38062 struct pt_regs * regs)
38063 {
38064 +#ifdef CONFIG_GRKERNSEC
38065 + struct file *old_exec_file;
38066 + struct acl_subject_label *old_acl;
38067 + struct rlimit old_rlim[RLIM_NLIMITS];
38068 +#endif
38069 struct linux_binprm *bprm;
38070 struct file *file;
38071 struct files_struct *displaced;
38072 @@ -1383,6 +1417,23 @@ int do_execve(char * filename,
38073 bprm->filename = filename;
38074 bprm->interp = filename;
38075
38076 + if (gr_process_user_ban()) {
38077 + retval = -EPERM;
38078 + goto out_file;
38079 + }
38080 +
38081 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38082 +
38083 + if (gr_handle_nproc()) {
38084 + retval = -EAGAIN;
38085 + goto out_file;
38086 + }
38087 +
38088 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38089 + retval = -EACCES;
38090 + goto out_file;
38091 + }
38092 +
38093 retval = bprm_mm_init(bprm);
38094 if (retval)
38095 goto out_file;
38096 @@ -1412,10 +1463,41 @@ int do_execve(char * filename,
38097 if (retval < 0)
38098 goto out;
38099
38100 + if (!gr_tpe_allow(file)) {
38101 + retval = -EACCES;
38102 + goto out;
38103 + }
38104 +
38105 + if (gr_check_crash_exec(file)) {
38106 + retval = -EACCES;
38107 + goto out;
38108 + }
38109 +
38110 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38111 +
38112 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
38113 +
38114 +#ifdef CONFIG_GRKERNSEC
38115 + old_acl = current->acl;
38116 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38117 + old_exec_file = current->exec_file;
38118 + get_file(file);
38119 + current->exec_file = file;
38120 +#endif
38121 +
38122 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38123 + bprm->unsafe & LSM_UNSAFE_SHARE);
38124 + if (retval < 0)
38125 + goto out_fail;
38126 +
38127 current->flags &= ~PF_KTHREAD;
38128 retval = search_binary_handler(bprm,regs);
38129 if (retval < 0)
38130 - goto out;
38131 + goto out_fail;
38132 +#ifdef CONFIG_GRKERNSEC
38133 + if (old_exec_file)
38134 + fput(old_exec_file);
38135 +#endif
38136
38137 /* execve succeeded */
38138 current->fs->in_exec = 0;
38139 @@ -1426,6 +1508,14 @@ int do_execve(char * filename,
38140 put_files_struct(displaced);
38141 return retval;
38142
38143 +out_fail:
38144 +#ifdef CONFIG_GRKERNSEC
38145 + current->acl = old_acl;
38146 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38147 + fput(current->exec_file);
38148 + current->exec_file = old_exec_file;
38149 +#endif
38150 +
38151 out:
38152 if (bprm->mm) {
38153 acct_arg_size(bprm, 0);
38154 @@ -1591,6 +1681,220 @@ out:
38155 return ispipe;
38156 }
38157
38158 +int pax_check_flags(unsigned long *flags)
38159 +{
38160 + int retval = 0;
38161 +
38162 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38163 + if (*flags & MF_PAX_SEGMEXEC)
38164 + {
38165 + *flags &= ~MF_PAX_SEGMEXEC;
38166 + retval = -EINVAL;
38167 + }
38168 +#endif
38169 +
38170 + if ((*flags & MF_PAX_PAGEEXEC)
38171 +
38172 +#ifdef CONFIG_PAX_PAGEEXEC
38173 + && (*flags & MF_PAX_SEGMEXEC)
38174 +#endif
38175 +
38176 + )
38177 + {
38178 + *flags &= ~MF_PAX_PAGEEXEC;
38179 + retval = -EINVAL;
38180 + }
38181 +
38182 + if ((*flags & MF_PAX_MPROTECT)
38183 +
38184 +#ifdef CONFIG_PAX_MPROTECT
38185 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38186 +#endif
38187 +
38188 + )
38189 + {
38190 + *flags &= ~MF_PAX_MPROTECT;
38191 + retval = -EINVAL;
38192 + }
38193 +
38194 + if ((*flags & MF_PAX_EMUTRAMP)
38195 +
38196 +#ifdef CONFIG_PAX_EMUTRAMP
38197 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38198 +#endif
38199 +
38200 + )
38201 + {
38202 + *flags &= ~MF_PAX_EMUTRAMP;
38203 + retval = -EINVAL;
38204 + }
38205 +
38206 + return retval;
38207 +}
38208 +
38209 +EXPORT_SYMBOL(pax_check_flags);
38210 +
38211 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38212 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38213 +{
38214 + struct task_struct *tsk = current;
38215 + struct mm_struct *mm = current->mm;
38216 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38217 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38218 + char *path_exec = NULL;
38219 + char *path_fault = NULL;
38220 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
38221 +
38222 + if (buffer_exec && buffer_fault) {
38223 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38224 +
38225 + down_read(&mm->mmap_sem);
38226 + vma = mm->mmap;
38227 + while (vma && (!vma_exec || !vma_fault)) {
38228 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38229 + vma_exec = vma;
38230 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38231 + vma_fault = vma;
38232 + vma = vma->vm_next;
38233 + }
38234 + if (vma_exec) {
38235 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38236 + if (IS_ERR(path_exec))
38237 + path_exec = "<path too long>";
38238 + else {
38239 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38240 + if (path_exec) {
38241 + *path_exec = 0;
38242 + path_exec = buffer_exec;
38243 + } else
38244 + path_exec = "<path too long>";
38245 + }
38246 + }
38247 + if (vma_fault) {
38248 + start = vma_fault->vm_start;
38249 + end = vma_fault->vm_end;
38250 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38251 + if (vma_fault->vm_file) {
38252 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38253 + if (IS_ERR(path_fault))
38254 + path_fault = "<path too long>";
38255 + else {
38256 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38257 + if (path_fault) {
38258 + *path_fault = 0;
38259 + path_fault = buffer_fault;
38260 + } else
38261 + path_fault = "<path too long>";
38262 + }
38263 + } else
38264 + path_fault = "<anonymous mapping>";
38265 + }
38266 + up_read(&mm->mmap_sem);
38267 + }
38268 + if (tsk->signal->curr_ip)
38269 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38270 + else
38271 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38272 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38273 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38274 + task_uid(tsk), task_euid(tsk), pc, sp);
38275 + free_page((unsigned long)buffer_exec);
38276 + free_page((unsigned long)buffer_fault);
38277 + pax_report_insns(pc, sp);
38278 + do_coredump(SIGKILL, SIGKILL, regs);
38279 +}
38280 +#endif
38281 +
38282 +#ifdef CONFIG_PAX_REFCOUNT
38283 +void pax_report_refcount_overflow(struct pt_regs *regs)
38284 +{
38285 + if (current->signal->curr_ip)
38286 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38287 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38288 + else
38289 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38290 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38291 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
38292 + show_regs(regs);
38293 + force_sig_specific(SIGKILL, current);
38294 +}
38295 +#endif
38296 +
38297 +#ifdef CONFIG_PAX_USERCOPY
38298 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38299 +int object_is_on_stack(const void *obj, unsigned long len)
38300 +{
38301 + const void * const stack = task_stack_page(current);
38302 + const void * const stackend = stack + THREAD_SIZE;
38303 +
38304 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38305 + const void *frame = NULL;
38306 + const void *oldframe;
38307 +#endif
38308 +
38309 + if (obj + len < obj)
38310 + return -1;
38311 +
38312 + if (obj + len <= stack || stackend <= obj)
38313 + return 0;
38314 +
38315 + if (obj < stack || stackend < obj + len)
38316 + return -1;
38317 +
38318 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38319 + oldframe = __builtin_frame_address(1);
38320 + if (oldframe)
38321 + frame = __builtin_frame_address(2);
38322 + /*
38323 + low ----------------------------------------------> high
38324 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38325 + ^----------------^
38326 + allow copies only within here
38327 + */
38328 + while (stack <= frame && frame < stackend) {
38329 + /* if obj + len extends past the last frame, this
38330 + check won't pass and the next frame will be 0,
38331 + causing us to bail out and correctly report
38332 + the copy as invalid
38333 + */
38334 + if (obj + len <= frame)
38335 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38336 + oldframe = frame;
38337 + frame = *(const void * const *)frame;
38338 + }
38339 + return -1;
38340 +#else
38341 + return 1;
38342 +#endif
38343 +}
38344 +
38345 +
38346 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38347 +{
38348 + if (current->signal->curr_ip)
38349 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38350 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38351 + else
38352 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38353 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38354 +
38355 + dump_stack();
38356 + gr_handle_kernel_exploit();
38357 + do_group_exit(SIGKILL);
38358 +}
38359 +#endif
38360 +
38361 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38362 +void pax_track_stack(void)
38363 +{
38364 + unsigned long sp = (unsigned long)&sp;
38365 + if (sp < current_thread_info()->lowest_stack &&
38366 + sp > (unsigned long)task_stack_page(current))
38367 + current_thread_info()->lowest_stack = sp;
38368 +}
38369 +EXPORT_SYMBOL(pax_track_stack);
38370 +#endif
38371 +
38372 static int zap_process(struct task_struct *start)
38373 {
38374 struct task_struct *t;
38375 @@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
38376 pipe = file->f_path.dentry->d_inode->i_pipe;
38377
38378 pipe_lock(pipe);
38379 - pipe->readers++;
38380 - pipe->writers--;
38381 + atomic_inc(&pipe->readers);
38382 + atomic_dec(&pipe->writers);
38383
38384 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38385 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38386 wake_up_interruptible_sync(&pipe->wait);
38387 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38388 pipe_wait(pipe);
38389 }
38390
38391 - pipe->readers--;
38392 - pipe->writers++;
38393 + atomic_dec(&pipe->readers);
38394 + atomic_inc(&pipe->writers);
38395 pipe_unlock(pipe);
38396
38397 }
38398 @@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
38399 char **helper_argv = NULL;
38400 int helper_argc = 0;
38401 int dump_count = 0;
38402 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38403 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38404
38405 audit_core_dumps(signr);
38406
38407 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38408 + gr_handle_brute_attach(current, mm->flags);
38409 +
38410 binfmt = mm->binfmt;
38411 if (!binfmt || !binfmt->core_dump)
38412 goto fail;
38413 @@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
38414 */
38415 clear_thread_flag(TIF_SIGPENDING);
38416
38417 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38418 +
38419 /*
38420 * lock_kernel() because format_corename() is controlled by sysctl, which
38421 * uses lock_kernel()
38422 @@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
38423 goto fail_unlock;
38424 }
38425
38426 - dump_count = atomic_inc_return(&core_dump_count);
38427 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38428 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38429 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38430 task_tgid_vnr(current), current->comm);
38431 @@ -1972,7 +2281,7 @@ close_fail:
38432 filp_close(file, NULL);
38433 fail_dropcount:
38434 if (dump_count)
38435 - atomic_dec(&core_dump_count);
38436 + atomic_dec_unchecked(&core_dump_count);
38437 fail_unlock:
38438 if (helper_argv)
38439 argv_free(helper_argv);
38440 diff -urNp linux-2.6.32.42/fs/ext2/balloc.c linux-2.6.32.42/fs/ext2/balloc.c
38441 --- linux-2.6.32.42/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38442 +++ linux-2.6.32.42/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38443 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38444
38445 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38446 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38447 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38448 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38449 sbi->s_resuid != current_fsuid() &&
38450 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38451 return 0;
38452 diff -urNp linux-2.6.32.42/fs/ext3/balloc.c linux-2.6.32.42/fs/ext3/balloc.c
38453 --- linux-2.6.32.42/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38454 +++ linux-2.6.32.42/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38455 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38456
38457 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38458 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38459 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38460 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38461 sbi->s_resuid != current_fsuid() &&
38462 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38463 return 0;
38464 diff -urNp linux-2.6.32.42/fs/ext4/balloc.c linux-2.6.32.42/fs/ext4/balloc.c
38465 --- linux-2.6.32.42/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38466 +++ linux-2.6.32.42/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38467 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38468 /* Hm, nope. Are (enough) root reserved blocks available? */
38469 if (sbi->s_resuid == current_fsuid() ||
38470 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38471 - capable(CAP_SYS_RESOURCE)) {
38472 + capable_nolog(CAP_SYS_RESOURCE)) {
38473 if (free_blocks >= (nblocks + dirty_blocks))
38474 return 1;
38475 }
38476 diff -urNp linux-2.6.32.42/fs/ext4/ext4.h linux-2.6.32.42/fs/ext4/ext4.h
38477 --- linux-2.6.32.42/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38478 +++ linux-2.6.32.42/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38479 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38480
38481 /* stats for buddy allocator */
38482 spinlock_t s_mb_pa_lock;
38483 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38484 - atomic_t s_bal_success; /* we found long enough chunks */
38485 - atomic_t s_bal_allocated; /* in blocks */
38486 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38487 - atomic_t s_bal_goals; /* goal hits */
38488 - atomic_t s_bal_breaks; /* too long searches */
38489 - atomic_t s_bal_2orders; /* 2^order hits */
38490 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38491 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38492 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38493 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38494 + atomic_unchecked_t s_bal_goals; /* goal hits */
38495 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38496 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38497 spinlock_t s_bal_lock;
38498 unsigned long s_mb_buddies_generated;
38499 unsigned long long s_mb_generation_time;
38500 - atomic_t s_mb_lost_chunks;
38501 - atomic_t s_mb_preallocated;
38502 - atomic_t s_mb_discarded;
38503 + atomic_unchecked_t s_mb_lost_chunks;
38504 + atomic_unchecked_t s_mb_preallocated;
38505 + atomic_unchecked_t s_mb_discarded;
38506 atomic_t s_lock_busy;
38507
38508 /* locality groups */
38509 diff -urNp linux-2.6.32.42/fs/ext4/mballoc.c linux-2.6.32.42/fs/ext4/mballoc.c
38510 --- linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
38511 +++ linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
38512 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
38513 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38514
38515 if (EXT4_SB(sb)->s_mb_stats)
38516 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38517 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38518
38519 break;
38520 }
38521 @@ -2131,7 +2131,7 @@ repeat:
38522 ac->ac_status = AC_STATUS_CONTINUE;
38523 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38524 cr = 3;
38525 - atomic_inc(&sbi->s_mb_lost_chunks);
38526 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38527 goto repeat;
38528 }
38529 }
38530 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
38531 ext4_grpblk_t counters[16];
38532 } sg;
38533
38534 + pax_track_stack();
38535 +
38536 group--;
38537 if (group == 0)
38538 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38539 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
38540 if (sbi->s_mb_stats) {
38541 printk(KERN_INFO
38542 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38543 - atomic_read(&sbi->s_bal_allocated),
38544 - atomic_read(&sbi->s_bal_reqs),
38545 - atomic_read(&sbi->s_bal_success));
38546 + atomic_read_unchecked(&sbi->s_bal_allocated),
38547 + atomic_read_unchecked(&sbi->s_bal_reqs),
38548 + atomic_read_unchecked(&sbi->s_bal_success));
38549 printk(KERN_INFO
38550 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38551 "%u 2^N hits, %u breaks, %u lost\n",
38552 - atomic_read(&sbi->s_bal_ex_scanned),
38553 - atomic_read(&sbi->s_bal_goals),
38554 - atomic_read(&sbi->s_bal_2orders),
38555 - atomic_read(&sbi->s_bal_breaks),
38556 - atomic_read(&sbi->s_mb_lost_chunks));
38557 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38558 + atomic_read_unchecked(&sbi->s_bal_goals),
38559 + atomic_read_unchecked(&sbi->s_bal_2orders),
38560 + atomic_read_unchecked(&sbi->s_bal_breaks),
38561 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38562 printk(KERN_INFO
38563 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38564 sbi->s_mb_buddies_generated++,
38565 sbi->s_mb_generation_time);
38566 printk(KERN_INFO
38567 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38568 - atomic_read(&sbi->s_mb_preallocated),
38569 - atomic_read(&sbi->s_mb_discarded));
38570 + atomic_read_unchecked(&sbi->s_mb_preallocated),
38571 + atomic_read_unchecked(&sbi->s_mb_discarded));
38572 }
38573
38574 free_percpu(sbi->s_locality_groups);
38575 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
38576 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38577
38578 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38579 - atomic_inc(&sbi->s_bal_reqs);
38580 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38581 + atomic_inc_unchecked(&sbi->s_bal_reqs);
38582 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38583 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38584 - atomic_inc(&sbi->s_bal_success);
38585 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38586 + atomic_inc_unchecked(&sbi->s_bal_success);
38587 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38588 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38589 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38590 - atomic_inc(&sbi->s_bal_goals);
38591 + atomic_inc_unchecked(&sbi->s_bal_goals);
38592 if (ac->ac_found > sbi->s_mb_max_to_scan)
38593 - atomic_inc(&sbi->s_bal_breaks);
38594 + atomic_inc_unchecked(&sbi->s_bal_breaks);
38595 }
38596
38597 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38598 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38599 trace_ext4_mb_new_inode_pa(ac, pa);
38600
38601 ext4_mb_use_inode_pa(ac, pa);
38602 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38603 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38604
38605 ei = EXT4_I(ac->ac_inode);
38606 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38607 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38608 trace_ext4_mb_new_group_pa(ac, pa);
38609
38610 ext4_mb_use_group_pa(ac, pa);
38611 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38612 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38613
38614 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38615 lg = ac->ac_lg;
38616 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38617 * from the bitmap and continue.
38618 */
38619 }
38620 - atomic_add(free, &sbi->s_mb_discarded);
38621 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
38622
38623 return err;
38624 }
38625 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38626 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38627 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38628 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38629 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38630 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38631
38632 if (ac) {
38633 ac->ac_sb = sb;
38634 diff -urNp linux-2.6.32.42/fs/ext4/super.c linux-2.6.32.42/fs/ext4/super.c
38635 --- linux-2.6.32.42/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
38636 +++ linux-2.6.32.42/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
38637 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
38638 }
38639
38640
38641 -static struct sysfs_ops ext4_attr_ops = {
38642 +static const struct sysfs_ops ext4_attr_ops = {
38643 .show = ext4_attr_show,
38644 .store = ext4_attr_store,
38645 };
38646 diff -urNp linux-2.6.32.42/fs/fcntl.c linux-2.6.32.42/fs/fcntl.c
38647 --- linux-2.6.32.42/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
38648 +++ linux-2.6.32.42/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
38649 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
38650 if (err)
38651 return err;
38652
38653 + if (gr_handle_chroot_fowner(pid, type))
38654 + return -ENOENT;
38655 + if (gr_check_protected_task_fowner(pid, type))
38656 + return -EACCES;
38657 +
38658 f_modown(filp, pid, type, force);
38659 return 0;
38660 }
38661 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
38662 switch (cmd) {
38663 case F_DUPFD:
38664 case F_DUPFD_CLOEXEC:
38665 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
38666 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38667 break;
38668 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
38669 diff -urNp linux-2.6.32.42/fs/fifo.c linux-2.6.32.42/fs/fifo.c
38670 --- linux-2.6.32.42/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
38671 +++ linux-2.6.32.42/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
38672 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
38673 */
38674 filp->f_op = &read_pipefifo_fops;
38675 pipe->r_counter++;
38676 - if (pipe->readers++ == 0)
38677 + if (atomic_inc_return(&pipe->readers) == 1)
38678 wake_up_partner(inode);
38679
38680 - if (!pipe->writers) {
38681 + if (!atomic_read(&pipe->writers)) {
38682 if ((filp->f_flags & O_NONBLOCK)) {
38683 /* suppress POLLHUP until we have
38684 * seen a writer */
38685 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
38686 * errno=ENXIO when there is no process reading the FIFO.
38687 */
38688 ret = -ENXIO;
38689 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
38690 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
38691 goto err;
38692
38693 filp->f_op = &write_pipefifo_fops;
38694 pipe->w_counter++;
38695 - if (!pipe->writers++)
38696 + if (atomic_inc_return(&pipe->writers) == 1)
38697 wake_up_partner(inode);
38698
38699 - if (!pipe->readers) {
38700 + if (!atomic_read(&pipe->readers)) {
38701 wait_for_partner(inode, &pipe->r_counter);
38702 if (signal_pending(current))
38703 goto err_wr;
38704 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
38705 */
38706 filp->f_op = &rdwr_pipefifo_fops;
38707
38708 - pipe->readers++;
38709 - pipe->writers++;
38710 + atomic_inc(&pipe->readers);
38711 + atomic_inc(&pipe->writers);
38712 pipe->r_counter++;
38713 pipe->w_counter++;
38714 - if (pipe->readers == 1 || pipe->writers == 1)
38715 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
38716 wake_up_partner(inode);
38717 break;
38718
38719 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
38720 return 0;
38721
38722 err_rd:
38723 - if (!--pipe->readers)
38724 + if (atomic_dec_and_test(&pipe->readers))
38725 wake_up_interruptible(&pipe->wait);
38726 ret = -ERESTARTSYS;
38727 goto err;
38728
38729 err_wr:
38730 - if (!--pipe->writers)
38731 + if (atomic_dec_and_test(&pipe->writers))
38732 wake_up_interruptible(&pipe->wait);
38733 ret = -ERESTARTSYS;
38734 goto err;
38735
38736 err:
38737 - if (!pipe->readers && !pipe->writers)
38738 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
38739 free_pipe_info(inode);
38740
38741 err_nocleanup:
38742 diff -urNp linux-2.6.32.42/fs/file.c linux-2.6.32.42/fs/file.c
38743 --- linux-2.6.32.42/fs/file.c 2011-03-27 14:31:47.000000000 -0400
38744 +++ linux-2.6.32.42/fs/file.c 2011-04-17 15:56:46.000000000 -0400
38745 @@ -14,6 +14,7 @@
38746 #include <linux/slab.h>
38747 #include <linux/vmalloc.h>
38748 #include <linux/file.h>
38749 +#include <linux/security.h>
38750 #include <linux/fdtable.h>
38751 #include <linux/bitops.h>
38752 #include <linux/interrupt.h>
38753 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
38754 * N.B. For clone tasks sharing a files structure, this test
38755 * will limit the total number of files that can be opened.
38756 */
38757 +
38758 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
38759 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38760 return -EMFILE;
38761
38762 diff -urNp linux-2.6.32.42/fs/filesystems.c linux-2.6.32.42/fs/filesystems.c
38763 --- linux-2.6.32.42/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
38764 +++ linux-2.6.32.42/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
38765 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
38766 int len = dot ? dot - name : strlen(name);
38767
38768 fs = __get_fs_type(name, len);
38769 +
38770 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
38771 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
38772 +#else
38773 if (!fs && (request_module("%.*s", len, name) == 0))
38774 +#endif
38775 fs = __get_fs_type(name, len);
38776
38777 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
38778 diff -urNp linux-2.6.32.42/fs/fscache/cookie.c linux-2.6.32.42/fs/fscache/cookie.c
38779 --- linux-2.6.32.42/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
38780 +++ linux-2.6.32.42/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
38781 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
38782 parent ? (char *) parent->def->name : "<no-parent>",
38783 def->name, netfs_data);
38784
38785 - fscache_stat(&fscache_n_acquires);
38786 + fscache_stat_unchecked(&fscache_n_acquires);
38787
38788 /* if there's no parent cookie, then we don't create one here either */
38789 if (!parent) {
38790 - fscache_stat(&fscache_n_acquires_null);
38791 + fscache_stat_unchecked(&fscache_n_acquires_null);
38792 _leave(" [no parent]");
38793 return NULL;
38794 }
38795 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
38796 /* allocate and initialise a cookie */
38797 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
38798 if (!cookie) {
38799 - fscache_stat(&fscache_n_acquires_oom);
38800 + fscache_stat_unchecked(&fscache_n_acquires_oom);
38801 _leave(" [ENOMEM]");
38802 return NULL;
38803 }
38804 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
38805
38806 switch (cookie->def->type) {
38807 case FSCACHE_COOKIE_TYPE_INDEX:
38808 - fscache_stat(&fscache_n_cookie_index);
38809 + fscache_stat_unchecked(&fscache_n_cookie_index);
38810 break;
38811 case FSCACHE_COOKIE_TYPE_DATAFILE:
38812 - fscache_stat(&fscache_n_cookie_data);
38813 + fscache_stat_unchecked(&fscache_n_cookie_data);
38814 break;
38815 default:
38816 - fscache_stat(&fscache_n_cookie_special);
38817 + fscache_stat_unchecked(&fscache_n_cookie_special);
38818 break;
38819 }
38820
38821 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
38822 if (fscache_acquire_non_index_cookie(cookie) < 0) {
38823 atomic_dec(&parent->n_children);
38824 __fscache_cookie_put(cookie);
38825 - fscache_stat(&fscache_n_acquires_nobufs);
38826 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
38827 _leave(" = NULL");
38828 return NULL;
38829 }
38830 }
38831
38832 - fscache_stat(&fscache_n_acquires_ok);
38833 + fscache_stat_unchecked(&fscache_n_acquires_ok);
38834 _leave(" = %p", cookie);
38835 return cookie;
38836 }
38837 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
38838 cache = fscache_select_cache_for_object(cookie->parent);
38839 if (!cache) {
38840 up_read(&fscache_addremove_sem);
38841 - fscache_stat(&fscache_n_acquires_no_cache);
38842 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
38843 _leave(" = -ENOMEDIUM [no cache]");
38844 return -ENOMEDIUM;
38845 }
38846 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
38847 object = cache->ops->alloc_object(cache, cookie);
38848 fscache_stat_d(&fscache_n_cop_alloc_object);
38849 if (IS_ERR(object)) {
38850 - fscache_stat(&fscache_n_object_no_alloc);
38851 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
38852 ret = PTR_ERR(object);
38853 goto error;
38854 }
38855
38856 - fscache_stat(&fscache_n_object_alloc);
38857 + fscache_stat_unchecked(&fscache_n_object_alloc);
38858
38859 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
38860
38861 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
38862 struct fscache_object *object;
38863 struct hlist_node *_p;
38864
38865 - fscache_stat(&fscache_n_updates);
38866 + fscache_stat_unchecked(&fscache_n_updates);
38867
38868 if (!cookie) {
38869 - fscache_stat(&fscache_n_updates_null);
38870 + fscache_stat_unchecked(&fscache_n_updates_null);
38871 _leave(" [no cookie]");
38872 return;
38873 }
38874 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
38875 struct fscache_object *object;
38876 unsigned long event;
38877
38878 - fscache_stat(&fscache_n_relinquishes);
38879 + fscache_stat_unchecked(&fscache_n_relinquishes);
38880 if (retire)
38881 - fscache_stat(&fscache_n_relinquishes_retire);
38882 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
38883
38884 if (!cookie) {
38885 - fscache_stat(&fscache_n_relinquishes_null);
38886 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
38887 _leave(" [no cookie]");
38888 return;
38889 }
38890 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
38891
38892 /* wait for the cookie to finish being instantiated (or to fail) */
38893 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
38894 - fscache_stat(&fscache_n_relinquishes_waitcrt);
38895 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
38896 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
38897 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
38898 }
38899 diff -urNp linux-2.6.32.42/fs/fscache/internal.h linux-2.6.32.42/fs/fscache/internal.h
38900 --- linux-2.6.32.42/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
38901 +++ linux-2.6.32.42/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
38902 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
38903 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
38904 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
38905
38906 -extern atomic_t fscache_n_op_pend;
38907 -extern atomic_t fscache_n_op_run;
38908 -extern atomic_t fscache_n_op_enqueue;
38909 -extern atomic_t fscache_n_op_deferred_release;
38910 -extern atomic_t fscache_n_op_release;
38911 -extern atomic_t fscache_n_op_gc;
38912 -extern atomic_t fscache_n_op_cancelled;
38913 -extern atomic_t fscache_n_op_rejected;
38914 -
38915 -extern atomic_t fscache_n_attr_changed;
38916 -extern atomic_t fscache_n_attr_changed_ok;
38917 -extern atomic_t fscache_n_attr_changed_nobufs;
38918 -extern atomic_t fscache_n_attr_changed_nomem;
38919 -extern atomic_t fscache_n_attr_changed_calls;
38920 -
38921 -extern atomic_t fscache_n_allocs;
38922 -extern atomic_t fscache_n_allocs_ok;
38923 -extern atomic_t fscache_n_allocs_wait;
38924 -extern atomic_t fscache_n_allocs_nobufs;
38925 -extern atomic_t fscache_n_allocs_intr;
38926 -extern atomic_t fscache_n_allocs_object_dead;
38927 -extern atomic_t fscache_n_alloc_ops;
38928 -extern atomic_t fscache_n_alloc_op_waits;
38929 -
38930 -extern atomic_t fscache_n_retrievals;
38931 -extern atomic_t fscache_n_retrievals_ok;
38932 -extern atomic_t fscache_n_retrievals_wait;
38933 -extern atomic_t fscache_n_retrievals_nodata;
38934 -extern atomic_t fscache_n_retrievals_nobufs;
38935 -extern atomic_t fscache_n_retrievals_intr;
38936 -extern atomic_t fscache_n_retrievals_nomem;
38937 -extern atomic_t fscache_n_retrievals_object_dead;
38938 -extern atomic_t fscache_n_retrieval_ops;
38939 -extern atomic_t fscache_n_retrieval_op_waits;
38940 -
38941 -extern atomic_t fscache_n_stores;
38942 -extern atomic_t fscache_n_stores_ok;
38943 -extern atomic_t fscache_n_stores_again;
38944 -extern atomic_t fscache_n_stores_nobufs;
38945 -extern atomic_t fscache_n_stores_oom;
38946 -extern atomic_t fscache_n_store_ops;
38947 -extern atomic_t fscache_n_store_calls;
38948 -extern atomic_t fscache_n_store_pages;
38949 -extern atomic_t fscache_n_store_radix_deletes;
38950 -extern atomic_t fscache_n_store_pages_over_limit;
38951 -
38952 -extern atomic_t fscache_n_store_vmscan_not_storing;
38953 -extern atomic_t fscache_n_store_vmscan_gone;
38954 -extern atomic_t fscache_n_store_vmscan_busy;
38955 -extern atomic_t fscache_n_store_vmscan_cancelled;
38956 -
38957 -extern atomic_t fscache_n_marks;
38958 -extern atomic_t fscache_n_uncaches;
38959 -
38960 -extern atomic_t fscache_n_acquires;
38961 -extern atomic_t fscache_n_acquires_null;
38962 -extern atomic_t fscache_n_acquires_no_cache;
38963 -extern atomic_t fscache_n_acquires_ok;
38964 -extern atomic_t fscache_n_acquires_nobufs;
38965 -extern atomic_t fscache_n_acquires_oom;
38966 -
38967 -extern atomic_t fscache_n_updates;
38968 -extern atomic_t fscache_n_updates_null;
38969 -extern atomic_t fscache_n_updates_run;
38970 -
38971 -extern atomic_t fscache_n_relinquishes;
38972 -extern atomic_t fscache_n_relinquishes_null;
38973 -extern atomic_t fscache_n_relinquishes_waitcrt;
38974 -extern atomic_t fscache_n_relinquishes_retire;
38975 -
38976 -extern atomic_t fscache_n_cookie_index;
38977 -extern atomic_t fscache_n_cookie_data;
38978 -extern atomic_t fscache_n_cookie_special;
38979 -
38980 -extern atomic_t fscache_n_object_alloc;
38981 -extern atomic_t fscache_n_object_no_alloc;
38982 -extern atomic_t fscache_n_object_lookups;
38983 -extern atomic_t fscache_n_object_lookups_negative;
38984 -extern atomic_t fscache_n_object_lookups_positive;
38985 -extern atomic_t fscache_n_object_lookups_timed_out;
38986 -extern atomic_t fscache_n_object_created;
38987 -extern atomic_t fscache_n_object_avail;
38988 -extern atomic_t fscache_n_object_dead;
38989 -
38990 -extern atomic_t fscache_n_checkaux_none;
38991 -extern atomic_t fscache_n_checkaux_okay;
38992 -extern atomic_t fscache_n_checkaux_update;
38993 -extern atomic_t fscache_n_checkaux_obsolete;
38994 +extern atomic_unchecked_t fscache_n_op_pend;
38995 +extern atomic_unchecked_t fscache_n_op_run;
38996 +extern atomic_unchecked_t fscache_n_op_enqueue;
38997 +extern atomic_unchecked_t fscache_n_op_deferred_release;
38998 +extern atomic_unchecked_t fscache_n_op_release;
38999 +extern atomic_unchecked_t fscache_n_op_gc;
39000 +extern atomic_unchecked_t fscache_n_op_cancelled;
39001 +extern atomic_unchecked_t fscache_n_op_rejected;
39002 +
39003 +extern atomic_unchecked_t fscache_n_attr_changed;
39004 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
39005 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39006 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39007 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
39008 +
39009 +extern atomic_unchecked_t fscache_n_allocs;
39010 +extern atomic_unchecked_t fscache_n_allocs_ok;
39011 +extern atomic_unchecked_t fscache_n_allocs_wait;
39012 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
39013 +extern atomic_unchecked_t fscache_n_allocs_intr;
39014 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
39015 +extern atomic_unchecked_t fscache_n_alloc_ops;
39016 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
39017 +
39018 +extern atomic_unchecked_t fscache_n_retrievals;
39019 +extern atomic_unchecked_t fscache_n_retrievals_ok;
39020 +extern atomic_unchecked_t fscache_n_retrievals_wait;
39021 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
39022 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39023 +extern atomic_unchecked_t fscache_n_retrievals_intr;
39024 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
39025 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39026 +extern atomic_unchecked_t fscache_n_retrieval_ops;
39027 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39028 +
39029 +extern atomic_unchecked_t fscache_n_stores;
39030 +extern atomic_unchecked_t fscache_n_stores_ok;
39031 +extern atomic_unchecked_t fscache_n_stores_again;
39032 +extern atomic_unchecked_t fscache_n_stores_nobufs;
39033 +extern atomic_unchecked_t fscache_n_stores_oom;
39034 +extern atomic_unchecked_t fscache_n_store_ops;
39035 +extern atomic_unchecked_t fscache_n_store_calls;
39036 +extern atomic_unchecked_t fscache_n_store_pages;
39037 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
39038 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39039 +
39040 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39041 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39042 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39043 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39044 +
39045 +extern atomic_unchecked_t fscache_n_marks;
39046 +extern atomic_unchecked_t fscache_n_uncaches;
39047 +
39048 +extern atomic_unchecked_t fscache_n_acquires;
39049 +extern atomic_unchecked_t fscache_n_acquires_null;
39050 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
39051 +extern atomic_unchecked_t fscache_n_acquires_ok;
39052 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
39053 +extern atomic_unchecked_t fscache_n_acquires_oom;
39054 +
39055 +extern atomic_unchecked_t fscache_n_updates;
39056 +extern atomic_unchecked_t fscache_n_updates_null;
39057 +extern atomic_unchecked_t fscache_n_updates_run;
39058 +
39059 +extern atomic_unchecked_t fscache_n_relinquishes;
39060 +extern atomic_unchecked_t fscache_n_relinquishes_null;
39061 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39062 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
39063 +
39064 +extern atomic_unchecked_t fscache_n_cookie_index;
39065 +extern atomic_unchecked_t fscache_n_cookie_data;
39066 +extern atomic_unchecked_t fscache_n_cookie_special;
39067 +
39068 +extern atomic_unchecked_t fscache_n_object_alloc;
39069 +extern atomic_unchecked_t fscache_n_object_no_alloc;
39070 +extern atomic_unchecked_t fscache_n_object_lookups;
39071 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
39072 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
39073 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39074 +extern atomic_unchecked_t fscache_n_object_created;
39075 +extern atomic_unchecked_t fscache_n_object_avail;
39076 +extern atomic_unchecked_t fscache_n_object_dead;
39077 +
39078 +extern atomic_unchecked_t fscache_n_checkaux_none;
39079 +extern atomic_unchecked_t fscache_n_checkaux_okay;
39080 +extern atomic_unchecked_t fscache_n_checkaux_update;
39081 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39082
39083 extern atomic_t fscache_n_cop_alloc_object;
39084 extern atomic_t fscache_n_cop_lookup_object;
39085 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
39086 atomic_inc(stat);
39087 }
39088
39089 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39090 +{
39091 + atomic_inc_unchecked(stat);
39092 +}
39093 +
39094 static inline void fscache_stat_d(atomic_t *stat)
39095 {
39096 atomic_dec(stat);
39097 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
39098
39099 #define __fscache_stat(stat) (NULL)
39100 #define fscache_stat(stat) do {} while (0)
39101 +#define fscache_stat_unchecked(stat) do {} while (0)
39102 #define fscache_stat_d(stat) do {} while (0)
39103 #endif
39104
39105 diff -urNp linux-2.6.32.42/fs/fscache/object.c linux-2.6.32.42/fs/fscache/object.c
39106 --- linux-2.6.32.42/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
39107 +++ linux-2.6.32.42/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
39108 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
39109 /* update the object metadata on disk */
39110 case FSCACHE_OBJECT_UPDATING:
39111 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39112 - fscache_stat(&fscache_n_updates_run);
39113 + fscache_stat_unchecked(&fscache_n_updates_run);
39114 fscache_stat(&fscache_n_cop_update_object);
39115 object->cache->ops->update_object(object);
39116 fscache_stat_d(&fscache_n_cop_update_object);
39117 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
39118 spin_lock(&object->lock);
39119 object->state = FSCACHE_OBJECT_DEAD;
39120 spin_unlock(&object->lock);
39121 - fscache_stat(&fscache_n_object_dead);
39122 + fscache_stat_unchecked(&fscache_n_object_dead);
39123 goto terminal_transit;
39124
39125 /* handle the parent cache of this object being withdrawn from
39126 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
39127 spin_lock(&object->lock);
39128 object->state = FSCACHE_OBJECT_DEAD;
39129 spin_unlock(&object->lock);
39130 - fscache_stat(&fscache_n_object_dead);
39131 + fscache_stat_unchecked(&fscache_n_object_dead);
39132 goto terminal_transit;
39133
39134 /* complain about the object being woken up once it is
39135 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
39136 parent->cookie->def->name, cookie->def->name,
39137 object->cache->tag->name);
39138
39139 - fscache_stat(&fscache_n_object_lookups);
39140 + fscache_stat_unchecked(&fscache_n_object_lookups);
39141 fscache_stat(&fscache_n_cop_lookup_object);
39142 ret = object->cache->ops->lookup_object(object);
39143 fscache_stat_d(&fscache_n_cop_lookup_object);
39144 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
39145 if (ret == -ETIMEDOUT) {
39146 /* probably stuck behind another object, so move this one to
39147 * the back of the queue */
39148 - fscache_stat(&fscache_n_object_lookups_timed_out);
39149 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39150 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39151 }
39152
39153 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
39154
39155 spin_lock(&object->lock);
39156 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39157 - fscache_stat(&fscache_n_object_lookups_negative);
39158 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39159
39160 /* transit here to allow write requests to begin stacking up
39161 * and read requests to begin returning ENODATA */
39162 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
39163 * result, in which case there may be data available */
39164 spin_lock(&object->lock);
39165 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39166 - fscache_stat(&fscache_n_object_lookups_positive);
39167 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39168
39169 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39170
39171 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
39172 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39173 } else {
39174 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39175 - fscache_stat(&fscache_n_object_created);
39176 + fscache_stat_unchecked(&fscache_n_object_created);
39177
39178 object->state = FSCACHE_OBJECT_AVAILABLE;
39179 spin_unlock(&object->lock);
39180 @@ -633,7 +633,7 @@ static void fscache_object_available(str
39181 fscache_enqueue_dependents(object);
39182
39183 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39184 - fscache_stat(&fscache_n_object_avail);
39185 + fscache_stat_unchecked(&fscache_n_object_avail);
39186
39187 _leave("");
39188 }
39189 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39190 enum fscache_checkaux result;
39191
39192 if (!object->cookie->def->check_aux) {
39193 - fscache_stat(&fscache_n_checkaux_none);
39194 + fscache_stat_unchecked(&fscache_n_checkaux_none);
39195 return FSCACHE_CHECKAUX_OKAY;
39196 }
39197
39198 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39199 switch (result) {
39200 /* entry okay as is */
39201 case FSCACHE_CHECKAUX_OKAY:
39202 - fscache_stat(&fscache_n_checkaux_okay);
39203 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
39204 break;
39205
39206 /* entry requires update */
39207 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39208 - fscache_stat(&fscache_n_checkaux_update);
39209 + fscache_stat_unchecked(&fscache_n_checkaux_update);
39210 break;
39211
39212 /* entry requires deletion */
39213 case FSCACHE_CHECKAUX_OBSOLETE:
39214 - fscache_stat(&fscache_n_checkaux_obsolete);
39215 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39216 break;
39217
39218 default:
39219 diff -urNp linux-2.6.32.42/fs/fscache/operation.c linux-2.6.32.42/fs/fscache/operation.c
39220 --- linux-2.6.32.42/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
39221 +++ linux-2.6.32.42/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
39222 @@ -16,7 +16,7 @@
39223 #include <linux/seq_file.h>
39224 #include "internal.h"
39225
39226 -atomic_t fscache_op_debug_id;
39227 +atomic_unchecked_t fscache_op_debug_id;
39228 EXPORT_SYMBOL(fscache_op_debug_id);
39229
39230 /**
39231 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
39232 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39233 ASSERTCMP(atomic_read(&op->usage), >, 0);
39234
39235 - fscache_stat(&fscache_n_op_enqueue);
39236 + fscache_stat_unchecked(&fscache_n_op_enqueue);
39237 switch (op->flags & FSCACHE_OP_TYPE) {
39238 case FSCACHE_OP_FAST:
39239 _debug("queue fast");
39240 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39241 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39242 if (op->processor)
39243 fscache_enqueue_operation(op);
39244 - fscache_stat(&fscache_n_op_run);
39245 + fscache_stat_unchecked(&fscache_n_op_run);
39246 }
39247
39248 /*
39249 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39250 if (object->n_ops > 0) {
39251 atomic_inc(&op->usage);
39252 list_add_tail(&op->pend_link, &object->pending_ops);
39253 - fscache_stat(&fscache_n_op_pend);
39254 + fscache_stat_unchecked(&fscache_n_op_pend);
39255 } else if (!list_empty(&object->pending_ops)) {
39256 atomic_inc(&op->usage);
39257 list_add_tail(&op->pend_link, &object->pending_ops);
39258 - fscache_stat(&fscache_n_op_pend);
39259 + fscache_stat_unchecked(&fscache_n_op_pend);
39260 fscache_start_operations(object);
39261 } else {
39262 ASSERTCMP(object->n_in_progress, ==, 0);
39263 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39264 object->n_exclusive++; /* reads and writes must wait */
39265 atomic_inc(&op->usage);
39266 list_add_tail(&op->pend_link, &object->pending_ops);
39267 - fscache_stat(&fscache_n_op_pend);
39268 + fscache_stat_unchecked(&fscache_n_op_pend);
39269 ret = 0;
39270 } else {
39271 /* not allowed to submit ops in any other state */
39272 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39273 if (object->n_exclusive > 0) {
39274 atomic_inc(&op->usage);
39275 list_add_tail(&op->pend_link, &object->pending_ops);
39276 - fscache_stat(&fscache_n_op_pend);
39277 + fscache_stat_unchecked(&fscache_n_op_pend);
39278 } else if (!list_empty(&object->pending_ops)) {
39279 atomic_inc(&op->usage);
39280 list_add_tail(&op->pend_link, &object->pending_ops);
39281 - fscache_stat(&fscache_n_op_pend);
39282 + fscache_stat_unchecked(&fscache_n_op_pend);
39283 fscache_start_operations(object);
39284 } else {
39285 ASSERTCMP(object->n_exclusive, ==, 0);
39286 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39287 object->n_ops++;
39288 atomic_inc(&op->usage);
39289 list_add_tail(&op->pend_link, &object->pending_ops);
39290 - fscache_stat(&fscache_n_op_pend);
39291 + fscache_stat_unchecked(&fscache_n_op_pend);
39292 ret = 0;
39293 } else if (object->state == FSCACHE_OBJECT_DYING ||
39294 object->state == FSCACHE_OBJECT_LC_DYING ||
39295 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39296 - fscache_stat(&fscache_n_op_rejected);
39297 + fscache_stat_unchecked(&fscache_n_op_rejected);
39298 ret = -ENOBUFS;
39299 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39300 fscache_report_unexpected_submission(object, op, ostate);
39301 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39302
39303 ret = -EBUSY;
39304 if (!list_empty(&op->pend_link)) {
39305 - fscache_stat(&fscache_n_op_cancelled);
39306 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39307 list_del_init(&op->pend_link);
39308 object->n_ops--;
39309 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39310 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39311 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39312 BUG();
39313
39314 - fscache_stat(&fscache_n_op_release);
39315 + fscache_stat_unchecked(&fscache_n_op_release);
39316
39317 if (op->release) {
39318 op->release(op);
39319 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39320 * lock, and defer it otherwise */
39321 if (!spin_trylock(&object->lock)) {
39322 _debug("defer put");
39323 - fscache_stat(&fscache_n_op_deferred_release);
39324 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39325
39326 cache = object->cache;
39327 spin_lock(&cache->op_gc_list_lock);
39328 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39329
39330 _debug("GC DEFERRED REL OBJ%x OP%x",
39331 object->debug_id, op->debug_id);
39332 - fscache_stat(&fscache_n_op_gc);
39333 + fscache_stat_unchecked(&fscache_n_op_gc);
39334
39335 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39336
39337 diff -urNp linux-2.6.32.42/fs/fscache/page.c linux-2.6.32.42/fs/fscache/page.c
39338 --- linux-2.6.32.42/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39339 +++ linux-2.6.32.42/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39340 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39341 val = radix_tree_lookup(&cookie->stores, page->index);
39342 if (!val) {
39343 rcu_read_unlock();
39344 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39345 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39346 __fscache_uncache_page(cookie, page);
39347 return true;
39348 }
39349 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39350 spin_unlock(&cookie->stores_lock);
39351
39352 if (xpage) {
39353 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39354 - fscache_stat(&fscache_n_store_radix_deletes);
39355 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39356 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39357 ASSERTCMP(xpage, ==, page);
39358 } else {
39359 - fscache_stat(&fscache_n_store_vmscan_gone);
39360 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39361 }
39362
39363 wake_up_bit(&cookie->flags, 0);
39364 @@ -106,7 +106,7 @@ page_busy:
39365 /* we might want to wait here, but that could deadlock the allocator as
39366 * the slow-work threads writing to the cache may all end up sleeping
39367 * on memory allocation */
39368 - fscache_stat(&fscache_n_store_vmscan_busy);
39369 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39370 return false;
39371 }
39372 EXPORT_SYMBOL(__fscache_maybe_release_page);
39373 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39374 FSCACHE_COOKIE_STORING_TAG);
39375 if (!radix_tree_tag_get(&cookie->stores, page->index,
39376 FSCACHE_COOKIE_PENDING_TAG)) {
39377 - fscache_stat(&fscache_n_store_radix_deletes);
39378 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39379 xpage = radix_tree_delete(&cookie->stores, page->index);
39380 }
39381 spin_unlock(&cookie->stores_lock);
39382 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39383
39384 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39385
39386 - fscache_stat(&fscache_n_attr_changed_calls);
39387 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39388
39389 if (fscache_object_is_active(object)) {
39390 fscache_set_op_state(op, "CallFS");
39391 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39392
39393 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39394
39395 - fscache_stat(&fscache_n_attr_changed);
39396 + fscache_stat_unchecked(&fscache_n_attr_changed);
39397
39398 op = kzalloc(sizeof(*op), GFP_KERNEL);
39399 if (!op) {
39400 - fscache_stat(&fscache_n_attr_changed_nomem);
39401 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39402 _leave(" = -ENOMEM");
39403 return -ENOMEM;
39404 }
39405 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39406 if (fscache_submit_exclusive_op(object, op) < 0)
39407 goto nobufs;
39408 spin_unlock(&cookie->lock);
39409 - fscache_stat(&fscache_n_attr_changed_ok);
39410 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39411 fscache_put_operation(op);
39412 _leave(" = 0");
39413 return 0;
39414 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39415 nobufs:
39416 spin_unlock(&cookie->lock);
39417 kfree(op);
39418 - fscache_stat(&fscache_n_attr_changed_nobufs);
39419 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39420 _leave(" = %d", -ENOBUFS);
39421 return -ENOBUFS;
39422 }
39423 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39424 /* allocate a retrieval operation and attempt to submit it */
39425 op = kzalloc(sizeof(*op), GFP_NOIO);
39426 if (!op) {
39427 - fscache_stat(&fscache_n_retrievals_nomem);
39428 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39429 return NULL;
39430 }
39431
39432 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39433 return 0;
39434 }
39435
39436 - fscache_stat(&fscache_n_retrievals_wait);
39437 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39438
39439 jif = jiffies;
39440 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39441 fscache_wait_bit_interruptible,
39442 TASK_INTERRUPTIBLE) != 0) {
39443 - fscache_stat(&fscache_n_retrievals_intr);
39444 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39445 _leave(" = -ERESTARTSYS");
39446 return -ERESTARTSYS;
39447 }
39448 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39449 */
39450 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39451 struct fscache_retrieval *op,
39452 - atomic_t *stat_op_waits,
39453 - atomic_t *stat_object_dead)
39454 + atomic_unchecked_t *stat_op_waits,
39455 + atomic_unchecked_t *stat_object_dead)
39456 {
39457 int ret;
39458
39459 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39460 goto check_if_dead;
39461
39462 _debug(">>> WT");
39463 - fscache_stat(stat_op_waits);
39464 + fscache_stat_unchecked(stat_op_waits);
39465 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39466 fscache_wait_bit_interruptible,
39467 TASK_INTERRUPTIBLE) < 0) {
39468 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39469
39470 check_if_dead:
39471 if (unlikely(fscache_object_is_dead(object))) {
39472 - fscache_stat(stat_object_dead);
39473 + fscache_stat_unchecked(stat_object_dead);
39474 return -ENOBUFS;
39475 }
39476 return 0;
39477 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39478
39479 _enter("%p,%p,,,", cookie, page);
39480
39481 - fscache_stat(&fscache_n_retrievals);
39482 + fscache_stat_unchecked(&fscache_n_retrievals);
39483
39484 if (hlist_empty(&cookie->backing_objects))
39485 goto nobufs;
39486 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39487 goto nobufs_unlock;
39488 spin_unlock(&cookie->lock);
39489
39490 - fscache_stat(&fscache_n_retrieval_ops);
39491 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39492
39493 /* pin the netfs read context in case we need to do the actual netfs
39494 * read because we've encountered a cache read failure */
39495 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39496
39497 error:
39498 if (ret == -ENOMEM)
39499 - fscache_stat(&fscache_n_retrievals_nomem);
39500 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39501 else if (ret == -ERESTARTSYS)
39502 - fscache_stat(&fscache_n_retrievals_intr);
39503 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39504 else if (ret == -ENODATA)
39505 - fscache_stat(&fscache_n_retrievals_nodata);
39506 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39507 else if (ret < 0)
39508 - fscache_stat(&fscache_n_retrievals_nobufs);
39509 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39510 else
39511 - fscache_stat(&fscache_n_retrievals_ok);
39512 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39513
39514 fscache_put_retrieval(op);
39515 _leave(" = %d", ret);
39516 @@ -453,7 +453,7 @@ nobufs_unlock:
39517 spin_unlock(&cookie->lock);
39518 kfree(op);
39519 nobufs:
39520 - fscache_stat(&fscache_n_retrievals_nobufs);
39521 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39522 _leave(" = -ENOBUFS");
39523 return -ENOBUFS;
39524 }
39525 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39526
39527 _enter("%p,,%d,,,", cookie, *nr_pages);
39528
39529 - fscache_stat(&fscache_n_retrievals);
39530 + fscache_stat_unchecked(&fscache_n_retrievals);
39531
39532 if (hlist_empty(&cookie->backing_objects))
39533 goto nobufs;
39534 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39535 goto nobufs_unlock;
39536 spin_unlock(&cookie->lock);
39537
39538 - fscache_stat(&fscache_n_retrieval_ops);
39539 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39540
39541 /* pin the netfs read context in case we need to do the actual netfs
39542 * read because we've encountered a cache read failure */
39543 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39544
39545 error:
39546 if (ret == -ENOMEM)
39547 - fscache_stat(&fscache_n_retrievals_nomem);
39548 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39549 else if (ret == -ERESTARTSYS)
39550 - fscache_stat(&fscache_n_retrievals_intr);
39551 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39552 else if (ret == -ENODATA)
39553 - fscache_stat(&fscache_n_retrievals_nodata);
39554 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39555 else if (ret < 0)
39556 - fscache_stat(&fscache_n_retrievals_nobufs);
39557 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39558 else
39559 - fscache_stat(&fscache_n_retrievals_ok);
39560 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39561
39562 fscache_put_retrieval(op);
39563 _leave(" = %d", ret);
39564 @@ -570,7 +570,7 @@ nobufs_unlock:
39565 spin_unlock(&cookie->lock);
39566 kfree(op);
39567 nobufs:
39568 - fscache_stat(&fscache_n_retrievals_nobufs);
39569 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39570 _leave(" = -ENOBUFS");
39571 return -ENOBUFS;
39572 }
39573 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39574
39575 _enter("%p,%p,,,", cookie, page);
39576
39577 - fscache_stat(&fscache_n_allocs);
39578 + fscache_stat_unchecked(&fscache_n_allocs);
39579
39580 if (hlist_empty(&cookie->backing_objects))
39581 goto nobufs;
39582 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39583 goto nobufs_unlock;
39584 spin_unlock(&cookie->lock);
39585
39586 - fscache_stat(&fscache_n_alloc_ops);
39587 + fscache_stat_unchecked(&fscache_n_alloc_ops);
39588
39589 ret = fscache_wait_for_retrieval_activation(
39590 object, op,
39591 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39592
39593 error:
39594 if (ret == -ERESTARTSYS)
39595 - fscache_stat(&fscache_n_allocs_intr);
39596 + fscache_stat_unchecked(&fscache_n_allocs_intr);
39597 else if (ret < 0)
39598 - fscache_stat(&fscache_n_allocs_nobufs);
39599 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39600 else
39601 - fscache_stat(&fscache_n_allocs_ok);
39602 + fscache_stat_unchecked(&fscache_n_allocs_ok);
39603
39604 fscache_put_retrieval(op);
39605 _leave(" = %d", ret);
39606 @@ -651,7 +651,7 @@ nobufs_unlock:
39607 spin_unlock(&cookie->lock);
39608 kfree(op);
39609 nobufs:
39610 - fscache_stat(&fscache_n_allocs_nobufs);
39611 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39612 _leave(" = -ENOBUFS");
39613 return -ENOBUFS;
39614 }
39615 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39616
39617 spin_lock(&cookie->stores_lock);
39618
39619 - fscache_stat(&fscache_n_store_calls);
39620 + fscache_stat_unchecked(&fscache_n_store_calls);
39621
39622 /* find a page to store */
39623 page = NULL;
39624 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39625 page = results[0];
39626 _debug("gang %d [%lx]", n, page->index);
39627 if (page->index > op->store_limit) {
39628 - fscache_stat(&fscache_n_store_pages_over_limit);
39629 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39630 goto superseded;
39631 }
39632
39633 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
39634
39635 if (page) {
39636 fscache_set_op_state(&op->op, "Store");
39637 - fscache_stat(&fscache_n_store_pages);
39638 + fscache_stat_unchecked(&fscache_n_store_pages);
39639 fscache_stat(&fscache_n_cop_write_page);
39640 ret = object->cache->ops->write_page(op, page);
39641 fscache_stat_d(&fscache_n_cop_write_page);
39642 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
39643 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39644 ASSERT(PageFsCache(page));
39645
39646 - fscache_stat(&fscache_n_stores);
39647 + fscache_stat_unchecked(&fscache_n_stores);
39648
39649 op = kzalloc(sizeof(*op), GFP_NOIO);
39650 if (!op)
39651 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
39652 spin_unlock(&cookie->stores_lock);
39653 spin_unlock(&object->lock);
39654
39655 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
39656 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
39657 op->store_limit = object->store_limit;
39658
39659 if (fscache_submit_op(object, &op->op) < 0)
39660 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
39661
39662 spin_unlock(&cookie->lock);
39663 radix_tree_preload_end();
39664 - fscache_stat(&fscache_n_store_ops);
39665 - fscache_stat(&fscache_n_stores_ok);
39666 + fscache_stat_unchecked(&fscache_n_store_ops);
39667 + fscache_stat_unchecked(&fscache_n_stores_ok);
39668
39669 /* the slow work queue now carries its own ref on the object */
39670 fscache_put_operation(&op->op);
39671 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
39672 return 0;
39673
39674 already_queued:
39675 - fscache_stat(&fscache_n_stores_again);
39676 + fscache_stat_unchecked(&fscache_n_stores_again);
39677 already_pending:
39678 spin_unlock(&cookie->stores_lock);
39679 spin_unlock(&object->lock);
39680 spin_unlock(&cookie->lock);
39681 radix_tree_preload_end();
39682 kfree(op);
39683 - fscache_stat(&fscache_n_stores_ok);
39684 + fscache_stat_unchecked(&fscache_n_stores_ok);
39685 _leave(" = 0");
39686 return 0;
39687
39688 @@ -886,14 +886,14 @@ nobufs:
39689 spin_unlock(&cookie->lock);
39690 radix_tree_preload_end();
39691 kfree(op);
39692 - fscache_stat(&fscache_n_stores_nobufs);
39693 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
39694 _leave(" = -ENOBUFS");
39695 return -ENOBUFS;
39696
39697 nomem_free:
39698 kfree(op);
39699 nomem:
39700 - fscache_stat(&fscache_n_stores_oom);
39701 + fscache_stat_unchecked(&fscache_n_stores_oom);
39702 _leave(" = -ENOMEM");
39703 return -ENOMEM;
39704 }
39705 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
39706 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39707 ASSERTCMP(page, !=, NULL);
39708
39709 - fscache_stat(&fscache_n_uncaches);
39710 + fscache_stat_unchecked(&fscache_n_uncaches);
39711
39712 /* cache withdrawal may beat us to it */
39713 if (!PageFsCache(page))
39714 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
39715 unsigned long loop;
39716
39717 #ifdef CONFIG_FSCACHE_STATS
39718 - atomic_add(pagevec->nr, &fscache_n_marks);
39719 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
39720 #endif
39721
39722 for (loop = 0; loop < pagevec->nr; loop++) {
39723 diff -urNp linux-2.6.32.42/fs/fscache/stats.c linux-2.6.32.42/fs/fscache/stats.c
39724 --- linux-2.6.32.42/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
39725 +++ linux-2.6.32.42/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
39726 @@ -18,95 +18,95 @@
39727 /*
39728 * operation counters
39729 */
39730 -atomic_t fscache_n_op_pend;
39731 -atomic_t fscache_n_op_run;
39732 -atomic_t fscache_n_op_enqueue;
39733 -atomic_t fscache_n_op_requeue;
39734 -atomic_t fscache_n_op_deferred_release;
39735 -atomic_t fscache_n_op_release;
39736 -atomic_t fscache_n_op_gc;
39737 -atomic_t fscache_n_op_cancelled;
39738 -atomic_t fscache_n_op_rejected;
39739 -
39740 -atomic_t fscache_n_attr_changed;
39741 -atomic_t fscache_n_attr_changed_ok;
39742 -atomic_t fscache_n_attr_changed_nobufs;
39743 -atomic_t fscache_n_attr_changed_nomem;
39744 -atomic_t fscache_n_attr_changed_calls;
39745 -
39746 -atomic_t fscache_n_allocs;
39747 -atomic_t fscache_n_allocs_ok;
39748 -atomic_t fscache_n_allocs_wait;
39749 -atomic_t fscache_n_allocs_nobufs;
39750 -atomic_t fscache_n_allocs_intr;
39751 -atomic_t fscache_n_allocs_object_dead;
39752 -atomic_t fscache_n_alloc_ops;
39753 -atomic_t fscache_n_alloc_op_waits;
39754 -
39755 -atomic_t fscache_n_retrievals;
39756 -atomic_t fscache_n_retrievals_ok;
39757 -atomic_t fscache_n_retrievals_wait;
39758 -atomic_t fscache_n_retrievals_nodata;
39759 -atomic_t fscache_n_retrievals_nobufs;
39760 -atomic_t fscache_n_retrievals_intr;
39761 -atomic_t fscache_n_retrievals_nomem;
39762 -atomic_t fscache_n_retrievals_object_dead;
39763 -atomic_t fscache_n_retrieval_ops;
39764 -atomic_t fscache_n_retrieval_op_waits;
39765 -
39766 -atomic_t fscache_n_stores;
39767 -atomic_t fscache_n_stores_ok;
39768 -atomic_t fscache_n_stores_again;
39769 -atomic_t fscache_n_stores_nobufs;
39770 -atomic_t fscache_n_stores_oom;
39771 -atomic_t fscache_n_store_ops;
39772 -atomic_t fscache_n_store_calls;
39773 -atomic_t fscache_n_store_pages;
39774 -atomic_t fscache_n_store_radix_deletes;
39775 -atomic_t fscache_n_store_pages_over_limit;
39776 -
39777 -atomic_t fscache_n_store_vmscan_not_storing;
39778 -atomic_t fscache_n_store_vmscan_gone;
39779 -atomic_t fscache_n_store_vmscan_busy;
39780 -atomic_t fscache_n_store_vmscan_cancelled;
39781 -
39782 -atomic_t fscache_n_marks;
39783 -atomic_t fscache_n_uncaches;
39784 -
39785 -atomic_t fscache_n_acquires;
39786 -atomic_t fscache_n_acquires_null;
39787 -atomic_t fscache_n_acquires_no_cache;
39788 -atomic_t fscache_n_acquires_ok;
39789 -atomic_t fscache_n_acquires_nobufs;
39790 -atomic_t fscache_n_acquires_oom;
39791 -
39792 -atomic_t fscache_n_updates;
39793 -atomic_t fscache_n_updates_null;
39794 -atomic_t fscache_n_updates_run;
39795 -
39796 -atomic_t fscache_n_relinquishes;
39797 -atomic_t fscache_n_relinquishes_null;
39798 -atomic_t fscache_n_relinquishes_waitcrt;
39799 -atomic_t fscache_n_relinquishes_retire;
39800 -
39801 -atomic_t fscache_n_cookie_index;
39802 -atomic_t fscache_n_cookie_data;
39803 -atomic_t fscache_n_cookie_special;
39804 -
39805 -atomic_t fscache_n_object_alloc;
39806 -atomic_t fscache_n_object_no_alloc;
39807 -atomic_t fscache_n_object_lookups;
39808 -atomic_t fscache_n_object_lookups_negative;
39809 -atomic_t fscache_n_object_lookups_positive;
39810 -atomic_t fscache_n_object_lookups_timed_out;
39811 -atomic_t fscache_n_object_created;
39812 -atomic_t fscache_n_object_avail;
39813 -atomic_t fscache_n_object_dead;
39814 -
39815 -atomic_t fscache_n_checkaux_none;
39816 -atomic_t fscache_n_checkaux_okay;
39817 -atomic_t fscache_n_checkaux_update;
39818 -atomic_t fscache_n_checkaux_obsolete;
39819 +atomic_unchecked_t fscache_n_op_pend;
39820 +atomic_unchecked_t fscache_n_op_run;
39821 +atomic_unchecked_t fscache_n_op_enqueue;
39822 +atomic_unchecked_t fscache_n_op_requeue;
39823 +atomic_unchecked_t fscache_n_op_deferred_release;
39824 +atomic_unchecked_t fscache_n_op_release;
39825 +atomic_unchecked_t fscache_n_op_gc;
39826 +atomic_unchecked_t fscache_n_op_cancelled;
39827 +atomic_unchecked_t fscache_n_op_rejected;
39828 +
39829 +atomic_unchecked_t fscache_n_attr_changed;
39830 +atomic_unchecked_t fscache_n_attr_changed_ok;
39831 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
39832 +atomic_unchecked_t fscache_n_attr_changed_nomem;
39833 +atomic_unchecked_t fscache_n_attr_changed_calls;
39834 +
39835 +atomic_unchecked_t fscache_n_allocs;
39836 +atomic_unchecked_t fscache_n_allocs_ok;
39837 +atomic_unchecked_t fscache_n_allocs_wait;
39838 +atomic_unchecked_t fscache_n_allocs_nobufs;
39839 +atomic_unchecked_t fscache_n_allocs_intr;
39840 +atomic_unchecked_t fscache_n_allocs_object_dead;
39841 +atomic_unchecked_t fscache_n_alloc_ops;
39842 +atomic_unchecked_t fscache_n_alloc_op_waits;
39843 +
39844 +atomic_unchecked_t fscache_n_retrievals;
39845 +atomic_unchecked_t fscache_n_retrievals_ok;
39846 +atomic_unchecked_t fscache_n_retrievals_wait;
39847 +atomic_unchecked_t fscache_n_retrievals_nodata;
39848 +atomic_unchecked_t fscache_n_retrievals_nobufs;
39849 +atomic_unchecked_t fscache_n_retrievals_intr;
39850 +atomic_unchecked_t fscache_n_retrievals_nomem;
39851 +atomic_unchecked_t fscache_n_retrievals_object_dead;
39852 +atomic_unchecked_t fscache_n_retrieval_ops;
39853 +atomic_unchecked_t fscache_n_retrieval_op_waits;
39854 +
39855 +atomic_unchecked_t fscache_n_stores;
39856 +atomic_unchecked_t fscache_n_stores_ok;
39857 +atomic_unchecked_t fscache_n_stores_again;
39858 +atomic_unchecked_t fscache_n_stores_nobufs;
39859 +atomic_unchecked_t fscache_n_stores_oom;
39860 +atomic_unchecked_t fscache_n_store_ops;
39861 +atomic_unchecked_t fscache_n_store_calls;
39862 +atomic_unchecked_t fscache_n_store_pages;
39863 +atomic_unchecked_t fscache_n_store_radix_deletes;
39864 +atomic_unchecked_t fscache_n_store_pages_over_limit;
39865 +
39866 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39867 +atomic_unchecked_t fscache_n_store_vmscan_gone;
39868 +atomic_unchecked_t fscache_n_store_vmscan_busy;
39869 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39870 +
39871 +atomic_unchecked_t fscache_n_marks;
39872 +atomic_unchecked_t fscache_n_uncaches;
39873 +
39874 +atomic_unchecked_t fscache_n_acquires;
39875 +atomic_unchecked_t fscache_n_acquires_null;
39876 +atomic_unchecked_t fscache_n_acquires_no_cache;
39877 +atomic_unchecked_t fscache_n_acquires_ok;
39878 +atomic_unchecked_t fscache_n_acquires_nobufs;
39879 +atomic_unchecked_t fscache_n_acquires_oom;
39880 +
39881 +atomic_unchecked_t fscache_n_updates;
39882 +atomic_unchecked_t fscache_n_updates_null;
39883 +atomic_unchecked_t fscache_n_updates_run;
39884 +
39885 +atomic_unchecked_t fscache_n_relinquishes;
39886 +atomic_unchecked_t fscache_n_relinquishes_null;
39887 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39888 +atomic_unchecked_t fscache_n_relinquishes_retire;
39889 +
39890 +atomic_unchecked_t fscache_n_cookie_index;
39891 +atomic_unchecked_t fscache_n_cookie_data;
39892 +atomic_unchecked_t fscache_n_cookie_special;
39893 +
39894 +atomic_unchecked_t fscache_n_object_alloc;
39895 +atomic_unchecked_t fscache_n_object_no_alloc;
39896 +atomic_unchecked_t fscache_n_object_lookups;
39897 +atomic_unchecked_t fscache_n_object_lookups_negative;
39898 +atomic_unchecked_t fscache_n_object_lookups_positive;
39899 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
39900 +atomic_unchecked_t fscache_n_object_created;
39901 +atomic_unchecked_t fscache_n_object_avail;
39902 +atomic_unchecked_t fscache_n_object_dead;
39903 +
39904 +atomic_unchecked_t fscache_n_checkaux_none;
39905 +atomic_unchecked_t fscache_n_checkaux_okay;
39906 +atomic_unchecked_t fscache_n_checkaux_update;
39907 +atomic_unchecked_t fscache_n_checkaux_obsolete;
39908
39909 atomic_t fscache_n_cop_alloc_object;
39910 atomic_t fscache_n_cop_lookup_object;
39911 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
39912 seq_puts(m, "FS-Cache statistics\n");
39913
39914 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
39915 - atomic_read(&fscache_n_cookie_index),
39916 - atomic_read(&fscache_n_cookie_data),
39917 - atomic_read(&fscache_n_cookie_special));
39918 + atomic_read_unchecked(&fscache_n_cookie_index),
39919 + atomic_read_unchecked(&fscache_n_cookie_data),
39920 + atomic_read_unchecked(&fscache_n_cookie_special));
39921
39922 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
39923 - atomic_read(&fscache_n_object_alloc),
39924 - atomic_read(&fscache_n_object_no_alloc),
39925 - atomic_read(&fscache_n_object_avail),
39926 - atomic_read(&fscache_n_object_dead));
39927 + atomic_read_unchecked(&fscache_n_object_alloc),
39928 + atomic_read_unchecked(&fscache_n_object_no_alloc),
39929 + atomic_read_unchecked(&fscache_n_object_avail),
39930 + atomic_read_unchecked(&fscache_n_object_dead));
39931 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
39932 - atomic_read(&fscache_n_checkaux_none),
39933 - atomic_read(&fscache_n_checkaux_okay),
39934 - atomic_read(&fscache_n_checkaux_update),
39935 - atomic_read(&fscache_n_checkaux_obsolete));
39936 + atomic_read_unchecked(&fscache_n_checkaux_none),
39937 + atomic_read_unchecked(&fscache_n_checkaux_okay),
39938 + atomic_read_unchecked(&fscache_n_checkaux_update),
39939 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
39940
39941 seq_printf(m, "Pages : mrk=%u unc=%u\n",
39942 - atomic_read(&fscache_n_marks),
39943 - atomic_read(&fscache_n_uncaches));
39944 + atomic_read_unchecked(&fscache_n_marks),
39945 + atomic_read_unchecked(&fscache_n_uncaches));
39946
39947 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
39948 " oom=%u\n",
39949 - atomic_read(&fscache_n_acquires),
39950 - atomic_read(&fscache_n_acquires_null),
39951 - atomic_read(&fscache_n_acquires_no_cache),
39952 - atomic_read(&fscache_n_acquires_ok),
39953 - atomic_read(&fscache_n_acquires_nobufs),
39954 - atomic_read(&fscache_n_acquires_oom));
39955 + atomic_read_unchecked(&fscache_n_acquires),
39956 + atomic_read_unchecked(&fscache_n_acquires_null),
39957 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
39958 + atomic_read_unchecked(&fscache_n_acquires_ok),
39959 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
39960 + atomic_read_unchecked(&fscache_n_acquires_oom));
39961
39962 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
39963 - atomic_read(&fscache_n_object_lookups),
39964 - atomic_read(&fscache_n_object_lookups_negative),
39965 - atomic_read(&fscache_n_object_lookups_positive),
39966 - atomic_read(&fscache_n_object_lookups_timed_out),
39967 - atomic_read(&fscache_n_object_created));
39968 + atomic_read_unchecked(&fscache_n_object_lookups),
39969 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
39970 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
39971 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
39972 + atomic_read_unchecked(&fscache_n_object_created));
39973
39974 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
39975 - atomic_read(&fscache_n_updates),
39976 - atomic_read(&fscache_n_updates_null),
39977 - atomic_read(&fscache_n_updates_run));
39978 + atomic_read_unchecked(&fscache_n_updates),
39979 + atomic_read_unchecked(&fscache_n_updates_null),
39980 + atomic_read_unchecked(&fscache_n_updates_run));
39981
39982 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
39983 - atomic_read(&fscache_n_relinquishes),
39984 - atomic_read(&fscache_n_relinquishes_null),
39985 - atomic_read(&fscache_n_relinquishes_waitcrt),
39986 - atomic_read(&fscache_n_relinquishes_retire));
39987 + atomic_read_unchecked(&fscache_n_relinquishes),
39988 + atomic_read_unchecked(&fscache_n_relinquishes_null),
39989 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
39990 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
39991
39992 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
39993 - atomic_read(&fscache_n_attr_changed),
39994 - atomic_read(&fscache_n_attr_changed_ok),
39995 - atomic_read(&fscache_n_attr_changed_nobufs),
39996 - atomic_read(&fscache_n_attr_changed_nomem),
39997 - atomic_read(&fscache_n_attr_changed_calls));
39998 + atomic_read_unchecked(&fscache_n_attr_changed),
39999 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
40000 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40001 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40002 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
40003
40004 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40005 - atomic_read(&fscache_n_allocs),
40006 - atomic_read(&fscache_n_allocs_ok),
40007 - atomic_read(&fscache_n_allocs_wait),
40008 - atomic_read(&fscache_n_allocs_nobufs),
40009 - atomic_read(&fscache_n_allocs_intr));
40010 + atomic_read_unchecked(&fscache_n_allocs),
40011 + atomic_read_unchecked(&fscache_n_allocs_ok),
40012 + atomic_read_unchecked(&fscache_n_allocs_wait),
40013 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
40014 + atomic_read_unchecked(&fscache_n_allocs_intr));
40015 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40016 - atomic_read(&fscache_n_alloc_ops),
40017 - atomic_read(&fscache_n_alloc_op_waits),
40018 - atomic_read(&fscache_n_allocs_object_dead));
40019 + atomic_read_unchecked(&fscache_n_alloc_ops),
40020 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
40021 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
40022
40023 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40024 " int=%u oom=%u\n",
40025 - atomic_read(&fscache_n_retrievals),
40026 - atomic_read(&fscache_n_retrievals_ok),
40027 - atomic_read(&fscache_n_retrievals_wait),
40028 - atomic_read(&fscache_n_retrievals_nodata),
40029 - atomic_read(&fscache_n_retrievals_nobufs),
40030 - atomic_read(&fscache_n_retrievals_intr),
40031 - atomic_read(&fscache_n_retrievals_nomem));
40032 + atomic_read_unchecked(&fscache_n_retrievals),
40033 + atomic_read_unchecked(&fscache_n_retrievals_ok),
40034 + atomic_read_unchecked(&fscache_n_retrievals_wait),
40035 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
40036 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40037 + atomic_read_unchecked(&fscache_n_retrievals_intr),
40038 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
40039 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40040 - atomic_read(&fscache_n_retrieval_ops),
40041 - atomic_read(&fscache_n_retrieval_op_waits),
40042 - atomic_read(&fscache_n_retrievals_object_dead));
40043 + atomic_read_unchecked(&fscache_n_retrieval_ops),
40044 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40045 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40046
40047 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40048 - atomic_read(&fscache_n_stores),
40049 - atomic_read(&fscache_n_stores_ok),
40050 - atomic_read(&fscache_n_stores_again),
40051 - atomic_read(&fscache_n_stores_nobufs),
40052 - atomic_read(&fscache_n_stores_oom));
40053 + atomic_read_unchecked(&fscache_n_stores),
40054 + atomic_read_unchecked(&fscache_n_stores_ok),
40055 + atomic_read_unchecked(&fscache_n_stores_again),
40056 + atomic_read_unchecked(&fscache_n_stores_nobufs),
40057 + atomic_read_unchecked(&fscache_n_stores_oom));
40058 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40059 - atomic_read(&fscache_n_store_ops),
40060 - atomic_read(&fscache_n_store_calls),
40061 - atomic_read(&fscache_n_store_pages),
40062 - atomic_read(&fscache_n_store_radix_deletes),
40063 - atomic_read(&fscache_n_store_pages_over_limit));
40064 + atomic_read_unchecked(&fscache_n_store_ops),
40065 + atomic_read_unchecked(&fscache_n_store_calls),
40066 + atomic_read_unchecked(&fscache_n_store_pages),
40067 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
40068 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40069
40070 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40071 - atomic_read(&fscache_n_store_vmscan_not_storing),
40072 - atomic_read(&fscache_n_store_vmscan_gone),
40073 - atomic_read(&fscache_n_store_vmscan_busy),
40074 - atomic_read(&fscache_n_store_vmscan_cancelled));
40075 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40076 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40077 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40078 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40079
40080 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40081 - atomic_read(&fscache_n_op_pend),
40082 - atomic_read(&fscache_n_op_run),
40083 - atomic_read(&fscache_n_op_enqueue),
40084 - atomic_read(&fscache_n_op_cancelled),
40085 - atomic_read(&fscache_n_op_rejected));
40086 + atomic_read_unchecked(&fscache_n_op_pend),
40087 + atomic_read_unchecked(&fscache_n_op_run),
40088 + atomic_read_unchecked(&fscache_n_op_enqueue),
40089 + atomic_read_unchecked(&fscache_n_op_cancelled),
40090 + atomic_read_unchecked(&fscache_n_op_rejected));
40091 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40092 - atomic_read(&fscache_n_op_deferred_release),
40093 - atomic_read(&fscache_n_op_release),
40094 - atomic_read(&fscache_n_op_gc));
40095 + atomic_read_unchecked(&fscache_n_op_deferred_release),
40096 + atomic_read_unchecked(&fscache_n_op_release),
40097 + atomic_read_unchecked(&fscache_n_op_gc));
40098
40099 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40100 atomic_read(&fscache_n_cop_alloc_object),
40101 diff -urNp linux-2.6.32.42/fs/fs_struct.c linux-2.6.32.42/fs/fs_struct.c
40102 --- linux-2.6.32.42/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
40103 +++ linux-2.6.32.42/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
40104 @@ -4,6 +4,7 @@
40105 #include <linux/path.h>
40106 #include <linux/slab.h>
40107 #include <linux/fs_struct.h>
40108 +#include <linux/grsecurity.h>
40109
40110 /*
40111 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
40112 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
40113 old_root = fs->root;
40114 fs->root = *path;
40115 path_get(path);
40116 + gr_set_chroot_entries(current, path);
40117 write_unlock(&fs->lock);
40118 if (old_root.dentry)
40119 path_put(&old_root);
40120 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
40121 && fs->root.mnt == old_root->mnt) {
40122 path_get(new_root);
40123 fs->root = *new_root;
40124 + gr_set_chroot_entries(p, new_root);
40125 count++;
40126 }
40127 if (fs->pwd.dentry == old_root->dentry
40128 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
40129 task_lock(tsk);
40130 write_lock(&fs->lock);
40131 tsk->fs = NULL;
40132 - kill = !--fs->users;
40133 + gr_clear_chroot_entries(tsk);
40134 + kill = !atomic_dec_return(&fs->users);
40135 write_unlock(&fs->lock);
40136 task_unlock(tsk);
40137 if (kill)
40138 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
40139 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40140 /* We don't need to lock fs - think why ;-) */
40141 if (fs) {
40142 - fs->users = 1;
40143 + atomic_set(&fs->users, 1);
40144 fs->in_exec = 0;
40145 rwlock_init(&fs->lock);
40146 fs->umask = old->umask;
40147 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
40148
40149 task_lock(current);
40150 write_lock(&fs->lock);
40151 - kill = !--fs->users;
40152 + kill = !atomic_dec_return(&fs->users);
40153 current->fs = new_fs;
40154 + gr_set_chroot_entries(current, &new_fs->root);
40155 write_unlock(&fs->lock);
40156 task_unlock(current);
40157
40158 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
40159
40160 /* to be mentioned only in INIT_TASK */
40161 struct fs_struct init_fs = {
40162 - .users = 1,
40163 + .users = ATOMIC_INIT(1),
40164 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
40165 .umask = 0022,
40166 };
40167 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
40168 task_lock(current);
40169
40170 write_lock(&init_fs.lock);
40171 - init_fs.users++;
40172 + atomic_inc(&init_fs.users);
40173 write_unlock(&init_fs.lock);
40174
40175 write_lock(&fs->lock);
40176 current->fs = &init_fs;
40177 - kill = !--fs->users;
40178 + gr_set_chroot_entries(current, &current->fs->root);
40179 + kill = !atomic_dec_return(&fs->users);
40180 write_unlock(&fs->lock);
40181
40182 task_unlock(current);
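
    [illustrative sketch, not part of the patch] The fs/fs_struct.c diff above turns the plain integer fs->users into an atomic_t, so "kill = !--fs->users" becomes "kill = !atomic_dec_return(&fs->users)", and adds the gr_set_chroot_entries()/gr_clear_chroot_entries() hooks. The userspace C model below shows only the reference-count conversion; the grsecurity hooks are omitted and the names are made up.

    /* Sketch only: last-reference detection with an atomic counter
     * instead of a plain int manipulated under a lock. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fs_struct {
        atomic_int users;                 /* was: int users */
        int umask;
    };

    static struct fs_struct *fs_alloc(void)
    {
        struct fs_struct *fs = calloc(1, sizeof(*fs));
        if (!fs)
            return NULL;
        atomic_init(&fs->users, 1);       /* models .users = ATOMIC_INIT(1) */
        fs->umask = 022;
        return fs;
    }

    static void fs_get(struct fs_struct *fs)
    {
        atomic_fetch_add(&fs->users, 1);  /* models atomic_inc() */
    }

    static void fs_put(struct fs_struct *fs)
    {
        /* models kill = !atomic_dec_return(&fs->users) */
        if (atomic_fetch_sub(&fs->users, 1) == 1) {
            printf("last user gone, freeing\n");
            free(fs);
        }
    }

    int main(void)
    {
        struct fs_struct *fs = fs_alloc();
        if (!fs)
            return 1;
        fs_get(fs);
        fs_put(fs);
        fs_put(fs);                       /* drops the final reference */
        return 0;
    }
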
40183 diff -urNp linux-2.6.32.42/fs/fuse/cuse.c linux-2.6.32.42/fs/fuse/cuse.c
40184 --- linux-2.6.32.42/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
40185 +++ linux-2.6.32.42/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
40186 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
40187 return rc;
40188 }
40189
40190 -static struct file_operations cuse_channel_fops; /* initialized during init */
40191 -
40192 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
40193 + .owner = THIS_MODULE,
40194 + .llseek = no_llseek,
40195 + .read = do_sync_read,
40196 + .aio_read = fuse_dev_read,
40197 + .write = do_sync_write,
40198 + .aio_write = fuse_dev_write,
40199 + .poll = fuse_dev_poll,
40200 + .open = cuse_channel_open,
40201 + .release = cuse_channel_release,
40202 + .fasync = fuse_dev_fasync,
40203 +};
40204
40205 /**************************************************************************
40206 * Misc stuff and module initializatiion
40207 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
40208 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
40209 INIT_LIST_HEAD(&cuse_conntbl[i]);
40210
40211 - /* inherit and extend fuse_dev_operations */
40212 - cuse_channel_fops = fuse_dev_operations;
40213 - cuse_channel_fops.owner = THIS_MODULE;
40214 - cuse_channel_fops.open = cuse_channel_open;
40215 - cuse_channel_fops.release = cuse_channel_release;
40216 -
40217 cuse_class = class_create(THIS_MODULE, "cuse");
40218 if (IS_ERR(cuse_class))
40219 return PTR_ERR(cuse_class);
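
    [illustrative sketch, not part of the patch] The fs/fuse/cuse.c diff above replaces a writable file_operations that used to be copied from fuse_dev_operations and patched at init time with a fully spelled-out static const table, which is why the fs/fuse/dev.c diff below exports fuse_dev_read/write/poll/fasync. A small userspace C sketch of the same constification pattern, with hypothetical ops names:

    /* Sketch only: build the ops table as a compile-time const object
     * instead of copying and patching a writable one at init time. */
    #include <stdio.h>

    struct chan_ops {
        int (*open)(const char *name);
        int (*release)(void);
    };

    static int chan_open(const char *name)
    {
        printf("open %s\n", name);
        return 0;
    }

    static int chan_release(void)
    {
        printf("release\n");
        return 0;
    }

    /* before: static struct chan_ops ops;  ops = base_ops; ops.open = ...;
     * after:  everything known at compile time, object can live in .rodata */
    static const struct chan_ops channel_ops = {
        .open    = chan_open,
        .release = chan_release,
    };

    int main(void)
    {
        channel_ops.open("cuse");
        channel_ops.release();
        return 0;
    }
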
40220 diff -urNp linux-2.6.32.42/fs/fuse/dev.c linux-2.6.32.42/fs/fuse/dev.c
40221 --- linux-2.6.32.42/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
40222 +++ linux-2.6.32.42/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
40223 @@ -745,7 +745,7 @@ __releases(&fc->lock)
40224 * request_end(). Otherwise add it to the processing list, and set
40225 * the 'sent' flag.
40226 */
40227 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40228 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40229 unsigned long nr_segs, loff_t pos)
40230 {
40231 int err;
40232 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
40233 spin_unlock(&fc->lock);
40234 return err;
40235 }
40236 +EXPORT_SYMBOL_GPL(fuse_dev_read);
40237
40238 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40239 struct fuse_copy_state *cs)
40240 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40241 {
40242 struct fuse_notify_inval_entry_out outarg;
40243 int err = -EINVAL;
40244 - char buf[FUSE_NAME_MAX+1];
40245 + char *buf = NULL;
40246 struct qstr name;
40247
40248 if (size < sizeof(outarg))
40249 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40250 if (outarg.namelen > FUSE_NAME_MAX)
40251 goto err;
40252
40253 + err = -ENOMEM;
40254 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40255 + if (!buf)
40256 + goto err;
40257 +
40258 name.name = buf;
40259 name.len = outarg.namelen;
40260 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40261 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40262
40263 down_read(&fc->killsb);
40264 err = -ENOENT;
40265 - if (!fc->sb)
40266 - goto err_unlock;
40267 -
40268 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40269 -
40270 -err_unlock:
40271 + if (fc->sb)
40272 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40273 up_read(&fc->killsb);
40274 + kfree(buf);
40275 return err;
40276
40277 err:
40278 fuse_copy_finish(cs);
40279 + kfree(buf);
40280 return err;
40281 }
40282
40283 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40284 * it from the list and copy the rest of the buffer to the request.
40285 * The request is finished by calling request_end()
40286 */
40287 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40288 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40289 unsigned long nr_segs, loff_t pos)
40290 {
40291 int err;
40292 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40293 fuse_copy_finish(&cs);
40294 return err;
40295 }
40296 +EXPORT_SYMBOL_GPL(fuse_dev_write);
40297
40298 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40299 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40300 {
40301 unsigned mask = POLLOUT | POLLWRNORM;
40302 struct fuse_conn *fc = fuse_get_conn(file);
40303 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40304
40305 return mask;
40306 }
40307 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
40308
40309 /*
40310 * Abort all requests on the given list (pending or processing)
40311 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40312 }
40313 EXPORT_SYMBOL_GPL(fuse_dev_release);
40314
40315 -static int fuse_dev_fasync(int fd, struct file *file, int on)
40316 +int fuse_dev_fasync(int fd, struct file *file, int on)
40317 {
40318 struct fuse_conn *fc = fuse_get_conn(file);
40319 if (!fc)
40320 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40321 /* No locking - fasync_helper does its own locking */
40322 return fasync_helper(fd, file, on, &fc->fasync);
40323 }
40324 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40325
40326 const struct file_operations fuse_dev_operations = {
40327 .owner = THIS_MODULE,
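
    [illustrative sketch, not part of the patch] Besides exporting the fuse_dev_* helpers for the const cuse table, the fs/fuse/dev.c diff above moves the FUSE_NAME_MAX+1 name buffer in fuse_notify_inval_entry() off the stack onto the heap and frees it on every exit path. A userspace C sketch of that pattern, using a made-up NAME_LIMIT in place of FUSE_NAME_MAX:

    /* Sketch only: replace a large on-stack name buffer with a heap
     * allocation that is freed on all return paths. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NAME_LIMIT 1024                  /* stands in for FUSE_NAME_MAX */

    static int handle_name(const char *src, size_t namelen)
    {
        char *buf = NULL;
        int err;

        if (namelen > NAME_LIMIT)
            return -EINVAL;

        buf = malloc(NAME_LIMIT + 1);        /* was: char buf[NAME_LIMIT + 1]; */
        if (!buf)
            return -ENOMEM;

        memcpy(buf, src, namelen);
        buf[namelen] = '\0';
        printf("invalidate entry \"%s\"\n", buf);
        err = 0;

        free(buf);                           /* freed before every return */
        return err;
    }

    int main(void)
    {
        const char *name = "example";
        return handle_name(name, strlen(name)) ? 1 : 0;
    }
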
40328 diff -urNp linux-2.6.32.42/fs/fuse/dir.c linux-2.6.32.42/fs/fuse/dir.c
40329 --- linux-2.6.32.42/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40330 +++ linux-2.6.32.42/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40331 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40332 return link;
40333 }
40334
40335 -static void free_link(char *link)
40336 +static void free_link(const char *link)
40337 {
40338 if (!IS_ERR(link))
40339 free_page((unsigned long) link);
40340 diff -urNp linux-2.6.32.42/fs/fuse/fuse_i.h linux-2.6.32.42/fs/fuse/fuse_i.h
40341 --- linux-2.6.32.42/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40342 +++ linux-2.6.32.42/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40343 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40344
40345 extern const struct dentry_operations fuse_dentry_operations;
40346
40347 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40348 + unsigned long nr_segs, loff_t pos);
40349 +
40350 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40351 + unsigned long nr_segs, loff_t pos);
40352 +
40353 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40354 +
40355 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40356 +
40357 /**
40358 * Inode to nodeid comparison.
40359 */
40360 diff -urNp linux-2.6.32.42/fs/gfs2/ops_inode.c linux-2.6.32.42/fs/gfs2/ops_inode.c
40361 --- linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40362 +++ linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40363 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40364 unsigned int x;
40365 int error;
40366
40367 + pax_track_stack();
40368 +
40369 if (ndentry->d_inode) {
40370 nip = GFS2_I(ndentry->d_inode);
40371 if (ip == nip)
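
    [illustrative sketch, not part of the patch] The pax_track_stack() calls added here and in the hfsplus, jbd, jffs2, lockd, ncpfs and namei hunks that follow appear to mark functions with large stack frames for PAX_MEMORY_STACKLEAK, which records how deep the kernel stack was used so it can be sanitized later. The real implementation is architecture-specific and lives in the PaX runtime; the userspace C model below only illustrates the idea of a stack-depth probe and assumes a downward-growing stack.

    /* Sketch only: a crude model of a stack-usage probe called at the
     * top of stack-hungry functions. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uintptr_t stack_top;    /* address of a local in main()      */
    static size_t    max_depth;    /* deepest distance observed from it */

    static void track_stack(void)
    {
        char probe;
        /* assumes the stack grows toward lower addresses */
        size_t depth = (size_t)(stack_top - (uintptr_t)&probe);

        if (depth > max_depth)
            max_depth = depth;
    }

    static void stack_hungry(void)
    {
        char big[8192];            /* the kind of frame that gets annotated */

        track_stack();
        memset(big, 0, sizeof(big));
    }

    int main(void)
    {
        char anchor;

        stack_top = (uintptr_t)&anchor;
        stack_hungry();
        printf("max observed stack depth: %zu bytes\n", max_depth);
        return 0;
    }
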
40372 diff -urNp linux-2.6.32.42/fs/gfs2/sys.c linux-2.6.32.42/fs/gfs2/sys.c
40373 --- linux-2.6.32.42/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40374 +++ linux-2.6.32.42/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40375 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40376 return a->store ? a->store(sdp, buf, len) : len;
40377 }
40378
40379 -static struct sysfs_ops gfs2_attr_ops = {
40380 +static const struct sysfs_ops gfs2_attr_ops = {
40381 .show = gfs2_attr_show,
40382 .store = gfs2_attr_store,
40383 };
40384 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40385 return 0;
40386 }
40387
40388 -static struct kset_uevent_ops gfs2_uevent_ops = {
40389 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40390 .uevent = gfs2_uevent,
40391 };
40392
40393 diff -urNp linux-2.6.32.42/fs/hfsplus/catalog.c linux-2.6.32.42/fs/hfsplus/catalog.c
40394 --- linux-2.6.32.42/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40395 +++ linux-2.6.32.42/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40396 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40397 int err;
40398 u16 type;
40399
40400 + pax_track_stack();
40401 +
40402 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40403 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40404 if (err)
40405 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40406 int entry_size;
40407 int err;
40408
40409 + pax_track_stack();
40410 +
40411 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40412 sb = dir->i_sb;
40413 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40414 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40415 int entry_size, type;
40416 int err = 0;
40417
40418 + pax_track_stack();
40419 +
40420 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40421 dst_dir->i_ino, dst_name->name);
40422 sb = src_dir->i_sb;
40423 diff -urNp linux-2.6.32.42/fs/hfsplus/dir.c linux-2.6.32.42/fs/hfsplus/dir.c
40424 --- linux-2.6.32.42/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40425 +++ linux-2.6.32.42/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40426 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40427 struct hfsplus_readdir_data *rd;
40428 u16 type;
40429
40430 + pax_track_stack();
40431 +
40432 if (filp->f_pos >= inode->i_size)
40433 return 0;
40434
40435 diff -urNp linux-2.6.32.42/fs/hfsplus/inode.c linux-2.6.32.42/fs/hfsplus/inode.c
40436 --- linux-2.6.32.42/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40437 +++ linux-2.6.32.42/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40438 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40439 int res = 0;
40440 u16 type;
40441
40442 + pax_track_stack();
40443 +
40444 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40445
40446 HFSPLUS_I(inode).dev = 0;
40447 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40448 struct hfs_find_data fd;
40449 hfsplus_cat_entry entry;
40450
40451 + pax_track_stack();
40452 +
40453 if (HFSPLUS_IS_RSRC(inode))
40454 main_inode = HFSPLUS_I(inode).rsrc_inode;
40455
40456 diff -urNp linux-2.6.32.42/fs/hfsplus/ioctl.c linux-2.6.32.42/fs/hfsplus/ioctl.c
40457 --- linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40458 +++ linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40459 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40460 struct hfsplus_cat_file *file;
40461 int res;
40462
40463 + pax_track_stack();
40464 +
40465 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40466 return -EOPNOTSUPP;
40467
40468 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40469 struct hfsplus_cat_file *file;
40470 ssize_t res = 0;
40471
40472 + pax_track_stack();
40473 +
40474 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40475 return -EOPNOTSUPP;
40476
40477 diff -urNp linux-2.6.32.42/fs/hfsplus/super.c linux-2.6.32.42/fs/hfsplus/super.c
40478 --- linux-2.6.32.42/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40479 +++ linux-2.6.32.42/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40480 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40481 struct nls_table *nls = NULL;
40482 int err = -EINVAL;
40483
40484 + pax_track_stack();
40485 +
40486 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40487 if (!sbi)
40488 return -ENOMEM;
40489 diff -urNp linux-2.6.32.42/fs/hugetlbfs/inode.c linux-2.6.32.42/fs/hugetlbfs/inode.c
40490 --- linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40491 +++ linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40492 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40493 .kill_sb = kill_litter_super,
40494 };
40495
40496 -static struct vfsmount *hugetlbfs_vfsmount;
40497 +struct vfsmount *hugetlbfs_vfsmount;
40498
40499 static int can_do_hugetlb_shm(void)
40500 {
40501 diff -urNp linux-2.6.32.42/fs/ioctl.c linux-2.6.32.42/fs/ioctl.c
40502 --- linux-2.6.32.42/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40503 +++ linux-2.6.32.42/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40504 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40505 u64 phys, u64 len, u32 flags)
40506 {
40507 struct fiemap_extent extent;
40508 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
40509 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40510
40511 /* only count the extents */
40512 if (fieinfo->fi_extents_max == 0) {
40513 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40514
40515 fieinfo.fi_flags = fiemap.fm_flags;
40516 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40517 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40518 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40519
40520 if (fiemap.fm_extent_count != 0 &&
40521 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40522 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40523 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40524 fiemap.fm_flags = fieinfo.fi_flags;
40525 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40526 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40527 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40528 error = -EFAULT;
40529
40530 return error;
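
    [illustrative sketch, not part of the patch] The fs/ioctl.c hunks above only add __user address-space annotations so sparse can tell user pointers from kernel pointers; the generated code is unchanged. The standalone C sketch below shows how such an annotation is typically defined and used; the attribute only takes effect when checking with sparse (__CHECKER__ defined), and copy_out() is a made-up stand-in for copy_to_user().

    /* Sketch only: a sparse-style address-space annotation.  With a
     * normal compiler it expands to nothing. */
    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    /* copy_to_user stand-in: dst is tagged as a user pointer */
    static long copy_out(void __user *dst, const void *src, size_t n)
    {
        memcpy((void *)dst, src, n);   /* the real kernel uses uaccess helpers */
        return 0;
    }

    int main(void)
    {
        char kbuf[16] = "fiemap";
        char ubuf[16];                 /* pretend this is user memory */

        copy_out((void __user *)ubuf, kbuf, sizeof(kbuf));
        printf("%s\n", ubuf);
        return 0;
    }
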
40531 diff -urNp linux-2.6.32.42/fs/jbd/checkpoint.c linux-2.6.32.42/fs/jbd/checkpoint.c
40532 --- linux-2.6.32.42/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40533 +++ linux-2.6.32.42/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40534 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40535 tid_t this_tid;
40536 int result;
40537
40538 + pax_track_stack();
40539 +
40540 jbd_debug(1, "Start checkpoint\n");
40541
40542 /*
40543 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rtime.c linux-2.6.32.42/fs/jffs2/compr_rtime.c
40544 --- linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40545 +++ linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40546 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40547 int outpos = 0;
40548 int pos=0;
40549
40550 + pax_track_stack();
40551 +
40552 memset(positions,0,sizeof(positions));
40553
40554 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40555 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40556 int outpos = 0;
40557 int pos=0;
40558
40559 + pax_track_stack();
40560 +
40561 memset(positions,0,sizeof(positions));
40562
40563 while (outpos<destlen) {
40564 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rubin.c linux-2.6.32.42/fs/jffs2/compr_rubin.c
40565 --- linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40566 +++ linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40567 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40568 int ret;
40569 uint32_t mysrclen, mydstlen;
40570
40571 + pax_track_stack();
40572 +
40573 mysrclen = *sourcelen;
40574 mydstlen = *dstlen - 8;
40575
40576 diff -urNp linux-2.6.32.42/fs/jffs2/erase.c linux-2.6.32.42/fs/jffs2/erase.c
40577 --- linux-2.6.32.42/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40578 +++ linux-2.6.32.42/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40579 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40580 struct jffs2_unknown_node marker = {
40581 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40582 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40583 - .totlen = cpu_to_je32(c->cleanmarker_size)
40584 + .totlen = cpu_to_je32(c->cleanmarker_size),
40585 + .hdr_crc = cpu_to_je32(0)
40586 };
40587
40588 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40589 diff -urNp linux-2.6.32.42/fs/jffs2/wbuf.c linux-2.6.32.42/fs/jffs2/wbuf.c
40590 --- linux-2.6.32.42/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40591 +++ linux-2.6.32.42/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40592 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40593 {
40594 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40595 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40596 - .totlen = constant_cpu_to_je32(8)
40597 + .totlen = constant_cpu_to_je32(8),
40598 + .hdr_crc = constant_cpu_to_je32(0)
40599 };
40600
40601 /*
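
    [illustrative sketch, not part of the patch] The jffs2 erase.c and wbuf.c hunks above only add an explicit .hdr_crc = cpu_to_je32(0) to designated initializers. C already zero-initializes members omitted from a designated initializer, so this presumably just makes the cleanmarker layout explicit rather than changing behaviour. A tiny C demonstration of that rule, with a simplified struct and arbitrary field values:

    /* Sketch only: members omitted from a designated initializer are
     * zero-initialized, so spelling out .crc = 0 documents the layout
     * without changing the object. */
    #include <stdint.h>
    #include <stdio.h>

    struct marker {
        uint16_t magic;
        uint16_t nodetype;
        uint32_t totlen;
        uint32_t crc;
    };

    int main(void)
    {
        struct marker implicit = {
            .magic    = 0x1985,
            .nodetype = 0x2003,
            .totlen   = 8,
            /* .crc left out: zero-initialized by the language */
        };
        struct marker explicit_ = {
            .magic    = 0x1985,
            .nodetype = 0x2003,
            .totlen   = 8,
            .crc      = 0,             /* same value, just explicit */
        };

        printf("implicit.crc=%u explicit.crc=%u\n", implicit.crc, explicit_.crc);
        return 0;
    }
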
40602 diff -urNp linux-2.6.32.42/fs/jffs2/xattr.c linux-2.6.32.42/fs/jffs2/xattr.c
40603 --- linux-2.6.32.42/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40604 +++ linux-2.6.32.42/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40605 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40606
40607 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40608
40609 + pax_track_stack();
40610 +
40611 /* Phase.1 : Merge same xref */
40612 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40613 xref_tmphash[i] = NULL;
40614 diff -urNp linux-2.6.32.42/fs/jfs/super.c linux-2.6.32.42/fs/jfs/super.c
40615 --- linux-2.6.32.42/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
40616 +++ linux-2.6.32.42/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
40617 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
40618
40619 jfs_inode_cachep =
40620 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40621 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40622 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40623 init_once);
40624 if (jfs_inode_cachep == NULL)
40625 return -ENOMEM;
40626 diff -urNp linux-2.6.32.42/fs/Kconfig.binfmt linux-2.6.32.42/fs/Kconfig.binfmt
40627 --- linux-2.6.32.42/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40628 +++ linux-2.6.32.42/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40629 @@ -86,7 +86,7 @@ config HAVE_AOUT
40630
40631 config BINFMT_AOUT
40632 tristate "Kernel support for a.out and ECOFF binaries"
40633 - depends on HAVE_AOUT
40634 + depends on HAVE_AOUT && BROKEN
40635 ---help---
40636 A.out (Assembler.OUTput) is a set of formats for libraries and
40637 executables used in the earliest versions of UNIX. Linux used
40638 diff -urNp linux-2.6.32.42/fs/libfs.c linux-2.6.32.42/fs/libfs.c
40639 --- linux-2.6.32.42/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
40640 +++ linux-2.6.32.42/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
40641 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
40642
40643 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40644 struct dentry *next;
40645 + char d_name[sizeof(next->d_iname)];
40646 + const unsigned char *name;
40647 +
40648 next = list_entry(p, struct dentry, d_u.d_child);
40649 if (d_unhashed(next) || !next->d_inode)
40650 continue;
40651
40652 spin_unlock(&dcache_lock);
40653 - if (filldir(dirent, next->d_name.name,
40654 + name = next->d_name.name;
40655 + if (name == next->d_iname) {
40656 + memcpy(d_name, name, next->d_name.len);
40657 + name = d_name;
40658 + }
40659 + if (filldir(dirent, name,
40660 next->d_name.len, filp->f_pos,
40661 next->d_inode->i_ino,
40662 dt_type(next->d_inode)) < 0)
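
    [illustrative sketch, not part of the patch] The fs/libfs.c hunk above copies short names that live in the dentry's inline d_iname array into a local buffer before dcache_lock is dropped, apparently so filldir() never reads a name that a concurrent rename could rewrite in place. A userspace C sketch of the copy-out-before-unlock pattern, with a hypothetical record type:

    /* Sketch only: snapshot data that lives inside a locked structure
     * before dropping the lock, then work on the private copy. */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct entry {
        char inline_name[32];          /* models dentry->d_iname */
    };

    static struct entry shared = { .inline_name = "report.txt" };
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void emit(const char *name)
    {
        /* runs without the lock held, like filldir() */
        printf("entry: %s\n", name);
    }

    int main(void)
    {
        char snapshot[sizeof(shared.inline_name)];

        pthread_mutex_lock(&lock);
        /* copy while the name cannot change under us */
        memcpy(snapshot, shared.inline_name, sizeof(snapshot));
        pthread_mutex_unlock(&lock);

        emit(snapshot);                /* safe even if shared is renamed now */
        return 0;
    }
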
40663 diff -urNp linux-2.6.32.42/fs/lockd/clntproc.c linux-2.6.32.42/fs/lockd/clntproc.c
40664 --- linux-2.6.32.42/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
40665 +++ linux-2.6.32.42/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
40666 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40667 /*
40668 * Cookie counter for NLM requests
40669 */
40670 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40671 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
40672
40673 void nlmclnt_next_cookie(struct nlm_cookie *c)
40674 {
40675 - u32 cookie = atomic_inc_return(&nlm_cookie);
40676 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
40677
40678 memcpy(c->data, &cookie, 4);
40679 c->len=4;
40680 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
40681 struct nlm_rqst reqst, *req;
40682 int status;
40683
40684 + pax_track_stack();
40685 +
40686 req = &reqst;
40687 memset(req, 0, sizeof(*req));
40688 locks_init_lock(&req->a_args.lock.fl);
40689 diff -urNp linux-2.6.32.42/fs/lockd/svc.c linux-2.6.32.42/fs/lockd/svc.c
40690 --- linux-2.6.32.42/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
40691 +++ linux-2.6.32.42/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
40692 @@ -43,7 +43,7 @@
40693
40694 static struct svc_program nlmsvc_program;
40695
40696 -struct nlmsvc_binding * nlmsvc_ops;
40697 +const struct nlmsvc_binding * nlmsvc_ops;
40698 EXPORT_SYMBOL_GPL(nlmsvc_ops);
40699
40700 static DEFINE_MUTEX(nlmsvc_mutex);
40701 diff -urNp linux-2.6.32.42/fs/locks.c linux-2.6.32.42/fs/locks.c
40702 --- linux-2.6.32.42/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
40703 +++ linux-2.6.32.42/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
40704 @@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
40705 return;
40706
40707 if (filp->f_op && filp->f_op->flock) {
40708 - struct file_lock fl = {
40709 + struct file_lock flock = {
40710 .fl_pid = current->tgid,
40711 .fl_file = filp,
40712 .fl_flags = FL_FLOCK,
40713 .fl_type = F_UNLCK,
40714 .fl_end = OFFSET_MAX,
40715 };
40716 - filp->f_op->flock(filp, F_SETLKW, &fl);
40717 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
40718 - fl.fl_ops->fl_release_private(&fl);
40719 + filp->f_op->flock(filp, F_SETLKW, &flock);
40720 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
40721 + flock.fl_ops->fl_release_private(&flock);
40722 }
40723
40724 lock_kernel();
40725 diff -urNp linux-2.6.32.42/fs/namei.c linux-2.6.32.42/fs/namei.c
40726 --- linux-2.6.32.42/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
40727 +++ linux-2.6.32.42/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
40728 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
40729 return ret;
40730
40731 /*
40732 - * Read/write DACs are always overridable.
40733 - * Executable DACs are overridable if at least one exec bit is set.
40734 - */
40735 - if (!(mask & MAY_EXEC) || execute_ok(inode))
40736 - if (capable(CAP_DAC_OVERRIDE))
40737 - return 0;
40738 -
40739 - /*
40740 * Searching includes executable on directories, else just read.
40741 */
40742 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
40743 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
40744 if (capable(CAP_DAC_READ_SEARCH))
40745 return 0;
40746
40747 + /*
40748 + * Read/write DACs are always overridable.
40749 + * Executable DACs are overridable if at least one exec bit is set.
40750 + */
40751 + if (!(mask & MAY_EXEC) || execute_ok(inode))
40752 + if (capable(CAP_DAC_OVERRIDE))
40753 + return 0;
40754 +
40755 return -EACCES;
40756 }
40757
40758 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
40759 if (!ret)
40760 goto ok;
40761
40762 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
40763 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
40764 + capable(CAP_DAC_OVERRIDE))
40765 goto ok;
40766
40767 return ret;
40768 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
40769 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
40770 error = PTR_ERR(cookie);
40771 if (!IS_ERR(cookie)) {
40772 - char *s = nd_get_link(nd);
40773 + const char *s = nd_get_link(nd);
40774 error = 0;
40775 if (s)
40776 error = __vfs_follow_link(nd, s);
40777 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
40778 err = security_inode_follow_link(path->dentry, nd);
40779 if (err)
40780 goto loop;
40781 +
40782 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
40783 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
40784 + err = -EACCES;
40785 + goto loop;
40786 + }
40787 +
40788 current->link_count++;
40789 current->total_link_count++;
40790 nd->depth++;
40791 @@ -1016,11 +1024,18 @@ return_reval:
40792 break;
40793 }
40794 return_base:
40795 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
40796 + path_put(&nd->path);
40797 + return -ENOENT;
40798 + }
40799 return 0;
40800 out_dput:
40801 path_put_conditional(&next, nd);
40802 break;
40803 }
40804 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
40805 + err = -ENOENT;
40806 +
40807 path_put(&nd->path);
40808 return_err:
40809 return err;
40810 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
40811 int retval = path_init(dfd, name, flags, nd);
40812 if (!retval)
40813 retval = path_walk(name, nd);
40814 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
40815 - nd->path.dentry->d_inode))
40816 - audit_inode(name, nd->path.dentry);
40817 +
40818 + if (likely(!retval)) {
40819 + if (nd->path.dentry && nd->path.dentry->d_inode) {
40820 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
40821 + retval = -ENOENT;
40822 + if (!audit_dummy_context())
40823 + audit_inode(name, nd->path.dentry);
40824 + }
40825 + }
40826 if (nd->root.mnt) {
40827 path_put(&nd->root);
40828 nd->root.mnt = NULL;
40829 }
40830 +
40831 return retval;
40832 }
40833
40834 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
40835 if (error)
40836 goto err_out;
40837
40838 +
40839 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
40840 + error = -EPERM;
40841 + goto err_out;
40842 + }
40843 + if (gr_handle_rawio(inode)) {
40844 + error = -EPERM;
40845 + goto err_out;
40846 + }
40847 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
40848 + error = -EACCES;
40849 + goto err_out;
40850 + }
40851 +
40852 if (flag & O_TRUNC) {
40853 error = get_write_access(inode);
40854 if (error)
40855 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
40856 int error;
40857 struct dentry *dir = nd->path.dentry;
40858
40859 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
40860 + error = -EACCES;
40861 + goto out_unlock;
40862 + }
40863 +
40864 if (!IS_POSIXACL(dir->d_inode))
40865 mode &= ~current_umask();
40866 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
40867 if (error)
40868 goto out_unlock;
40869 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
40870 + if (!error)
40871 + gr_handle_create(path->dentry, nd->path.mnt);
40872 out_unlock:
40873 mutex_unlock(&dir->d_inode->i_mutex);
40874 dput(nd->path.dentry);
40875 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
40876 &nd, flag);
40877 if (error)
40878 return ERR_PTR(error);
40879 +
40880 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
40881 + error = -EPERM;
40882 + goto exit;
40883 + }
40884 +
40885 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
40886 + error = -EPERM;
40887 + goto exit;
40888 + }
40889 +
40890 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
40891 + error = -EACCES;
40892 + goto exit;
40893 + }
40894 +
40895 goto ok;
40896 }
40897
40898 @@ -1795,6 +1854,14 @@ do_last:
40899 /*
40900 * It already exists.
40901 */
40902 +
40903 + /* only check if O_CREAT is specified, all other checks need
40904 + to go into may_open */
40905 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
40906 + error = -EACCES;
40907 + goto exit_mutex_unlock;
40908 + }
40909 +
40910 mutex_unlock(&dir->d_inode->i_mutex);
40911 audit_inode(pathname, path.dentry);
40912
40913 @@ -1887,6 +1954,13 @@ do_link:
40914 error = security_inode_follow_link(path.dentry, &nd);
40915 if (error)
40916 goto exit_dput;
40917 +
40918 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
40919 + path.dentry, nd.path.mnt)) {
40920 + error = -EACCES;
40921 + goto exit_dput;
40922 + }
40923 +
40924 error = __do_follow_link(&path, &nd);
40925 if (error) {
40926 /* Does someone understand code flow here? Or it is only
40927 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40928 error = may_mknod(mode);
40929 if (error)
40930 goto out_dput;
40931 +
40932 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
40933 + error = -EPERM;
40934 + goto out_dput;
40935 + }
40936 +
40937 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
40938 + error = -EACCES;
40939 + goto out_dput;
40940 + }
40941 +
40942 error = mnt_want_write(nd.path.mnt);
40943 if (error)
40944 goto out_dput;
40945 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40946 }
40947 out_drop_write:
40948 mnt_drop_write(nd.path.mnt);
40949 +
40950 + if (!error)
40951 + gr_handle_create(dentry, nd.path.mnt);
40952 out_dput:
40953 dput(dentry);
40954 out_unlock:
40955 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40956 if (IS_ERR(dentry))
40957 goto out_unlock;
40958
40959 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
40960 + error = -EACCES;
40961 + goto out_dput;
40962 + }
40963 +
40964 if (!IS_POSIXACL(nd.path.dentry->d_inode))
40965 mode &= ~current_umask();
40966 error = mnt_want_write(nd.path.mnt);
40967 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40968 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
40969 out_drop_write:
40970 mnt_drop_write(nd.path.mnt);
40971 +
40972 + if (!error)
40973 + gr_handle_create(dentry, nd.path.mnt);
40974 +
40975 out_dput:
40976 dput(dentry);
40977 out_unlock:
40978 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
40979 char * name;
40980 struct dentry *dentry;
40981 struct nameidata nd;
40982 + ino_t saved_ino = 0;
40983 + dev_t saved_dev = 0;
40984
40985 error = user_path_parent(dfd, pathname, &nd, &name);
40986 if (error)
40987 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
40988 error = PTR_ERR(dentry);
40989 if (IS_ERR(dentry))
40990 goto exit2;
40991 +
40992 + if (dentry->d_inode != NULL) {
40993 + if (dentry->d_inode->i_nlink <= 1) {
40994 + saved_ino = dentry->d_inode->i_ino;
40995 + saved_dev = gr_get_dev_from_dentry(dentry);
40996 + }
40997 +
40998 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
40999 + error = -EACCES;
41000 + goto exit3;
41001 + }
41002 + }
41003 +
41004 error = mnt_want_write(nd.path.mnt);
41005 if (error)
41006 goto exit3;
41007 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
41008 if (error)
41009 goto exit4;
41010 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41011 + if (!error && (saved_dev || saved_ino))
41012 + gr_handle_delete(saved_ino, saved_dev);
41013 exit4:
41014 mnt_drop_write(nd.path.mnt);
41015 exit3:
41016 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
41017 struct dentry *dentry;
41018 struct nameidata nd;
41019 struct inode *inode = NULL;
41020 + ino_t saved_ino = 0;
41021 + dev_t saved_dev = 0;
41022
41023 error = user_path_parent(dfd, pathname, &nd, &name);
41024 if (error)
41025 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
41026 if (nd.last.name[nd.last.len])
41027 goto slashes;
41028 inode = dentry->d_inode;
41029 - if (inode)
41030 + if (inode) {
41031 + if (inode->i_nlink <= 1) {
41032 + saved_ino = inode->i_ino;
41033 + saved_dev = gr_get_dev_from_dentry(dentry);
41034 + }
41035 +
41036 atomic_inc(&inode->i_count);
41037 +
41038 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41039 + error = -EACCES;
41040 + goto exit2;
41041 + }
41042 + }
41043 error = mnt_want_write(nd.path.mnt);
41044 if (error)
41045 goto exit2;
41046 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
41047 if (error)
41048 goto exit3;
41049 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41050 + if (!error && (saved_ino || saved_dev))
41051 + gr_handle_delete(saved_ino, saved_dev);
41052 exit3:
41053 mnt_drop_write(nd.path.mnt);
41054 exit2:
41055 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41056 if (IS_ERR(dentry))
41057 goto out_unlock;
41058
41059 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41060 + error = -EACCES;
41061 + goto out_dput;
41062 + }
41063 +
41064 error = mnt_want_write(nd.path.mnt);
41065 if (error)
41066 goto out_dput;
41067 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41068 if (error)
41069 goto out_drop_write;
41070 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41071 + if (!error)
41072 + gr_handle_create(dentry, nd.path.mnt);
41073 out_drop_write:
41074 mnt_drop_write(nd.path.mnt);
41075 out_dput:
41076 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41077 error = PTR_ERR(new_dentry);
41078 if (IS_ERR(new_dentry))
41079 goto out_unlock;
41080 +
41081 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41082 + old_path.dentry->d_inode,
41083 + old_path.dentry->d_inode->i_mode, to)) {
41084 + error = -EACCES;
41085 + goto out_dput;
41086 + }
41087 +
41088 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41089 + old_path.dentry, old_path.mnt, to)) {
41090 + error = -EACCES;
41091 + goto out_dput;
41092 + }
41093 +
41094 error = mnt_want_write(nd.path.mnt);
41095 if (error)
41096 goto out_dput;
41097 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41098 if (error)
41099 goto out_drop_write;
41100 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41101 + if (!error)
41102 + gr_handle_create(new_dentry, nd.path.mnt);
41103 out_drop_write:
41104 mnt_drop_write(nd.path.mnt);
41105 out_dput:
41106 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41107 char *to;
41108 int error;
41109
41110 + pax_track_stack();
41111 +
41112 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41113 if (error)
41114 goto exit;
41115 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41116 if (new_dentry == trap)
41117 goto exit5;
41118
41119 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41120 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
41121 + to);
41122 + if (error)
41123 + goto exit5;
41124 +
41125 error = mnt_want_write(oldnd.path.mnt);
41126 if (error)
41127 goto exit5;
41128 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41129 goto exit6;
41130 error = vfs_rename(old_dir->d_inode, old_dentry,
41131 new_dir->d_inode, new_dentry);
41132 + if (!error)
41133 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41134 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41135 exit6:
41136 mnt_drop_write(oldnd.path.mnt);
41137 exit5:
41138 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
41139
41140 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41141 {
41142 + char tmpbuf[64];
41143 + const char *newlink;
41144 int len;
41145
41146 len = PTR_ERR(link);
41147 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
41148 len = strlen(link);
41149 if (len > (unsigned) buflen)
41150 len = buflen;
41151 - if (copy_to_user(buffer, link, len))
41152 +
41153 + if (len < sizeof(tmpbuf)) {
41154 + memcpy(tmpbuf, link, len);
41155 + newlink = tmpbuf;
41156 + } else
41157 + newlink = link;
41158 +
41159 + if (copy_to_user(buffer, newlink, len))
41160 len = -EFAULT;
41161 out:
41162 return len;
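
    [illustrative sketch, not part of the patch] The first fs/namei.c hunk above reorders generic_permission() so the narrower CAP_DAC_READ_SEARCH test runs before the CAP_DAC_OVERRIDE fallback, presumably so a read/search-only access never has to consult (and, under grsecurity's capability logging, report) the broader override capability. The compact C sketch below models only the ordering; the mask handling is simplified and the capability flags are stand-ins for capable().

    /* Sketch only: try the narrowest capability that can satisfy the
     * request before falling back to the broad one. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAY_READ  0x1
    #define MAY_WRITE 0x2
    #define MAY_EXEC  0x4

    static bool has_cap_dac_read_search;   /* stand-ins for capable() */
    static bool has_cap_dac_override;

    static int check_access(int mask, bool exec_bit_set)
    {
        /* read/search-only requests: the narrow capability suffices */
        if (!(mask & MAY_WRITE) && !(mask & MAY_EXEC))
            if (has_cap_dac_read_search)
                return 0;

        /* only now fall back to the broad override capability */
        if (!(mask & MAY_EXEC) || exec_bit_set)
            if (has_cap_dac_override)
                return 0;

        return -1;                          /* models -EACCES */
    }

    int main(void)
    {
        has_cap_dac_read_search = true;
        printf("read check: %d\n", check_access(MAY_READ, false));
        printf("write check: %d\n", check_access(MAY_WRITE, false));
        return 0;
    }
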
41163 diff -urNp linux-2.6.32.42/fs/namespace.c linux-2.6.32.42/fs/namespace.c
41164 --- linux-2.6.32.42/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
41165 +++ linux-2.6.32.42/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
41166 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
41167 if (!(sb->s_flags & MS_RDONLY))
41168 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41169 up_write(&sb->s_umount);
41170 +
41171 + gr_log_remount(mnt->mnt_devname, retval);
41172 +
41173 return retval;
41174 }
41175
41176 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
41177 security_sb_umount_busy(mnt);
41178 up_write(&namespace_sem);
41179 release_mounts(&umount_list);
41180 +
41181 + gr_log_unmount(mnt->mnt_devname, retval);
41182 +
41183 return retval;
41184 }
41185
41186 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
41187 if (retval)
41188 goto dput_out;
41189
41190 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41191 + retval = -EPERM;
41192 + goto dput_out;
41193 + }
41194 +
41195 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41196 + retval = -EPERM;
41197 + goto dput_out;
41198 + }
41199 +
41200 if (flags & MS_REMOUNT)
41201 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41202 data_page);
41203 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
41204 dev_name, data_page);
41205 dput_out:
41206 path_put(&path);
41207 +
41208 + gr_log_mount(dev_name, dir_name, retval);
41209 +
41210 return retval;
41211 }
41212
41213 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
41214 goto out1;
41215 }
41216
41217 + if (gr_handle_chroot_pivot()) {
41218 + error = -EPERM;
41219 + path_put(&old);
41220 + goto out1;
41221 + }
41222 +
41223 read_lock(&current->fs->lock);
41224 root = current->fs->root;
41225 path_get(&current->fs->root);
41226 diff -urNp linux-2.6.32.42/fs/ncpfs/dir.c linux-2.6.32.42/fs/ncpfs/dir.c
41227 --- linux-2.6.32.42/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41228 +++ linux-2.6.32.42/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
41229 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
41230 int res, val = 0, len;
41231 __u8 __name[NCP_MAXPATHLEN + 1];
41232
41233 + pax_track_stack();
41234 +
41235 parent = dget_parent(dentry);
41236 dir = parent->d_inode;
41237
41238 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41239 int error, res, len;
41240 __u8 __name[NCP_MAXPATHLEN + 1];
41241
41242 + pax_track_stack();
41243 +
41244 lock_kernel();
41245 error = -EIO;
41246 if (!ncp_conn_valid(server))
41247 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41248 int error, result, len;
41249 int opmode;
41250 __u8 __name[NCP_MAXPATHLEN + 1];
41251 -
41252 +
41253 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41254 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41255
41256 + pax_track_stack();
41257 +
41258 error = -EIO;
41259 lock_kernel();
41260 if (!ncp_conn_valid(server))
41261 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41262 int error, len;
41263 __u8 __name[NCP_MAXPATHLEN + 1];
41264
41265 + pax_track_stack();
41266 +
41267 DPRINTK("ncp_mkdir: making %s/%s\n",
41268 dentry->d_parent->d_name.name, dentry->d_name.name);
41269
41270 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41271 if (!ncp_conn_valid(server))
41272 goto out;
41273
41274 + pax_track_stack();
41275 +
41276 ncp_age_dentry(server, dentry);
41277 len = sizeof(__name);
41278 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41279 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41280 int old_len, new_len;
41281 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41282
41283 + pax_track_stack();
41284 +
41285 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41286 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41287 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41288 diff -urNp linux-2.6.32.42/fs/ncpfs/inode.c linux-2.6.32.42/fs/ncpfs/inode.c
41289 --- linux-2.6.32.42/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41290 +++ linux-2.6.32.42/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41291 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41292 #endif
41293 struct ncp_entry_info finfo;
41294
41295 + pax_track_stack();
41296 +
41297 data.wdog_pid = NULL;
41298 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41299 if (!server)
41300 diff -urNp linux-2.6.32.42/fs/nfs/inode.c linux-2.6.32.42/fs/nfs/inode.c
41301 --- linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41302 +++ linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
41303 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41304 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41305 }
41306
41307 -static atomic_long_t nfs_attr_generation_counter;
41308 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41309
41310 static unsigned long nfs_read_attr_generation_counter(void)
41311 {
41312 - return atomic_long_read(&nfs_attr_generation_counter);
41313 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41314 }
41315
41316 unsigned long nfs_inc_attr_generation_counter(void)
41317 {
41318 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41319 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41320 }
41321
41322 void nfs_fattr_init(struct nfs_fattr *fattr)
41323 diff -urNp linux-2.6.32.42/fs/nfsd/lockd.c linux-2.6.32.42/fs/nfsd/lockd.c
41324 --- linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41325 +++ linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41326 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41327 fput(filp);
41328 }
41329
41330 -static struct nlmsvc_binding nfsd_nlm_ops = {
41331 +static const struct nlmsvc_binding nfsd_nlm_ops = {
41332 .fopen = nlm_fopen, /* open file for locking */
41333 .fclose = nlm_fclose, /* close file */
41334 };
41335 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4state.c linux-2.6.32.42/fs/nfsd/nfs4state.c
41336 --- linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41337 +++ linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41338 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41339 unsigned int cmd;
41340 int err;
41341
41342 + pax_track_stack();
41343 +
41344 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41345 (long long) lock->lk_offset,
41346 (long long) lock->lk_length);
41347 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4xdr.c linux-2.6.32.42/fs/nfsd/nfs4xdr.c
41348 --- linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41349 +++ linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41350 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41351 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41352 u32 minorversion = resp->cstate.minorversion;
41353
41354 + pax_track_stack();
41355 +
41356 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41357 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41358 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41359 diff -urNp linux-2.6.32.42/fs/nfsd/vfs.c linux-2.6.32.42/fs/nfsd/vfs.c
41360 --- linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41361 +++ linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41362 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41363 } else {
41364 oldfs = get_fs();
41365 set_fs(KERNEL_DS);
41366 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41367 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41368 set_fs(oldfs);
41369 }
41370
41371 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41372
41373 /* Write the data. */
41374 oldfs = get_fs(); set_fs(KERNEL_DS);
41375 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41376 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41377 set_fs(oldfs);
41378 if (host_err < 0)
41379 goto out_nfserr;
41380 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41381 */
41382
41383 oldfs = get_fs(); set_fs(KERNEL_DS);
41384 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41385 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41386 set_fs(oldfs);
41387
41388 if (host_err < 0)
41389 diff -urNp linux-2.6.32.42/fs/nilfs2/ioctl.c linux-2.6.32.42/fs/nilfs2/ioctl.c
41390 --- linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41391 +++ linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41392 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41393 unsigned int cmd, void __user *argp)
41394 {
41395 struct nilfs_argv argv[5];
41396 - const static size_t argsz[5] = {
41397 + static const size_t argsz[5] = {
41398 sizeof(struct nilfs_vdesc),
41399 sizeof(struct nilfs_period),
41400 sizeof(__u64),
41401 diff -urNp linux-2.6.32.42/fs/notify/dnotify/dnotify.c linux-2.6.32.42/fs/notify/dnotify/dnotify.c
41402 --- linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41403 +++ linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41404 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41405 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41406 }
41407
41408 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41409 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41410 .handle_event = dnotify_handle_event,
41411 .should_send_event = dnotify_should_send_event,
41412 .free_group_priv = NULL,
41413 diff -urNp linux-2.6.32.42/fs/notify/notification.c linux-2.6.32.42/fs/notify/notification.c
41414 --- linux-2.6.32.42/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41415 +++ linux-2.6.32.42/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41416 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41417 * get set to 0 so it will never get 'freed'
41418 */
41419 static struct fsnotify_event q_overflow_event;
41420 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41421 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41422
41423 /**
41424 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41425 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41426 */
41427 u32 fsnotify_get_cookie(void)
41428 {
41429 - return atomic_inc_return(&fsnotify_sync_cookie);
41430 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41431 }
41432 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41433
41434 diff -urNp linux-2.6.32.42/fs/ntfs/dir.c linux-2.6.32.42/fs/ntfs/dir.c
41435 --- linux-2.6.32.42/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41436 +++ linux-2.6.32.42/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41437 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
41438 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41439 ~(s64)(ndir->itype.index.block_size - 1)));
41440 /* Bounds checks. */
41441 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41442 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41443 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41444 "inode 0x%lx or driver bug.", vdir->i_ino);
41445 goto err_out;
41446 diff -urNp linux-2.6.32.42/fs/ntfs/file.c linux-2.6.32.42/fs/ntfs/file.c
41447 --- linux-2.6.32.42/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41448 +++ linux-2.6.32.42/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41449 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41450 #endif /* NTFS_RW */
41451 };
41452
41453 -const struct file_operations ntfs_empty_file_ops = {};
41454 +const struct file_operations ntfs_empty_file_ops __read_only;
41455
41456 -const struct inode_operations ntfs_empty_inode_ops = {};
41457 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41458 diff -urNp linux-2.6.32.42/fs/ocfs2/cluster/masklog.c linux-2.6.32.42/fs/ocfs2/cluster/masklog.c
41459 --- linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41460 +++ linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41461 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41462 return mlog_mask_store(mlog_attr->mask, buf, count);
41463 }
41464
41465 -static struct sysfs_ops mlog_attr_ops = {
41466 +static const struct sysfs_ops mlog_attr_ops = {
41467 .show = mlog_show,
41468 .store = mlog_store,
41469 };
41470 diff -urNp linux-2.6.32.42/fs/ocfs2/localalloc.c linux-2.6.32.42/fs/ocfs2/localalloc.c
41471 --- linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41472 +++ linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41473 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41474 goto bail;
41475 }
41476
41477 - atomic_inc(&osb->alloc_stats.moves);
41478 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41479
41480 status = 0;
41481 bail:
41482 diff -urNp linux-2.6.32.42/fs/ocfs2/namei.c linux-2.6.32.42/fs/ocfs2/namei.c
41483 --- linux-2.6.32.42/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41484 +++ linux-2.6.32.42/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41485 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41486 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41487 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41488
41489 + pax_track_stack();
41490 +
41491 /* At some point it might be nice to break this function up a
41492 * bit. */
41493
41494 diff -urNp linux-2.6.32.42/fs/ocfs2/ocfs2.h linux-2.6.32.42/fs/ocfs2/ocfs2.h
41495 --- linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41496 +++ linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41497 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
41498
41499 struct ocfs2_alloc_stats
41500 {
41501 - atomic_t moves;
41502 - atomic_t local_data;
41503 - atomic_t bitmap_data;
41504 - atomic_t bg_allocs;
41505 - atomic_t bg_extends;
41506 + atomic_unchecked_t moves;
41507 + atomic_unchecked_t local_data;
41508 + atomic_unchecked_t bitmap_data;
41509 + atomic_unchecked_t bg_allocs;
41510 + atomic_unchecked_t bg_extends;
41511 };
41512
41513 enum ocfs2_local_alloc_state
41514 diff -urNp linux-2.6.32.42/fs/ocfs2/suballoc.c linux-2.6.32.42/fs/ocfs2/suballoc.c
41515 --- linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41516 +++ linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41517 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41518 mlog_errno(status);
41519 goto bail;
41520 }
41521 - atomic_inc(&osb->alloc_stats.bg_extends);
41522 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41523
41524 /* You should never ask for this much metadata */
41525 BUG_ON(bits_wanted >
41526 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41527 mlog_errno(status);
41528 goto bail;
41529 }
41530 - atomic_inc(&osb->alloc_stats.bg_allocs);
41531 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41532
41533 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41534 ac->ac_bits_given += (*num_bits);
41535 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41536 mlog_errno(status);
41537 goto bail;
41538 }
41539 - atomic_inc(&osb->alloc_stats.bg_allocs);
41540 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41541
41542 BUG_ON(num_bits != 1);
41543
41544 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41545 cluster_start,
41546 num_clusters);
41547 if (!status)
41548 - atomic_inc(&osb->alloc_stats.local_data);
41549 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41550 } else {
41551 if (min_clusters > (osb->bitmap_cpg - 1)) {
41552 /* The only paths asking for contiguousness
41553 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41554 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41555 bg_blkno,
41556 bg_bit_off);
41557 - atomic_inc(&osb->alloc_stats.bitmap_data);
41558 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41559 }
41560 }
41561 if (status < 0) {
41562 diff -urNp linux-2.6.32.42/fs/ocfs2/super.c linux-2.6.32.42/fs/ocfs2/super.c
41563 --- linux-2.6.32.42/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41564 +++ linux-2.6.32.42/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41565 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41566 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41567 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41568 "Stats",
41569 - atomic_read(&osb->alloc_stats.bitmap_data),
41570 - atomic_read(&osb->alloc_stats.local_data),
41571 - atomic_read(&osb->alloc_stats.bg_allocs),
41572 - atomic_read(&osb->alloc_stats.moves),
41573 - atomic_read(&osb->alloc_stats.bg_extends));
41574 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41575 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41576 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41577 + atomic_read_unchecked(&osb->alloc_stats.moves),
41578 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41579
41580 out += snprintf(buf + out, len - out,
41581 "%10s => State: %u Descriptor: %llu Size: %u bits "
41582 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41583 spin_lock_init(&osb->osb_xattr_lock);
41584 ocfs2_init_inode_steal_slot(osb);
41585
41586 - atomic_set(&osb->alloc_stats.moves, 0);
41587 - atomic_set(&osb->alloc_stats.local_data, 0);
41588 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41589 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41590 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41591 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41592 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41593 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41594 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41595 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41596
41597 /* Copy the blockcheck stats from the superblock probe */
41598 osb->osb_ecc_stats = *stats;
41599 diff -urNp linux-2.6.32.42/fs/open.c linux-2.6.32.42/fs/open.c
41600 --- linux-2.6.32.42/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41601 +++ linux-2.6.32.42/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41602 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41603 error = locks_verify_truncate(inode, NULL, length);
41604 if (!error)
41605 error = security_path_truncate(&path, length, 0);
41606 +
41607 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41608 + error = -EACCES;
41609 +
41610 if (!error) {
41611 vfs_dq_init(inode);
41612 error = do_truncate(path.dentry, length, 0, NULL);
41613 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41614 if (__mnt_is_readonly(path.mnt))
41615 res = -EROFS;
41616
41617 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41618 + res = -EACCES;
41619 +
41620 out_path_release:
41621 path_put(&path);
41622 out:
41623 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41624 if (error)
41625 goto dput_and_out;
41626
41627 + gr_log_chdir(path.dentry, path.mnt);
41628 +
41629 set_fs_pwd(current->fs, &path);
41630
41631 dput_and_out:
41632 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41633 goto out_putf;
41634
41635 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
41636 +
41637 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41638 + error = -EPERM;
41639 +
41640 + if (!error)
41641 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41642 +
41643 if (!error)
41644 set_fs_pwd(current->fs, &file->f_path);
41645 out_putf:
41646 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41647 if (!capable(CAP_SYS_CHROOT))
41648 goto dput_and_out;
41649
41650 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41651 + goto dput_and_out;
41652 +
41653 + if (gr_handle_chroot_caps(&path)) {
41654 + error = -ENOMEM;
41655 + goto dput_and_out;
41656 + }
41657 +
41658 set_fs_root(current->fs, &path);
41659 +
41660 + gr_handle_chroot_chdir(&path);
41661 +
41662 error = 0;
41663 dput_and_out:
41664 path_put(&path);
41665 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
41666 err = mnt_want_write_file(file);
41667 if (err)
41668 goto out_putf;
41669 +
41670 mutex_lock(&inode->i_mutex);
41671 +
41672 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
41673 + err = -EACCES;
41674 + goto out_unlock;
41675 + }
41676 +
41677 if (mode == (mode_t) -1)
41678 mode = inode->i_mode;
41679 +
41680 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
41681 + err = -EPERM;
41682 + goto out_unlock;
41683 + }
41684 +
41685 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41686 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41687 err = notify_change(dentry, &newattrs);
41688 +
41689 +out_unlock:
41690 mutex_unlock(&inode->i_mutex);
41691 mnt_drop_write(file->f_path.mnt);
41692 out_putf:
41693 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
41694 error = mnt_want_write(path.mnt);
41695 if (error)
41696 goto dput_and_out;
41697 +
41698 mutex_lock(&inode->i_mutex);
41699 +
41700 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
41701 + error = -EACCES;
41702 + goto out_unlock;
41703 + }
41704 +
41705 if (mode == (mode_t) -1)
41706 mode = inode->i_mode;
41707 +
41708 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
41709 + error = -EACCES;
41710 + goto out_unlock;
41711 + }
41712 +
41713 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41714 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41715 error = notify_change(path.dentry, &newattrs);
41716 +
41717 +out_unlock:
41718 mutex_unlock(&inode->i_mutex);
41719 mnt_drop_write(path.mnt);
41720 dput_and_out:
41721 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
41722 return sys_fchmodat(AT_FDCWD, filename, mode);
41723 }
41724
41725 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
41726 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
41727 {
41728 struct inode *inode = dentry->d_inode;
41729 int error;
41730 struct iattr newattrs;
41731
41732 + if (!gr_acl_handle_chown(dentry, mnt))
41733 + return -EACCES;
41734 +
41735 newattrs.ia_valid = ATTR_CTIME;
41736 if (user != (uid_t) -1) {
41737 newattrs.ia_valid |= ATTR_UID;
41738 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
41739 error = mnt_want_write(path.mnt);
41740 if (error)
41741 goto out_release;
41742 - error = chown_common(path.dentry, user, group);
41743 + error = chown_common(path.dentry, user, group, path.mnt);
41744 mnt_drop_write(path.mnt);
41745 out_release:
41746 path_put(&path);
41747 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
41748 error = mnt_want_write(path.mnt);
41749 if (error)
41750 goto out_release;
41751 - error = chown_common(path.dentry, user, group);
41752 + error = chown_common(path.dentry, user, group, path.mnt);
41753 mnt_drop_write(path.mnt);
41754 out_release:
41755 path_put(&path);
41756 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
41757 error = mnt_want_write(path.mnt);
41758 if (error)
41759 goto out_release;
41760 - error = chown_common(path.dentry, user, group);
41761 + error = chown_common(path.dentry, user, group, path.mnt);
41762 mnt_drop_write(path.mnt);
41763 out_release:
41764 path_put(&path);
41765 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
41766 goto out_fput;
41767 dentry = file->f_path.dentry;
41768 audit_inode(NULL, dentry);
41769 - error = chown_common(dentry, user, group);
41770 + error = chown_common(dentry, user, group, file->f_path.mnt);
41771 mnt_drop_write(file->f_path.mnt);
41772 out_fput:
41773 fput(file);
41774 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
41775 if (!IS_ERR(tmp)) {
41776 fd = get_unused_fd_flags(flags);
41777 if (fd >= 0) {
41778 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
41779 + struct file *f;
41780 + /* don't allow to be set by userland */
41781 + flags &= ~FMODE_GREXEC;
41782 + f = do_filp_open(dfd, tmp, flags, mode, 0);
41783 if (IS_ERR(f)) {
41784 put_unused_fd(fd);
41785 fd = PTR_ERR(f);
41786 diff -urNp linux-2.6.32.42/fs/partitions/ldm.c linux-2.6.32.42/fs/partitions/ldm.c
41787 --- linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
41788 +++ linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
41789 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
41790 ldm_error ("A VBLK claims to have %d parts.", num);
41791 return false;
41792 }
41793 +
41794 if (rec >= num) {
41795 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
41796 return false;
41797 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
41798 goto found;
41799 }
41800
41801 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
41802 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
41803 if (!f) {
41804 ldm_crit ("Out of memory.");
41805 return false;
41806 diff -urNp linux-2.6.32.42/fs/partitions/mac.c linux-2.6.32.42/fs/partitions/mac.c
41807 --- linux-2.6.32.42/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
41808 +++ linux-2.6.32.42/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
41809 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
41810 return 0; /* not a MacOS disk */
41811 }
41812 blocks_in_map = be32_to_cpu(part->map_count);
41813 + printk(" [mac]");
41814 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
41815 put_dev_sector(sect);
41816 return 0;
41817 }
41818 - printk(" [mac]");
41819 for (slot = 1; slot <= blocks_in_map; ++slot) {
41820 int pos = slot * secsize;
41821 put_dev_sector(sect);
41822 diff -urNp linux-2.6.32.42/fs/pipe.c linux-2.6.32.42/fs/pipe.c
41823 --- linux-2.6.32.42/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
41824 +++ linux-2.6.32.42/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
41825 @@ -401,9 +401,9 @@ redo:
41826 }
41827 if (bufs) /* More to do? */
41828 continue;
41829 - if (!pipe->writers)
41830 + if (!atomic_read(&pipe->writers))
41831 break;
41832 - if (!pipe->waiting_writers) {
41833 + if (!atomic_read(&pipe->waiting_writers)) {
41834 /* syscall merging: Usually we must not sleep
41835 * if O_NONBLOCK is set, or if we got some data.
41836 * But if a writer sleeps in kernel space, then
41837 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
41838 mutex_lock(&inode->i_mutex);
41839 pipe = inode->i_pipe;
41840
41841 - if (!pipe->readers) {
41842 + if (!atomic_read(&pipe->readers)) {
41843 send_sig(SIGPIPE, current, 0);
41844 ret = -EPIPE;
41845 goto out;
41846 @@ -511,7 +511,7 @@ redo1:
41847 for (;;) {
41848 int bufs;
41849
41850 - if (!pipe->readers) {
41851 + if (!atomic_read(&pipe->readers)) {
41852 send_sig(SIGPIPE, current, 0);
41853 if (!ret)
41854 ret = -EPIPE;
41855 @@ -597,9 +597,9 @@ redo2:
41856 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41857 do_wakeup = 0;
41858 }
41859 - pipe->waiting_writers++;
41860 + atomic_inc(&pipe->waiting_writers);
41861 pipe_wait(pipe);
41862 - pipe->waiting_writers--;
41863 + atomic_dec(&pipe->waiting_writers);
41864 }
41865 out:
41866 mutex_unlock(&inode->i_mutex);
41867 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
41868 mask = 0;
41869 if (filp->f_mode & FMODE_READ) {
41870 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
41871 - if (!pipe->writers && filp->f_version != pipe->w_counter)
41872 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
41873 mask |= POLLHUP;
41874 }
41875
41876 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
41877 * Most Unices do not set POLLERR for FIFOs but on Linux they
41878 * behave exactly like pipes for poll().
41879 */
41880 - if (!pipe->readers)
41881 + if (!atomic_read(&pipe->readers))
41882 mask |= POLLERR;
41883 }
41884
41885 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
41886
41887 mutex_lock(&inode->i_mutex);
41888 pipe = inode->i_pipe;
41889 - pipe->readers -= decr;
41890 - pipe->writers -= decw;
41891 + atomic_sub(decr, &pipe->readers);
41892 + atomic_sub(decw, &pipe->writers);
41893
41894 - if (!pipe->readers && !pipe->writers) {
41895 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
41896 free_pipe_info(inode);
41897 } else {
41898 wake_up_interruptible_sync(&pipe->wait);
41899 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
41900
41901 if (inode->i_pipe) {
41902 ret = 0;
41903 - inode->i_pipe->readers++;
41904 + atomic_inc(&inode->i_pipe->readers);
41905 }
41906
41907 mutex_unlock(&inode->i_mutex);
41908 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
41909
41910 if (inode->i_pipe) {
41911 ret = 0;
41912 - inode->i_pipe->writers++;
41913 + atomic_inc(&inode->i_pipe->writers);
41914 }
41915
41916 mutex_unlock(&inode->i_mutex);
41917 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
41918 if (inode->i_pipe) {
41919 ret = 0;
41920 if (filp->f_mode & FMODE_READ)
41921 - inode->i_pipe->readers++;
41922 + atomic_inc(&inode->i_pipe->readers);
41923 if (filp->f_mode & FMODE_WRITE)
41924 - inode->i_pipe->writers++;
41925 + atomic_inc(&inode->i_pipe->writers);
41926 }
41927
41928 mutex_unlock(&inode->i_mutex);
41929 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
41930 inode->i_pipe = NULL;
41931 }
41932
41933 -static struct vfsmount *pipe_mnt __read_mostly;
41934 +struct vfsmount *pipe_mnt __read_mostly;
41935 static int pipefs_delete_dentry(struct dentry *dentry)
41936 {
41937 /*
41938 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
41939 goto fail_iput;
41940 inode->i_pipe = pipe;
41941
41942 - pipe->readers = pipe->writers = 1;
41943 + atomic_set(&pipe->readers, 1);
41944 + atomic_set(&pipe->writers, 1);
41945 inode->i_fop = &rdwr_pipefifo_fops;
41946
41947 /*
41948 diff -urNp linux-2.6.32.42/fs/proc/array.c linux-2.6.32.42/fs/proc/array.c
41949 --- linux-2.6.32.42/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
41950 +++ linux-2.6.32.42/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
41951 @@ -60,6 +60,7 @@
41952 #include <linux/tty.h>
41953 #include <linux/string.h>
41954 #include <linux/mman.h>
41955 +#include <linux/grsecurity.h>
41956 #include <linux/proc_fs.h>
41957 #include <linux/ioport.h>
41958 #include <linux/uaccess.h>
41959 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
41960 p->nivcsw);
41961 }
41962
41963 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41964 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
41965 +{
41966 + if (p->mm)
41967 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
41968 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
41969 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
41970 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
41971 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
41972 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
41973 + else
41974 + seq_printf(m, "PaX:\t-----\n");
41975 +}
41976 +#endif
41977 +
41978 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
41979 struct pid *pid, struct task_struct *task)
41980 {
41981 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
41982 task_cap(m, task);
41983 cpuset_task_status_allowed(m, task);
41984 task_context_switch_counts(m, task);
41985 +
41986 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41987 + task_pax(m, task);
41988 +#endif
41989 +
41990 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
41991 + task_grsec_rbac(m, task);
41992 +#endif
41993 +
41994 return 0;
41995 }
41996
41997 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41998 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41999 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42000 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42001 +#endif
42002 +
42003 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42004 struct pid *pid, struct task_struct *task, int whole)
42005 {
42006 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
42007 cputime_t cutime, cstime, utime, stime;
42008 cputime_t cgtime, gtime;
42009 unsigned long rsslim = 0;
42010 - char tcomm[sizeof(task->comm)];
42011 + char tcomm[sizeof(task->comm)] = { 0 };
42012 unsigned long flags;
42013
42014 + pax_track_stack();
42015 +
42016 state = *get_task_state(task);
42017 vsize = eip = esp = 0;
42018 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42019 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
42020 gtime = task_gtime(task);
42021 }
42022
42023 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42024 + if (PAX_RAND_FLAGS(mm)) {
42025 + eip = 0;
42026 + esp = 0;
42027 + wchan = 0;
42028 + }
42029 +#endif
42030 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42031 + wchan = 0;
42032 + eip =0;
42033 + esp =0;
42034 +#endif
42035 +
42036 /* scale priority and nice values from timeslices to -20..20 */
42037 /* to make it look like a "normal" Unix priority/nice value */
42038 priority = task_prio(task);
42039 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
42040 vsize,
42041 mm ? get_mm_rss(mm) : 0,
42042 rsslim,
42043 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42044 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42045 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42046 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42047 +#else
42048 mm ? (permitted ? mm->start_code : 1) : 0,
42049 mm ? (permitted ? mm->end_code : 1) : 0,
42050 (permitted && mm) ? mm->start_stack : 0,
42051 +#endif
42052 esp,
42053 eip,
42054 /* The signal information here is obsolete.
42055 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
42056
42057 return 0;
42058 }
42059 +
42060 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42061 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42062 +{
42063 + u32 curr_ip = 0;
42064 + unsigned long flags;
42065 +
42066 + if (lock_task_sighand(task, &flags)) {
42067 + curr_ip = task->signal->curr_ip;
42068 + unlock_task_sighand(task, &flags);
42069 + }
42070 +
42071 + return sprintf(buffer, "%pI4\n", &curr_ip);
42072 +}
42073 +#endif
42074 diff -urNp linux-2.6.32.42/fs/proc/base.c linux-2.6.32.42/fs/proc/base.c
42075 --- linux-2.6.32.42/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
42076 +++ linux-2.6.32.42/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
42077 @@ -102,6 +102,22 @@ struct pid_entry {
42078 union proc_op op;
42079 };
42080
42081 +struct getdents_callback {
42082 + struct linux_dirent __user * current_dir;
42083 + struct linux_dirent __user * previous;
42084 + struct file * file;
42085 + int count;
42086 + int error;
42087 +};
42088 +
42089 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42090 + loff_t offset, u64 ino, unsigned int d_type)
42091 +{
42092 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
42093 + buf->error = -EINVAL;
42094 + return 0;
42095 +}
42096 +
42097 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42098 .name = (NAME), \
42099 .len = sizeof(NAME) - 1, \
42100 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
42101 if (task == current)
42102 return 0;
42103
42104 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42105 + return -EPERM;
42106 +
42107 /*
42108 * If current is actively ptrace'ing, and would also be
42109 * permitted to freshly attach with ptrace now, permit it.
42110 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
42111 if (!mm->arg_end)
42112 goto out_mm; /* Shh! No looking before we're done */
42113
42114 + if (gr_acl_handle_procpidmem(task))
42115 + goto out_mm;
42116 +
42117 len = mm->arg_end - mm->arg_start;
42118
42119 if (len > PAGE_SIZE)
42120 @@ -287,12 +309,28 @@ out:
42121 return res;
42122 }
42123
42124 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42125 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42126 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42127 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42128 +#endif
42129 +
42130 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42131 {
42132 int res = 0;
42133 struct mm_struct *mm = get_task_mm(task);
42134 if (mm) {
42135 unsigned int nwords = 0;
42136 +
42137 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42138 + /* allow if we're currently ptracing this task */
42139 + if (PAX_RAND_FLAGS(mm) &&
42140 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42141 + mmput(mm);
42142 + return res;
42143 + }
42144 +#endif
42145 +
42146 do {
42147 nwords += 2;
42148 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42149 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
42150 }
42151
42152
42153 -#ifdef CONFIG_KALLSYMS
42154 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42155 /*
42156 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42157 * Returns the resolved symbol. If that fails, simply return the address.
42158 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
42159 }
42160 #endif /* CONFIG_KALLSYMS */
42161
42162 -#ifdef CONFIG_STACKTRACE
42163 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42164
42165 #define MAX_STACK_TRACE_DEPTH 64
42166
42167 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
42168 return count;
42169 }
42170
42171 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42172 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42173 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42174 {
42175 long nr;
42176 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
42177 /************************************************************************/
42178
42179 /* permission checks */
42180 -static int proc_fd_access_allowed(struct inode *inode)
42181 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42182 {
42183 struct task_struct *task;
42184 int allowed = 0;
42185 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
42186 */
42187 task = get_proc_task(inode);
42188 if (task) {
42189 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42190 + if (log)
42191 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42192 + else
42193 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42194 put_task_struct(task);
42195 }
42196 return allowed;
42197 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
42198 if (!task)
42199 goto out_no_task;
42200
42201 + if (gr_acl_handle_procpidmem(task))
42202 + goto out;
42203 +
42204 if (!ptrace_may_access(task, PTRACE_MODE_READ))
42205 goto out;
42206
42207 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
42208 path_put(&nd->path);
42209
42210 /* Are we allowed to snoop on the tasks file descriptors? */
42211 - if (!proc_fd_access_allowed(inode))
42212 + if (!proc_fd_access_allowed(inode,0))
42213 goto out;
42214
42215 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42216 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
42217 struct path path;
42218
42219 /* Are we allowed to snoop on the tasks file descriptors? */
42220 - if (!proc_fd_access_allowed(inode))
42221 - goto out;
42222 + /* logging this is needed for learning on chromium to work properly,
42223 + but we don't want to flood the logs from 'ps' which does a readlink
42224 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42225 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
42226 + */
42227 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42228 + if (!proc_fd_access_allowed(inode,0))
42229 + goto out;
42230 + } else {
42231 + if (!proc_fd_access_allowed(inode,1))
42232 + goto out;
42233 + }
42234
42235 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42236 if (error)
42237 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
42238 rcu_read_lock();
42239 cred = __task_cred(task);
42240 inode->i_uid = cred->euid;
42241 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42242 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42243 +#else
42244 inode->i_gid = cred->egid;
42245 +#endif
42246 rcu_read_unlock();
42247 }
42248 security_task_to_inode(task, inode);
42249 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42250 struct inode *inode = dentry->d_inode;
42251 struct task_struct *task;
42252 const struct cred *cred;
42253 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42254 + const struct cred *tmpcred = current_cred();
42255 +#endif
42256
42257 generic_fillattr(inode, stat);
42258
42259 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42260 stat->uid = 0;
42261 stat->gid = 0;
42262 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42263 +
42264 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42265 + rcu_read_unlock();
42266 + return -ENOENT;
42267 + }
42268 +
42269 if (task) {
42270 + cred = __task_cred(task);
42271 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42272 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42273 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42274 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42275 +#endif
42276 + ) {
42277 +#endif
42278 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42279 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42280 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42281 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42282 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42283 +#endif
42284 task_dumpable(task)) {
42285 - cred = __task_cred(task);
42286 stat->uid = cred->euid;
42287 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42288 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42289 +#else
42290 stat->gid = cred->egid;
42291 +#endif
42292 }
42293 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42294 + } else {
42295 + rcu_read_unlock();
42296 + return -ENOENT;
42297 + }
42298 +#endif
42299 }
42300 rcu_read_unlock();
42301 return 0;
42302 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42303
42304 if (task) {
42305 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42306 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42307 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42308 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42309 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42310 +#endif
42311 task_dumpable(task)) {
42312 rcu_read_lock();
42313 cred = __task_cred(task);
42314 inode->i_uid = cred->euid;
42315 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42316 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42317 +#else
42318 inode->i_gid = cred->egid;
42319 +#endif
42320 rcu_read_unlock();
42321 } else {
42322 inode->i_uid = 0;
42323 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42324 int fd = proc_fd(inode);
42325
42326 if (task) {
42327 - files = get_files_struct(task);
42328 + if (!gr_acl_handle_procpidmem(task))
42329 + files = get_files_struct(task);
42330 put_task_struct(task);
42331 }
42332 if (files) {
42333 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
42334 static int proc_fd_permission(struct inode *inode, int mask)
42335 {
42336 int rv;
42337 + struct task_struct *task;
42338
42339 rv = generic_permission(inode, mask, NULL);
42340 - if (rv == 0)
42341 - return 0;
42342 +
42343 if (task_pid(current) == proc_pid(inode))
42344 rv = 0;
42345 +
42346 + task = get_proc_task(inode);
42347 + if (task == NULL)
42348 + return rv;
42349 +
42350 + if (gr_acl_handle_procpidmem(task))
42351 + rv = -EACCES;
42352 +
42353 + put_task_struct(task);
42354 +
42355 return rv;
42356 }
42357
42358 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42359 if (!task)
42360 goto out_no_task;
42361
42362 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42363 + goto out;
42364 +
42365 /*
42366 * Yes, it does not scale. And it should not. Don't add
42367 * new entries into /proc/<tgid>/ without very good reasons.
42368 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42369 if (!task)
42370 goto out_no_task;
42371
42372 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42373 + goto out;
42374 +
42375 ret = 0;
42376 i = filp->f_pos;
42377 switch (i) {
42378 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42379 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42380 void *cookie)
42381 {
42382 - char *s = nd_get_link(nd);
42383 + const char *s = nd_get_link(nd);
42384 if (!IS_ERR(s))
42385 __putname(s);
42386 }
42387 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42388 #ifdef CONFIG_SCHED_DEBUG
42389 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42390 #endif
42391 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42392 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42393 INF("syscall", S_IRUSR, proc_pid_syscall),
42394 #endif
42395 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42396 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42397 #ifdef CONFIG_SECURITY
42398 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42399 #endif
42400 -#ifdef CONFIG_KALLSYMS
42401 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42402 INF("wchan", S_IRUGO, proc_pid_wchan),
42403 #endif
42404 -#ifdef CONFIG_STACKTRACE
42405 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42406 ONE("stack", S_IRUSR, proc_pid_stack),
42407 #endif
42408 #ifdef CONFIG_SCHEDSTATS
42409 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42410 #ifdef CONFIG_TASK_IO_ACCOUNTING
42411 INF("io", S_IRUGO, proc_tgid_io_accounting),
42412 #endif
42413 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42414 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42415 +#endif
42416 };
42417
42418 static int proc_tgid_base_readdir(struct file * filp,
42419 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42420 if (!inode)
42421 goto out;
42422
42423 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42424 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42425 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42426 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42427 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42428 +#else
42429 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42430 +#endif
42431 inode->i_op = &proc_tgid_base_inode_operations;
42432 inode->i_fop = &proc_tgid_base_operations;
42433 inode->i_flags|=S_IMMUTABLE;
42434 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42435 if (!task)
42436 goto out;
42437
42438 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42439 + goto out_put_task;
42440 +
42441 result = proc_pid_instantiate(dir, dentry, task, NULL);
42442 +out_put_task:
42443 put_task_struct(task);
42444 out:
42445 return result;
42446 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42447 {
42448 unsigned int nr;
42449 struct task_struct *reaper;
42450 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42451 + const struct cred *tmpcred = current_cred();
42452 + const struct cred *itercred;
42453 +#endif
42454 + filldir_t __filldir = filldir;
42455 struct tgid_iter iter;
42456 struct pid_namespace *ns;
42457
42458 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
42459 for (iter = next_tgid(ns, iter);
42460 iter.task;
42461 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42462 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42463 + rcu_read_lock();
42464 + itercred = __task_cred(iter.task);
42465 +#endif
42466 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42467 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42468 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42469 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42470 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42471 +#endif
42472 + )
42473 +#endif
42474 + )
42475 + __filldir = &gr_fake_filldir;
42476 + else
42477 + __filldir = filldir;
42478 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42479 + rcu_read_unlock();
42480 +#endif
42481 filp->f_pos = iter.tgid + TGID_OFFSET;
42482 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42483 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42484 put_task_struct(iter.task);
42485 goto out;
42486 }
42487 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
42488 #ifdef CONFIG_SCHED_DEBUG
42489 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42490 #endif
42491 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42492 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42493 INF("syscall", S_IRUSR, proc_pid_syscall),
42494 #endif
42495 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42496 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
42497 #ifdef CONFIG_SECURITY
42498 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42499 #endif
42500 -#ifdef CONFIG_KALLSYMS
42501 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42502 INF("wchan", S_IRUGO, proc_pid_wchan),
42503 #endif
42504 -#ifdef CONFIG_STACKTRACE
42505 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42506 ONE("stack", S_IRUSR, proc_pid_stack),
42507 #endif
42508 #ifdef CONFIG_SCHEDSTATS
42509 diff -urNp linux-2.6.32.42/fs/proc/cmdline.c linux-2.6.32.42/fs/proc/cmdline.c
42510 --- linux-2.6.32.42/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42511 +++ linux-2.6.32.42/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42512 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42513
42514 static int __init proc_cmdline_init(void)
42515 {
42516 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42517 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42518 +#else
42519 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42520 +#endif
42521 return 0;
42522 }
42523 module_init(proc_cmdline_init);
42524 diff -urNp linux-2.6.32.42/fs/proc/devices.c linux-2.6.32.42/fs/proc/devices.c
42525 --- linux-2.6.32.42/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42526 +++ linux-2.6.32.42/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42527 @@ -64,7 +64,11 @@ static const struct file_operations proc
42528
42529 static int __init proc_devices_init(void)
42530 {
42531 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42532 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42533 +#else
42534 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42535 +#endif
42536 return 0;
42537 }
42538 module_init(proc_devices_init);
42539 diff -urNp linux-2.6.32.42/fs/proc/inode.c linux-2.6.32.42/fs/proc/inode.c
42540 --- linux-2.6.32.42/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42541 +++ linux-2.6.32.42/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42542 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42543 if (de->mode) {
42544 inode->i_mode = de->mode;
42545 inode->i_uid = de->uid;
42546 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42547 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42548 +#else
42549 inode->i_gid = de->gid;
42550 +#endif
42551 }
42552 if (de->size)
42553 inode->i_size = de->size;
42554 diff -urNp linux-2.6.32.42/fs/proc/internal.h linux-2.6.32.42/fs/proc/internal.h
42555 --- linux-2.6.32.42/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42556 +++ linux-2.6.32.42/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42557 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42558 struct pid *pid, struct task_struct *task);
42559 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42560 struct pid *pid, struct task_struct *task);
42561 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42562 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42563 +#endif
42564 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42565
42566 extern const struct file_operations proc_maps_operations;
42567 diff -urNp linux-2.6.32.42/fs/proc/Kconfig linux-2.6.32.42/fs/proc/Kconfig
42568 --- linux-2.6.32.42/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42569 +++ linux-2.6.32.42/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42570 @@ -30,12 +30,12 @@ config PROC_FS
42571
42572 config PROC_KCORE
42573 bool "/proc/kcore support" if !ARM
42574 - depends on PROC_FS && MMU
42575 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42576
42577 config PROC_VMCORE
42578 bool "/proc/vmcore support (EXPERIMENTAL)"
42579 - depends on PROC_FS && CRASH_DUMP
42580 - default y
42581 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42582 + default n
42583 help
42584 Exports the dump image of crashed kernel in ELF format.
42585
42586 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42587 limited in memory.
42588
42589 config PROC_PAGE_MONITOR
42590 - default y
42591 - depends on PROC_FS && MMU
42592 + default n
42593 + depends on PROC_FS && MMU && !GRKERNSEC
42594 bool "Enable /proc page monitoring" if EMBEDDED
42595 help
42596 Various /proc files exist to monitor process memory utilization:
42597 diff -urNp linux-2.6.32.42/fs/proc/kcore.c linux-2.6.32.42/fs/proc/kcore.c
42598 --- linux-2.6.32.42/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42599 +++ linux-2.6.32.42/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42600 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42601 off_t offset = 0;
42602 struct kcore_list *m;
42603
42604 + pax_track_stack();
42605 +
42606 /* setup ELF header */
42607 elf = (struct elfhdr *) bufp;
42608 bufp += sizeof(struct elfhdr);
42609 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42610 * the addresses in the elf_phdr on our list.
42611 */
42612 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42613 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42614 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42615 + if (tsz > buflen)
42616 tsz = buflen;
42617 -
42618 +
42619 while (buflen) {
42620 struct kcore_list *m;
42621
42622 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42623 kfree(elf_buf);
42624 } else {
42625 if (kern_addr_valid(start)) {
42626 - unsigned long n;
42627 + char *elf_buf;
42628 + mm_segment_t oldfs;
42629
42630 - n = copy_to_user(buffer, (char *)start, tsz);
42631 - /*
42632 - * We cannot distingush between fault on source
42633 - * and fault on destination. When this happens
42634 - * we clear too and hope it will trigger the
42635 - * EFAULT again.
42636 - */
42637 - if (n) {
42638 - if (clear_user(buffer + tsz - n,
42639 - n))
42640 + elf_buf = kmalloc(tsz, GFP_KERNEL);
42641 + if (!elf_buf)
42642 + return -ENOMEM;
42643 + oldfs = get_fs();
42644 + set_fs(KERNEL_DS);
42645 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42646 + set_fs(oldfs);
42647 + if (copy_to_user(buffer, elf_buf, tsz)) {
42648 + kfree(elf_buf);
42649 return -EFAULT;
42650 + }
42651 }
42652 + set_fs(oldfs);
42653 + kfree(elf_buf);
42654 } else {
42655 if (clear_user(buffer, tsz))
42656 return -EFAULT;
42657 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
42658
42659 static int open_kcore(struct inode *inode, struct file *filp)
42660 {
42661 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42662 + return -EPERM;
42663 +#endif
42664 if (!capable(CAP_SYS_RAWIO))
42665 return -EPERM;
42666 if (kcore_need_update)
42667 diff -urNp linux-2.6.32.42/fs/proc/meminfo.c linux-2.6.32.42/fs/proc/meminfo.c
42668 --- linux-2.6.32.42/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
42669 +++ linux-2.6.32.42/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
42670 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42671 unsigned long pages[NR_LRU_LISTS];
42672 int lru;
42673
42674 + pax_track_stack();
42675 +
42676 /*
42677 * display in kilobytes.
42678 */
42679 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
42680 vmi.used >> 10,
42681 vmi.largest_chunk >> 10
42682 #ifdef CONFIG_MEMORY_FAILURE
42683 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42684 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42685 #endif
42686 );
42687
42688 diff -urNp linux-2.6.32.42/fs/proc/nommu.c linux-2.6.32.42/fs/proc/nommu.c
42689 --- linux-2.6.32.42/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
42690 +++ linux-2.6.32.42/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
42691 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
42692 if (len < 1)
42693 len = 1;
42694 seq_printf(m, "%*c", len, ' ');
42695 - seq_path(m, &file->f_path, "");
42696 + seq_path(m, &file->f_path, "\n\\");
42697 }
42698
42699 seq_putc(m, '\n');
42700 diff -urNp linux-2.6.32.42/fs/proc/proc_net.c linux-2.6.32.42/fs/proc/proc_net.c
42701 --- linux-2.6.32.42/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
42702 +++ linux-2.6.32.42/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
42703 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
42704 struct task_struct *task;
42705 struct nsproxy *ns;
42706 struct net *net = NULL;
42707 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42708 + const struct cred *cred = current_cred();
42709 +#endif
42710 +
42711 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42712 + if (cred->fsuid)
42713 + return net;
42714 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42715 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42716 + return net;
42717 +#endif
42718
42719 rcu_read_lock();
42720 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42721 diff -urNp linux-2.6.32.42/fs/proc/proc_sysctl.c linux-2.6.32.42/fs/proc/proc_sysctl.c
42722 --- linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
42723 +++ linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
42724 @@ -7,6 +7,8 @@
42725 #include <linux/security.h>
42726 #include "internal.h"
42727
42728 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
42729 +
42730 static const struct dentry_operations proc_sys_dentry_operations;
42731 static const struct file_operations proc_sys_file_operations;
42732 static const struct inode_operations proc_sys_inode_operations;
42733 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
42734 if (!p)
42735 goto out;
42736
42737 + if (gr_handle_sysctl(p, MAY_EXEC))
42738 + goto out;
42739 +
42740 err = ERR_PTR(-ENOMEM);
42741 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
42742 if (h)
42743 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
42744 if (*pos < file->f_pos)
42745 continue;
42746
42747 + if (gr_handle_sysctl(table, 0))
42748 + continue;
42749 +
42750 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
42751 if (res)
42752 return res;
42753 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
42754 if (IS_ERR(head))
42755 return PTR_ERR(head);
42756
42757 + if (table && gr_handle_sysctl(table, MAY_EXEC))
42758 + return -ENOENT;
42759 +
42760 generic_fillattr(inode, stat);
42761 if (table)
42762 stat->mode = (stat->mode & S_IFMT) | table->mode;
42763 diff -urNp linux-2.6.32.42/fs/proc/root.c linux-2.6.32.42/fs/proc/root.c
42764 --- linux-2.6.32.42/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
42765 +++ linux-2.6.32.42/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
42766 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
42767 #ifdef CONFIG_PROC_DEVICETREE
42768 proc_device_tree_init();
42769 #endif
42770 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42771 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42772 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
42773 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42774 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
42775 +#endif
42776 +#else
42777 proc_mkdir("bus", NULL);
42778 +#endif
42779 proc_sys_init();
42780 }
42781
42782 diff -urNp linux-2.6.32.42/fs/proc/task_mmu.c linux-2.6.32.42/fs/proc/task_mmu.c
42783 --- linux-2.6.32.42/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
42784 +++ linux-2.6.32.42/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
42785 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
42786 "VmStk:\t%8lu kB\n"
42787 "VmExe:\t%8lu kB\n"
42788 "VmLib:\t%8lu kB\n"
42789 - "VmPTE:\t%8lu kB\n",
42790 - hiwater_vm << (PAGE_SHIFT-10),
42791 + "VmPTE:\t%8lu kB\n"
42792 +
42793 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42794 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
42795 +#endif
42796 +
42797 + ,hiwater_vm << (PAGE_SHIFT-10),
42798 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
42799 mm->locked_vm << (PAGE_SHIFT-10),
42800 hiwater_rss << (PAGE_SHIFT-10),
42801 total_rss << (PAGE_SHIFT-10),
42802 data << (PAGE_SHIFT-10),
42803 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
42804 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
42805 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
42806 +
42807 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42808 + , mm->context.user_cs_base, mm->context.user_cs_limit
42809 +#endif
42810 +
42811 + );
42812 }
42813
42814 unsigned long task_vsize(struct mm_struct *mm)
42815 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
42816 struct proc_maps_private *priv = m->private;
42817 struct vm_area_struct *vma = v;
42818
42819 - vma_stop(priv, vma);
42820 + if (!IS_ERR(vma))
42821 + vma_stop(priv, vma);
42822 if (priv->task)
42823 put_task_struct(priv->task);
42824 }
42825 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
42826 return ret;
42827 }
42828
42829 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42830 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42831 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42832 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42833 +#endif
42834 +
42835 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
42836 {
42837 struct mm_struct *mm = vma->vm_mm;
42838 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
42839 int flags = vma->vm_flags;
42840 unsigned long ino = 0;
42841 unsigned long long pgoff = 0;
42842 - unsigned long start;
42843 dev_t dev = 0;
42844 int len;
42845
42846 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
42847 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
42848 }
42849
42850 - /* We don't show the stack guard page in /proc/maps */
42851 - start = vma->vm_start;
42852 - if (vma->vm_flags & VM_GROWSDOWN)
42853 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
42854 - start += PAGE_SIZE;
42855 -
42856 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
42857 - start,
42858 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42859 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
42860 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
42861 +#else
42862 + vma->vm_start,
42863 vma->vm_end,
42864 +#endif
42865 flags & VM_READ ? 'r' : '-',
42866 flags & VM_WRITE ? 'w' : '-',
42867 flags & VM_EXEC ? 'x' : '-',
42868 flags & VM_MAYSHARE ? 's' : 'p',
42869 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42870 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
42871 +#else
42872 pgoff,
42873 +#endif
42874 MAJOR(dev), MINOR(dev), ino, &len);
42875
42876 /*
42877 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
42878 */
42879 if (file) {
42880 pad_len_spaces(m, len);
42881 - seq_path(m, &file->f_path, "\n");
42882 + seq_path(m, &file->f_path, "\n\\");
42883 } else {
42884 const char *name = arch_vma_name(vma);
42885 if (!name) {
42886 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
42887 if (vma->vm_start <= mm->brk &&
42888 vma->vm_end >= mm->start_brk) {
42889 name = "[heap]";
42890 - } else if (vma->vm_start <= mm->start_stack &&
42891 - vma->vm_end >= mm->start_stack) {
42892 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
42893 + (vma->vm_start <= mm->start_stack &&
42894 + vma->vm_end >= mm->start_stack)) {
42895 name = "[stack]";
42896 }
42897 } else {
42898 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
42899 };
42900
42901 memset(&mss, 0, sizeof mss);
42902 - mss.vma = vma;
42903 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42904 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42905 +
42906 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42907 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
42908 +#endif
42909 + mss.vma = vma;
42910 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42911 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42912 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42913 + }
42914 +#endif
42915
42916 show_map_vma(m, vma);
42917
42918 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
42919 "Swap: %8lu kB\n"
42920 "KernelPageSize: %8lu kB\n"
42921 "MMUPageSize: %8lu kB\n",
42922 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42923 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
42924 +#else
42925 (vma->vm_end - vma->vm_start) >> 10,
42926 +#endif
42927 mss.resident >> 10,
42928 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
42929 mss.shared_clean >> 10,
42930 diff -urNp linux-2.6.32.42/fs/proc/task_nommu.c linux-2.6.32.42/fs/proc/task_nommu.c
42931 --- linux-2.6.32.42/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
42932 +++ linux-2.6.32.42/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
42933 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
42934 else
42935 bytes += kobjsize(mm);
42936
42937 - if (current->fs && current->fs->users > 1)
42938 + if (current->fs && atomic_read(&current->fs->users) > 1)
42939 sbytes += kobjsize(current->fs);
42940 else
42941 bytes += kobjsize(current->fs);
42942 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
42943 if (len < 1)
42944 len = 1;
42945 seq_printf(m, "%*c", len, ' ');
42946 - seq_path(m, &file->f_path, "");
42947 + seq_path(m, &file->f_path, "\n\\");
42948 }
42949
42950 seq_putc(m, '\n');
42951 diff -urNp linux-2.6.32.42/fs/readdir.c linux-2.6.32.42/fs/readdir.c
42952 --- linux-2.6.32.42/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
42953 +++ linux-2.6.32.42/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
42954 @@ -16,6 +16,7 @@
42955 #include <linux/security.h>
42956 #include <linux/syscalls.h>
42957 #include <linux/unistd.h>
42958 +#include <linux/namei.h>
42959
42960 #include <asm/uaccess.h>
42961
42962 @@ -67,6 +68,7 @@ struct old_linux_dirent {
42963
42964 struct readdir_callback {
42965 struct old_linux_dirent __user * dirent;
42966 + struct file * file;
42967 int result;
42968 };
42969
42970 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
42971 buf->result = -EOVERFLOW;
42972 return -EOVERFLOW;
42973 }
42974 +
42975 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42976 + return 0;
42977 +
42978 buf->result++;
42979 dirent = buf->dirent;
42980 if (!access_ok(VERIFY_WRITE, dirent,
42981 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
42982
42983 buf.result = 0;
42984 buf.dirent = dirent;
42985 + buf.file = file;
42986
42987 error = vfs_readdir(file, fillonedir, &buf);
42988 if (buf.result)
42989 @@ -142,6 +149,7 @@ struct linux_dirent {
42990 struct getdents_callback {
42991 struct linux_dirent __user * current_dir;
42992 struct linux_dirent __user * previous;
42993 + struct file * file;
42994 int count;
42995 int error;
42996 };
42997 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
42998 buf->error = -EOVERFLOW;
42999 return -EOVERFLOW;
43000 }
43001 +
43002 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43003 + return 0;
43004 +
43005 dirent = buf->previous;
43006 if (dirent) {
43007 if (__put_user(offset, &dirent->d_off))
43008 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43009 buf.previous = NULL;
43010 buf.count = count;
43011 buf.error = 0;
43012 + buf.file = file;
43013
43014 error = vfs_readdir(file, filldir, &buf);
43015 if (error >= 0)
43016 @@ -228,6 +241,7 @@ out:
43017 struct getdents_callback64 {
43018 struct linux_dirent64 __user * current_dir;
43019 struct linux_dirent64 __user * previous;
43020 + struct file *file;
43021 int count;
43022 int error;
43023 };
43024 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
43025 buf->error = -EINVAL; /* only used if we fail.. */
43026 if (reclen > buf->count)
43027 return -EINVAL;
43028 +
43029 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43030 + return 0;
43031 +
43032 dirent = buf->previous;
43033 if (dirent) {
43034 if (__put_user(offset, &dirent->d_off))
43035 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43036
43037 buf.current_dir = dirent;
43038 buf.previous = NULL;
43039 + buf.file = file;
43040 buf.count = count;
43041 buf.error = 0;
43042
43043 diff -urNp linux-2.6.32.42/fs/reiserfs/dir.c linux-2.6.32.42/fs/reiserfs/dir.c
43044 --- linux-2.6.32.42/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43045 +++ linux-2.6.32.42/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43046 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43047 struct reiserfs_dir_entry de;
43048 int ret = 0;
43049
43050 + pax_track_stack();
43051 +
43052 reiserfs_write_lock(inode->i_sb);
43053
43054 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43055 diff -urNp linux-2.6.32.42/fs/reiserfs/do_balan.c linux-2.6.32.42/fs/reiserfs/do_balan.c
43056 --- linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
43057 +++ linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
43058 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
43059 return;
43060 }
43061
43062 - atomic_inc(&(fs_generation(tb->tb_sb)));
43063 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43064 do_balance_starts(tb);
43065
43066 /* balance leaf returns 0 except if combining L R and S into
43067 diff -urNp linux-2.6.32.42/fs/reiserfs/item_ops.c linux-2.6.32.42/fs/reiserfs/item_ops.c
43068 --- linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
43069 +++ linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
43070 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
43071 vi->vi_index, vi->vi_type, vi->vi_ih);
43072 }
43073
43074 -static struct item_operations stat_data_ops = {
43075 +static const struct item_operations stat_data_ops = {
43076 .bytes_number = sd_bytes_number,
43077 .decrement_key = sd_decrement_key,
43078 .is_left_mergeable = sd_is_left_mergeable,
43079 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
43080 vi->vi_index, vi->vi_type, vi->vi_ih);
43081 }
43082
43083 -static struct item_operations direct_ops = {
43084 +static const struct item_operations direct_ops = {
43085 .bytes_number = direct_bytes_number,
43086 .decrement_key = direct_decrement_key,
43087 .is_left_mergeable = direct_is_left_mergeable,
43088 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
43089 vi->vi_index, vi->vi_type, vi->vi_ih);
43090 }
43091
43092 -static struct item_operations indirect_ops = {
43093 +static const struct item_operations indirect_ops = {
43094 .bytes_number = indirect_bytes_number,
43095 .decrement_key = indirect_decrement_key,
43096 .is_left_mergeable = indirect_is_left_mergeable,
43097 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
43098 printk("\n");
43099 }
43100
43101 -static struct item_operations direntry_ops = {
43102 +static const struct item_operations direntry_ops = {
43103 .bytes_number = direntry_bytes_number,
43104 .decrement_key = direntry_decrement_key,
43105 .is_left_mergeable = direntry_is_left_mergeable,
43106 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
43107 "Invalid item type observed, run fsck ASAP");
43108 }
43109
43110 -static struct item_operations errcatch_ops = {
43111 +static const struct item_operations errcatch_ops = {
43112 errcatch_bytes_number,
43113 errcatch_decrement_key,
43114 errcatch_is_left_mergeable,
43115 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
43116 #error Item types must use disk-format assigned values.
43117 #endif
43118
43119 -struct item_operations *item_ops[TYPE_ANY + 1] = {
43120 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
43121 &stat_data_ops,
43122 &indirect_ops,
43123 &direct_ops,
43124 diff -urNp linux-2.6.32.42/fs/reiserfs/journal.c linux-2.6.32.42/fs/reiserfs/journal.c
43125 --- linux-2.6.32.42/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
43126 +++ linux-2.6.32.42/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
43127 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
43128 struct buffer_head *bh;
43129 int i, j;
43130
43131 + pax_track_stack();
43132 +
43133 bh = __getblk(dev, block, bufsize);
43134 if (buffer_uptodate(bh))
43135 return (bh);
43136 diff -urNp linux-2.6.32.42/fs/reiserfs/namei.c linux-2.6.32.42/fs/reiserfs/namei.c
43137 --- linux-2.6.32.42/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
43138 +++ linux-2.6.32.42/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
43139 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
43140 unsigned long savelink = 1;
43141 struct timespec ctime;
43142
43143 + pax_track_stack();
43144 +
43145 /* three balancings: (1) old name removal, (2) new name insertion
43146 and (3) maybe "save" link insertion
43147 stat data updates: (1) old directory,
43148 diff -urNp linux-2.6.32.42/fs/reiserfs/procfs.c linux-2.6.32.42/fs/reiserfs/procfs.c
43149 --- linux-2.6.32.42/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
43150 +++ linux-2.6.32.42/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
43151 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
43152 "SMALL_TAILS " : "NO_TAILS ",
43153 replay_only(sb) ? "REPLAY_ONLY " : "",
43154 convert_reiserfs(sb) ? "CONV " : "",
43155 - atomic_read(&r->s_generation_counter),
43156 + atomic_read_unchecked(&r->s_generation_counter),
43157 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43158 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43159 SF(s_good_search_by_key_reada), SF(s_bmaps),
43160 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
43161 struct journal_params *jp = &rs->s_v1.s_journal;
43162 char b[BDEVNAME_SIZE];
43163
43164 + pax_track_stack();
43165 +
43166 seq_printf(m, /* on-disk fields */
43167 "jp_journal_1st_block: \t%i\n"
43168 "jp_journal_dev: \t%s[%x]\n"
43169 diff -urNp linux-2.6.32.42/fs/reiserfs/stree.c linux-2.6.32.42/fs/reiserfs/stree.c
43170 --- linux-2.6.32.42/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
43171 +++ linux-2.6.32.42/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
43172 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
43173 int iter = 0;
43174 #endif
43175
43176 + pax_track_stack();
43177 +
43178 BUG_ON(!th->t_trans_id);
43179
43180 init_tb_struct(th, &s_del_balance, sb, path,
43181 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
43182 int retval;
43183 int quota_cut_bytes = 0;
43184
43185 + pax_track_stack();
43186 +
43187 BUG_ON(!th->t_trans_id);
43188
43189 le_key2cpu_key(&cpu_key, key);
43190 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
43191 int quota_cut_bytes;
43192 loff_t tail_pos = 0;
43193
43194 + pax_track_stack();
43195 +
43196 BUG_ON(!th->t_trans_id);
43197
43198 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43199 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
43200 int retval;
43201 int fs_gen;
43202
43203 + pax_track_stack();
43204 +
43205 BUG_ON(!th->t_trans_id);
43206
43207 fs_gen = get_generation(inode->i_sb);
43208 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
43209 int fs_gen = 0;
43210 int quota_bytes = 0;
43211
43212 + pax_track_stack();
43213 +
43214 BUG_ON(!th->t_trans_id);
43215
43216 if (inode) { /* Do we count quotas for item? */
43217 diff -urNp linux-2.6.32.42/fs/reiserfs/super.c linux-2.6.32.42/fs/reiserfs/super.c
43218 --- linux-2.6.32.42/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
43219 +++ linux-2.6.32.42/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
43220 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
43221 {.option_name = NULL}
43222 };
43223
43224 + pax_track_stack();
43225 +
43226 *blocks = 0;
43227 if (!options || !*options)
43228 /* use default configuration: create tails, journaling on, no
43229 diff -urNp linux-2.6.32.42/fs/select.c linux-2.6.32.42/fs/select.c
43230 --- linux-2.6.32.42/fs/select.c 2011-03-27 14:31:47.000000000 -0400
43231 +++ linux-2.6.32.42/fs/select.c 2011-05-16 21:46:57.000000000 -0400
43232 @@ -20,6 +20,7 @@
43233 #include <linux/module.h>
43234 #include <linux/slab.h>
43235 #include <linux/poll.h>
43236 +#include <linux/security.h>
43237 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43238 #include <linux/file.h>
43239 #include <linux/fdtable.h>
43240 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43241 int retval, i, timed_out = 0;
43242 unsigned long slack = 0;
43243
43244 + pax_track_stack();
43245 +
43246 rcu_read_lock();
43247 retval = max_select_fd(n, fds);
43248 rcu_read_unlock();
43249 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43250 /* Allocate small arguments on the stack to save memory and be faster */
43251 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43252
43253 + pax_track_stack();
43254 +
43255 ret = -EINVAL;
43256 if (n < 0)
43257 goto out_nofds;
43258 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43259 struct poll_list *walk = head;
43260 unsigned long todo = nfds;
43261
43262 + pax_track_stack();
43263 +
43264 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43265 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43266 return -EINVAL;
43267
43268 diff -urNp linux-2.6.32.42/fs/seq_file.c linux-2.6.32.42/fs/seq_file.c
43269 --- linux-2.6.32.42/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43270 +++ linux-2.6.32.42/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43271 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43272 return 0;
43273 }
43274 if (!m->buf) {
43275 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43276 + m->size = PAGE_SIZE;
43277 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43278 if (!m->buf)
43279 return -ENOMEM;
43280 }
43281 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43282 Eoverflow:
43283 m->op->stop(m, p);
43284 kfree(m->buf);
43285 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43286 + m->size <<= 1;
43287 + m->buf = kmalloc(m->size, GFP_KERNEL);
43288 return !m->buf ? -ENOMEM : -EAGAIN;
43289 }
43290
43291 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43292 m->version = file->f_version;
43293 /* grab buffer if we didn't have one */
43294 if (!m->buf) {
43295 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43296 + m->size = PAGE_SIZE;
43297 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43298 if (!m->buf)
43299 goto Enomem;
43300 }
43301 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43302 goto Fill;
43303 m->op->stop(m, p);
43304 kfree(m->buf);
43305 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43306 + m->size <<= 1;
43307 + m->buf = kmalloc(m->size, GFP_KERNEL);
43308 if (!m->buf)
43309 goto Enomem;
43310 m->count = 0;
43311 diff -urNp linux-2.6.32.42/fs/smbfs/symlink.c linux-2.6.32.42/fs/smbfs/symlink.c
43312 --- linux-2.6.32.42/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43313 +++ linux-2.6.32.42/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43314 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43315
43316 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43317 {
43318 - char *s = nd_get_link(nd);
43319 + const char *s = nd_get_link(nd);
43320 if (!IS_ERR(s))
43321 __putname(s);
43322 }
43323 diff -urNp linux-2.6.32.42/fs/splice.c linux-2.6.32.42/fs/splice.c
43324 --- linux-2.6.32.42/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43325 +++ linux-2.6.32.42/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43326 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43327 pipe_lock(pipe);
43328
43329 for (;;) {
43330 - if (!pipe->readers) {
43331 + if (!atomic_read(&pipe->readers)) {
43332 send_sig(SIGPIPE, current, 0);
43333 if (!ret)
43334 ret = -EPIPE;
43335 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43336 do_wakeup = 0;
43337 }
43338
43339 - pipe->waiting_writers++;
43340 + atomic_inc(&pipe->waiting_writers);
43341 pipe_wait(pipe);
43342 - pipe->waiting_writers--;
43343 + atomic_dec(&pipe->waiting_writers);
43344 }
43345
43346 pipe_unlock(pipe);
43347 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43348 .spd_release = spd_release_page,
43349 };
43350
43351 + pax_track_stack();
43352 +
43353 index = *ppos >> PAGE_CACHE_SHIFT;
43354 loff = *ppos & ~PAGE_CACHE_MASK;
43355 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43356 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43357 old_fs = get_fs();
43358 set_fs(get_ds());
43359 /* The cast to a user pointer is valid due to the set_fs() */
43360 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43361 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43362 set_fs(old_fs);
43363
43364 return res;
43365 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43366 old_fs = get_fs();
43367 set_fs(get_ds());
43368 /* The cast to a user pointer is valid due to the set_fs() */
43369 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43370 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43371 set_fs(old_fs);
43372
43373 return res;
43374 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43375 .spd_release = spd_release_page,
43376 };
43377
43378 + pax_track_stack();
43379 +
43380 index = *ppos >> PAGE_CACHE_SHIFT;
43381 offset = *ppos & ~PAGE_CACHE_MASK;
43382 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43383 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43384 goto err;
43385
43386 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43387 - vec[i].iov_base = (void __user *) page_address(page);
43388 + vec[i].iov_base = (__force void __user *) page_address(page);
43389 vec[i].iov_len = this_len;
43390 pages[i] = page;
43391 spd.nr_pages++;
43392 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43393 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43394 {
43395 while (!pipe->nrbufs) {
43396 - if (!pipe->writers)
43397 + if (!atomic_read(&pipe->writers))
43398 return 0;
43399
43400 - if (!pipe->waiting_writers && sd->num_spliced)
43401 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43402 return 0;
43403
43404 if (sd->flags & SPLICE_F_NONBLOCK)
43405 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43406 * out of the pipe right after the splice_to_pipe(). So set
43407 * PIPE_READERS appropriately.
43408 */
43409 - pipe->readers = 1;
43410 + atomic_set(&pipe->readers, 1);
43411
43412 current->splice_pipe = pipe;
43413 }
43414 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43415 .spd_release = spd_release_page,
43416 };
43417
43418 + pax_track_stack();
43419 +
43420 pipe = pipe_info(file->f_path.dentry->d_inode);
43421 if (!pipe)
43422 return -EBADF;
43423 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43424 ret = -ERESTARTSYS;
43425 break;
43426 }
43427 - if (!pipe->writers)
43428 + if (!atomic_read(&pipe->writers))
43429 break;
43430 - if (!pipe->waiting_writers) {
43431 + if (!atomic_read(&pipe->waiting_writers)) {
43432 if (flags & SPLICE_F_NONBLOCK) {
43433 ret = -EAGAIN;
43434 break;
43435 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43436 pipe_lock(pipe);
43437
43438 while (pipe->nrbufs >= PIPE_BUFFERS) {
43439 - if (!pipe->readers) {
43440 + if (!atomic_read(&pipe->readers)) {
43441 send_sig(SIGPIPE, current, 0);
43442 ret = -EPIPE;
43443 break;
43444 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43445 ret = -ERESTARTSYS;
43446 break;
43447 }
43448 - pipe->waiting_writers++;
43449 + atomic_inc(&pipe->waiting_writers);
43450 pipe_wait(pipe);
43451 - pipe->waiting_writers--;
43452 + atomic_dec(&pipe->waiting_writers);
43453 }
43454
43455 pipe_unlock(pipe);
43456 @@ -1785,14 +1791,14 @@ retry:
43457 pipe_double_lock(ipipe, opipe);
43458
43459 do {
43460 - if (!opipe->readers) {
43461 + if (!atomic_read(&opipe->readers)) {
43462 send_sig(SIGPIPE, current, 0);
43463 if (!ret)
43464 ret = -EPIPE;
43465 break;
43466 }
43467
43468 - if (!ipipe->nrbufs && !ipipe->writers)
43469 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43470 break;
43471
43472 /*
43473 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43474 pipe_double_lock(ipipe, opipe);
43475
43476 do {
43477 - if (!opipe->readers) {
43478 + if (!atomic_read(&opipe->readers)) {
43479 send_sig(SIGPIPE, current, 0);
43480 if (!ret)
43481 ret = -EPIPE;
43482 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43483 * return EAGAIN if we have the potential of some data in the
43484 * future, otherwise just return 0
43485 */
43486 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43487 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43488 ret = -EAGAIN;
43489
43490 pipe_unlock(ipipe);
43491 diff -urNp linux-2.6.32.42/fs/sysfs/file.c linux-2.6.32.42/fs/sysfs/file.c
43492 --- linux-2.6.32.42/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43493 +++ linux-2.6.32.42/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43494 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43495
43496 struct sysfs_open_dirent {
43497 atomic_t refcnt;
43498 - atomic_t event;
43499 + atomic_unchecked_t event;
43500 wait_queue_head_t poll;
43501 struct list_head buffers; /* goes through sysfs_buffer.list */
43502 };
43503 @@ -53,7 +53,7 @@ struct sysfs_buffer {
43504 size_t count;
43505 loff_t pos;
43506 char * page;
43507 - struct sysfs_ops * ops;
43508 + const struct sysfs_ops * ops;
43509 struct mutex mutex;
43510 int needs_read_fill;
43511 int event;
43512 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43513 {
43514 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43515 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43516 - struct sysfs_ops * ops = buffer->ops;
43517 + const struct sysfs_ops * ops = buffer->ops;
43518 int ret = 0;
43519 ssize_t count;
43520
43521 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43522 if (!sysfs_get_active_two(attr_sd))
43523 return -ENODEV;
43524
43525 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43526 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43527 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43528
43529 sysfs_put_active_two(attr_sd);
43530 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43531 {
43532 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43533 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43534 - struct sysfs_ops * ops = buffer->ops;
43535 + const struct sysfs_ops * ops = buffer->ops;
43536 int rc;
43537
43538 /* need attr_sd for attr and ops, its parent for kobj */
43539 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43540 return -ENOMEM;
43541
43542 atomic_set(&new_od->refcnt, 0);
43543 - atomic_set(&new_od->event, 1);
43544 + atomic_set_unchecked(&new_od->event, 1);
43545 init_waitqueue_head(&new_od->poll);
43546 INIT_LIST_HEAD(&new_od->buffers);
43547 goto retry;
43548 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43549 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43550 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43551 struct sysfs_buffer *buffer;
43552 - struct sysfs_ops *ops;
43553 + const struct sysfs_ops *ops;
43554 int error = -EACCES;
43555 char *p;
43556
43557 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43558
43559 sysfs_put_active_two(attr_sd);
43560
43561 - if (buffer->event != atomic_read(&od->event))
43562 + if (buffer->event != atomic_read_unchecked(&od->event))
43563 goto trigger;
43564
43565 return DEFAULT_POLLMASK;
43566 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43567
43568 od = sd->s_attr.open;
43569 if (od) {
43570 - atomic_inc(&od->event);
43571 + atomic_inc_unchecked(&od->event);
43572 wake_up_interruptible(&od->poll);
43573 }
43574
43575 diff -urNp linux-2.6.32.42/fs/sysfs/mount.c linux-2.6.32.42/fs/sysfs/mount.c
43576 --- linux-2.6.32.42/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43577 +++ linux-2.6.32.42/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43578 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43579 .s_name = "",
43580 .s_count = ATOMIC_INIT(1),
43581 .s_flags = SYSFS_DIR,
43582 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43583 + .s_mode = S_IFDIR | S_IRWXU,
43584 +#else
43585 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43586 +#endif
43587 .s_ino = 1,
43588 };
43589
43590 diff -urNp linux-2.6.32.42/fs/sysfs/symlink.c linux-2.6.32.42/fs/sysfs/symlink.c
43591 --- linux-2.6.32.42/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43592 +++ linux-2.6.32.42/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43593 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43594
43595 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43596 {
43597 - char *page = nd_get_link(nd);
43598 + const char *page = nd_get_link(nd);
43599 if (!IS_ERR(page))
43600 free_page((unsigned long)page);
43601 }
43602 diff -urNp linux-2.6.32.42/fs/udf/balloc.c linux-2.6.32.42/fs/udf/balloc.c
43603 --- linux-2.6.32.42/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43604 +++ linux-2.6.32.42/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43605 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43606
43607 mutex_lock(&sbi->s_alloc_mutex);
43608 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43609 - if (bloc->logicalBlockNum < 0 ||
43610 - (bloc->logicalBlockNum + count) >
43611 - partmap->s_partition_len) {
43612 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43613 udf_debug("%d < %d || %d + %d > %d\n",
43614 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43615 count, partmap->s_partition_len);
43616 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43617
43618 mutex_lock(&sbi->s_alloc_mutex);
43619 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43620 - if (bloc->logicalBlockNum < 0 ||
43621 - (bloc->logicalBlockNum + count) >
43622 - partmap->s_partition_len) {
43623 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43624 udf_debug("%d < %d || %d + %d > %d\n",
43625 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43626 partmap->s_partition_len);
43627 diff -urNp linux-2.6.32.42/fs/udf/inode.c linux-2.6.32.42/fs/udf/inode.c
43628 --- linux-2.6.32.42/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43629 +++ linux-2.6.32.42/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43630 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43631 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43632 int lastblock = 0;
43633
43634 + pax_track_stack();
43635 +
43636 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43637 prev_epos.block = iinfo->i_location;
43638 prev_epos.bh = NULL;
43639 diff -urNp linux-2.6.32.42/fs/udf/misc.c linux-2.6.32.42/fs/udf/misc.c
43640 --- linux-2.6.32.42/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
43641 +++ linux-2.6.32.42/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
43642 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43643
43644 u8 udf_tag_checksum(const struct tag *t)
43645 {
43646 - u8 *data = (u8 *)t;
43647 + const u8 *data = (const u8 *)t;
43648 u8 checksum = 0;
43649 int i;
43650 for (i = 0; i < sizeof(struct tag); ++i)
43651 diff -urNp linux-2.6.32.42/fs/utimes.c linux-2.6.32.42/fs/utimes.c
43652 --- linux-2.6.32.42/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
43653 +++ linux-2.6.32.42/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
43654 @@ -1,6 +1,7 @@
43655 #include <linux/compiler.h>
43656 #include <linux/file.h>
43657 #include <linux/fs.h>
43658 +#include <linux/security.h>
43659 #include <linux/linkage.h>
43660 #include <linux/mount.h>
43661 #include <linux/namei.h>
43662 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43663 goto mnt_drop_write_and_out;
43664 }
43665 }
43666 +
43667 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43668 + error = -EACCES;
43669 + goto mnt_drop_write_and_out;
43670 + }
43671 +
43672 mutex_lock(&inode->i_mutex);
43673 error = notify_change(path->dentry, &newattrs);
43674 mutex_unlock(&inode->i_mutex);
43675 diff -urNp linux-2.6.32.42/fs/xattr_acl.c linux-2.6.32.42/fs/xattr_acl.c
43676 --- linux-2.6.32.42/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
43677 +++ linux-2.6.32.42/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
43678 @@ -17,8 +17,8 @@
43679 struct posix_acl *
43680 posix_acl_from_xattr(const void *value, size_t size)
43681 {
43682 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43683 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43684 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43685 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43686 int count;
43687 struct posix_acl *acl;
43688 struct posix_acl_entry *acl_e;
43689 diff -urNp linux-2.6.32.42/fs/xattr.c linux-2.6.32.42/fs/xattr.c
43690 --- linux-2.6.32.42/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
43691 +++ linux-2.6.32.42/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
43692 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43693 * Extended attribute SET operations
43694 */
43695 static long
43696 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43697 +setxattr(struct path *path, const char __user *name, const void __user *value,
43698 size_t size, int flags)
43699 {
43700 int error;
43701 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
43702 return PTR_ERR(kvalue);
43703 }
43704
43705 - error = vfs_setxattr(d, kname, kvalue, size, flags);
43706 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43707 + error = -EACCES;
43708 + goto out;
43709 + }
43710 +
43711 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43712 +out:
43713 kfree(kvalue);
43714 return error;
43715 }
43716 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43717 return error;
43718 error = mnt_want_write(path.mnt);
43719 if (!error) {
43720 - error = setxattr(path.dentry, name, value, size, flags);
43721 + error = setxattr(&path, name, value, size, flags);
43722 mnt_drop_write(path.mnt);
43723 }
43724 path_put(&path);
43725 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43726 return error;
43727 error = mnt_want_write(path.mnt);
43728 if (!error) {
43729 - error = setxattr(path.dentry, name, value, size, flags);
43730 + error = setxattr(&path, name, value, size, flags);
43731 mnt_drop_write(path.mnt);
43732 }
43733 path_put(&path);
43734 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43735 const void __user *,value, size_t, size, int, flags)
43736 {
43737 struct file *f;
43738 - struct dentry *dentry;
43739 int error = -EBADF;
43740
43741 f = fget(fd);
43742 if (!f)
43743 return error;
43744 - dentry = f->f_path.dentry;
43745 - audit_inode(NULL, dentry);
43746 + audit_inode(NULL, f->f_path.dentry);
43747 error = mnt_want_write_file(f);
43748 if (!error) {
43749 - error = setxattr(dentry, name, value, size, flags);
43750 + error = setxattr(&f->f_path, name, value, size, flags);
43751 mnt_drop_write(f->f_path.mnt);
43752 }
43753 fput(f);
43754 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c
43755 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
43756 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
43757 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
43758 xfs_fsop_geom_t fsgeo;
43759 int error;
43760
43761 + memset(&fsgeo, 0, sizeof(fsgeo));
43762 error = xfs_fs_geometry(mp, &fsgeo, 3);
43763 if (error)
43764 return -error;
43765 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c
43766 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
43767 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
43768 @@ -134,7 +134,7 @@ xfs_find_handle(
43769 }
43770
43771 error = -EFAULT;
43772 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43773 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43774 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43775 goto out_put;
43776
43777 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
43778 if (IS_ERR(dentry))
43779 return PTR_ERR(dentry);
43780
43781 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
43782 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
43783 if (!kbuf)
43784 goto out_dput;
43785
43786 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
43787 xfs_mount_t *mp,
43788 void __user *arg)
43789 {
43790 - xfs_fsop_geom_t fsgeo;
43791 + xfs_fsop_geom_t fsgeo;
43792 int error;
43793
43794 error = xfs_fs_geometry(mp, &fsgeo, 3);
43795 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c
43796 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
43797 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
43798 @@ -468,7 +468,7 @@ xfs_vn_put_link(
43799 struct nameidata *nd,
43800 void *p)
43801 {
43802 - char *s = nd_get_link(nd);
43803 + const char *s = nd_get_link(nd);
43804
43805 if (!IS_ERR(s))
43806 kfree(s);
43807 diff -urNp linux-2.6.32.42/fs/xfs/xfs_bmap.c linux-2.6.32.42/fs/xfs/xfs_bmap.c
43808 --- linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
43809 +++ linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
43810 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
43811 int nmap,
43812 int ret_nmap);
43813 #else
43814 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43815 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43816 #endif /* DEBUG */
43817
43818 #if defined(XFS_RW_TRACE)
43819 diff -urNp linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c
43820 --- linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
43821 +++ linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
43822 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
43823 }
43824
43825 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43826 - if (filldir(dirent, sfep->name, sfep->namelen,
43827 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43828 + char name[sfep->namelen];
43829 + memcpy(name, sfep->name, sfep->namelen);
43830 + if (filldir(dirent, name, sfep->namelen,
43831 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
43832 + *offset = off & 0x7fffffff;
43833 + return 0;
43834 + }
43835 + } else if (filldir(dirent, sfep->name, sfep->namelen,
43836 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43837 *offset = off & 0x7fffffff;
43838 return 0;
43839 diff -urNp linux-2.6.32.42/grsecurity/gracl_alloc.c linux-2.6.32.42/grsecurity/gracl_alloc.c
43840 --- linux-2.6.32.42/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43841 +++ linux-2.6.32.42/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
43842 @@ -0,0 +1,105 @@
43843 +#include <linux/kernel.h>
43844 +#include <linux/mm.h>
43845 +#include <linux/slab.h>
43846 +#include <linux/vmalloc.h>
43847 +#include <linux/gracl.h>
43848 +#include <linux/grsecurity.h>
43849 +
43850 +static unsigned long alloc_stack_next = 1;
43851 +static unsigned long alloc_stack_size = 1;
43852 +static void **alloc_stack;
43853 +
43854 +static __inline__ int
43855 +alloc_pop(void)
43856 +{
43857 + if (alloc_stack_next == 1)
43858 + return 0;
43859 +
43860 + kfree(alloc_stack[alloc_stack_next - 2]);
43861 +
43862 + alloc_stack_next--;
43863 +
43864 + return 1;
43865 +}
43866 +
43867 +static __inline__ int
43868 +alloc_push(void *buf)
43869 +{
43870 + if (alloc_stack_next >= alloc_stack_size)
43871 + return 1;
43872 +
43873 + alloc_stack[alloc_stack_next - 1] = buf;
43874 +
43875 + alloc_stack_next++;
43876 +
43877 + return 0;
43878 +}
43879 +
43880 +void *
43881 +acl_alloc(unsigned long len)
43882 +{
43883 + void *ret = NULL;
43884 +
43885 + if (!len || len > PAGE_SIZE)
43886 + goto out;
43887 +
43888 + ret = kmalloc(len, GFP_KERNEL);
43889 +
43890 + if (ret) {
43891 + if (alloc_push(ret)) {
43892 + kfree(ret);
43893 + ret = NULL;
43894 + }
43895 + }
43896 +
43897 +out:
43898 + return ret;
43899 +}
43900 +
43901 +void *
43902 +acl_alloc_num(unsigned long num, unsigned long len)
43903 +{
43904 + if (!len || (num > (PAGE_SIZE / len)))
43905 + return NULL;
43906 +
43907 + return acl_alloc(num * len);
43908 +}
43909 +
43910 +void
43911 +acl_free_all(void)
43912 +{
43913 + if (gr_acl_is_enabled() || !alloc_stack)
43914 + return;
43915 +
43916 + while (alloc_pop()) ;
43917 +
43918 + if (alloc_stack) {
43919 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
43920 + kfree(alloc_stack);
43921 + else
43922 + vfree(alloc_stack);
43923 + }
43924 +
43925 + alloc_stack = NULL;
43926 + alloc_stack_size = 1;
43927 + alloc_stack_next = 1;
43928 +
43929 + return;
43930 +}
43931 +
43932 +int
43933 +acl_alloc_stack_init(unsigned long size)
43934 +{
43935 + if ((size * sizeof (void *)) <= PAGE_SIZE)
43936 + alloc_stack =
43937 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
43938 + else
43939 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
43940 +
43941 + alloc_stack_size = size;
43942 +
43943 + if (!alloc_stack)
43944 + return 0;
43945 + else
43946 + return 1;
43947 +}
43948 diff -urNp linux-2.6.32.42/grsecurity/gracl.c linux-2.6.32.42/grsecurity/gracl.c
43949 --- linux-2.6.32.42/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
43950 +++ linux-2.6.32.42/grsecurity/gracl.c 2011-06-11 16:24:26.000000000 -0400
43951 @@ -0,0 +1,4085 @@
43952 +#include <linux/kernel.h>
43953 +#include <linux/module.h>
43954 +#include <linux/sched.h>
43955 +#include <linux/mm.h>
43956 +#include <linux/file.h>
43957 +#include <linux/fs.h>
43958 +#include <linux/namei.h>
43959 +#include <linux/mount.h>
43960 +#include <linux/tty.h>
43961 +#include <linux/proc_fs.h>
43962 +#include <linux/smp_lock.h>
43963 +#include <linux/slab.h>
43964 +#include <linux/vmalloc.h>
43965 +#include <linux/types.h>
43966 +#include <linux/sysctl.h>
43967 +#include <linux/netdevice.h>
43968 +#include <linux/ptrace.h>
43969 +#include <linux/gracl.h>
43970 +#include <linux/gralloc.h>
43971 +#include <linux/grsecurity.h>
43972 +#include <linux/grinternal.h>
43973 +#include <linux/pid_namespace.h>
43974 +#include <linux/fdtable.h>
43975 +#include <linux/percpu.h>
43976 +
43977 +#include <asm/uaccess.h>
43978 +#include <asm/errno.h>
43979 +#include <asm/mman.h>
43980 +
43981 +static struct acl_role_db acl_role_set;
43982 +static struct name_db name_set;
43983 +static struct inodev_db inodev_set;
43984 +
43985 +/* for keeping track of userspace pointers used for subjects, so we
43986 + can share references in the kernel as well
43987 +*/
43988 +
43989 +static struct dentry *real_root;
43990 +static struct vfsmount *real_root_mnt;
43991 +
43992 +static struct acl_subj_map_db subj_map_set;
43993 +
43994 +static struct acl_role_label *default_role;
43995 +
43996 +static struct acl_role_label *role_list;
43997 +
43998 +static u16 acl_sp_role_value;
43999 +
44000 +extern char *gr_shared_page[4];
44001 +static DEFINE_MUTEX(gr_dev_mutex);
44002 +DEFINE_RWLOCK(gr_inode_lock);
44003 +
44004 +struct gr_arg *gr_usermode;
44005 +
44006 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
44007 +
44008 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44009 +extern void gr_clear_learn_entries(void);
44010 +
44011 +#ifdef CONFIG_GRKERNSEC_RESLOG
44012 +extern void gr_log_resource(const struct task_struct *task,
44013 + const int res, const unsigned long wanted, const int gt);
44014 +#endif
44015 +
44016 +unsigned char *gr_system_salt;
44017 +unsigned char *gr_system_sum;
44018 +
44019 +static struct sprole_pw **acl_special_roles = NULL;
44020 +static __u16 num_sprole_pws = 0;
44021 +
44022 +static struct acl_role_label *kernel_role = NULL;
44023 +
44024 +static unsigned int gr_auth_attempts = 0;
44025 +static unsigned long gr_auth_expires = 0UL;
44026 +
44027 +#ifdef CONFIG_NET
44028 +extern struct vfsmount *sock_mnt;
44029 +#endif
44030 +extern struct vfsmount *pipe_mnt;
44031 +extern struct vfsmount *shm_mnt;
44032 +#ifdef CONFIG_HUGETLBFS
44033 +extern struct vfsmount *hugetlbfs_vfsmount;
44034 +#endif
44035 +
44036 +static struct acl_object_label *fakefs_obj_rw;
44037 +static struct acl_object_label *fakefs_obj_rwx;
44038 +
44039 +extern int gr_init_uidset(void);
44040 +extern void gr_free_uidset(void);
44041 +extern void gr_remove_uid(uid_t uid);
44042 +extern int gr_find_uid(uid_t uid);
44043 +
44044 +__inline__ int
44045 +gr_acl_is_enabled(void)
44046 +{
44047 + return (gr_status & GR_READY);
44048 +}
44049 +
44050 +#ifdef CONFIG_BTRFS_FS
44051 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44052 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44053 +#endif
44054 +
44055 +static inline dev_t __get_dev(const struct dentry *dentry)
44056 +{
44057 +#ifdef CONFIG_BTRFS_FS
44058 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44059 + return get_btrfs_dev_from_inode(dentry->d_inode);
44060 + else
44061 +#endif
44062 + return dentry->d_inode->i_sb->s_dev;
44063 +}
44064 +
44065 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44066 +{
44067 + return __get_dev(dentry);
44068 +}
44069 +
44070 +static char gr_task_roletype_to_char(struct task_struct *task)
44071 +{
44072 + switch (task->role->roletype &
44073 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44074 + GR_ROLE_SPECIAL)) {
44075 + case GR_ROLE_DEFAULT:
44076 + return 'D';
44077 + case GR_ROLE_USER:
44078 + return 'U';
44079 + case GR_ROLE_GROUP:
44080 + return 'G';
44081 + case GR_ROLE_SPECIAL:
44082 + return 'S';
44083 + }
44084 +
44085 + return 'X';
44086 +}
44087 +
44088 +char gr_roletype_to_char(void)
44089 +{
44090 + return gr_task_roletype_to_char(current);
44091 +}
44092 +
44093 +__inline__ int
44094 +gr_acl_tpe_check(void)
44095 +{
44096 + if (unlikely(!(gr_status & GR_READY)))
44097 + return 0;
44098 + if (current->role->roletype & GR_ROLE_TPE)
44099 + return 1;
44100 + else
44101 + return 0;
44102 +}
44103 +
44104 +int
44105 +gr_handle_rawio(const struct inode *inode)
44106 +{
44107 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44108 + if (inode && S_ISBLK(inode->i_mode) &&
44109 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44110 + !capable(CAP_SYS_RAWIO))
44111 + return 1;
44112 +#endif
44113 + return 0;
44114 +}
44115 +
44116 +static int
44117 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44118 +{
44119 + if (likely(lena != lenb))
44120 + return 0;
44121 +
44122 + return !memcmp(a, b, lena);
44123 +}
44124 +
44125 +/* this must be called with vfsmount_lock and dcache_lock held */
44126 +
44127 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44128 + struct dentry *root, struct vfsmount *rootmnt,
44129 + char *buffer, int buflen)
44130 +{
44131 + char * end = buffer+buflen;
44132 + char * retval;
44133 + int namelen;
44134 +
44135 + *--end = '\0';
44136 + buflen--;
44137 +
44138 + if (buflen < 1)
44139 + goto Elong;
44140 + /* Get '/' right */
44141 + retval = end-1;
44142 + *retval = '/';
44143 +
44144 + for (;;) {
44145 + struct dentry * parent;
44146 +
44147 + if (dentry == root && vfsmnt == rootmnt)
44148 + break;
44149 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44150 + /* Global root? */
44151 + if (vfsmnt->mnt_parent == vfsmnt)
44152 + goto global_root;
44153 + dentry = vfsmnt->mnt_mountpoint;
44154 + vfsmnt = vfsmnt->mnt_parent;
44155 + continue;
44156 + }
44157 + parent = dentry->d_parent;
44158 + prefetch(parent);
44159 + namelen = dentry->d_name.len;
44160 + buflen -= namelen + 1;
44161 + if (buflen < 0)
44162 + goto Elong;
44163 + end -= namelen;
44164 + memcpy(end, dentry->d_name.name, namelen);
44165 + *--end = '/';
44166 + retval = end;
44167 + dentry = parent;
44168 + }
44169 +
44170 +out:
44171 + return retval;
44172 +
44173 +global_root:
44174 + namelen = dentry->d_name.len;
44175 + buflen -= namelen;
44176 + if (buflen < 0)
44177 + goto Elong;
44178 + retval -= namelen-1; /* hit the slash */
44179 + memcpy(retval, dentry->d_name.name, namelen);
44180 + goto out;
44181 +Elong:
44182 + retval = ERR_PTR(-ENAMETOOLONG);
44183 + goto out;
44184 +}
44185 +
44186 +static char *
44187 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44188 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
44189 +{
44190 + char *retval;
44191 +
44192 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
44193 + if (unlikely(IS_ERR(retval)))
44194 + retval = strcpy(buf, "<path too long>");
44195 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44196 + retval[1] = '\0';
44197 +
44198 + return retval;
44199 +}
44200 +
44201 +static char *
44202 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44203 + char *buf, int buflen)
44204 +{
44205 + char *res;
44206 +
44207 + /* we can use real_root, real_root_mnt, because this is only called
44208 + by the RBAC system */
44209 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
44210 +
44211 + return res;
44212 +}
44213 +
44214 +static char *
44215 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44216 + char *buf, int buflen)
44217 +{
44218 + char *res;
44219 + struct dentry *root;
44220 + struct vfsmount *rootmnt;
44221 + struct task_struct *reaper = &init_task;
44222 +
44223 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
44224 + read_lock(&reaper->fs->lock);
44225 + root = dget(reaper->fs->root.dentry);
44226 + rootmnt = mntget(reaper->fs->root.mnt);
44227 + read_unlock(&reaper->fs->lock);
44228 +
44229 + spin_lock(&dcache_lock);
44230 + spin_lock(&vfsmount_lock);
44231 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
44232 + spin_unlock(&vfsmount_lock);
44233 + spin_unlock(&dcache_lock);
44234 +
44235 + dput(root);
44236 + mntput(rootmnt);
44237 + return res;
44238 +}
44239 +
44240 +static char *
44241 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44242 +{
44243 + char *ret;
44244 + spin_lock(&dcache_lock);
44245 + spin_lock(&vfsmount_lock);
44246 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44247 + PAGE_SIZE);
44248 + spin_unlock(&vfsmount_lock);
44249 + spin_unlock(&dcache_lock);
44250 + return ret;
44251 +}
44252 +
44253 +char *
44254 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44255 +{
44256 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44257 + PAGE_SIZE);
44258 +}
44259 +
44260 +char *
44261 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44262 +{
44263 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44264 + PAGE_SIZE);
44265 +}
44266 +
44267 +char *
44268 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44269 +{
44270 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44271 + PAGE_SIZE);
44272 +}
44273 +
44274 +char *
44275 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44276 +{
44277 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44278 + PAGE_SIZE);
44279 +}
44280 +
44281 +char *
44282 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44283 +{
44284 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44285 + PAGE_SIZE);
44286 +}
44287 +
44288 +__inline__ __u32
44289 +to_gr_audit(const __u32 reqmode)
44290 +{
44291 + /* masks off auditable permission flags, then shifts them to create
44292 + auditing flags, and adds the special case of append auditing if
44293 + we're requesting write */
44294 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44295 +}
44296 +
44297 +struct acl_subject_label *
44298 +lookup_subject_map(const struct acl_subject_label *userp)
44299 +{
44300 + unsigned int index = shash(userp, subj_map_set.s_size);
44301 + struct subject_map *match;
44302 +
44303 + match = subj_map_set.s_hash[index];
44304 +
44305 + while (match && match->user != userp)
44306 + match = match->next;
44307 +
44308 + if (match != NULL)
44309 + return match->kernel;
44310 + else
44311 + return NULL;
44312 +}
44313 +
44314 +static void
44315 +insert_subj_map_entry(struct subject_map *subjmap)
44316 +{
44317 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44318 + struct subject_map **curr;
44319 +
44320 + subjmap->prev = NULL;
44321 +
44322 + curr = &subj_map_set.s_hash[index];
44323 + if (*curr != NULL)
44324 + (*curr)->prev = subjmap;
44325 +
44326 + subjmap->next = *curr;
44327 + *curr = subjmap;
44328 +
44329 + return;
44330 +}
44331 +
44332 +static struct acl_role_label *
44333 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44334 + const gid_t gid)
44335 +{
44336 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44337 + struct acl_role_label *match;
44338 + struct role_allowed_ip *ipp;
44339 + unsigned int x;
44340 + u32 curr_ip = task->signal->curr_ip;
44341 +
44342 + task->signal->saved_ip = curr_ip;
44343 +
44344 + match = acl_role_set.r_hash[index];
44345 +
44346 + while (match) {
44347 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44348 + for (x = 0; x < match->domain_child_num; x++) {
44349 + if (match->domain_children[x] == uid)
44350 + goto found;
44351 + }
44352 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44353 + break;
44354 + match = match->next;
44355 + }
44356 +found:
44357 + if (match == NULL) {
44358 + try_group:
44359 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44360 + match = acl_role_set.r_hash[index];
44361 +
44362 + while (match) {
44363 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44364 + for (x = 0; x < match->domain_child_num; x++) {
44365 + if (match->domain_children[x] == gid)
44366 + goto found2;
44367 + }
44368 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44369 + break;
44370 + match = match->next;
44371 + }
44372 +found2:
44373 + if (match == NULL)
44374 + match = default_role;
44375 + if (match->allowed_ips == NULL)
44376 + return match;
44377 + else {
44378 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44379 + if (likely
44380 + ((ntohl(curr_ip) & ipp->netmask) ==
44381 + (ntohl(ipp->addr) & ipp->netmask)))
44382 + return match;
44383 + }
44384 + match = default_role;
44385 + }
44386 + } else if (match->allowed_ips == NULL) {
44387 + return match;
44388 + } else {
44389 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44390 + if (likely
44391 + ((ntohl(curr_ip) & ipp->netmask) ==
44392 + (ntohl(ipp->addr) & ipp->netmask)))
44393 + return match;
44394 + }
44395 + goto try_group;
44396 + }
44397 +
44398 + return match;
44399 +}
44400 +
44401 +struct acl_subject_label *
44402 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44403 + const struct acl_role_label *role)
44404 +{
44405 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44406 + struct acl_subject_label *match;
44407 +
44408 + match = role->subj_hash[index];
44409 +
44410 + while (match && (match->inode != ino || match->device != dev ||
44411 + (match->mode & GR_DELETED))) {
44412 + match = match->next;
44413 + }
44414 +
44415 + if (match && !(match->mode & GR_DELETED))
44416 + return match;
44417 + else
44418 + return NULL;
44419 +}
44420 +
44421 +struct acl_subject_label *
44422 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44423 + const struct acl_role_label *role)
44424 +{
44425 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44426 + struct acl_subject_label *match;
44427 +
44428 + match = role->subj_hash[index];
44429 +
44430 + while (match && (match->inode != ino || match->device != dev ||
44431 + !(match->mode & GR_DELETED))) {
44432 + match = match->next;
44433 + }
44434 +
44435 + if (match && (match->mode & GR_DELETED))
44436 + return match;
44437 + else
44438 + return NULL;
44439 +}
44440 +
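+/* The object/name lookups come in pairs: the plain variant skips entries
+   flagged GR_DELETED, while the "_create"/"_deleted" variants prefer a
+   deleted entry first and only then fall back to a live one, presumably so
+   that a policy entry for a previously deleted path is matched when the
+   path is about to be recreated. */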
44441 +static struct acl_object_label *
44442 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44443 + const struct acl_subject_label *subj)
44444 +{
44445 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44446 + struct acl_object_label *match;
44447 +
44448 + match = subj->obj_hash[index];
44449 +
44450 + while (match && (match->inode != ino || match->device != dev ||
44451 + (match->mode & GR_DELETED))) {
44452 + match = match->next;
44453 + }
44454 +
44455 + if (match && !(match->mode & GR_DELETED))
44456 + return match;
44457 + else
44458 + return NULL;
44459 +}
44460 +
44461 +static struct acl_object_label *
44462 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44463 + const struct acl_subject_label *subj)
44464 +{
44465 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44466 + struct acl_object_label *match;
44467 +
44468 + match = subj->obj_hash[index];
44469 +
44470 + while (match && (match->inode != ino || match->device != dev ||
44471 + !(match->mode & GR_DELETED))) {
44472 + match = match->next;
44473 + }
44474 +
44475 + if (match && (match->mode & GR_DELETED))
44476 + return match;
44477 +
44478 + match = subj->obj_hash[index];
44479 +
44480 + while (match && (match->inode != ino || match->device != dev ||
44481 + (match->mode & GR_DELETED))) {
44482 + match = match->next;
44483 + }
44484 +
44485 + if (match && !(match->mode & GR_DELETED))
44486 + return match;
44487 + else
44488 + return NULL;
44489 +}
44490 +
44491 +static struct name_entry *
44492 +lookup_name_entry(const char *name)
44493 +{
44494 + unsigned int len = strlen(name);
44495 + unsigned int key = full_name_hash(name, len);
44496 + unsigned int index = key % name_set.n_size;
44497 + struct name_entry *match;
44498 +
44499 + match = name_set.n_hash[index];
44500 +
44501 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44502 + match = match->next;
44503 +
44504 + return match;
44505 +}
44506 +
44507 +static struct name_entry *
44508 +lookup_name_entry_create(const char *name)
44509 +{
44510 + unsigned int len = strlen(name);
44511 + unsigned int key = full_name_hash(name, len);
44512 + unsigned int index = key % name_set.n_size;
44513 + struct name_entry *match;
44514 +
44515 + match = name_set.n_hash[index];
44516 +
44517 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44518 + !match->deleted))
44519 + match = match->next;
44520 +
44521 + if (match && match->deleted)
44522 + return match;
44523 +
44524 + match = name_set.n_hash[index];
44525 +
44526 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44527 + match->deleted))
44528 + match = match->next;
44529 +
44530 + if (match && !match->deleted)
44531 + return match;
44532 + else
44533 + return NULL;
44534 +}
44535 +
44536 +static struct inodev_entry *
44537 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44538 +{
44539 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44540 + struct inodev_entry *match;
44541 +
44542 + match = inodev_set.i_hash[index];
44543 +
44544 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44545 + match = match->next;
44546 +
44547 + return match;
44548 +}
44549 +
44550 +static void
44551 +insert_inodev_entry(struct inodev_entry *entry)
44552 +{
44553 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44554 + inodev_set.i_size);
44555 + struct inodev_entry **curr;
44556 +
44557 + entry->prev = NULL;
44558 +
44559 + curr = &inodev_set.i_hash[index];
44560 + if (*curr != NULL)
44561 + (*curr)->prev = entry;
44562 +
44563 + entry->next = *curr;
44564 + *curr = entry;
44565 +
44566 + return;
44567 +}
44568 +
44569 +static void
44570 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44571 +{
44572 + unsigned int index =
44573 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44574 + struct acl_role_label **curr;
44575 + struct acl_role_label *tmp;
44576 +
44577 + curr = &acl_role_set.r_hash[index];
44578 +
44579 +	/* if the role was already inserted due to domains and already has
44580 +	   another role chained to it in that bucket, then we need to
44581 +	   combine the two hash chains
44582 +	 */
44583 + if (role->next) {
44584 + tmp = role->next;
44585 + while (tmp->next)
44586 + tmp = tmp->next;
44587 + tmp->next = *curr;
44588 + } else
44589 + role->next = *curr;
44590 + *curr = role;
44591 +
44592 + return;
44593 +}
44594 +
44595 +static void
44596 +insert_acl_role_label(struct acl_role_label *role)
44597 +{
44598 + int i;
44599 +
44600 + if (role_list == NULL) {
44601 + role_list = role;
44602 + role->prev = NULL;
44603 + } else {
44604 + role->prev = role_list;
44605 + role_list = role;
44606 + }
44607 +
44608 + /* used for hash chains */
44609 + role->next = NULL;
44610 +
44611 + if (role->roletype & GR_ROLE_DOMAIN) {
44612 + for (i = 0; i < role->domain_child_num; i++)
44613 + __insert_acl_role_label(role, role->domain_children[i]);
44614 + } else
44615 + __insert_acl_role_label(role, role->uidgid);
44616 +}
44617 +
44618 +static int
44619 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44620 +{
44621 + struct name_entry **curr, *nentry;
44622 + struct inodev_entry *ientry;
44623 + unsigned int len = strlen(name);
44624 + unsigned int key = full_name_hash(name, len);
44625 + unsigned int index = key % name_set.n_size;
44626 +
44627 + curr = &name_set.n_hash[index];
44628 +
44629 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44630 + curr = &((*curr)->next);
44631 +
44632 + if (*curr != NULL)
44633 + return 1;
44634 +
44635 + nentry = acl_alloc(sizeof (struct name_entry));
44636 + if (nentry == NULL)
44637 + return 0;
44638 + ientry = acl_alloc(sizeof (struct inodev_entry));
44639 + if (ientry == NULL)
44640 + return 0;
44641 + ientry->nentry = nentry;
44642 +
44643 + nentry->key = key;
44644 + nentry->name = name;
44645 + nentry->inode = inode;
44646 + nentry->device = device;
44647 + nentry->len = len;
44648 + nentry->deleted = deleted;
44649 +
44650 + nentry->prev = NULL;
44651 + curr = &name_set.n_hash[index];
44652 + if (*curr != NULL)
44653 + (*curr)->prev = nentry;
44654 + nentry->next = *curr;
44655 + *curr = nentry;
44656 +
44657 + /* insert us into the table searchable by inode/dev */
44658 + insert_inodev_entry(ientry);
44659 +
44660 + return 1;
44661 +}
44662 +
44663 +static void
44664 +insert_acl_obj_label(struct acl_object_label *obj,
44665 + struct acl_subject_label *subj)
44666 +{
44667 + unsigned int index =
44668 + fhash(obj->inode, obj->device, subj->obj_hash_size);
44669 + struct acl_object_label **curr;
44670 +
44671 +
44672 + obj->prev = NULL;
44673 +
44674 + curr = &subj->obj_hash[index];
44675 + if (*curr != NULL)
44676 + (*curr)->prev = obj;
44677 +
44678 + obj->next = *curr;
44679 + *curr = obj;
44680 +
44681 + return;
44682 +}
44683 +
44684 +static void
44685 +insert_acl_subj_label(struct acl_subject_label *obj,
44686 + struct acl_role_label *role)
44687 +{
44688 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44689 + struct acl_subject_label **curr;
44690 +
44691 + obj->prev = NULL;
44692 +
44693 + curr = &role->subj_hash[index];
44694 + if (*curr != NULL)
44695 + (*curr)->prev = obj;
44696 +
44697 + obj->next = *curr;
44698 + *curr = obj;
44699 +
44700 + return;
44701 +}
44702 +
44703 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44704 +
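+/* Example of the sizing rule above: the sizes are primes, and the first
+   prime strictly greater than the requested element count is chosen, so a
+   request for 1000 entries yields a 1021-bucket table and a load factor
+   (lambda) of roughly 1000/1021 ~= 0.98.  Tables that fit in a page are
+   kmalloc'd, larger ones are vmalloc'd. */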
44705 +static void *
44706 +create_table(__u32 * len, int elementsize)
44707 +{
44708 + unsigned int table_sizes[] = {
44709 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44710 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44711 + 4194301, 8388593, 16777213, 33554393, 67108859
44712 + };
44713 + void *newtable = NULL;
44714 + unsigned int pwr = 0;
44715 +
44716 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44717 + table_sizes[pwr] <= *len)
44718 + pwr++;
44719 +
44720 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44721 + return newtable;
44722 +
44723 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44724 + newtable =
44725 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44726 + else
44727 + newtable = vmalloc(table_sizes[pwr] * elementsize);
44728 +
44729 + *len = table_sizes[pwr];
44730 +
44731 + return newtable;
44732 +}
44733 +
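+/* init_variables() sizes every global hash from the counts userland passed
+   in gr_arg->role_db (subjects, roles plus domain children, objects), pins
+   the real root dentry/vfsmount so path walks have a fixed stopping point,
+   and sets up the two "fake" objects used for shared-memory, pipe and
+   socket inodes that have no on-disk path to label. */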
44734 +static int
44735 +init_variables(const struct gr_arg *arg)
44736 +{
44737 + struct task_struct *reaper = &init_task;
44738 + unsigned int stacksize;
44739 +
44740 + subj_map_set.s_size = arg->role_db.num_subjects;
44741 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44742 + name_set.n_size = arg->role_db.num_objects;
44743 + inodev_set.i_size = arg->role_db.num_objects;
44744 +
44745 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
44746 + !name_set.n_size || !inodev_set.i_size)
44747 + return 1;
44748 +
44749 + if (!gr_init_uidset())
44750 + return 1;
44751 +
44752 + /* set up the stack that holds allocation info */
44753 +
44754 + stacksize = arg->role_db.num_pointers + 5;
44755 +
44756 + if (!acl_alloc_stack_init(stacksize))
44757 + return 1;
44758 +
44759 + /* grab reference for the real root dentry and vfsmount */
44760 + read_lock(&reaper->fs->lock);
44761 + real_root = dget(reaper->fs->root.dentry);
44762 + real_root_mnt = mntget(reaper->fs->root.mnt);
44763 + read_unlock(&reaper->fs->lock);
44764 +
44765 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44766 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
44767 +#endif
44768 +
44769 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44770 + if (fakefs_obj_rw == NULL)
44771 + return 1;
44772 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44773 +
44774 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44775 + if (fakefs_obj_rwx == NULL)
44776 + return 1;
44777 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44778 +
44779 + subj_map_set.s_hash =
44780 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44781 + acl_role_set.r_hash =
44782 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44783 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44784 + inodev_set.i_hash =
44785 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44786 +
44787 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44788 + !name_set.n_hash || !inodev_set.i_hash)
44789 + return 1;
44790 +
44791 + memset(subj_map_set.s_hash, 0,
44792 + sizeof(struct subject_map *) * subj_map_set.s_size);
44793 + memset(acl_role_set.r_hash, 0,
44794 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
44795 + memset(name_set.n_hash, 0,
44796 + sizeof (struct name_entry *) * name_set.n_size);
44797 + memset(inodev_set.i_hash, 0,
44798 + sizeof (struct inodev_entry *) * inodev_set.i_size);
44799 +
44800 + return 0;
44801 +}
44802 +
44803 +/* free information that is not needed after startup;
44804 +   currently this is the user->kernel pointer mapping for subjects
44805 +*/
44806 +
44807 +static void
44808 +free_init_variables(void)
44809 +{
44810 + __u32 i;
44811 +
44812 + if (subj_map_set.s_hash) {
44813 + for (i = 0; i < subj_map_set.s_size; i++) {
44814 + if (subj_map_set.s_hash[i]) {
44815 + kfree(subj_map_set.s_hash[i]);
44816 + subj_map_set.s_hash[i] = NULL;
44817 + }
44818 + }
44819 +
44820 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44821 + PAGE_SIZE)
44822 + kfree(subj_map_set.s_hash);
44823 + else
44824 + vfree(subj_map_set.s_hash);
44825 + }
44826 +
44827 + return;
44828 +}
44829 +
44830 +static void
44831 +free_variables(void)
44832 +{
44833 + struct acl_subject_label *s;
44834 + struct acl_role_label *r;
44835 + struct task_struct *task, *task2;
44836 + unsigned int x;
44837 +
44838 + gr_clear_learn_entries();
44839 +
44840 + read_lock(&tasklist_lock);
44841 + do_each_thread(task2, task) {
44842 + task->acl_sp_role = 0;
44843 + task->acl_role_id = 0;
44844 + task->acl = NULL;
44845 + task->role = NULL;
44846 + } while_each_thread(task2, task);
44847 + read_unlock(&tasklist_lock);
44848 +
44849 + /* release the reference to the real root dentry and vfsmount */
44850 + if (real_root)
44851 + dput(real_root);
44852 + real_root = NULL;
44853 + if (real_root_mnt)
44854 + mntput(real_root_mnt);
44855 + real_root_mnt = NULL;
44856 +
44857 + /* free all object hash tables */
44858 +
44859 + FOR_EACH_ROLE_START(r)
44860 + if (r->subj_hash == NULL)
44861 + goto next_role;
44862 + FOR_EACH_SUBJECT_START(r, s, x)
44863 + if (s->obj_hash == NULL)
44864 + break;
44865 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44866 + kfree(s->obj_hash);
44867 + else
44868 + vfree(s->obj_hash);
44869 + FOR_EACH_SUBJECT_END(s, x)
44870 + FOR_EACH_NESTED_SUBJECT_START(r, s)
44871 + if (s->obj_hash == NULL)
44872 + break;
44873 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44874 + kfree(s->obj_hash);
44875 + else
44876 + vfree(s->obj_hash);
44877 + FOR_EACH_NESTED_SUBJECT_END(s)
44878 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
44879 + kfree(r->subj_hash);
44880 + else
44881 + vfree(r->subj_hash);
44882 + r->subj_hash = NULL;
44883 +next_role:
44884 + FOR_EACH_ROLE_END(r)
44885 +
44886 + acl_free_all();
44887 +
44888 + if (acl_role_set.r_hash) {
44889 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
44890 + PAGE_SIZE)
44891 + kfree(acl_role_set.r_hash);
44892 + else
44893 + vfree(acl_role_set.r_hash);
44894 + }
44895 + if (name_set.n_hash) {
44896 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
44897 + PAGE_SIZE)
44898 + kfree(name_set.n_hash);
44899 + else
44900 + vfree(name_set.n_hash);
44901 + }
44902 +
44903 + if (inodev_set.i_hash) {
44904 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
44905 + PAGE_SIZE)
44906 + kfree(inodev_set.i_hash);
44907 + else
44908 + vfree(inodev_set.i_hash);
44909 + }
44910 +
44911 + gr_free_uidset();
44912 +
44913 + memset(&name_set, 0, sizeof (struct name_db));
44914 + memset(&inodev_set, 0, sizeof (struct inodev_db));
44915 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
44916 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
44917 +
44918 + default_role = NULL;
44919 + role_list = NULL;
44920 +
44921 + return;
44922 +}
44923 +
44924 +static __u32
44925 +count_user_objs(struct acl_object_label *userp)
44926 +{
44927 + struct acl_object_label o_tmp;
44928 + __u32 num = 0;
44929 +
44930 + while (userp) {
44931 + if (copy_from_user(&o_tmp, userp,
44932 + sizeof (struct acl_object_label)))
44933 + break;
44934 +
44935 + userp = o_tmp.prev;
44936 + num++;
44937 + }
44938 +
44939 + return num;
44940 +}
44941 +
44942 +static struct acl_subject_label *
44943 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
44944 +
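+/* The copy_user_* helpers below all follow the same idiom for pulling the
+   userland policy into the kernel: copy_from_user() the fixed-size struct,
+   then for each embedded string do a bounded strnlen_user(), allocate that
+   many bytes from the ACL allocator, copy the string and force a trailing
+   NUL before swapping the userspace pointer for the kernel copy. */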
44945 +static int
44946 +copy_user_glob(struct acl_object_label *obj)
44947 +{
44948 + struct acl_object_label *g_tmp, **guser;
44949 + unsigned int len;
44950 + char *tmp;
44951 +
44952 + if (obj->globbed == NULL)
44953 + return 0;
44954 +
44955 + guser = &obj->globbed;
44956 + while (*guser) {
44957 + g_tmp = (struct acl_object_label *)
44958 + acl_alloc(sizeof (struct acl_object_label));
44959 + if (g_tmp == NULL)
44960 + return -ENOMEM;
44961 +
44962 + if (copy_from_user(g_tmp, *guser,
44963 + sizeof (struct acl_object_label)))
44964 + return -EFAULT;
44965 +
44966 + len = strnlen_user(g_tmp->filename, PATH_MAX);
44967 +
44968 + if (!len || len >= PATH_MAX)
44969 + return -EINVAL;
44970 +
44971 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44972 + return -ENOMEM;
44973 +
44974 + if (copy_from_user(tmp, g_tmp->filename, len))
44975 + return -EFAULT;
44976 + tmp[len-1] = '\0';
44977 + g_tmp->filename = tmp;
44978 +
44979 + *guser = g_tmp;
44980 + guser = &(g_tmp->next);
44981 + }
44982 +
44983 + return 0;
44984 +}
44985 +
44986 +static int
44987 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
44988 + struct acl_role_label *role)
44989 +{
44990 + struct acl_object_label *o_tmp;
44991 + unsigned int len;
44992 + int ret;
44993 + char *tmp;
44994 +
44995 + while (userp) {
44996 + if ((o_tmp = (struct acl_object_label *)
44997 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
44998 + return -ENOMEM;
44999 +
45000 + if (copy_from_user(o_tmp, userp,
45001 + sizeof (struct acl_object_label)))
45002 + return -EFAULT;
45003 +
45004 + userp = o_tmp->prev;
45005 +
45006 + len = strnlen_user(o_tmp->filename, PATH_MAX);
45007 +
45008 + if (!len || len >= PATH_MAX)
45009 + return -EINVAL;
45010 +
45011 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45012 + return -ENOMEM;
45013 +
45014 + if (copy_from_user(tmp, o_tmp->filename, len))
45015 + return -EFAULT;
45016 + tmp[len-1] = '\0';
45017 + o_tmp->filename = tmp;
45018 +
45019 + insert_acl_obj_label(o_tmp, subj);
45020 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45021 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45022 + return -ENOMEM;
45023 +
45024 + ret = copy_user_glob(o_tmp);
45025 + if (ret)
45026 + return ret;
45027 +
45028 + if (o_tmp->nested) {
45029 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45030 + if (IS_ERR(o_tmp->nested))
45031 + return PTR_ERR(o_tmp->nested);
45032 +
45033 + /* insert into nested subject list */
45034 + o_tmp->nested->next = role->hash->first;
45035 + role->hash->first = o_tmp->nested;
45036 + }
45037 + }
45038 +
45039 + return 0;
45040 +}
45041 +
45042 +static __u32
45043 +count_user_subjs(struct acl_subject_label *userp)
45044 +{
45045 + struct acl_subject_label s_tmp;
45046 + __u32 num = 0;
45047 +
45048 + while (userp) {
45049 + if (copy_from_user(&s_tmp, userp,
45050 + sizeof (struct acl_subject_label)))
45051 + break;
45052 +
45053 + userp = s_tmp.prev;
45054 + /* do not count nested subjects against this count, since
45055 + they are not included in the hash table, but are
45056 + attached to objects. We have already counted
45057 + the subjects in userspace for the allocation
45058 + stack
45059 + */
45060 + if (!(s_tmp.mode & GR_NESTED))
45061 + num++;
45062 + }
45063 +
45064 + return num;
45065 +}
45066 +
45067 +static int
45068 +copy_user_allowedips(struct acl_role_label *rolep)
45069 +{
45070 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45071 +
45072 + ruserip = rolep->allowed_ips;
45073 +
45074 + while (ruserip) {
45075 + rlast = rtmp;
45076 +
45077 + if ((rtmp = (struct role_allowed_ip *)
45078 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45079 + return -ENOMEM;
45080 +
45081 + if (copy_from_user(rtmp, ruserip,
45082 + sizeof (struct role_allowed_ip)))
45083 + return -EFAULT;
45084 +
45085 + ruserip = rtmp->prev;
45086 +
45087 + if (!rlast) {
45088 + rtmp->prev = NULL;
45089 + rolep->allowed_ips = rtmp;
45090 + } else {
45091 + rlast->next = rtmp;
45092 + rtmp->prev = rlast;
45093 + }
45094 +
45095 + if (!ruserip)
45096 + rtmp->next = NULL;
45097 + }
45098 +
45099 + return 0;
45100 +}
45101 +
45102 +static int
45103 +copy_user_transitions(struct acl_role_label *rolep)
45104 +{
45105 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
45106 +
45107 + unsigned int len;
45108 + char *tmp;
45109 +
45110 + rusertp = rolep->transitions;
45111 +
45112 + while (rusertp) {
45113 + rlast = rtmp;
45114 +
45115 + if ((rtmp = (struct role_transition *)
45116 + acl_alloc(sizeof (struct role_transition))) == NULL)
45117 + return -ENOMEM;
45118 +
45119 + if (copy_from_user(rtmp, rusertp,
45120 + sizeof (struct role_transition)))
45121 + return -EFAULT;
45122 +
45123 + rusertp = rtmp->prev;
45124 +
45125 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45126 +
45127 + if (!len || len >= GR_SPROLE_LEN)
45128 + return -EINVAL;
45129 +
45130 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45131 + return -ENOMEM;
45132 +
45133 + if (copy_from_user(tmp, rtmp->rolename, len))
45134 + return -EFAULT;
45135 + tmp[len-1] = '\0';
45136 + rtmp->rolename = tmp;
45137 +
45138 + if (!rlast) {
45139 + rtmp->prev = NULL;
45140 + rolep->transitions = rtmp;
45141 + } else {
45142 + rlast->next = rtmp;
45143 + rtmp->prev = rlast;
45144 + }
45145 +
45146 + if (!rusertp)
45147 + rtmp->next = NULL;
45148 + }
45149 +
45150 + return 0;
45151 +}
45152 +
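+/* do_copy_user_subj() is indirectly recursive: objects may carry nested
+   subjects and every subject may name a parent_subject, both of which are
+   copied through this same function.  The subject map consulted first acts
+   as a memo table, so a subject reachable through several paths is copied
+   exactly once and a cycle through parent_subject does not recurse endlessly. */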
45153 +static struct acl_subject_label *
45154 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45155 +{
45156 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45157 + unsigned int len;
45158 + char *tmp;
45159 + __u32 num_objs;
45160 + struct acl_ip_label **i_tmp, *i_utmp2;
45161 + struct gr_hash_struct ghash;
45162 + struct subject_map *subjmap;
45163 + unsigned int i_num;
45164 + int err;
45165 +
45166 + s_tmp = lookup_subject_map(userp);
45167 +
45168 + /* we've already copied this subject into the kernel, just return
45169 + the reference to it, and don't copy it over again
45170 + */
45171 + if (s_tmp)
45172 + return(s_tmp);
45173 +
45174 + if ((s_tmp = (struct acl_subject_label *)
45175 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45176 + return ERR_PTR(-ENOMEM);
45177 +
45178 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45179 + if (subjmap == NULL)
45180 + return ERR_PTR(-ENOMEM);
45181 +
45182 + subjmap->user = userp;
45183 + subjmap->kernel = s_tmp;
45184 + insert_subj_map_entry(subjmap);
45185 +
45186 + if (copy_from_user(s_tmp, userp,
45187 + sizeof (struct acl_subject_label)))
45188 + return ERR_PTR(-EFAULT);
45189 +
45190 + len = strnlen_user(s_tmp->filename, PATH_MAX);
45191 +
45192 + if (!len || len >= PATH_MAX)
45193 + return ERR_PTR(-EINVAL);
45194 +
45195 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45196 + return ERR_PTR(-ENOMEM);
45197 +
45198 + if (copy_from_user(tmp, s_tmp->filename, len))
45199 + return ERR_PTR(-EFAULT);
45200 + tmp[len-1] = '\0';
45201 + s_tmp->filename = tmp;
45202 +
45203 + if (!strcmp(s_tmp->filename, "/"))
45204 + role->root_label = s_tmp;
45205 +
45206 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45207 + return ERR_PTR(-EFAULT);
45208 +
45209 + /* copy user and group transition tables */
45210 +
45211 + if (s_tmp->user_trans_num) {
45212 + uid_t *uidlist;
45213 +
45214 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45215 + if (uidlist == NULL)
45216 + return ERR_PTR(-ENOMEM);
45217 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45218 + return ERR_PTR(-EFAULT);
45219 +
45220 + s_tmp->user_transitions = uidlist;
45221 + }
45222 +
45223 + if (s_tmp->group_trans_num) {
45224 + gid_t *gidlist;
45225 +
45226 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45227 + if (gidlist == NULL)
45228 + return ERR_PTR(-ENOMEM);
45229 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45230 + return ERR_PTR(-EFAULT);
45231 +
45232 + s_tmp->group_transitions = gidlist;
45233 + }
45234 +
45235 + /* set up object hash table */
45236 + num_objs = count_user_objs(ghash.first);
45237 +
45238 + s_tmp->obj_hash_size = num_objs;
45239 + s_tmp->obj_hash =
45240 + (struct acl_object_label **)
45241 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45242 +
45243 + if (!s_tmp->obj_hash)
45244 + return ERR_PTR(-ENOMEM);
45245 +
45246 + memset(s_tmp->obj_hash, 0,
45247 + s_tmp->obj_hash_size *
45248 + sizeof (struct acl_object_label *));
45249 +
45250 + /* add in objects */
45251 + err = copy_user_objs(ghash.first, s_tmp, role);
45252 +
45253 + if (err)
45254 + return ERR_PTR(err);
45255 +
45256 + /* set pointer for parent subject */
45257 + if (s_tmp->parent_subject) {
45258 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45259 +
45260 + if (IS_ERR(s_tmp2))
45261 + return s_tmp2;
45262 +
45263 + s_tmp->parent_subject = s_tmp2;
45264 + }
45265 +
45266 + /* add in ip acls */
45267 +
45268 + if (!s_tmp->ip_num) {
45269 + s_tmp->ips = NULL;
45270 + goto insert;
45271 + }
45272 +
45273 + i_tmp =
45274 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45275 + sizeof (struct acl_ip_label *));
45276 +
45277 + if (!i_tmp)
45278 + return ERR_PTR(-ENOMEM);
45279 +
45280 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45281 + *(i_tmp + i_num) =
45282 + (struct acl_ip_label *)
45283 + acl_alloc(sizeof (struct acl_ip_label));
45284 + if (!*(i_tmp + i_num))
45285 + return ERR_PTR(-ENOMEM);
45286 +
45287 + if (copy_from_user
45288 + (&i_utmp2, s_tmp->ips + i_num,
45289 + sizeof (struct acl_ip_label *)))
45290 + return ERR_PTR(-EFAULT);
45291 +
45292 + if (copy_from_user
45293 + (*(i_tmp + i_num), i_utmp2,
45294 + sizeof (struct acl_ip_label)))
45295 + return ERR_PTR(-EFAULT);
45296 +
45297 + if ((*(i_tmp + i_num))->iface == NULL)
45298 + continue;
45299 +
45300 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45301 + if (!len || len >= IFNAMSIZ)
45302 + return ERR_PTR(-EINVAL);
45303 + tmp = acl_alloc(len);
45304 + if (tmp == NULL)
45305 + return ERR_PTR(-ENOMEM);
45306 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45307 + return ERR_PTR(-EFAULT);
45308 + (*(i_tmp + i_num))->iface = tmp;
45309 + }
45310 +
45311 + s_tmp->ips = i_tmp;
45312 +
45313 +insert:
45314 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45315 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45316 + return ERR_PTR(-ENOMEM);
45317 +
45318 + return s_tmp;
45319 +}
45320 +
45321 +static int
45322 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45323 +{
45324 + struct acl_subject_label s_pre;
45325 + struct acl_subject_label * ret;
45326 + int err;
45327 +
45328 + while (userp) {
45329 + if (copy_from_user(&s_pre, userp,
45330 + sizeof (struct acl_subject_label)))
45331 + return -EFAULT;
45332 +
45333 + /* do not add nested subjects here, add
45334 + while parsing objects
45335 + */
45336 +
45337 + if (s_pre.mode & GR_NESTED) {
45338 + userp = s_pre.prev;
45339 + continue;
45340 + }
45341 +
45342 + ret = do_copy_user_subj(userp, role);
45343 +
45344 + err = PTR_ERR(ret);
45345 + if (IS_ERR(ret))
45346 + return err;
45347 +
45348 + insert_acl_subj_label(ret, role);
45349 +
45350 + userp = s_pre.prev;
45351 + }
45352 +
45353 + return 0;
45354 +}
45355 +
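+/* copy_user_acl() is the top-level policy copy: it first copies the special
+   role authentication entries (sprole_pws), then walks the role table, and
+   for each role copies its name, allowed IPs, domain children, role
+   transitions and finally its subject/object trees via copy_user_subjs().
+   Any failure funnels through the cleanup label, which tears everything
+   down with free_variables(). */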
45356 +static int
45357 +copy_user_acl(struct gr_arg *arg)
45358 +{
45359 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45360 + struct sprole_pw *sptmp;
45361 + struct gr_hash_struct *ghash;
45362 + uid_t *domainlist;
45363 + unsigned int r_num;
45364 + unsigned int len;
45365 + char *tmp;
45366 + int err = 0;
45367 + __u16 i;
45368 + __u32 num_subjs;
45369 +
45370 + /* we need a default and kernel role */
45371 + if (arg->role_db.num_roles < 2)
45372 + return -EINVAL;
45373 +
45374 + /* copy special role authentication info from userspace */
45375 +
45376 + num_sprole_pws = arg->num_sprole_pws;
45377 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45378 +
45379 + if (!acl_special_roles) {
45380 + err = -ENOMEM;
45381 + goto cleanup;
45382 + }
45383 +
45384 + for (i = 0; i < num_sprole_pws; i++) {
45385 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45386 + if (!sptmp) {
45387 + err = -ENOMEM;
45388 + goto cleanup;
45389 + }
45390 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45391 + sizeof (struct sprole_pw))) {
45392 + err = -EFAULT;
45393 + goto cleanup;
45394 + }
45395 +
45396 + len =
45397 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45398 +
45399 + if (!len || len >= GR_SPROLE_LEN) {
45400 + err = -EINVAL;
45401 + goto cleanup;
45402 + }
45403 +
45404 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45405 + err = -ENOMEM;
45406 + goto cleanup;
45407 + }
45408 +
45409 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45410 + err = -EFAULT;
45411 + goto cleanup;
45412 + }
45413 + tmp[len-1] = '\0';
45414 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45415 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45416 +#endif
45417 + sptmp->rolename = tmp;
45418 + acl_special_roles[i] = sptmp;
45419 + }
45420 +
45421 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45422 +
45423 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45424 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45425 +
45426 + if (!r_tmp) {
45427 + err = -ENOMEM;
45428 + goto cleanup;
45429 + }
45430 +
45431 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45432 + sizeof (struct acl_role_label *))) {
45433 + err = -EFAULT;
45434 + goto cleanup;
45435 + }
45436 +
45437 + if (copy_from_user(r_tmp, r_utmp2,
45438 + sizeof (struct acl_role_label))) {
45439 + err = -EFAULT;
45440 + goto cleanup;
45441 + }
45442 +
45443 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45444 +
45445 +		if (!len || len >= GR_SPROLE_LEN) {
45446 + err = -EINVAL;
45447 + goto cleanup;
45448 + }
45449 +
45450 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45451 + err = -ENOMEM;
45452 + goto cleanup;
45453 + }
45454 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45455 + err = -EFAULT;
45456 + goto cleanup;
45457 + }
45458 + tmp[len-1] = '\0';
45459 + r_tmp->rolename = tmp;
45460 +
45461 + if (!strcmp(r_tmp->rolename, "default")
45462 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45463 + default_role = r_tmp;
45464 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45465 + kernel_role = r_tmp;
45466 + }
45467 +
45468 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45469 + err = -ENOMEM;
45470 + goto cleanup;
45471 + }
45472 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45473 + err = -EFAULT;
45474 + goto cleanup;
45475 + }
45476 +
45477 + r_tmp->hash = ghash;
45478 +
45479 + num_subjs = count_user_subjs(r_tmp->hash->first);
45480 +
45481 + r_tmp->subj_hash_size = num_subjs;
45482 + r_tmp->subj_hash =
45483 + (struct acl_subject_label **)
45484 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45485 +
45486 + if (!r_tmp->subj_hash) {
45487 + err = -ENOMEM;
45488 + goto cleanup;
45489 + }
45490 +
45491 + err = copy_user_allowedips(r_tmp);
45492 + if (err)
45493 + goto cleanup;
45494 +
45495 + /* copy domain info */
45496 + if (r_tmp->domain_children != NULL) {
45497 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45498 + if (domainlist == NULL) {
45499 + err = -ENOMEM;
45500 + goto cleanup;
45501 + }
45502 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45503 + err = -EFAULT;
45504 + goto cleanup;
45505 + }
45506 + r_tmp->domain_children = domainlist;
45507 + }
45508 +
45509 + err = copy_user_transitions(r_tmp);
45510 + if (err)
45511 + goto cleanup;
45512 +
45513 + memset(r_tmp->subj_hash, 0,
45514 + r_tmp->subj_hash_size *
45515 + sizeof (struct acl_subject_label *));
45516 +
45517 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45518 +
45519 + if (err)
45520 + goto cleanup;
45521 +
45522 + /* set nested subject list to null */
45523 + r_tmp->hash->first = NULL;
45524 +
45525 + insert_acl_role_label(r_tmp);
45526 + }
45527 +
45528 + goto return_err;
45529 + cleanup:
45530 + free_variables();
45531 + return_err:
45532 + return err;
45533 +
45534 +}
45535 +
45536 +static int
45537 +gracl_init(struct gr_arg *args)
45538 +{
45539 + int error = 0;
45540 +
45541 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45542 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45543 +
45544 + if (init_variables(args)) {
45545 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45546 + error = -ENOMEM;
45547 + free_variables();
45548 + goto out;
45549 + }
45550 +
45551 + error = copy_user_acl(args);
45552 + free_init_variables();
45553 + if (error) {
45554 + free_variables();
45555 + goto out;
45556 + }
45557 +
45558 + if ((error = gr_set_acls(0))) {
45559 + free_variables();
45560 + goto out;
45561 + }
45562 +
45563 + pax_open_kernel();
45564 + gr_status |= GR_READY;
45565 + pax_close_kernel();
45566 +
45567 + out:
45568 + return error;
45569 +}
45570 +
45571 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
45572 +
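+/* Unlike plain fnmatch(), the matcher below is path-aware: '?' and '*' never
+   match a '/', so (for example) "/tmp/??.log" matches "/tmp/ab.log" but a
+   pattern component cannot silently swallow a directory separator, and a
+   '*' directly before a '/' only matches up to the next slash in the
+   subject path before recursing past it. */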
45573 +static int
45574 +glob_match(const char *p, const char *n)
45575 +{
45576 + char c;
45577 +
45578 + while ((c = *p++) != '\0') {
45579 + switch (c) {
45580 + case '?':
45581 + if (*n == '\0')
45582 + return 1;
45583 + else if (*n == '/')
45584 + return 1;
45585 + break;
45586 + case '\\':
45587 + if (*n != c)
45588 + return 1;
45589 + break;
45590 + case '*':
45591 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45592 + if (*n == '/')
45593 + return 1;
45594 + else if (c == '?') {
45595 + if (*n == '\0')
45596 + return 1;
45597 + else
45598 + ++n;
45599 + }
45600 + }
45601 + if (c == '\0') {
45602 + return 0;
45603 + } else {
45604 + const char *endp;
45605 +
45606 + if ((endp = strchr(n, '/')) == NULL)
45607 + endp = n + strlen(n);
45608 +
45609 + if (c == '[') {
45610 + for (--p; n < endp; ++n)
45611 + if (!glob_match(p, n))
45612 + return 0;
45613 + } else if (c == '/') {
45614 + while (*n != '\0' && *n != '/')
45615 + ++n;
45616 + if (*n == '/' && !glob_match(p, n + 1))
45617 + return 0;
45618 + } else {
45619 + for (--p; n < endp; ++n)
45620 + if (*n == c && !glob_match(p, n))
45621 + return 0;
45622 + }
45623 +
45624 + return 1;
45625 + }
45626 + case '[':
45627 + {
45628 + int not;
45629 + char cold;
45630 +
45631 + if (*n == '\0' || *n == '/')
45632 + return 1;
45633 +
45634 + not = (*p == '!' || *p == '^');
45635 + if (not)
45636 + ++p;
45637 +
45638 + c = *p++;
45639 + for (;;) {
45640 + unsigned char fn = (unsigned char)*n;
45641 +
45642 + if (c == '\0')
45643 + return 1;
45644 + else {
45645 + if (c == fn)
45646 + goto matched;
45647 + cold = c;
45648 + c = *p++;
45649 +
45650 + if (c == '-' && *p != ']') {
45651 + unsigned char cend = *p++;
45652 +
45653 + if (cend == '\0')
45654 + return 1;
45655 +
45656 + if (cold <= fn && fn <= cend)
45657 + goto matched;
45658 +
45659 + c = *p++;
45660 + }
45661 + }
45662 +
45663 + if (c == ']')
45664 + break;
45665 + }
45666 + if (!not)
45667 + return 1;
45668 + break;
45669 + matched:
45670 + while (c != ']') {
45671 + if (c == '\0')
45672 + return 1;
45673 +
45674 + c = *p++;
45675 + }
45676 + if (not)
45677 + return 1;
45678 + }
45679 + break;
45680 + default:
45681 + if (c != *n)
45682 + return 1;
45683 + }
45684 +
45685 + ++n;
45686 + }
45687 +
45688 + if (*n == '\0')
45689 + return 0;
45690 +
45691 + if (*n == '/')
45692 + return 0;
45693 +
45694 + return 1;
45695 +}
45696 +
45697 +static struct acl_object_label *
45698 +chk_glob_label(struct acl_object_label *globbed,
45699 + struct dentry *dentry, struct vfsmount *mnt, char **path)
45700 +{
45701 + struct acl_object_label *tmp;
45702 +
45703 + if (*path == NULL)
45704 + *path = gr_to_filename_nolock(dentry, mnt);
45705 +
45706 + tmp = globbed;
45707 +
45708 + while (tmp) {
45709 + if (!glob_match(tmp->filename, *path))
45710 + return tmp;
45711 + tmp = tmp->next;
45712 + }
45713 +
45714 + return NULL;
45715 +}
45716 +
45717 +static struct acl_object_label *
45718 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45719 + const ino_t curr_ino, const dev_t curr_dev,
45720 + const struct acl_subject_label *subj, char **path, const int checkglob)
45721 +{
45722 + struct acl_subject_label *tmpsubj;
45723 + struct acl_object_label *retval;
45724 + struct acl_object_label *retval2;
45725 +
45726 + tmpsubj = (struct acl_subject_label *) subj;
45727 + read_lock(&gr_inode_lock);
45728 + do {
45729 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45730 + if (retval) {
45731 + if (checkglob && retval->globbed) {
45732 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45733 + (struct vfsmount *)orig_mnt, path);
45734 + if (retval2)
45735 + retval = retval2;
45736 + }
45737 + break;
45738 + }
45739 + } while ((tmpsubj = tmpsubj->parent_subject));
45740 + read_unlock(&gr_inode_lock);
45741 +
45742 + return retval;
45743 +}
45744 +
45745 +static __inline__ struct acl_object_label *
45746 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45747 + const struct dentry *curr_dentry,
45748 + const struct acl_subject_label *subj, char **path, const int checkglob)
45749 +{
45750 + int newglob = checkglob;
45751 +
45752 +	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
45753 +	   as we don't want a / * rule to match instead of the / object itself.
45754 +	   Don't do this for create lookups that call this function, though, since they look up
45755 +	   on the parent and thus need globbing checks on all paths
45756 +	 */
45757 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45758 + newglob = GR_NO_GLOB;
45759 +
45760 + return __full_lookup(orig_dentry, orig_mnt,
45761 + curr_dentry->d_inode->i_ino,
45762 + __get_dev(curr_dentry), subj, path, newglob);
45763 +}
45764 +
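+/* __chk_obj_label() resolves the most specific object label for a path by
+   walking the dentry/vfsmount chain upwards, crossing mountpoints, until it
+   either finds a labelled ancestor or reaches the real root recorded at
+   policy load time.  Shared-memory, pipe and socket inodes (and private
+   inodes) short-circuit to the fake rw/rwx objects instead, since they
+   never appear in the on-disk policy. */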
45765 +static struct acl_object_label *
45766 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45767 + const struct acl_subject_label *subj, char *path, const int checkglob)
45768 +{
45769 + struct dentry *dentry = (struct dentry *) l_dentry;
45770 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45771 + struct acl_object_label *retval;
45772 +
45773 + spin_lock(&dcache_lock);
45774 + spin_lock(&vfsmount_lock);
45775 +
45776 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45777 +#ifdef CONFIG_NET
45778 + mnt == sock_mnt ||
45779 +#endif
45780 +#ifdef CONFIG_HUGETLBFS
45781 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45782 +#endif
45783 + /* ignore Eric Biederman */
45784 + IS_PRIVATE(l_dentry->d_inode))) {
45785 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45786 + goto out;
45787 + }
45788 +
45789 + for (;;) {
45790 + if (dentry == real_root && mnt == real_root_mnt)
45791 + break;
45792 +
45793 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45794 + if (mnt->mnt_parent == mnt)
45795 + break;
45796 +
45797 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45798 + if (retval != NULL)
45799 + goto out;
45800 +
45801 + dentry = mnt->mnt_mountpoint;
45802 + mnt = mnt->mnt_parent;
45803 + continue;
45804 + }
45805 +
45806 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45807 + if (retval != NULL)
45808 + goto out;
45809 +
45810 + dentry = dentry->d_parent;
45811 + }
45812 +
45813 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45814 +
45815 + if (retval == NULL)
45816 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
45817 +out:
45818 + spin_unlock(&vfsmount_lock);
45819 + spin_unlock(&dcache_lock);
45820 +
45821 + BUG_ON(retval == NULL);
45822 +
45823 + return retval;
45824 +}
45825 +
45826 +static __inline__ struct acl_object_label *
45827 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45828 + const struct acl_subject_label *subj)
45829 +{
45830 + char *path = NULL;
45831 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45832 +}
45833 +
45834 +static __inline__ struct acl_object_label *
45835 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45836 + const struct acl_subject_label *subj)
45837 +{
45838 + char *path = NULL;
45839 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
45840 +}
45841 +
45842 +static __inline__ struct acl_object_label *
45843 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45844 + const struct acl_subject_label *subj, char *path)
45845 +{
45846 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
45847 +}
45848 +
45849 +static struct acl_subject_label *
45850 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45851 + const struct acl_role_label *role)
45852 +{
45853 + struct dentry *dentry = (struct dentry *) l_dentry;
45854 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45855 + struct acl_subject_label *retval;
45856 +
45857 + spin_lock(&dcache_lock);
45858 + spin_lock(&vfsmount_lock);
45859 +
45860 + for (;;) {
45861 + if (dentry == real_root && mnt == real_root_mnt)
45862 + break;
45863 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45864 + if (mnt->mnt_parent == mnt)
45865 + break;
45866 +
45867 + read_lock(&gr_inode_lock);
45868 + retval =
45869 + lookup_acl_subj_label(dentry->d_inode->i_ino,
45870 + __get_dev(dentry), role);
45871 + read_unlock(&gr_inode_lock);
45872 + if (retval != NULL)
45873 + goto out;
45874 +
45875 + dentry = mnt->mnt_mountpoint;
45876 + mnt = mnt->mnt_parent;
45877 + continue;
45878 + }
45879 +
45880 + read_lock(&gr_inode_lock);
45881 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45882 + __get_dev(dentry), role);
45883 + read_unlock(&gr_inode_lock);
45884 + if (retval != NULL)
45885 + goto out;
45886 +
45887 + dentry = dentry->d_parent;
45888 + }
45889 +
45890 + read_lock(&gr_inode_lock);
45891 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45892 + __get_dev(dentry), role);
45893 + read_unlock(&gr_inode_lock);
45894 +
45895 + if (unlikely(retval == NULL)) {
45896 + read_lock(&gr_inode_lock);
45897 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
45898 + __get_dev(real_root), role);
45899 + read_unlock(&gr_inode_lock);
45900 + }
45901 +out:
45902 + spin_unlock(&vfsmount_lock);
45903 + spin_unlock(&dcache_lock);
45904 +
45905 + BUG_ON(retval == NULL);
45906 +
45907 + return retval;
45908 +}
45909 +
45910 +static void
45911 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
45912 +{
45913 + struct task_struct *task = current;
45914 + const struct cred *cred = current_cred();
45915 +
45916 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45917 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45918 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45919 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
45920 +
45921 + return;
45922 +}
45923 +
45924 +static void
45925 +gr_log_learn_sysctl(const char *path, const __u32 mode)
45926 +{
45927 + struct task_struct *task = current;
45928 + const struct cred *cred = current_cred();
45929 +
45930 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45931 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45932 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45933 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
45934 +
45935 + return;
45936 +}
45937 +
45938 +static void
45939 +gr_log_learn_id_change(const char type, const unsigned int real,
45940 + const unsigned int effective, const unsigned int fs)
45941 +{
45942 + struct task_struct *task = current;
45943 + const struct cred *cred = current_cred();
45944 +
45945 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
45946 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45947 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45948 + type, real, effective, fs, &task->signal->saved_ip);
45949 +
45950 + return;
45951 +}
45952 +
45953 +__u32
45954 +gr_check_link(const struct dentry * new_dentry,
45955 + const struct dentry * parent_dentry,
45956 + const struct vfsmount * parent_mnt,
45957 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
45958 +{
45959 + struct acl_object_label *obj;
45960 + __u32 oldmode, newmode;
45961 + __u32 needmode;
45962 +
45963 + if (unlikely(!(gr_status & GR_READY)))
45964 + return (GR_CREATE | GR_LINK);
45965 +
45966 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
45967 + oldmode = obj->mode;
45968 +
45969 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45970 + oldmode |= (GR_CREATE | GR_LINK);
45971 +
45972 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
45973 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45974 + needmode |= GR_SETID | GR_AUDIT_SETID;
45975 +
45976 + newmode =
45977 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45978 + oldmode | needmode);
45979 +
45980 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
45981 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
45982 + GR_INHERIT | GR_AUDIT_INHERIT);
45983 +
45984 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
45985 + goto bad;
45986 +
45987 + if ((oldmode & needmode) != needmode)
45988 + goto bad;
45989 +
45990 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
45991 + if ((newmode & needmode) != needmode)
45992 + goto bad;
45993 +
45994 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
45995 + return newmode;
45996 +bad:
45997 + needmode = oldmode;
45998 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45999 + needmode |= GR_SETID;
46000 +
46001 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46002 + gr_log_learn(old_dentry, old_mnt, needmode);
46003 + return (GR_CREATE | GR_LINK);
46004 + } else if (newmode & GR_SUPPRESS)
46005 + return GR_SUPPRESS;
46006 + else
46007 + return 0;
46008 +}
46009 +
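+/* gr_search_file() returns the subset of the requested mode bits that the
+   current subject's object label actually grants.  Two side effects worth
+   noting: writing to an object marked GR_INIT_TRANSFER from a persistent
+   role hands the current role over to init, and a subject in (inherit)learn
+   mode is granted the access outright while the request is logged through
+   gr_log_learn() so the learning tools can later emit a matching rule. */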
46010 +__u32
46011 +gr_search_file(const struct dentry * dentry, const __u32 mode,
46012 + const struct vfsmount * mnt)
46013 +{
46014 + __u32 retval = mode;
46015 + struct acl_subject_label *curracl;
46016 + struct acl_object_label *currobj;
46017 +
46018 + if (unlikely(!(gr_status & GR_READY)))
46019 + return (mode & ~GR_AUDITS);
46020 +
46021 + curracl = current->acl;
46022 +
46023 + currobj = chk_obj_label(dentry, mnt, curracl);
46024 + retval = currobj->mode & mode;
46025 +
46026 + /* if we're opening a specified transfer file for writing
46027 + (e.g. /dev/initctl), then transfer our role to init
46028 + */
46029 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46030 + current->role->roletype & GR_ROLE_PERSIST)) {
46031 + struct task_struct *task = init_pid_ns.child_reaper;
46032 +
46033 + if (task->role != current->role) {
46034 + task->acl_sp_role = 0;
46035 + task->acl_role_id = current->acl_role_id;
46036 + task->role = current->role;
46037 + rcu_read_lock();
46038 + read_lock(&grsec_exec_file_lock);
46039 + gr_apply_subject_to_task(task);
46040 + read_unlock(&grsec_exec_file_lock);
46041 + rcu_read_unlock();
46042 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46043 + }
46044 + }
46045 +
46046 + if (unlikely
46047 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46048 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46049 + __u32 new_mode = mode;
46050 +
46051 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46052 +
46053 + retval = new_mode;
46054 +
46055 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46056 + new_mode |= GR_INHERIT;
46057 +
46058 + if (!(mode & GR_NOLEARN))
46059 + gr_log_learn(dentry, mnt, new_mode);
46060 + }
46061 +
46062 + return retval;
46063 +}
46064 +
46065 +__u32
46066 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46067 + const struct vfsmount * mnt, const __u32 mode)
46068 +{
46069 + struct name_entry *match;
46070 + struct acl_object_label *matchpo;
46071 + struct acl_subject_label *curracl;
46072 + char *path;
46073 + __u32 retval;
46074 +
46075 + if (unlikely(!(gr_status & GR_READY)))
46076 + return (mode & ~GR_AUDITS);
46077 +
46078 + preempt_disable();
46079 + path = gr_to_filename_rbac(new_dentry, mnt);
46080 + match = lookup_name_entry_create(path);
46081 +
46082 + if (!match)
46083 + goto check_parent;
46084 +
46085 + curracl = current->acl;
46086 +
46087 + read_lock(&gr_inode_lock);
46088 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46089 + read_unlock(&gr_inode_lock);
46090 +
46091 + if (matchpo) {
46092 + if ((matchpo->mode & mode) !=
46093 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
46094 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46095 + __u32 new_mode = mode;
46096 +
46097 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46098 +
46099 + gr_log_learn(new_dentry, mnt, new_mode);
46100 +
46101 + preempt_enable();
46102 + return new_mode;
46103 + }
46104 + preempt_enable();
46105 + return (matchpo->mode & mode);
46106 + }
46107 +
46108 + check_parent:
46109 + curracl = current->acl;
46110 +
46111 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46112 + retval = matchpo->mode & mode;
46113 +
46114 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46115 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46116 + __u32 new_mode = mode;
46117 +
46118 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46119 +
46120 + gr_log_learn(new_dentry, mnt, new_mode);
46121 + preempt_enable();
46122 + return new_mode;
46123 + }
46124 +
46125 + preempt_enable();
46126 + return retval;
46127 +}
46128 +
46129 +int
46130 +gr_check_hidden_task(const struct task_struct *task)
46131 +{
46132 + if (unlikely(!(gr_status & GR_READY)))
46133 + return 0;
46134 +
46135 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46136 + return 1;
46137 +
46138 + return 0;
46139 +}
46140 +
46141 +int
46142 +gr_check_protected_task(const struct task_struct *task)
46143 +{
46144 + if (unlikely(!(gr_status & GR_READY) || !task))
46145 + return 0;
46146 +
46147 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46148 + task->acl != current->acl)
46149 + return 1;
46150 +
46151 + return 0;
46152 +}
46153 +
46154 +int
46155 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46156 +{
46157 + struct task_struct *p;
46158 + int ret = 0;
46159 +
46160 + if (unlikely(!(gr_status & GR_READY) || !pid))
46161 + return ret;
46162 +
46163 + read_lock(&tasklist_lock);
46164 + do_each_pid_task(pid, type, p) {
46165 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46166 + p->acl != current->acl) {
46167 + ret = 1;
46168 + goto out;
46169 + }
46170 + } while_each_pid_task(pid, type, p);
46171 +out:
46172 + read_unlock(&tasklist_lock);
46173 +
46174 + return ret;
46175 +}
46176 +
46177 +void
46178 +gr_copy_label(struct task_struct *tsk)
46179 +{
46180 + tsk->signal->used_accept = 0;
46181 + tsk->acl_sp_role = 0;
46182 + tsk->acl_role_id = current->acl_role_id;
46183 + tsk->acl = current->acl;
46184 + tsk->role = current->role;
46185 + tsk->signal->curr_ip = current->signal->curr_ip;
46186 + tsk->signal->saved_ip = current->signal->saved_ip;
46187 + if (current->exec_file)
46188 + get_file(current->exec_file);
46189 + tsk->exec_file = current->exec_file;
46190 + tsk->is_writable = current->is_writable;
46191 + if (unlikely(current->signal->used_accept)) {
46192 + current->signal->curr_ip = 0;
46193 + current->signal->saved_ip = 0;
46194 + }
46195 +
46196 + return;
46197 +}
46198 +
46199 +static void
46200 +gr_set_proc_res(struct task_struct *task)
46201 +{
46202 + struct acl_subject_label *proc;
46203 + unsigned short i;
46204 +
46205 + proc = task->acl;
46206 +
46207 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46208 + return;
46209 +
46210 + for (i = 0; i < RLIM_NLIMITS; i++) {
46211 + if (!(proc->resmask & (1 << i)))
46212 + continue;
46213 +
46214 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46215 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46216 + }
46217 +
46218 + return;
46219 +}
46220 +
46221 +extern int __gr_process_user_ban(struct user_struct *user);
46222 +
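+/* gr_check_user_change()/gr_check_group_change() enforce the per-subject
+   user/group transition lists: with GR_ID_ALLOW the real, effective and fs
+   IDs being switched to must all appear in the list (an ID of -1 means
+   "unchanged" and is always accepted), while with GR_ID_DENY the change is
+   allowed only if none of them appear.  A user-ban check (kernel-exploit
+   lockout / brute-force response) runs first when those features are built in. */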
46223 +int
46224 +gr_check_user_change(int real, int effective, int fs)
46225 +{
46226 + unsigned int i;
46227 + __u16 num;
46228 + uid_t *uidlist;
46229 + int curuid;
46230 + int realok = 0;
46231 + int effectiveok = 0;
46232 + int fsok = 0;
46233 +
46234 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46235 + struct user_struct *user;
46236 +
46237 + if (real == -1)
46238 + goto skipit;
46239 +
46240 + user = find_user(real);
46241 + if (user == NULL)
46242 + goto skipit;
46243 +
46244 + if (__gr_process_user_ban(user)) {
46245 + /* for find_user */
46246 + free_uid(user);
46247 + return 1;
46248 + }
46249 +
46250 + /* for find_user */
46251 + free_uid(user);
46252 +
46253 +skipit:
46254 +#endif
46255 +
46256 + if (unlikely(!(gr_status & GR_READY)))
46257 + return 0;
46258 +
46259 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46260 + gr_log_learn_id_change('u', real, effective, fs);
46261 +
46262 + num = current->acl->user_trans_num;
46263 + uidlist = current->acl->user_transitions;
46264 +
46265 + if (uidlist == NULL)
46266 + return 0;
46267 +
46268 + if (real == -1)
46269 + realok = 1;
46270 + if (effective == -1)
46271 + effectiveok = 1;
46272 + if (fs == -1)
46273 + fsok = 1;
46274 +
46275 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
46276 + for (i = 0; i < num; i++) {
46277 + curuid = (int)uidlist[i];
46278 + if (real == curuid)
46279 + realok = 1;
46280 + if (effective == curuid)
46281 + effectiveok = 1;
46282 + if (fs == curuid)
46283 + fsok = 1;
46284 + }
46285 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
46286 + for (i = 0; i < num; i++) {
46287 + curuid = (int)uidlist[i];
46288 + if (real == curuid)
46289 + break;
46290 + if (effective == curuid)
46291 + break;
46292 + if (fs == curuid)
46293 + break;
46294 + }
46295 + /* not in deny list */
46296 + if (i == num) {
46297 + realok = 1;
46298 + effectiveok = 1;
46299 + fsok = 1;
46300 + }
46301 + }
46302 +
46303 + if (realok && effectiveok && fsok)
46304 + return 0;
46305 + else {
46306 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46307 + return 1;
46308 + }
46309 +}
46310 +
46311 +int
46312 +gr_check_group_change(int real, int effective, int fs)
46313 +{
46314 + unsigned int i;
46315 + __u16 num;
46316 + gid_t *gidlist;
46317 + int curgid;
46318 + int realok = 0;
46319 + int effectiveok = 0;
46320 + int fsok = 0;
46321 +
46322 + if (unlikely(!(gr_status & GR_READY)))
46323 + return 0;
46324 +
46325 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46326 + gr_log_learn_id_change('g', real, effective, fs);
46327 +
46328 + num = current->acl->group_trans_num;
46329 + gidlist = current->acl->group_transitions;
46330 +
46331 + if (gidlist == NULL)
46332 + return 0;
46333 +
46334 + if (real == -1)
46335 + realok = 1;
46336 + if (effective == -1)
46337 + effectiveok = 1;
46338 + if (fs == -1)
46339 + fsok = 1;
46340 +
46341 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46342 + for (i = 0; i < num; i++) {
46343 + curgid = (int)gidlist[i];
46344 + if (real == curgid)
46345 + realok = 1;
46346 + if (effective == curgid)
46347 + effectiveok = 1;
46348 + if (fs == curgid)
46349 + fsok = 1;
46350 + }
46351 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46352 + for (i = 0; i < num; i++) {
46353 + curgid = (int)gidlist[i];
46354 + if (real == curgid)
46355 + break;
46356 + if (effective == curgid)
46357 + break;
46358 + if (fs == curgid)
46359 + break;
46360 + }
46361 + /* not in deny list */
46362 + if (i == num) {
46363 + realok = 1;
46364 + effectiveok = 1;
46365 + fsok = 1;
46366 + }
46367 + }
46368 +
46369 + if (realok && effectiveok && fsok)
46370 + return 0;
46371 + else {
46372 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46373 + return 1;
46374 + }
46375 +}
46376 +
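Note: gr_check_user_change (above) and gr_check_group_change share one pattern: the subject's transition array is either an allow-list (GR_ID_ALLOW: every requested id must appear in it) or a deny-list (GR_ID_DENY: if any requested id appears, the whole change is rejected), and an id of -1 means "unchanged" and always passes. A minimal standalone sketch of that decision in plain userspace C (the function and constant names here are illustrative, not part of the patch):

    #include <stdio.h>

    enum { ID_ALLOW = 1, ID_DENY = 2 };

    /* Returns 0 if the (real, effective, fs) change is permitted against
     * the given id list, 1 if it must be rejected.  -1 means "unchanged". */
    static int check_id_transition(int type, const int *list, int n,
                                   int real, int effective, int fs)
    {
        int realok = (real == -1), effok = (effective == -1), fsok = (fs == -1);
        int i;

        if (type == ID_ALLOW) {
            for (i = 0; i < n; i++) {        /* every id must be listed */
                if (real == list[i]) realok = 1;
                if (effective == list[i]) effok = 1;
                if (fs == list[i]) fsok = 1;
            }
        } else if (type == ID_DENY) {
            for (i = 0; i < n; i++)          /* any listed id rejects the change */
                if (real == list[i] || effective == list[i] || fs == list[i])
                    break;
            if (i == n)                      /* nothing matched the deny list */
                realok = effok = fsok = 1;
        }
        return (realok && effok && fsok) ? 0 : 1;
    }

    int main(void)
    {
        int allowed[] = { 1000, 1001 };
        /* setuid to 1002 against an allow-list of {1000, 1001}: prints 1 (rejected) */
        printf("%d\n", check_id_transition(ID_ALLOW, allowed, 2, 1002, -1, -1));
        return 0;
    }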
46377 +void
46378 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46379 +{
46380 + struct acl_role_label *role = task->role;
46381 + struct acl_subject_label *subj = NULL;
46382 + struct acl_object_label *obj;
46383 + struct file *filp;
46384 +
46385 + if (unlikely(!(gr_status & GR_READY)))
46386 + return;
46387 +
46388 + filp = task->exec_file;
46389 +
46390 + /* kernel process, we'll give them the kernel role */
46391 + if (unlikely(!filp)) {
46392 + task->role = kernel_role;
46393 + task->acl = kernel_role->root_label;
46394 + return;
46395 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46396 + role = lookup_acl_role_label(task, uid, gid);
46397 +
46398 + /* perform subject lookup in possibly new role
46399 + we can use this result below in the case where role == task->role
46400 + */
46401 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46402 +
46403 + /* if we changed uid/gid but ended up in the same role
46404 + and are using inheritance, don't lose the inherited subject:
46405 + if the current subject differs from what a normal lookup
46406 + would return, we arrived at it via inheritance, so keep
46407 + that subject
46408 + */
46409 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46410 + (subj == task->acl)))
46411 + task->acl = subj;
46412 +
46413 + task->role = role;
46414 +
46415 + task->is_writable = 0;
46416 +
46417 + /* ignore additional mmap checks for processes that are writable
46418 + by the default ACL */
46419 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46420 + if (unlikely(obj->mode & GR_WRITE))
46421 + task->is_writable = 1;
46422 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46423 + if (unlikely(obj->mode & GR_WRITE))
46424 + task->is_writable = 1;
46425 +
46426 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46427 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46428 +#endif
46429 +
46430 + gr_set_proc_res(task);
46431 +
46432 + return;
46433 +}
46434 +
46435 +int
46436 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46437 + const int unsafe_share)
46438 +{
46439 + struct task_struct *task = current;
46440 + struct acl_subject_label *newacl;
46441 + struct acl_object_label *obj;
46442 + __u32 retmode;
46443 +
46444 + if (unlikely(!(gr_status & GR_READY)))
46445 + return 0;
46446 +
46447 + newacl = chk_subj_label(dentry, mnt, task->role);
46448 +
46449 + task_lock(task);
46450 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46451 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46452 + !(task->role->roletype & GR_ROLE_GOD) &&
46453 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46454 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46455 + task_unlock(task);
46456 + if (unsafe_share)
46457 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46458 + else
46459 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46460 + return -EACCES;
46461 + }
46462 + task_unlock(task);
46463 +
46464 + obj = chk_obj_label(dentry, mnt, task->acl);
46465 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46466 +
46467 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46468 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46469 + if (obj->nested)
46470 + task->acl = obj->nested;
46471 + else
46472 + task->acl = newacl;
46473 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46474 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46475 +
46476 + task->is_writable = 0;
46477 +
46478 + /* ignore additional mmap checks for processes that are writable
46479 + by the default ACL */
46480 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46481 + if (unlikely(obj->mode & GR_WRITE))
46482 + task->is_writable = 1;
46483 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46484 + if (unlikely(obj->mode & GR_WRITE))
46485 + task->is_writable = 1;
46486 +
46487 + gr_set_proc_res(task);
46488 +
46489 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46490 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46491 +#endif
46492 + return 0;
46493 +}
46494 +
46495 +/* always called with valid inodev ptr */
46496 +static void
46497 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46498 +{
46499 + struct acl_object_label *matchpo;
46500 + struct acl_subject_label *matchps;
46501 + struct acl_subject_label *subj;
46502 + struct acl_role_label *role;
46503 + unsigned int x;
46504 +
46505 + FOR_EACH_ROLE_START(role)
46506 + FOR_EACH_SUBJECT_START(role, subj, x)
46507 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46508 + matchpo->mode |= GR_DELETED;
46509 + FOR_EACH_SUBJECT_END(subj,x)
46510 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46511 + if (subj->inode == ino && subj->device == dev)
46512 + subj->mode |= GR_DELETED;
46513 + FOR_EACH_NESTED_SUBJECT_END(subj)
46514 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46515 + matchps->mode |= GR_DELETED;
46516 + FOR_EACH_ROLE_END(role)
46517 +
46518 + inodev->nentry->deleted = 1;
46519 +
46520 + return;
46521 +}
46522 +
46523 +void
46524 +gr_handle_delete(const ino_t ino, const dev_t dev)
46525 +{
46526 + struct inodev_entry *inodev;
46527 +
46528 + if (unlikely(!(gr_status & GR_READY)))
46529 + return;
46530 +
46531 + write_lock(&gr_inode_lock);
46532 + inodev = lookup_inodev_entry(ino, dev);
46533 + if (inodev != NULL)
46534 + do_handle_delete(inodev, ino, dev);
46535 + write_unlock(&gr_inode_lock);
46536 +
46537 + return;
46538 +}
46539 +
46540 +static void
46541 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46542 + const ino_t newinode, const dev_t newdevice,
46543 + struct acl_subject_label *subj)
46544 +{
46545 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46546 + struct acl_object_label *match;
46547 +
46548 + match = subj->obj_hash[index];
46549 +
46550 + while (match && (match->inode != oldinode ||
46551 + match->device != olddevice ||
46552 + !(match->mode & GR_DELETED)))
46553 + match = match->next;
46554 +
46555 + if (match && (match->inode == oldinode)
46556 + && (match->device == olddevice)
46557 + && (match->mode & GR_DELETED)) {
46558 + if (match->prev == NULL) {
46559 + subj->obj_hash[index] = match->next;
46560 + if (match->next != NULL)
46561 + match->next->prev = NULL;
46562 + } else {
46563 + match->prev->next = match->next;
46564 + if (match->next != NULL)
46565 + match->next->prev = match->prev;
46566 + }
46567 + match->prev = NULL;
46568 + match->next = NULL;
46569 + match->inode = newinode;
46570 + match->device = newdevice;
46571 + match->mode &= ~GR_DELETED;
46572 +
46573 + insert_acl_obj_label(match, subj);
46574 + }
46575 +
46576 + return;
46577 +}
46578 +
46579 +static void
46580 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46581 + const ino_t newinode, const dev_t newdevice,
46582 + struct acl_role_label *role)
46583 +{
46584 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46585 + struct acl_subject_label *match;
46586 +
46587 + match = role->subj_hash[index];
46588 +
46589 + while (match && (match->inode != oldinode ||
46590 + match->device != olddevice ||
46591 + !(match->mode & GR_DELETED)))
46592 + match = match->next;
46593 +
46594 + if (match && (match->inode == oldinode)
46595 + && (match->device == olddevice)
46596 + && (match->mode & GR_DELETED)) {
46597 + if (match->prev == NULL) {
46598 + role->subj_hash[index] = match->next;
46599 + if (match->next != NULL)
46600 + match->next->prev = NULL;
46601 + } else {
46602 + match->prev->next = match->next;
46603 + if (match->next != NULL)
46604 + match->next->prev = match->prev;
46605 + }
46606 + match->prev = NULL;
46607 + match->next = NULL;
46608 + match->inode = newinode;
46609 + match->device = newdevice;
46610 + match->mode &= ~GR_DELETED;
46611 +
46612 + insert_acl_subj_label(match, role);
46613 + }
46614 +
46615 + return;
46616 +}
46617 +
46618 +static void
46619 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46620 + const ino_t newinode, const dev_t newdevice)
46621 +{
46622 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46623 + struct inodev_entry *match;
46624 +
46625 + match = inodev_set.i_hash[index];
46626 +
46627 + while (match && (match->nentry->inode != oldinode ||
46628 + match->nentry->device != olddevice || !match->nentry->deleted))
46629 + match = match->next;
46630 +
46631 + if (match && (match->nentry->inode == oldinode)
46632 + && (match->nentry->device == olddevice) &&
46633 + match->nentry->deleted) {
46634 + if (match->prev == NULL) {
46635 + inodev_set.i_hash[index] = match->next;
46636 + if (match->next != NULL)
46637 + match->next->prev = NULL;
46638 + } else {
46639 + match->prev->next = match->next;
46640 + if (match->next != NULL)
46641 + match->next->prev = match->prev;
46642 + }
46643 + match->prev = NULL;
46644 + match->next = NULL;
46645 + match->nentry->inode = newinode;
46646 + match->nentry->device = newdevice;
46647 + match->nentry->deleted = 0;
46648 +
46649 + insert_inodev_entry(match);
46650 + }
46651 +
46652 + return;
46653 +}
46654 +
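Note: update_acl_obj_label, update_acl_subj_label and update_inodev_entry above all perform the same operation on different hash tables: find a deleted entry keyed by the old (inode, device), unlink it from its doubly linked bucket, rewrite its key to the new (inode, device), clear the deleted flag, and re-insert it. A simplified userspace model of that unlink-and-reinsert step (the node type, hash, and insert_node below are hypothetical stand-ins, not the kernel structures):

    #include <stdio.h>
    #include <stddef.h>

    struct node {
        unsigned long inode, device;
        int deleted;
        struct node *prev, *next;
    };

    /* push onto the head of the bucket for its current key (stand-in for
     * insert_acl_obj_label()/insert_inodev_entry() in the patch) */
    static void insert_node(struct node **table, unsigned int size, struct node *n)
    {
        unsigned int idx = (unsigned int)((n->inode ^ n->device) % size);
        n->prev = NULL;
        n->next = table[idx];
        if (table[idx])
            table[idx]->prev = n;
        table[idx] = n;
    }

    static void update_entry(struct node **table, unsigned int size,
                             unsigned long oldino, unsigned long olddev,
                             unsigned long newino, unsigned long newdev)
    {
        unsigned int idx = (unsigned int)((oldino ^ olddev) % size);
        struct node *m = table[idx];

        while (m && (m->inode != oldino || m->device != olddev || !m->deleted))
            m = m->next;
        if (!m)
            return;

        /* unlink from the old bucket */
        if (m->prev == NULL)
            table[idx] = m->next;
        else
            m->prev->next = m->next;
        if (m->next)
            m->next->prev = m->prev;

        /* rewrite the key, mark it live again, and re-hash it */
        m->inode = newino;
        m->device = newdev;
        m->deleted = 0;
        insert_node(table, size, m);
    }

    int main(void)
    {
        struct node *table[8] = { NULL };
        struct node n = { 42, 1, 1, NULL, NULL };
        insert_node(table, 8, &n);
        update_entry(table, 8, 42, 1, 99, 1);   /* n is now keyed by (99, 1) */
        printf("new bucket head inode = %lu\n", table[(99 ^ 1) % 8]->inode);
        return 0;
    }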
46655 +static void
46656 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46657 + const struct vfsmount *mnt)
46658 +{
46659 + struct acl_subject_label *subj;
46660 + struct acl_role_label *role;
46661 + unsigned int x;
46662 + ino_t inode = dentry->d_inode->i_ino;
46663 + dev_t dev = __get_dev(dentry);
46664 +
46665 + FOR_EACH_ROLE_START(role)
46666 + update_acl_subj_label(matchn->inode, matchn->device,
46667 + inode, dev, role);
46668 +
46669 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46670 + if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
46671 + subj->inode = inode;
46672 + subj->device = dev;
46673 + }
46674 + FOR_EACH_NESTED_SUBJECT_END(subj)
46675 + FOR_EACH_SUBJECT_START(role, subj, x)
46676 + update_acl_obj_label(matchn->inode, matchn->device,
46677 + inode, dev, subj);
46678 + FOR_EACH_SUBJECT_END(subj,x)
46679 + FOR_EACH_ROLE_END(role)
46680 +
46681 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
46682 +
46683 + return;
46684 +}
46685 +
46686 +void
46687 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46688 +{
46689 + struct name_entry *matchn;
46690 +
46691 + if (unlikely(!(gr_status & GR_READY)))
46692 + return;
46693 +
46694 + preempt_disable();
46695 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46696 +
46697 + if (unlikely((unsigned long)matchn)) {
46698 + write_lock(&gr_inode_lock);
46699 + do_handle_create(matchn, dentry, mnt);
46700 + write_unlock(&gr_inode_lock);
46701 + }
46702 + preempt_enable();
46703 +
46704 + return;
46705 +}
46706 +
46707 +void
46708 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46709 + struct dentry *old_dentry,
46710 + struct dentry *new_dentry,
46711 + struct vfsmount *mnt, const __u8 replace)
46712 +{
46713 + struct name_entry *matchn;
46714 + struct inodev_entry *inodev;
46715 + ino_t oldinode = old_dentry->d_inode->i_ino;
46716 + dev_t olddev = __get_dev(old_dentry);
46717 +
46718 + /* vfs_rename swaps the name and parent link for old_dentry and
46719 + new_dentry.
46720 + At this point, old_dentry has the new name, parent link, and inode
46721 + for the renamed file.
46722 + If a file is being replaced by the rename, new_dentry has the inode
46723 + and name for the replaced file.
46724 + */
46725 +
46726 + if (unlikely(!(gr_status & GR_READY)))
46727 + return;
46728 +
46729 + preempt_disable();
46730 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46731 +
46732 + /* we wouldn't have to check d_inode if it weren't for
46733 + NFS silly-renaming
46734 + */
46735 +
46736 + write_lock(&gr_inode_lock);
46737 + if (unlikely(replace && new_dentry->d_inode)) {
46738 + ino_t newinode = new_dentry->d_inode->i_ino;
46739 + dev_t newdev = __get_dev(new_dentry);
46740 + inodev = lookup_inodev_entry(newinode, newdev);
46741 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46742 + do_handle_delete(inodev, newinode, newdev);
46743 + }
46744 +
46745 + inodev = lookup_inodev_entry(oldinode, olddev);
46746 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46747 + do_handle_delete(inodev, oldinode, olddev);
46748 +
46749 + if (unlikely((unsigned long)matchn))
46750 + do_handle_create(matchn, old_dentry, mnt);
46751 +
46752 + write_unlock(&gr_inode_lock);
46753 + preempt_enable();
46754 +
46755 + return;
46756 +}
46757 +
46758 +static int
46759 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46760 + unsigned char **sum)
46761 +{
46762 + struct acl_role_label *r;
46763 + struct role_allowed_ip *ipp;
46764 + struct role_transition *trans;
46765 + unsigned int i;
46766 + int found = 0;
46767 + u32 curr_ip = current->signal->curr_ip;
46768 +
46769 + current->signal->saved_ip = curr_ip;
46770 +
46771 + /* check transition table */
46772 +
46773 + for (trans = current->role->transitions; trans; trans = trans->next) {
46774 + if (!strcmp(rolename, trans->rolename)) {
46775 + found = 1;
46776 + break;
46777 + }
46778 + }
46779 +
46780 + if (!found)
46781 + return 0;
46782 +
46783 + /* handle special roles that do not require authentication
46784 + and check ip */
46785 +
46786 + FOR_EACH_ROLE_START(r)
46787 + if (!strcmp(rolename, r->rolename) &&
46788 + (r->roletype & GR_ROLE_SPECIAL)) {
46789 + found = 0;
46790 + if (r->allowed_ips != NULL) {
46791 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46792 + if ((ntohl(curr_ip) & ipp->netmask) ==
46793 + (ntohl(ipp->addr) & ipp->netmask))
46794 + found = 1;
46795 + }
46796 + } else
46797 + found = 2;
46798 + if (!found)
46799 + return 0;
46800 +
46801 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46802 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46803 + *salt = NULL;
46804 + *sum = NULL;
46805 + return 1;
46806 + }
46807 + }
46808 + FOR_EACH_ROLE_END(r)
46809 +
46810 + for (i = 0; i < num_sprole_pws; i++) {
46811 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46812 + *salt = acl_special_roles[i]->salt;
46813 + *sum = acl_special_roles[i]->sum;
46814 + return 1;
46815 + }
46816 + }
46817 +
46818 + return 0;
46819 +}
46820 +
46821 +static void
46822 +assign_special_role(char *rolename)
46823 +{
46824 + struct acl_object_label *obj;
46825 + struct acl_role_label *r;
46826 + struct acl_role_label *assigned = NULL;
46827 + struct task_struct *tsk;
46828 + struct file *filp;
46829 +
46830 + FOR_EACH_ROLE_START(r)
46831 + if (!strcmp(rolename, r->rolename) &&
46832 + (r->roletype & GR_ROLE_SPECIAL)) {
46833 + assigned = r;
46834 + break;
46835 + }
46836 + FOR_EACH_ROLE_END(r)
46837 +
46838 + if (!assigned)
46839 + return;
46840 +
46841 + read_lock(&tasklist_lock);
46842 + read_lock(&grsec_exec_file_lock);
46843 +
46844 + tsk = current->real_parent;
46845 + if (tsk == NULL)
46846 + goto out_unlock;
46847 +
46848 + filp = tsk->exec_file;
46849 + if (filp == NULL)
46850 + goto out_unlock;
46851 +
46852 + tsk->is_writable = 0;
46853 +
46854 + tsk->acl_sp_role = 1;
46855 + tsk->acl_role_id = ++acl_sp_role_value;
46856 + tsk->role = assigned;
46857 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
46858 +
46859 + /* ignore additional mmap checks for processes that are writable
46860 + by the default ACL */
46861 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46862 + if (unlikely(obj->mode & GR_WRITE))
46863 + tsk->is_writable = 1;
46864 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
46865 + if (unlikely(obj->mode & GR_WRITE))
46866 + tsk->is_writable = 1;
46867 +
46868 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46869 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
46870 +#endif
46871 +
46872 +out_unlock:
46873 + read_unlock(&grsec_exec_file_lock);
46874 + read_unlock(&tasklist_lock);
46875 + return;
46876 +}
46877 +
46878 +int gr_check_secure_terminal(struct task_struct *task)
46879 +{
46880 + struct task_struct *p, *p2, *p3;
46881 + struct files_struct *files;
46882 + struct fdtable *fdt;
46883 + struct file *our_file = NULL, *file;
46884 + int i;
46885 +
46886 + if (task->signal->tty == NULL)
46887 + return 1;
46888 +
46889 + files = get_files_struct(task);
46890 + if (files != NULL) {
46891 + rcu_read_lock();
46892 + fdt = files_fdtable(files);
46893 + for (i=0; i < fdt->max_fds; i++) {
46894 + file = fcheck_files(files, i);
46895 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
46896 + get_file(file);
46897 + our_file = file;
46898 + }
46899 + }
46900 + rcu_read_unlock();
46901 + put_files_struct(files);
46902 + }
46903 +
46904 + if (our_file == NULL)
46905 + return 1;
46906 +
46907 + read_lock(&tasklist_lock);
46908 + do_each_thread(p2, p) {
46909 + files = get_files_struct(p);
46910 + if (files == NULL ||
46911 + (p->signal && p->signal->tty == task->signal->tty)) {
46912 + if (files != NULL)
46913 + put_files_struct(files);
46914 + continue;
46915 + }
46916 + rcu_read_lock();
46917 + fdt = files_fdtable(files);
46918 + for (i=0; i < fdt->max_fds; i++) {
46919 + file = fcheck_files(files, i);
46920 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
46921 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
46922 + p3 = task;
46923 + while (p3->pid > 0) {
46924 + if (p3 == p)
46925 + break;
46926 + p3 = p3->real_parent;
46927 + }
46928 + if (p3 == p)
46929 + break;
46930 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
46931 + gr_handle_alertkill(p);
46932 + rcu_read_unlock();
46933 + put_files_struct(files);
46934 + read_unlock(&tasklist_lock);
46935 + fput(our_file);
46936 + return 0;
46937 + }
46938 + }
46939 + rcu_read_unlock();
46940 + put_files_struct(files);
46941 + } while_each_thread(p2, p);
46942 + read_unlock(&tasklist_lock);
46943 +
46944 + fput(our_file);
46945 + return 1;
46946 +}
46947 +
46948 +ssize_t
46949 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
46950 +{
46951 + struct gr_arg_wrapper uwrap;
46952 + unsigned char *sprole_salt = NULL;
46953 + unsigned char *sprole_sum = NULL;
46954 + int error = sizeof (struct gr_arg_wrapper);
46955 + int error2 = 0;
46956 +
46957 + mutex_lock(&gr_dev_mutex);
46958 +
46959 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
46960 + error = -EPERM;
46961 + goto out;
46962 + }
46963 +
46964 + if (count != sizeof (struct gr_arg_wrapper)) {
46965 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
46966 + error = -EINVAL;
46967 + goto out;
46968 + }
46969 +
46970 +
46971 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
46972 + gr_auth_expires = 0;
46973 + gr_auth_attempts = 0;
46974 + }
46975 +
46976 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
46977 + error = -EFAULT;
46978 + goto out;
46979 + }
46980 +
46981 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
46982 + error = -EINVAL;
46983 + goto out;
46984 + }
46985 +
46986 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
46987 + error = -EFAULT;
46988 + goto out;
46989 + }
46990 +
46991 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46992 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46993 + time_after(gr_auth_expires, get_seconds())) {
46994 + error = -EBUSY;
46995 + goto out;
46996 + }
46997 +
46998 + /* if non-root trying to do anything other than use a special role,
46999 + do not attempt authentication, do not count towards authentication
47000 + locking
47001 + */
47002 +
47003 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47004 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47005 + current_uid()) {
47006 + error = -EPERM;
47007 + goto out;
47008 + }
47009 +
47010 + /* ensure pw and special role name are null terminated */
47011 +
47012 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47013 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47014 +
47015 + /* Okay.
47016 + * We have enough of the argument structure (we have yet
47017 + * to copy_from_user the tables themselves). Copy the tables
47018 + * only if we need them, i.e. for loading operations. */
47019 +
47020 + switch (gr_usermode->mode) {
47021 + case GR_STATUS:
47022 + if (gr_status & GR_READY) {
47023 + error = 1;
47024 + if (!gr_check_secure_terminal(current))
47025 + error = 3;
47026 + } else
47027 + error = 2;
47028 + goto out;
47029 + case GR_SHUTDOWN:
47030 + if ((gr_status & GR_READY)
47031 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47032 + pax_open_kernel();
47033 + gr_status &= ~GR_READY;
47034 + pax_close_kernel();
47035 +
47036 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47037 + free_variables();
47038 + memset(gr_usermode, 0, sizeof (struct gr_arg));
47039 + memset(gr_system_salt, 0, GR_SALT_LEN);
47040 + memset(gr_system_sum, 0, GR_SHA_LEN);
47041 + } else if (gr_status & GR_READY) {
47042 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47043 + error = -EPERM;
47044 + } else {
47045 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47046 + error = -EAGAIN;
47047 + }
47048 + break;
47049 + case GR_ENABLE:
47050 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47051 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47052 + else {
47053 + if (gr_status & GR_READY)
47054 + error = -EAGAIN;
47055 + else
47056 + error = error2;
47057 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47058 + }
47059 + break;
47060 + case GR_RELOAD:
47061 + if (!(gr_status & GR_READY)) {
47062 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47063 + error = -EAGAIN;
47064 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47065 + lock_kernel();
47066 +
47067 + pax_open_kernel();
47068 + gr_status &= ~GR_READY;
47069 + pax_close_kernel();
47070 +
47071 + free_variables();
47072 + if (!(error2 = gracl_init(gr_usermode))) {
47073 + unlock_kernel();
47074 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47075 + } else {
47076 + unlock_kernel();
47077 + error = error2;
47078 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47079 + }
47080 + } else {
47081 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47082 + error = -EPERM;
47083 + }
47084 + break;
47085 + case GR_SEGVMOD:
47086 + if (unlikely(!(gr_status & GR_READY))) {
47087 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47088 + error = -EAGAIN;
47089 + break;
47090 + }
47091 +
47092 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47093 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47094 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47095 + struct acl_subject_label *segvacl;
47096 + segvacl =
47097 + lookup_acl_subj_label(gr_usermode->segv_inode,
47098 + gr_usermode->segv_device,
47099 + current->role);
47100 + if (segvacl) {
47101 + segvacl->crashes = 0;
47102 + segvacl->expires = 0;
47103 + }
47104 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47105 + gr_remove_uid(gr_usermode->segv_uid);
47106 + }
47107 + } else {
47108 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47109 + error = -EPERM;
47110 + }
47111 + break;
47112 + case GR_SPROLE:
47113 + case GR_SPROLEPAM:
47114 + if (unlikely(!(gr_status & GR_READY))) {
47115 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47116 + error = -EAGAIN;
47117 + break;
47118 + }
47119 +
47120 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47121 + current->role->expires = 0;
47122 + current->role->auth_attempts = 0;
47123 + }
47124 +
47125 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47126 + time_after(current->role->expires, get_seconds())) {
47127 + error = -EBUSY;
47128 + goto out;
47129 + }
47130 +
47131 + if (lookup_special_role_auth
47132 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47133 + && ((!sprole_salt && !sprole_sum)
47134 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47135 + char *p = "";
47136 + assign_special_role(gr_usermode->sp_role);
47137 + read_lock(&tasklist_lock);
47138 + if (current->real_parent)
47139 + p = current->real_parent->role->rolename;
47140 + read_unlock(&tasklist_lock);
47141 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47142 + p, acl_sp_role_value);
47143 + } else {
47144 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47145 + error = -EPERM;
47146 + if(!(current->role->auth_attempts++))
47147 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47148 +
47149 + goto out;
47150 + }
47151 + break;
47152 + case GR_UNSPROLE:
47153 + if (unlikely(!(gr_status & GR_READY))) {
47154 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47155 + error = -EAGAIN;
47156 + break;
47157 + }
47158 +
47159 + if (current->role->roletype & GR_ROLE_SPECIAL) {
47160 + char *p = "";
47161 + int i = 0;
47162 +
47163 + read_lock(&tasklist_lock);
47164 + if (current->real_parent) {
47165 + p = current->real_parent->role->rolename;
47166 + i = current->real_parent->acl_role_id;
47167 + }
47168 + read_unlock(&tasklist_lock);
47169 +
47170 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47171 + gr_set_acls(1);
47172 + } else {
47173 + error = -EPERM;
47174 + goto out;
47175 + }
47176 + break;
47177 + default:
47178 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47179 + error = -EINVAL;
47180 + break;
47181 + }
47182 +
47183 + if (error != -EPERM)
47184 + goto out;
47185 +
47186 + if(!(gr_auth_attempts++))
47187 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47188 +
47189 + out:
47190 + mutex_unlock(&gr_dev_mutex);
47191 + return error;
47192 +}
47193 +
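Note: the failed-authentication handling at the end of write_grsec_handler above implements a simple lockout window: the first failure in a quiet period starts a timer of CONFIG_GRKERNSEC_ACL_TIMEOUT seconds, and once CONFIG_GRKERNSEC_ACL_MAXTRIES failures accumulate, further attempts are refused with -EBUSY until the timer expires, at which point both counters reset. A userspace sketch of the same policy (the constants and function names below are illustrative placeholders):

    #include <stdio.h>
    #include <time.h>

    #define MAXTRIES 3
    #define TIMEOUT  30   /* seconds; stand-in for CONFIG_GRKERNSEC_ACL_TIMEOUT */

    static unsigned long auth_expires;
    static unsigned int auth_attempts;

    /* returns 0 if an attempt may proceed, -1 if locked out */
    static int auth_gate(time_t now)
    {
        /* window elapsed: forget previous failures */
        if (auth_expires && now >= (time_t)auth_expires) {
            auth_expires = 0;
            auth_attempts = 0;
        }
        if (auth_attempts >= MAXTRIES && now < (time_t)auth_expires)
            return -1;
        return 0;
    }

    /* called on a failed attempt; only the first failure starts the window */
    static void auth_failed(time_t now)
    {
        if (!auth_attempts++)
            auth_expires = (unsigned long)now + TIMEOUT;
    }

    int main(void)
    {
        time_t now = time(NULL);
        int i;
        for (i = 0; i < 5; i++) {
            if (auth_gate(now)) { printf("attempt %d: locked out\n", i); continue; }
            printf("attempt %d: allowed (and fails)\n", i);
            auth_failed(now);
        }
        return 0;
    }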
47194 +/* must be called with
47195 + rcu_read_lock();
47196 + read_lock(&tasklist_lock);
47197 + read_lock(&grsec_exec_file_lock);
47198 +*/
47199 +int gr_apply_subject_to_task(struct task_struct *task)
47200 +{
47201 + struct acl_object_label *obj;
47202 + char *tmpname;
47203 + struct acl_subject_label *tmpsubj;
47204 + struct file *filp;
47205 + struct name_entry *nmatch;
47206 +
47207 + filp = task->exec_file;
47208 + if (filp == NULL)
47209 + return 0;
47210 +
47211 + /* the following is to apply the correct subject
47212 + on binaries running when the RBAC system
47213 + is enabled, when the binaries have been
47214 + replaced or deleted since their execution
47215 + -----
47216 + when the RBAC system starts, the inode/dev
47217 + from exec_file will be one the RBAC system
47218 + is unaware of. It only knows the inode/dev
47219 + of the present file on disk, or the absence
47220 + of it.
47221 + */
47222 + preempt_disable();
47223 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47224 +
47225 + nmatch = lookup_name_entry(tmpname);
47226 + preempt_enable();
47227 + tmpsubj = NULL;
47228 + if (nmatch) {
47229 + if (nmatch->deleted)
47230 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47231 + else
47232 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47233 + if (tmpsubj != NULL)
47234 + task->acl = tmpsubj;
47235 + }
47236 + if (tmpsubj == NULL)
47237 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47238 + task->role);
47239 + if (task->acl) {
47240 + struct acl_subject_label *curr;
47241 + curr = task->acl;
47242 +
47243 + task->is_writable = 0;
47244 + /* ignore additional mmap checks for processes that are writable
47245 + by the default ACL */
47246 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47247 + if (unlikely(obj->mode & GR_WRITE))
47248 + task->is_writable = 1;
47249 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47250 + if (unlikely(obj->mode & GR_WRITE))
47251 + task->is_writable = 1;
47252 +
47253 + gr_set_proc_res(task);
47254 +
47255 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47256 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47257 +#endif
47258 + } else {
47259 + return 1;
47260 + }
47261 +
47262 + return 0;
47263 +}
47264 +
47265 +int
47266 +gr_set_acls(const int type)
47267 +{
47268 + struct task_struct *task, *task2;
47269 + struct acl_role_label *role = current->role;
47270 + __u16 acl_role_id = current->acl_role_id;
47271 + const struct cred *cred;
47272 + int ret;
47273 +
47274 + rcu_read_lock();
47275 + read_lock(&tasklist_lock);
47276 + read_lock(&grsec_exec_file_lock);
47277 + do_each_thread(task2, task) {
47278 + /* check to see if we're called from the exit handler,
47279 + if so, only replace ACLs that have inherited the admin
47280 + ACL */
47281 +
47282 + if (type && (task->role != role ||
47283 + task->acl_role_id != acl_role_id))
47284 + continue;
47285 +
47286 + task->acl_role_id = 0;
47287 + task->acl_sp_role = 0;
47288 +
47289 + if (task->exec_file) {
47290 + cred = __task_cred(task);
47291 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47292 +
47293 + ret = gr_apply_subject_to_task(task);
47294 + if (ret) {
47295 + read_unlock(&grsec_exec_file_lock);
47296 + read_unlock(&tasklist_lock);
47297 + rcu_read_unlock();
47298 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47299 + return ret;
47300 + }
47301 + } else {
47302 + // it's a kernel process
47303 + task->role = kernel_role;
47304 + task->acl = kernel_role->root_label;
47305 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47306 + task->acl->mode &= ~GR_PROCFIND;
47307 +#endif
47308 + }
47309 + } while_each_thread(task2, task);
47310 + read_unlock(&grsec_exec_file_lock);
47311 + read_unlock(&tasklist_lock);
47312 + rcu_read_unlock();
47313 +
47314 + return 0;
47315 +}
47316 +
47317 +void
47318 +gr_learn_resource(const struct task_struct *task,
47319 + const int res, const unsigned long wanted, const int gt)
47320 +{
47321 + struct acl_subject_label *acl;
47322 + const struct cred *cred;
47323 +
47324 + if (unlikely((gr_status & GR_READY) &&
47325 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47326 + goto skip_reslog;
47327 +
47328 +#ifdef CONFIG_GRKERNSEC_RESLOG
47329 + gr_log_resource(task, res, wanted, gt);
47330 +#endif
47331 + skip_reslog:
47332 +
47333 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47334 + return;
47335 +
47336 + acl = task->acl;
47337 +
47338 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47339 + !(acl->resmask & (1 << (unsigned short) res))))
47340 + return;
47341 +
47342 + if (wanted >= acl->res[res].rlim_cur) {
47343 + unsigned long res_add;
47344 +
47345 + res_add = wanted;
47346 + switch (res) {
47347 + case RLIMIT_CPU:
47348 + res_add += GR_RLIM_CPU_BUMP;
47349 + break;
47350 + case RLIMIT_FSIZE:
47351 + res_add += GR_RLIM_FSIZE_BUMP;
47352 + break;
47353 + case RLIMIT_DATA:
47354 + res_add += GR_RLIM_DATA_BUMP;
47355 + break;
47356 + case RLIMIT_STACK:
47357 + res_add += GR_RLIM_STACK_BUMP;
47358 + break;
47359 + case RLIMIT_CORE:
47360 + res_add += GR_RLIM_CORE_BUMP;
47361 + break;
47362 + case RLIMIT_RSS:
47363 + res_add += GR_RLIM_RSS_BUMP;
47364 + break;
47365 + case RLIMIT_NPROC:
47366 + res_add += GR_RLIM_NPROC_BUMP;
47367 + break;
47368 + case RLIMIT_NOFILE:
47369 + res_add += GR_RLIM_NOFILE_BUMP;
47370 + break;
47371 + case RLIMIT_MEMLOCK:
47372 + res_add += GR_RLIM_MEMLOCK_BUMP;
47373 + break;
47374 + case RLIMIT_AS:
47375 + res_add += GR_RLIM_AS_BUMP;
47376 + break;
47377 + case RLIMIT_LOCKS:
47378 + res_add += GR_RLIM_LOCKS_BUMP;
47379 + break;
47380 + case RLIMIT_SIGPENDING:
47381 + res_add += GR_RLIM_SIGPENDING_BUMP;
47382 + break;
47383 + case RLIMIT_MSGQUEUE:
47384 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47385 + break;
47386 + case RLIMIT_NICE:
47387 + res_add += GR_RLIM_NICE_BUMP;
47388 + break;
47389 + case RLIMIT_RTPRIO:
47390 + res_add += GR_RLIM_RTPRIO_BUMP;
47391 + break;
47392 + case RLIMIT_RTTIME:
47393 + res_add += GR_RLIM_RTTIME_BUMP;
47394 + break;
47395 + }
47396 +
47397 + acl->res[res].rlim_cur = res_add;
47398 +
47399 + if (wanted > acl->res[res].rlim_max)
47400 + acl->res[res].rlim_max = res_add;
47401 +
47402 + /* only log the subject filename, since resource logging is supported for
47403 + single-subject learning only */
47404 + rcu_read_lock();
47405 + cred = __task_cred(task);
47406 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47407 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47408 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47409 + "", (unsigned long) res, &task->signal->saved_ip);
47410 + rcu_read_unlock();
47411 + }
47412 +
47413 + return;
47414 +}
47415 +
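Note: gr_learn_resource above grows a learned limit in steps rather than tracking the exact requested value: whenever a request meets or exceeds the current soft limit, the new limit becomes the request plus a per-resource bump constant (GR_RLIM_*_BUMP), and the hard limit follows if the request exceeded it. A minimal model of that policy (the bump value below is made up for illustration):

    #include <stdio.h>

    struct limit { unsigned long cur, max; };

    /* Raise a learned limit the way the learning mode above does: jump to
     * "wanted + bump" instead of creeping up one request at a time. */
    static void learn_bump(struct limit *l, unsigned long wanted, unsigned long bump)
    {
        if (wanted >= l->cur) {
            unsigned long grown = wanted + bump;
            l->cur = grown;
            if (wanted > l->max)
                l->max = grown;
        }
    }

    int main(void)
    {
        struct limit nofile = { 1024, 4096 };
        learn_bump(&nofile, 2048, 256);   /* soft limit becomes 2304 */
        learn_bump(&nofile, 8192, 256);   /* both limits become 8448 */
        printf("cur=%lu max=%lu\n", nofile.cur, nofile.max);
        return 0;
    }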
47416 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47417 +void
47418 +pax_set_initial_flags(struct linux_binprm *bprm)
47419 +{
47420 + struct task_struct *task = current;
47421 + struct acl_subject_label *proc;
47422 + unsigned long flags;
47423 +
47424 + if (unlikely(!(gr_status & GR_READY)))
47425 + return;
47426 +
47427 + flags = pax_get_flags(task);
47428 +
47429 + proc = task->acl;
47430 +
47431 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47432 + flags &= ~MF_PAX_PAGEEXEC;
47433 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47434 + flags &= ~MF_PAX_SEGMEXEC;
47435 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47436 + flags &= ~MF_PAX_RANDMMAP;
47437 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47438 + flags &= ~MF_PAX_EMUTRAMP;
47439 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47440 + flags &= ~MF_PAX_MPROTECT;
47441 +
47442 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47443 + flags |= MF_PAX_PAGEEXEC;
47444 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47445 + flags |= MF_PAX_SEGMEXEC;
47446 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47447 + flags |= MF_PAX_RANDMMAP;
47448 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47449 + flags |= MF_PAX_EMUTRAMP;
47450 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47451 + flags |= MF_PAX_MPROTECT;
47452 +
47453 + pax_set_flags(task, flags);
47454 +
47455 + return;
47456 +}
47457 +#endif
47458 +
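Note: pax_set_initial_flags above applies a subject's per-binary PaX policy by first clearing every feature the subject explicitly disables and then setting every feature it explicitly enables, so a feature the subject says nothing about keeps whatever flags the task already had. The same clear-then-set pattern in standalone form (the bit values are arbitrary placeholders, not the real MF_PAX_* constants):

    #include <stdio.h>

    /* placeholder feature bits; the real MF_PAX_* values live in the kernel */
    #define F_PAGEEXEC 0x01UL
    #define F_RANDMMAP 0x02UL
    #define F_MPROTECT 0x04UL

    static unsigned long apply_policy(unsigned long flags,
                                      unsigned long force_off,
                                      unsigned long force_on)
    {
        flags &= ~force_off;   /* explicit disables are cleared first */
        flags |= force_on;     /* then explicit enables are added */
        return flags;          /* bits mentioned in neither mask are untouched */
    }

    int main(void)
    {
        unsigned long task_flags = F_PAGEEXEC | F_MPROTECT;
        /* subject policy: MPROTECT off, RANDMMAP on, nothing said about PAGEEXEC */
        task_flags = apply_policy(task_flags, F_MPROTECT, F_RANDMMAP);
        printf("flags = 0x%lx\n", task_flags);   /* PAGEEXEC | RANDMMAP */
        return 0;
    }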
47459 +#ifdef CONFIG_SYSCTL
47460 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47461 + system to save 35kb of memory */
47462 +
47463 +/* we modify the passed in filename, but adjust it back before returning */
47464 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47465 +{
47466 + struct name_entry *nmatch;
47467 + char *p, *lastp = NULL;
47468 + struct acl_object_label *obj = NULL, *tmp;
47469 + struct acl_subject_label *tmpsubj;
47470 + char c = '\0';
47471 +
47472 + read_lock(&gr_inode_lock);
47473 +
47474 + p = name + len - 1;
47475 + do {
47476 + nmatch = lookup_name_entry(name);
47477 + if (lastp != NULL)
47478 + *lastp = c;
47479 +
47480 + if (nmatch == NULL)
47481 + goto next_component;
47482 + tmpsubj = current->acl;
47483 + do {
47484 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47485 + if (obj != NULL) {
47486 + tmp = obj->globbed;
47487 + while (tmp) {
47488 + if (!glob_match(tmp->filename, name)) {
47489 + obj = tmp;
47490 + goto found_obj;
47491 + }
47492 + tmp = tmp->next;
47493 + }
47494 + goto found_obj;
47495 + }
47496 + } while ((tmpsubj = tmpsubj->parent_subject));
47497 +next_component:
47498 + /* end case */
47499 + if (p == name)
47500 + break;
47501 +
47502 + while (*p != '/')
47503 + p--;
47504 + if (p == name)
47505 + lastp = p + 1;
47506 + else {
47507 + lastp = p;
47508 + p--;
47509 + }
47510 + c = *lastp;
47511 + *lastp = '\0';
47512 + } while (1);
47513 +found_obj:
47514 + read_unlock(&gr_inode_lock);
47515 + /* obj returned will always be non-null */
47516 + return obj;
47517 +}
47518 +
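Note: gr_lookup_by_name above finds the most specific object for a path by repeatedly truncating the name at the last '/' and retrying the lookup, restoring the clobbered byte each round, until it reaches the root component (which the loaded policy is expected to always define, hence the "always non-null" comment). A userspace sketch of that longest-prefix walk over an ordinary string-keyed table (lookup_component and the policy table are made-up stand-ins for the hash lookups in the patch):

    #include <stdio.h>
    #include <string.h>

    /* made-up table standing in for the policy's object labels */
    static const char *policy[] = { "/", "/proc", "/proc/sys/kernel", NULL };

    static const char *lookup_component(const char *name)
    {
        int i;
        for (i = 0; policy[i]; i++)
            if (!strcmp(policy[i], name))
                return policy[i];
        return NULL;
    }

    /* walk back from the full path toward "/", returning the longest match */
    static const char *lookup_by_name(char *name)
    {
        char *p = name + strlen(name) - 1, *lastp = NULL, c = '\0';
        const char *obj;

        for (;;) {
            obj = lookup_component(name);
            if (lastp)
                *lastp = c;          /* restore the byte we truncated */
            if (obj)
                return obj;
            if (p == name)
                break;               /* reached the root component */
            while (*p != '/')
                p--;
            lastp = (p == name) ? p + 1 : p;
            if (p != name)
                p--;
            c = *lastp;
            *lastp = '\0';           /* drop one component and retry */
        }
        return "/";                  /* the policy always has a root object */
    }

    int main(void)
    {
        char path[] = "/proc/sys/kernel/modprobe";
        printf("%s\n", lookup_by_name(path));   /* prints /proc/sys/kernel */
        return 0;
    }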
47519 +/* returns 0 when allowing, non-zero on error
47520 + op of 0 is used for readdir, so we don't log the names of hidden files
47521 +*/
47522 +__u32
47523 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47524 +{
47525 + ctl_table *tmp;
47526 + const char *proc_sys = "/proc/sys";
47527 + char *path;
47528 + struct acl_object_label *obj;
47529 + unsigned short len = 0, pos = 0, depth = 0, i;
47530 + __u32 err = 0;
47531 + __u32 mode = 0;
47532 +
47533 + if (unlikely(!(gr_status & GR_READY)))
47534 + return 0;
47535 +
47536 + /* for now, ignore operations on non-sysctl entries if it's not a
47537 + readdir */
47538 + if (table->child != NULL && op != 0)
47539 + return 0;
47540 +
47541 + mode |= GR_FIND;
47542 + /* it's only a read if it's an entry, read on dirs is for readdir */
47543 + if (op & MAY_READ)
47544 + mode |= GR_READ;
47545 + if (op & MAY_WRITE)
47546 + mode |= GR_WRITE;
47547 +
47548 + preempt_disable();
47549 +
47550 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47551 +
47552 + /* it's only a read/write if it's an actual entry, not a dir
47553 + (which are opened for readdir)
47554 + */
47555 +
47556 + /* convert the requested sysctl entry into a pathname */
47557 +
47558 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47559 + len += strlen(tmp->procname);
47560 + len++;
47561 + depth++;
47562 + }
47563 +
47564 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47565 + /* deny */
47566 + goto out;
47567 + }
47568 +
47569 + memset(path, 0, PAGE_SIZE);
47570 +
47571 + memcpy(path, proc_sys, strlen(proc_sys));
47572 +
47573 + pos += strlen(proc_sys);
47574 +
47575 + for (; depth > 0; depth--) {
47576 + path[pos] = '/';
47577 + pos++;
47578 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47579 + if (depth == i) {
47580 + memcpy(path + pos, tmp->procname,
47581 + strlen(tmp->procname));
47582 + pos += strlen(tmp->procname);
47583 + }
47584 + i++;
47585 + }
47586 + }
47587 +
47588 + obj = gr_lookup_by_name(path, pos);
47589 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47590 +
47591 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47592 + ((err & mode) != mode))) {
47593 + __u32 new_mode = mode;
47594 +
47595 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47596 +
47597 + err = 0;
47598 + gr_log_learn_sysctl(path, new_mode);
47599 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47600 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47601 + err = -ENOENT;
47602 + } else if (!(err & GR_FIND)) {
47603 + err = -ENOENT;
47604 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47605 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47606 + path, (mode & GR_READ) ? " reading" : "",
47607 + (mode & GR_WRITE) ? " writing" : "");
47608 + err = -EACCES;
47609 + } else if ((err & mode) != mode) {
47610 + err = -EACCES;
47611 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47612 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47613 + path, (mode & GR_READ) ? " reading" : "",
47614 + (mode & GR_WRITE) ? " writing" : "");
47615 + err = 0;
47616 + } else
47617 + err = 0;
47618 +
47619 + out:
47620 + preempt_enable();
47621 +
47622 + return err;
47623 +}
47624 +#endif
47625 +
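Note: gr_handle_sysctl above rebuilds the full "/proc/sys/..." pathname for a sysctl entry by first counting the depth of the parent chain, then emitting the components outermost-first by re-walking the chain once per depth level. A compact userspace version of the same reconstruction (the entry type is a toy struct, not the kernel's ctl_table; unlike the original, this sketch skips the PAGE_SIZE length check):

    #include <stdio.h>
    #include <string.h>

    /* toy stand-in for the kernel's ctl_table parent chain */
    struct entry {
        const char *procname;
        struct entry *parent;
    };

    /* Build "/proc/sys/<outermost>/.../<entry>" into buf, outermost-first,
     * by re-walking the chain once per depth level as the loop above does. */
    static void sysctl_path(const struct entry *e, char *buf, size_t bufsize)
    {
        const struct entry *t;
        unsigned int depth = 0, i;
        size_t pos;

        for (t = e; t; t = t->parent)
            depth++;

        snprintf(buf, bufsize, "/proc/sys");
        pos = strlen(buf);

        for (; depth > 0; depth--) {
            buf[pos++] = '/';
            for (i = 1, t = e; t; t = t->parent, i++)
                if (i == depth) {     /* the depth-th ancestor is the next component */
                    size_t n = strlen(t->procname);
                    memcpy(buf + pos, t->procname, n);
                    pos += n;
                }
            buf[pos] = '\0';
        }
    }

    int main(void)
    {
        struct entry kernel = { "kernel", NULL };
        struct entry modprobe = { "modprobe", &kernel };
        char buf[256];
        sysctl_path(&modprobe, buf, sizeof(buf));
        printf("%s\n", buf);   /* /proc/sys/kernel/modprobe */
        return 0;
    }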
47626 +int
47627 +gr_handle_proc_ptrace(struct task_struct *task)
47628 +{
47629 + struct file *filp;
47630 + struct task_struct *tmp = task;
47631 + struct task_struct *curtemp = current;
47632 + __u32 retmode;
47633 +
47634 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47635 + if (unlikely(!(gr_status & GR_READY)))
47636 + return 0;
47637 +#endif
47638 +
47639 + read_lock(&tasklist_lock);
47640 + read_lock(&grsec_exec_file_lock);
47641 + filp = task->exec_file;
47642 +
47643 + while (tmp->pid > 0) {
47644 + if (tmp == curtemp)
47645 + break;
47646 + tmp = tmp->real_parent;
47647 + }
47648 +
47649 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47650 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47651 + read_unlock(&grsec_exec_file_lock);
47652 + read_unlock(&tasklist_lock);
47653 + return 1;
47654 + }
47655 +
47656 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47657 + if (!(gr_status & GR_READY)) {
47658 + read_unlock(&grsec_exec_file_lock);
47659 + read_unlock(&tasklist_lock);
47660 + return 0;
47661 + }
47662 +#endif
47663 +
47664 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47665 + read_unlock(&grsec_exec_file_lock);
47666 + read_unlock(&tasklist_lock);
47667 +
47668 + if (retmode & GR_NOPTRACE)
47669 + return 1;
47670 +
47671 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47672 + && (current->acl != task->acl || (current->acl != current->role->root_label
47673 + && current->pid != task->pid)))
47674 + return 1;
47675 +
47676 + return 0;
47677 +}
47678 +
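Note: gr_handle_proc_ptrace above (and gr_handle_ptrace below) decide whether the current task is an ancestor of the ptrace target by walking real_parent links from the target upward until the current task is found or the walk reaches pid 0. The same ancestor test over a toy process tree (struct and helper names are invented for the example; the actual ptrace policy combines this with the RBAC checks shown in the patch):

    #include <stdio.h>

    struct proc {
        int pid;
        struct proc *real_parent;
    };

    /* Returns 1 if "ancestor" appears on target's real_parent chain
     * (the same walk the handlers perform with task and current). */
    static int is_ancestor(const struct proc *target, const struct proc *ancestor)
    {
        const struct proc *t = target;

        while (t->pid > 0) {
            if (t == ancestor)
                return 1;
            t = t->real_parent;
        }
        return 0;
    }

    int main(void)
    {
        struct proc idle  = { 0, NULL };
        struct proc init  = { 1, &idle };
        struct proc shell = { 200, &init };
        struct proc child = { 201, &shell };

        printf("%d\n", is_ancestor(&child, &shell)); /* 1: shell is an ancestor of child */
        printf("%d\n", is_ancestor(&shell, &child)); /* 0: child is not an ancestor of shell */
        return 0;
    }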
47679 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47680 +{
47681 + if (unlikely(!(gr_status & GR_READY)))
47682 + return;
47683 +
47684 + if (!(current->role->roletype & GR_ROLE_GOD))
47685 + return;
47686 +
47687 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47688 + p->role->rolename, gr_task_roletype_to_char(p),
47689 + p->acl->filename);
47690 +}
47691 +
47692 +int
47693 +gr_handle_ptrace(struct task_struct *task, const long request)
47694 +{
47695 + struct task_struct *tmp = task;
47696 + struct task_struct *curtemp = current;
47697 + __u32 retmode;
47698 +
47699 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47700 + if (unlikely(!(gr_status & GR_READY)))
47701 + return 0;
47702 +#endif
47703 +
47704 + read_lock(&tasklist_lock);
47705 + while (tmp->pid > 0) {
47706 + if (tmp == curtemp)
47707 + break;
47708 + tmp = tmp->real_parent;
47709 + }
47710 +
47711 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47712 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47713 + read_unlock(&tasklist_lock);
47714 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47715 + return 1;
47716 + }
47717 + read_unlock(&tasklist_lock);
47718 +
47719 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47720 + if (!(gr_status & GR_READY))
47721 + return 0;
47722 +#endif
47723 +
47724 + read_lock(&grsec_exec_file_lock);
47725 + if (unlikely(!task->exec_file)) {
47726 + read_unlock(&grsec_exec_file_lock);
47727 + return 0;
47728 + }
47729 +
47730 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47731 + read_unlock(&grsec_exec_file_lock);
47732 +
47733 + if (retmode & GR_NOPTRACE) {
47734 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47735 + return 1;
47736 + }
47737 +
47738 + if (retmode & GR_PTRACERD) {
47739 + switch (request) {
47740 + case PTRACE_POKETEXT:
47741 + case PTRACE_POKEDATA:
47742 + case PTRACE_POKEUSR:
47743 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47744 + case PTRACE_SETREGS:
47745 + case PTRACE_SETFPREGS:
47746 +#endif
47747 +#ifdef CONFIG_X86
47748 + case PTRACE_SETFPXREGS:
47749 +#endif
47750 +#ifdef CONFIG_ALTIVEC
47751 + case PTRACE_SETVRREGS:
47752 +#endif
47753 + return 1;
47754 + default:
47755 + return 0;
47756 + }
47757 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
47758 + !(current->role->roletype & GR_ROLE_GOD) &&
47759 + (current->acl != task->acl)) {
47760 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47761 + return 1;
47762 + }
47763 +
47764 + return 0;
47765 +}
47766 +
47767 +static int is_writable_mmap(const struct file *filp)
47768 +{
47769 + struct task_struct *task = current;
47770 + struct acl_object_label *obj, *obj2;
47771 +
47772 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47773 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47774 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47775 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47776 + task->role->root_label);
47777 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47778 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47779 + return 1;
47780 + }
47781 + }
47782 + return 0;
47783 +}
47784 +
47785 +int
47786 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47787 +{
47788 + __u32 mode;
47789 +
47790 + if (unlikely(!file || !(prot & PROT_EXEC)))
47791 + return 1;
47792 +
47793 + if (is_writable_mmap(file))
47794 + return 0;
47795 +
47796 + mode =
47797 + gr_search_file(file->f_path.dentry,
47798 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47799 + file->f_path.mnt);
47800 +
47801 + if (!gr_tpe_allow(file))
47802 + return 0;
47803 +
47804 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47805 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47806 + return 0;
47807 + } else if (unlikely(!(mode & GR_EXEC))) {
47808 + return 0;
47809 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47810 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47811 + return 1;
47812 + }
47813 +
47814 + return 1;
47815 +}
47816 +
47817 +int
47818 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47819 +{
47820 + __u32 mode;
47821 +
47822 + if (unlikely(!file || !(prot & PROT_EXEC)))
47823 + return 1;
47824 +
47825 + if (is_writable_mmap(file))
47826 + return 0;
47827 +
47828 + mode =
47829 + gr_search_file(file->f_path.dentry,
47830 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47831 + file->f_path.mnt);
47832 +
47833 + if (!gr_tpe_allow(file))
47834 + return 0;
47835 +
47836 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47837 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47838 + return 0;
47839 + } else if (unlikely(!(mode & GR_EXEC))) {
47840 + return 0;
47841 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47842 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47843 + return 1;
47844 + }
47845 +
47846 + return 1;
47847 +}
47848 +
47849 +void
47850 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47851 +{
47852 + unsigned long runtime;
47853 + unsigned long cputime;
47854 + unsigned int wday, cday;
47855 + __u8 whr, chr;
47856 + __u8 wmin, cmin;
47857 + __u8 wsec, csec;
47858 + struct timespec timeval;
47859 +
47860 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
47861 + !(task->acl->mode & GR_PROCACCT)))
47862 + return;
47863 +
47864 + do_posix_clock_monotonic_gettime(&timeval);
47865 + runtime = timeval.tv_sec - task->start_time.tv_sec;
47866 + wday = runtime / (3600 * 24);
47867 + runtime -= wday * (3600 * 24);
47868 + whr = runtime / 3600;
47869 + runtime -= whr * 3600;
47870 + wmin = runtime / 60;
47871 + runtime -= wmin * 60;
47872 + wsec = runtime;
47873 +
47874 + cputime = (task->utime + task->stime) / HZ;
47875 + cday = cputime / (3600 * 24);
47876 + cputime -= cday * (3600 * 24);
47877 + chr = cputime / 3600;
47878 + cputime -= chr * 3600;
47879 + cmin = cputime / 60;
47880 + cputime -= cmin * 60;
47881 + csec = cputime;
47882 +
47883 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
47884 +
47885 + return;
47886 +}
47887 +
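Note: gr_acl_handle_psacct above converts two second counts (wall-clock runtime since process start, and utime+stime for CPU time) into days/hours/minutes/seconds fields before logging. The decomposition is plain integer arithmetic:

    #include <stdio.h>

    struct dhms { unsigned long d; unsigned int h, m, s; };

    /* split a duration in seconds into days/hours/minutes/seconds,
     * exactly as the process-accounting log message above does */
    static struct dhms split_seconds(unsigned long t)
    {
        struct dhms r;
        r.d = t / (3600 * 24);  t -= r.d * (3600 * 24);
        r.h = t / 3600;         t -= r.h * 3600;
        r.m = t / 60;           t -= r.m * 60;
        r.s = t;
        return r;
    }

    int main(void)
    {
        struct dhms r = split_seconds(93784);   /* 1 day, 2 h, 3 min, 4 s */
        printf("%lud %uh %um %us\n", r.d, r.h, r.m, r.s);
        return 0;
    }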
47888 +void gr_set_kernel_label(struct task_struct *task)
47889 +{
47890 + if (gr_status & GR_READY) {
47891 + task->role = kernel_role;
47892 + task->acl = kernel_role->root_label;
47893 + }
47894 + return;
47895 +}
47896 +
47897 +#ifdef CONFIG_TASKSTATS
47898 +int gr_is_taskstats_denied(int pid)
47899 +{
47900 + struct task_struct *task;
47901 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47902 + const struct cred *cred;
47903 +#endif
47904 + int ret = 0;
47905 +
47906 + /* restrict taskstats viewing to un-chrooted root users
47907 + who have the 'view' subject flag if the RBAC system is enabled
47908 + */
47909 +
47910 + rcu_read_lock();
47911 + read_lock(&tasklist_lock);
47912 + task = find_task_by_vpid(pid);
47913 + if (task) {
47914 +#ifdef CONFIG_GRKERNSEC_CHROOT
47915 + if (proc_is_chrooted(task))
47916 + ret = -EACCES;
47917 +#endif
47918 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47919 + cred = __task_cred(task);
47920 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47921 + if (cred->uid != 0)
47922 + ret = -EACCES;
47923 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47924 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
47925 + ret = -EACCES;
47926 +#endif
47927 +#endif
47928 + if (gr_status & GR_READY) {
47929 + if (!(task->acl->mode & GR_VIEW))
47930 + ret = -EACCES;
47931 + }
47932 + } else
47933 + ret = -ENOENT;
47934 +
47935 + read_unlock(&tasklist_lock);
47936 + rcu_read_unlock();
47937 +
47938 + return ret;
47939 +}
47940 +#endif
47941 +
47942 +/* AUXV entries are filled via a descendant of search_binary_handler
47943 + after we've already applied the subject for the target
47944 +*/
47945 +int gr_acl_enable_at_secure(void)
47946 +{
47947 + if (unlikely(!(gr_status & GR_READY)))
47948 + return 0;
47949 +
47950 + if (current->acl->mode & GR_ATSECURE)
47951 + return 1;
47952 +
47953 + return 0;
47954 +}
47955 +
47956 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
47957 +{
47958 + struct task_struct *task = current;
47959 + struct dentry *dentry = file->f_path.dentry;
47960 + struct vfsmount *mnt = file->f_path.mnt;
47961 + struct acl_object_label *obj, *tmp;
47962 + struct acl_subject_label *subj;
47963 + unsigned int bufsize;
47964 + int is_not_root;
47965 + char *path;
47966 + dev_t dev = __get_dev(dentry);
47967 +
47968 + if (unlikely(!(gr_status & GR_READY)))
47969 + return 1;
47970 +
47971 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47972 + return 1;
47973 +
47974 + /* ignore Eric Biederman */
47975 + if (IS_PRIVATE(dentry->d_inode))
47976 + return 1;
47977 +
47978 + subj = task->acl;
47979 + do {
47980 + obj = lookup_acl_obj_label(ino, dev, subj);
47981 + if (obj != NULL)
47982 + return (obj->mode & GR_FIND) ? 1 : 0;
47983 + } while ((subj = subj->parent_subject));
47984 +
47985 + /* this is purely an optimization, since we're looking for an object
47986 + for the directory we're doing a readdir on.
47987 + If it's possible for any globbed object to match the entry we're
47988 + filling into the directory, then the object we find here will be
47989 + an anchor point with attached globbed objects.
47990 + */
47991 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
47992 + if (obj->globbed == NULL)
47993 + return (obj->mode & GR_FIND) ? 1 : 0;
47994 +
47995 + is_not_root = ((obj->filename[0] == '/') &&
47996 + (obj->filename[1] == '\0')) ? 0 : 1;
47997 + bufsize = PAGE_SIZE - namelen - is_not_root;
47998 +
47999 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
48000 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48001 + return 1;
48002 +
48003 + preempt_disable();
48004 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48005 + bufsize);
48006 +
48007 + bufsize = strlen(path);
48008 +
48009 + /* if base is "/", don't append an additional slash */
48010 + if (is_not_root)
48011 + *(path + bufsize) = '/';
48012 + memcpy(path + bufsize + is_not_root, name, namelen);
48013 + *(path + bufsize + namelen + is_not_root) = '\0';
48014 +
48015 + tmp = obj->globbed;
48016 + while (tmp) {
48017 + if (!glob_match(tmp->filename, path)) {
48018 + preempt_enable();
48019 + return (tmp->mode & GR_FIND) ? 1 : 0;
48020 + }
48021 + tmp = tmp->next;
48022 + }
48023 + preempt_enable();
48024 + return (obj->mode & GR_FIND) ? 1 : 0;
48025 +}
48026 +
48027 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48028 +EXPORT_SYMBOL(gr_acl_is_enabled);
48029 +#endif
48030 +EXPORT_SYMBOL(gr_learn_resource);
48031 +EXPORT_SYMBOL(gr_set_kernel_label);
48032 +#ifdef CONFIG_SECURITY
48033 +EXPORT_SYMBOL(gr_check_user_change);
48034 +EXPORT_SYMBOL(gr_check_group_change);
48035 +#endif
48036 +
48037 diff -urNp linux-2.6.32.42/grsecurity/gracl_cap.c linux-2.6.32.42/grsecurity/gracl_cap.c
48038 --- linux-2.6.32.42/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48039 +++ linux-2.6.32.42/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
48040 @@ -0,0 +1,138 @@
48041 +#include <linux/kernel.h>
48042 +#include <linux/module.h>
48043 +#include <linux/sched.h>
48044 +#include <linux/gracl.h>
48045 +#include <linux/grsecurity.h>
48046 +#include <linux/grinternal.h>
48047 +
48048 +static const char *captab_log[] = {
48049 + "CAP_CHOWN",
48050 + "CAP_DAC_OVERRIDE",
48051 + "CAP_DAC_READ_SEARCH",
48052 + "CAP_FOWNER",
48053 + "CAP_FSETID",
48054 + "CAP_KILL",
48055 + "CAP_SETGID",
48056 + "CAP_SETUID",
48057 + "CAP_SETPCAP",
48058 + "CAP_LINUX_IMMUTABLE",
48059 + "CAP_NET_BIND_SERVICE",
48060 + "CAP_NET_BROADCAST",
48061 + "CAP_NET_ADMIN",
48062 + "CAP_NET_RAW",
48063 + "CAP_IPC_LOCK",
48064 + "CAP_IPC_OWNER",
48065 + "CAP_SYS_MODULE",
48066 + "CAP_SYS_RAWIO",
48067 + "CAP_SYS_CHROOT",
48068 + "CAP_SYS_PTRACE",
48069 + "CAP_SYS_PACCT",
48070 + "CAP_SYS_ADMIN",
48071 + "CAP_SYS_BOOT",
48072 + "CAP_SYS_NICE",
48073 + "CAP_SYS_RESOURCE",
48074 + "CAP_SYS_TIME",
48075 + "CAP_SYS_TTY_CONFIG",
48076 + "CAP_MKNOD",
48077 + "CAP_LEASE",
48078 + "CAP_AUDIT_WRITE",
48079 + "CAP_AUDIT_CONTROL",
48080 + "CAP_SETFCAP",
48081 + "CAP_MAC_OVERRIDE",
48082 + "CAP_MAC_ADMIN"
48083 +};
48084 +
48085 +EXPORT_SYMBOL(gr_is_capable);
48086 +EXPORT_SYMBOL(gr_is_capable_nolog);
48087 +
48088 +int
48089 +gr_is_capable(const int cap)
48090 +{
48091 + struct task_struct *task = current;
48092 + const struct cred *cred = current_cred();
48093 + struct acl_subject_label *curracl;
48094 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48095 + kernel_cap_t cap_audit = __cap_empty_set;
48096 +
48097 + if (!gr_acl_is_enabled())
48098 + return 1;
48099 +
48100 + curracl = task->acl;
48101 +
48102 + cap_drop = curracl->cap_lower;
48103 + cap_mask = curracl->cap_mask;
48104 + cap_audit = curracl->cap_invert_audit;
48105 +
48106 + while ((curracl = curracl->parent_subject)) {
48107 + /* if the cap isn't specified in the current computed mask but is specified in the
48108 + current level subject, and is lowered in the current level subject, then add
48109 + it to the set of dropped capabilities;
48110 + otherwise, add the current level subject's mask to the current computed mask
48111 + */
48112 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48113 + cap_raise(cap_mask, cap);
48114 + if (cap_raised(curracl->cap_lower, cap))
48115 + cap_raise(cap_drop, cap);
48116 + if (cap_raised(curracl->cap_invert_audit, cap))
48117 + cap_raise(cap_audit, cap);
48118 + }
48119 + }
48120 +
48121 + if (!cap_raised(cap_drop, cap)) {
48122 + if (cap_raised(cap_audit, cap))
48123 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48124 + return 1;
48125 + }
48126 +
48127 + curracl = task->acl;
48128 +
48129 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48130 + && cap_raised(cred->cap_effective, cap)) {
48131 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48132 + task->role->roletype, cred->uid,
48133 + cred->gid, task->exec_file ?
48134 + gr_to_filename(task->exec_file->f_path.dentry,
48135 + task->exec_file->f_path.mnt) : curracl->filename,
48136 + curracl->filename, 0UL,
48137 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48138 + return 1;
48139 + }
48140 +
48141 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48142 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48143 + return 0;
48144 +}
48145 +
48146 +int
48147 +gr_is_capable_nolog(const int cap)
48148 +{
48149 + struct acl_subject_label *curracl;
48150 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48151 +
48152 + if (!gr_acl_is_enabled())
48153 + return 1;
48154 +
48155 + curracl = current->acl;
48156 +
48157 + cap_drop = curracl->cap_lower;
48158 + cap_mask = curracl->cap_mask;
48159 +
48160 + while ((curracl = curracl->parent_subject)) {
48161 +	/* if the cap isn't specified in the current computed mask but is specified in the
48162 +	   current level subject, and is lowered in the current level subject, then add
48163 +	   it to the set of dropped capabilities;
48164 +	   otherwise, add the current level subject's mask to the current computed mask
48165 +	*/
48166 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48167 + cap_raise(cap_mask, cap);
48168 + if (cap_raised(curracl->cap_lower, cap))
48169 + cap_raise(cap_drop, cap);
48170 + }
48171 + }
48172 +
48173 + if (!cap_raised(cap_drop, cap))
48174 + return 1;
48175 +
48176 + return 0;
48177 +}
48178 +
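
gr_is_capable() above resolves a capability by walking the subject hierarchy: the task's own subject seeds the computed mask, and the first level whose mask mentions the capability decides it, the decision being a drop only if that same level also lowers it. Below is a minimal userspace sketch of that walk (not part of the patch; kernel_cap_t and acl_subject_label are replaced by an illustrative uint64_t bitmask and a small struct), showing the default-allow-unless-dropped outcome.

/* Minimal userspace model of the gr_is_capable() subject walk.
 * Editorial illustration only: bitmasks and struct names are stand-ins
 * for the kernel structures in the patch.
 */
#include <stdint.h>
#include <stdio.h>

struct subject {
	uint64_t cap_mask;	/* caps this subject says anything about */
	uint64_t cap_lower;	/* caps this subject explicitly drops    */
	struct subject *parent;
};

static int subject_allows_cap(const struct subject *s, int cap)
{
	uint64_t bit = 1ULL << cap;
	uint64_t mask = s->cap_mask;
	uint64_t drop = s->cap_lower;

	/* walk towards the root; the first level that mentions the cap decides it */
	for (s = s->parent; s != NULL; s = s->parent) {
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);	/* allowed unless the deciding level lowered it */
}

int main(void)
{
	struct subject root  = { .cap_mask = 1ULL << 21, .cap_lower = 1ULL << 21 }; /* drops cap 21 */
	struct subject child = { .cap_mask = 0, .cap_lower = 0, .parent = &root };

	printf("cap 21 allowed: %d\n", subject_allows_cap(&child, 21)); /* 0: inherited drop  */
	printf("cap 7  allowed: %d\n", subject_allows_cap(&child, 7));  /* 1: never mentioned */
	return 0;
}
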
48179 diff -urNp linux-2.6.32.42/grsecurity/gracl_fs.c linux-2.6.32.42/grsecurity/gracl_fs.c
48180 --- linux-2.6.32.42/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48181 +++ linux-2.6.32.42/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
48182 @@ -0,0 +1,431 @@
48183 +#include <linux/kernel.h>
48184 +#include <linux/sched.h>
48185 +#include <linux/types.h>
48186 +#include <linux/fs.h>
48187 +#include <linux/file.h>
48188 +#include <linux/stat.h>
48189 +#include <linux/grsecurity.h>
48190 +#include <linux/grinternal.h>
48191 +#include <linux/gracl.h>
48192 +
48193 +__u32
48194 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48195 + const struct vfsmount * mnt)
48196 +{
48197 + __u32 mode;
48198 +
48199 + if (unlikely(!dentry->d_inode))
48200 + return GR_FIND;
48201 +
48202 + mode =
48203 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48204 +
48205 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48206 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48207 + return mode;
48208 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48209 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48210 + return 0;
48211 + } else if (unlikely(!(mode & GR_FIND)))
48212 + return 0;
48213 +
48214 + return GR_FIND;
48215 +}
48216 +
48217 +__u32
48218 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48219 + const int fmode)
48220 +{
48221 + __u32 reqmode = GR_FIND;
48222 + __u32 mode;
48223 +
48224 + if (unlikely(!dentry->d_inode))
48225 + return reqmode;
48226 +
48227 + if (unlikely(fmode & O_APPEND))
48228 + reqmode |= GR_APPEND;
48229 + else if (unlikely(fmode & FMODE_WRITE))
48230 + reqmode |= GR_WRITE;
48231 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48232 + reqmode |= GR_READ;
48233 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
48234 + reqmode &= ~GR_READ;
48235 + mode =
48236 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48237 + mnt);
48238 +
48239 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48240 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48241 + reqmode & GR_READ ? " reading" : "",
48242 + reqmode & GR_WRITE ? " writing" : reqmode &
48243 + GR_APPEND ? " appending" : "");
48244 + return reqmode;
48245 + } else
48246 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48247 + {
48248 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48249 + reqmode & GR_READ ? " reading" : "",
48250 + reqmode & GR_WRITE ? " writing" : reqmode &
48251 + GR_APPEND ? " appending" : "");
48252 + return 0;
48253 + } else if (unlikely((mode & reqmode) != reqmode))
48254 + return 0;
48255 +
48256 + return reqmode;
48257 +}
48258 +
48259 +__u32
48260 +gr_acl_handle_creat(const struct dentry * dentry,
48261 + const struct dentry * p_dentry,
48262 + const struct vfsmount * p_mnt, const int fmode,
48263 + const int imode)
48264 +{
48265 + __u32 reqmode = GR_WRITE | GR_CREATE;
48266 + __u32 mode;
48267 +
48268 + if (unlikely(fmode & O_APPEND))
48269 + reqmode |= GR_APPEND;
48270 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48271 + reqmode |= GR_READ;
48272 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48273 + reqmode |= GR_SETID;
48274 +
48275 + mode =
48276 + gr_check_create(dentry, p_dentry, p_mnt,
48277 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48278 +
48279 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48280 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48281 + reqmode & GR_READ ? " reading" : "",
48282 + reqmode & GR_WRITE ? " writing" : reqmode &
48283 + GR_APPEND ? " appending" : "");
48284 + return reqmode;
48285 + } else
48286 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48287 + {
48288 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48289 + reqmode & GR_READ ? " reading" : "",
48290 + reqmode & GR_WRITE ? " writing" : reqmode &
48291 + GR_APPEND ? " appending" : "");
48292 + return 0;
48293 + } else if (unlikely((mode & reqmode) != reqmode))
48294 + return 0;
48295 +
48296 + return reqmode;
48297 +}
48298 +
48299 +__u32
48300 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48301 + const int fmode)
48302 +{
48303 + __u32 mode, reqmode = GR_FIND;
48304 +
48305 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48306 + reqmode |= GR_EXEC;
48307 + if (fmode & S_IWOTH)
48308 + reqmode |= GR_WRITE;
48309 + if (fmode & S_IROTH)
48310 + reqmode |= GR_READ;
48311 +
48312 + mode =
48313 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48314 + mnt);
48315 +
48316 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48317 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48318 + reqmode & GR_READ ? " reading" : "",
48319 + reqmode & GR_WRITE ? " writing" : "",
48320 + reqmode & GR_EXEC ? " executing" : "");
48321 + return reqmode;
48322 + } else
48323 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48324 + {
48325 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48326 + reqmode & GR_READ ? " reading" : "",
48327 + reqmode & GR_WRITE ? " writing" : "",
48328 + reqmode & GR_EXEC ? " executing" : "");
48329 + return 0;
48330 + } else if (unlikely((mode & reqmode) != reqmode))
48331 + return 0;
48332 +
48333 + return reqmode;
48334 +}
48335 +
48336 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48337 +{
48338 + __u32 mode;
48339 +
48340 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48341 +
48342 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48343 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48344 + return mode;
48345 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48346 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48347 + return 0;
48348 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48349 + return 0;
48350 +
48351 + return (reqmode);
48352 +}
48353 +
48354 +__u32
48355 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48356 +{
48357 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48358 +}
48359 +
48360 +__u32
48361 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48362 +{
48363 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48364 +}
48365 +
48366 +__u32
48367 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48368 +{
48369 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48370 +}
48371 +
48372 +__u32
48373 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48374 +{
48375 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48376 +}
48377 +
48378 +__u32
48379 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48380 + mode_t mode)
48381 +{
48382 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48383 + return 1;
48384 +
48385 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48386 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48387 + GR_FCHMOD_ACL_MSG);
48388 + } else {
48389 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48390 + }
48391 +}
48392 +
48393 +__u32
48394 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48395 + mode_t mode)
48396 +{
48397 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48398 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48399 + GR_CHMOD_ACL_MSG);
48400 + } else {
48401 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48402 + }
48403 +}
48404 +
48405 +__u32
48406 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48407 +{
48408 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48409 +}
48410 +
48411 +__u32
48412 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48413 +{
48414 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48415 +}
48416 +
48417 +__u32
48418 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48419 +{
48420 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48421 +}
48422 +
48423 +__u32
48424 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48425 +{
48426 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48427 + GR_UNIXCONNECT_ACL_MSG);
48428 +}
48429 +
48430 +/* hardlinks require at minimum create permission;
48431 +   any additional privilege required is based on the
48432 +   privilege of the file being linked to
48433 +*/
48434 +__u32
48435 +gr_acl_handle_link(const struct dentry * new_dentry,
48436 + const struct dentry * parent_dentry,
48437 + const struct vfsmount * parent_mnt,
48438 + const struct dentry * old_dentry,
48439 + const struct vfsmount * old_mnt, const char *to)
48440 +{
48441 + __u32 mode;
48442 + __u32 needmode = GR_CREATE | GR_LINK;
48443 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48444 +
48445 + mode =
48446 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48447 + old_mnt);
48448 +
48449 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48450 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48451 + return mode;
48452 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48453 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48454 + return 0;
48455 + } else if (unlikely((mode & needmode) != needmode))
48456 + return 0;
48457 +
48458 + return 1;
48459 +}
48460 +
48461 +__u32
48462 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48463 + const struct dentry * parent_dentry,
48464 + const struct vfsmount * parent_mnt, const char *from)
48465 +{
48466 + __u32 needmode = GR_WRITE | GR_CREATE;
48467 + __u32 mode;
48468 +
48469 + mode =
48470 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48471 + GR_CREATE | GR_AUDIT_CREATE |
48472 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48473 +
48474 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48475 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48476 + return mode;
48477 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48478 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48479 + return 0;
48480 + } else if (unlikely((mode & needmode) != needmode))
48481 + return 0;
48482 +
48483 + return (GR_WRITE | GR_CREATE);
48484 +}
48485 +
48486 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48487 +{
48488 + __u32 mode;
48489 +
48490 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48491 +
48492 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48493 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48494 + return mode;
48495 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48496 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48497 + return 0;
48498 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48499 + return 0;
48500 +
48501 + return (reqmode);
48502 +}
48503 +
48504 +__u32
48505 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48506 + const struct dentry * parent_dentry,
48507 + const struct vfsmount * parent_mnt,
48508 + const int mode)
48509 +{
48510 + __u32 reqmode = GR_WRITE | GR_CREATE;
48511 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48512 + reqmode |= GR_SETID;
48513 +
48514 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48515 + reqmode, GR_MKNOD_ACL_MSG);
48516 +}
48517 +
48518 +__u32
48519 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48520 + const struct dentry *parent_dentry,
48521 + const struct vfsmount *parent_mnt)
48522 +{
48523 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48524 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48525 +}
48526 +
48527 +#define RENAME_CHECK_SUCCESS(old, new) \
48528 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48529 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48530 +
48531 +int
48532 +gr_acl_handle_rename(struct dentry *new_dentry,
48533 + struct dentry *parent_dentry,
48534 + const struct vfsmount *parent_mnt,
48535 + struct dentry *old_dentry,
48536 + struct inode *old_parent_inode,
48537 + struct vfsmount *old_mnt, const char *newname)
48538 +{
48539 + __u32 comp1, comp2;
48540 + int error = 0;
48541 +
48542 + if (unlikely(!gr_acl_is_enabled()))
48543 + return 0;
48544 +
48545 + if (!new_dentry->d_inode) {
48546 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48547 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48548 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48549 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48550 + GR_DELETE | GR_AUDIT_DELETE |
48551 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48552 + GR_SUPPRESS, old_mnt);
48553 + } else {
48554 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48555 + GR_CREATE | GR_DELETE |
48556 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48557 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48558 + GR_SUPPRESS, parent_mnt);
48559 + comp2 =
48560 + gr_search_file(old_dentry,
48561 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48562 + GR_DELETE | GR_AUDIT_DELETE |
48563 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48564 + }
48565 +
48566 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48567 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48568 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48569 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48570 + && !(comp2 & GR_SUPPRESS)) {
48571 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48572 + error = -EACCES;
48573 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48574 + error = -EACCES;
48575 +
48576 + return error;
48577 +}
48578 +
48579 +void
48580 +gr_acl_handle_exit(void)
48581 +{
48582 + u16 id;
48583 + char *rolename;
48584 + struct file *exec_file;
48585 +
48586 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48587 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48588 + id = current->acl_role_id;
48589 + rolename = current->role->rolename;
48590 + gr_set_acls(1);
48591 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48592 + }
48593 +
48594 + write_lock(&grsec_exec_file_lock);
48595 + exec_file = current->exec_file;
48596 + current->exec_file = NULL;
48597 + write_unlock(&grsec_exec_file_lock);
48598 +
48599 + if (exec_file)
48600 + fput(exec_file);
48601 +}
48602 +
48603 +int
48604 +gr_acl_handle_procpidmem(const struct task_struct *task)
48605 +{
48606 + if (unlikely(!gr_acl_is_enabled()))
48607 + return 0;
48608 +
48609 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48610 + return -EACCES;
48611 +
48612 + return 0;
48613 +}
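
Every handler in this file reduces to the three-way decision spelled out in generic_fs_handler(): grant and audit when the returned mode covers the request and an audit bit is set, deny and log when it does not cover the request and suppression is not set, otherwise grant or deny silently. The userspace sketch below is an editorial illustration of that decision table; the XGR_* flag values and the decide() helper are invented for the example and are not the kernel's GR_* constants.

/* Editorial sketch of the allow/deny/audit decision used by
 * generic_fs_handler(); flag values and names are illustrative.
 */
#include <stdio.h>

#define XGR_READ      0x01
#define XGR_WRITE     0x02
#define XGR_AUDIT     0x40	/* "log this access even though it is allowed" */
#define XGR_SUPPRESS  0x80	/* "do not log the denial"                     */

enum outcome { ALLOW, ALLOW_AUDITED, DENY, DENY_LOGGED };

static enum outcome decide(unsigned granted, unsigned requested)
{
	if ((granted & requested) == requested)
		return (granted & XGR_AUDIT) ? ALLOW_AUDITED : ALLOW;
	return (granted & XGR_SUPPRESS) ? DENY : DENY_LOGGED;
}

int main(void)
{
	static const char *names[] = { "allow", "allow+audit", "deny (silent)", "deny+log" };

	printf("%s\n", names[decide(XGR_READ | XGR_WRITE | XGR_AUDIT, XGR_WRITE)]); /* allow+audit   */
	printf("%s\n", names[decide(XGR_READ, XGR_WRITE)]);                         /* deny, logged  */
	printf("%s\n", names[decide(XGR_READ | XGR_SUPPRESS, XGR_WRITE)]);          /* deny, silent  */
	return 0;
}
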
48614 diff -urNp linux-2.6.32.42/grsecurity/gracl_ip.c linux-2.6.32.42/grsecurity/gracl_ip.c
48615 --- linux-2.6.32.42/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48616 +++ linux-2.6.32.42/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48617 @@ -0,0 +1,382 @@
48618 +#include <linux/kernel.h>
48619 +#include <asm/uaccess.h>
48620 +#include <asm/errno.h>
48621 +#include <net/sock.h>
48622 +#include <linux/file.h>
48623 +#include <linux/fs.h>
48624 +#include <linux/net.h>
48625 +#include <linux/in.h>
48626 +#include <linux/skbuff.h>
48627 +#include <linux/ip.h>
48628 +#include <linux/udp.h>
48629 +#include <linux/smp_lock.h>
48630 +#include <linux/types.h>
48631 +#include <linux/sched.h>
48632 +#include <linux/netdevice.h>
48633 +#include <linux/inetdevice.h>
48634 +#include <linux/gracl.h>
48635 +#include <linux/grsecurity.h>
48636 +#include <linux/grinternal.h>
48637 +
48638 +#define GR_BIND 0x01
48639 +#define GR_CONNECT 0x02
48640 +#define GR_INVERT 0x04
48641 +#define GR_BINDOVERRIDE 0x08
48642 +#define GR_CONNECTOVERRIDE 0x10
48643 +#define GR_SOCK_FAMILY 0x20
48644 +
48645 +static const char * gr_protocols[IPPROTO_MAX] = {
48646 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48647 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48648 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48649 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48650 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48651 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48652 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48653 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48654 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48655 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48656 +	"iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nsfnet-igp", "dgp", "tcf",
48657 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48658 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48659 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48660 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48661 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48662 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48663 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48664 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48665 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48666 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48667 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48668 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48669 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48670 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48671 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48672 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48673 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48674 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48675 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48676 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48677 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48678 + };
48679 +
48680 +static const char * gr_socktypes[SOCK_MAX] = {
48681 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48682 + "unknown:7", "unknown:8", "unknown:9", "packet"
48683 + };
48684 +
48685 +static const char * gr_sockfamilies[AF_MAX+1] = {
48686 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48687 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48688 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48689 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
48690 + };
48691 +
48692 +const char *
48693 +gr_proto_to_name(unsigned char proto)
48694 +{
48695 + return gr_protocols[proto];
48696 +}
48697 +
48698 +const char *
48699 +gr_socktype_to_name(unsigned char type)
48700 +{
48701 + return gr_socktypes[type];
48702 +}
48703 +
48704 +const char *
48705 +gr_sockfamily_to_name(unsigned char family)
48706 +{
48707 + return gr_sockfamilies[family];
48708 +}
48709 +
48710 +int
48711 +gr_search_socket(const int domain, const int type, const int protocol)
48712 +{
48713 + struct acl_subject_label *curr;
48714 + const struct cred *cred = current_cred();
48715 +
48716 + if (unlikely(!gr_acl_is_enabled()))
48717 + goto exit;
48718 +
48719 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
48720 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48721 + goto exit; // let the kernel handle it
48722 +
48723 + curr = current->acl;
48724 +
48725 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48726 + /* the family is allowed, if this is PF_INET allow it only if
48727 + the extra sock type/protocol checks pass */
48728 + if (domain == PF_INET)
48729 + goto inet_check;
48730 + goto exit;
48731 + } else {
48732 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48733 + __u32 fakeip = 0;
48734 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48735 + current->role->roletype, cred->uid,
48736 + cred->gid, current->exec_file ?
48737 + gr_to_filename(current->exec_file->f_path.dentry,
48738 + current->exec_file->f_path.mnt) :
48739 + curr->filename, curr->filename,
48740 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48741 + &current->signal->saved_ip);
48742 + goto exit;
48743 + }
48744 + goto exit_fail;
48745 + }
48746 +
48747 +inet_check:
48748 + /* the rest of this checking is for IPv4 only */
48749 + if (!curr->ips)
48750 + goto exit;
48751 +
48752 + if ((curr->ip_type & (1 << type)) &&
48753 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48754 + goto exit;
48755 +
48756 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48757 +	/* we don't place acls on raw sockets, and sometimes
48758 +	   dgram/ip sockets are opened for ioctl and not
48759 +	   bind/connect, so we'll fake a bind learn log */
48760 + if (type == SOCK_RAW || type == SOCK_PACKET) {
48761 + __u32 fakeip = 0;
48762 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48763 + current->role->roletype, cred->uid,
48764 + cred->gid, current->exec_file ?
48765 + gr_to_filename(current->exec_file->f_path.dentry,
48766 + current->exec_file->f_path.mnt) :
48767 + curr->filename, curr->filename,
48768 + &fakeip, 0, type,
48769 + protocol, GR_CONNECT, &current->signal->saved_ip);
48770 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48771 + __u32 fakeip = 0;
48772 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48773 + current->role->roletype, cred->uid,
48774 + cred->gid, current->exec_file ?
48775 + gr_to_filename(current->exec_file->f_path.dentry,
48776 + current->exec_file->f_path.mnt) :
48777 + curr->filename, curr->filename,
48778 + &fakeip, 0, type,
48779 + protocol, GR_BIND, &current->signal->saved_ip);
48780 + }
48781 + /* we'll log when they use connect or bind */
48782 + goto exit;
48783 + }
48784 +
48785 +exit_fail:
48786 + if (domain == PF_INET)
48787 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48788 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
48789 + else
48790 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48791 + gr_socktype_to_name(type), protocol);
48792 +
48793 + return 0;
48794 +exit:
48795 + return 1;
48796 +}
48797 +
48798 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48799 +{
48800 + if ((ip->mode & mode) &&
48801 + (ip_port >= ip->low) &&
48802 + (ip_port <= ip->high) &&
48803 + ((ntohl(ip_addr) & our_netmask) ==
48804 + (ntohl(our_addr) & our_netmask))
48805 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48806 + && (ip->type & (1 << type))) {
48807 + if (ip->mode & GR_INVERT)
48808 + return 2; // specifically denied
48809 + else
48810 + return 1; // allowed
48811 + }
48812 +
48813 + return 0; // not specifically allowed, may continue parsing
48814 +}
48815 +
48816 +static int
48817 +gr_search_connectbind(const int full_mode, struct sock *sk,
48818 + struct sockaddr_in *addr, const int type)
48819 +{
48820 + char iface[IFNAMSIZ] = {0};
48821 + struct acl_subject_label *curr;
48822 + struct acl_ip_label *ip;
48823 + struct inet_sock *isk;
48824 + struct net_device *dev;
48825 + struct in_device *idev;
48826 + unsigned long i;
48827 + int ret;
48828 + int mode = full_mode & (GR_BIND | GR_CONNECT);
48829 + __u32 ip_addr = 0;
48830 + __u32 our_addr;
48831 + __u32 our_netmask;
48832 + char *p;
48833 + __u16 ip_port = 0;
48834 + const struct cred *cred = current_cred();
48835 +
48836 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48837 + return 0;
48838 +
48839 + curr = current->acl;
48840 + isk = inet_sk(sk);
48841 +
48842 +	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
48843 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48844 + addr->sin_addr.s_addr = curr->inaddr_any_override;
48845 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48846 + struct sockaddr_in saddr;
48847 + int err;
48848 +
48849 + saddr.sin_family = AF_INET;
48850 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
48851 + saddr.sin_port = isk->sport;
48852 +
48853 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48854 + if (err)
48855 + return err;
48856 +
48857 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48858 + if (err)
48859 + return err;
48860 + }
48861 +
48862 + if (!curr->ips)
48863 + return 0;
48864 +
48865 + ip_addr = addr->sin_addr.s_addr;
48866 + ip_port = ntohs(addr->sin_port);
48867 +
48868 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48869 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48870 + current->role->roletype, cred->uid,
48871 + cred->gid, current->exec_file ?
48872 + gr_to_filename(current->exec_file->f_path.dentry,
48873 + current->exec_file->f_path.mnt) :
48874 + curr->filename, curr->filename,
48875 + &ip_addr, ip_port, type,
48876 + sk->sk_protocol, mode, &current->signal->saved_ip);
48877 + return 0;
48878 + }
48879 +
48880 + for (i = 0; i < curr->ip_num; i++) {
48881 + ip = *(curr->ips + i);
48882 + if (ip->iface != NULL) {
48883 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
48884 + p = strchr(iface, ':');
48885 + if (p != NULL)
48886 + *p = '\0';
48887 + dev = dev_get_by_name(sock_net(sk), iface);
48888 + if (dev == NULL)
48889 + continue;
48890 + idev = in_dev_get(dev);
48891 + if (idev == NULL) {
48892 + dev_put(dev);
48893 + continue;
48894 + }
48895 + rcu_read_lock();
48896 + for_ifa(idev) {
48897 + if (!strcmp(ip->iface, ifa->ifa_label)) {
48898 + our_addr = ifa->ifa_address;
48899 + our_netmask = 0xffffffff;
48900 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48901 + if (ret == 1) {
48902 + rcu_read_unlock();
48903 + in_dev_put(idev);
48904 + dev_put(dev);
48905 + return 0;
48906 + } else if (ret == 2) {
48907 + rcu_read_unlock();
48908 + in_dev_put(idev);
48909 + dev_put(dev);
48910 + goto denied;
48911 + }
48912 + }
48913 + } endfor_ifa(idev);
48914 + rcu_read_unlock();
48915 + in_dev_put(idev);
48916 + dev_put(dev);
48917 + } else {
48918 + our_addr = ip->addr;
48919 + our_netmask = ip->netmask;
48920 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48921 + if (ret == 1)
48922 + return 0;
48923 + else if (ret == 2)
48924 + goto denied;
48925 + }
48926 + }
48927 +
48928 +denied:
48929 + if (mode == GR_BIND)
48930 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48931 + else if (mode == GR_CONNECT)
48932 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48933 +
48934 + return -EACCES;
48935 +}
48936 +
48937 +int
48938 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
48939 +{
48940 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
48941 +}
48942 +
48943 +int
48944 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
48945 +{
48946 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
48947 +}
48948 +
48949 +int gr_search_listen(struct socket *sock)
48950 +{
48951 + struct sock *sk = sock->sk;
48952 + struct sockaddr_in addr;
48953 +
48954 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48955 + addr.sin_port = inet_sk(sk)->sport;
48956 +
48957 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48958 +}
48959 +
48960 +int gr_search_accept(struct socket *sock)
48961 +{
48962 + struct sock *sk = sock->sk;
48963 + struct sockaddr_in addr;
48964 +
48965 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48966 + addr.sin_port = inet_sk(sk)->sport;
48967 +
48968 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48969 +}
48970 +
48971 +int
48972 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
48973 +{
48974 + if (addr)
48975 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
48976 + else {
48977 + struct sockaddr_in sin;
48978 + const struct inet_sock *inet = inet_sk(sk);
48979 +
48980 + sin.sin_addr.s_addr = inet->daddr;
48981 + sin.sin_port = inet->dport;
48982 +
48983 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48984 + }
48985 +}
48986 +
48987 +int
48988 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
48989 +{
48990 + struct sockaddr_in sin;
48991 +
48992 + if (unlikely(skb->len < sizeof (struct udphdr)))
48993 + return 0; // skip this packet
48994 +
48995 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
48996 + sin.sin_port = udp_hdr(skb)->source;
48997 +
48998 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48999 +}
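
check_ip_policy() above boils each ACL entry down to a port-range test plus a masked address comparison in host byte order. The following standalone sketch (editorial only; it relies on inet_addr()/ntohl() from the ordinary socket headers rather than anything in the patch) reproduces just that arithmetic.

/* Editorial sketch of the masked-address and port-range test inside
 * check_ip_policy(); purely userspace, built on <arpa/inet.h>.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int rule_matches(uint32_t pkt_be, uint16_t port,
			uint32_t rule_be, uint32_t mask_host,
			uint16_t low, uint16_t high)
{
	if (port < low || port > high)
		return 0;
	/* compare both addresses in host order under the rule's netmask */
	return (ntohl(pkt_be) & mask_host) == (ntohl(rule_be) & mask_host);
}

int main(void)
{
	uint32_t rule    = inet_addr("192.168.1.0");	/* network byte order */
	uint32_t in_net  = inet_addr("192.168.1.77");
	uint32_t out_net = inet_addr("10.0.0.5");

	printf("%d\n", rule_matches(in_net, 443, rule, 0xffffff00u, 1, 1024));  /* 1: address and port match */
	printf("%d\n", rule_matches(out_net, 443, rule, 0xffffff00u, 1, 1024)); /* 0: wrong subnet           */
	printf("%d\n", rule_matches(in_net, 8080, rule, 0xffffff00u, 1, 1024)); /* 0: port out of range      */
	return 0;
}
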
49000 diff -urNp linux-2.6.32.42/grsecurity/gracl_learn.c linux-2.6.32.42/grsecurity/gracl_learn.c
49001 --- linux-2.6.32.42/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49002 +++ linux-2.6.32.42/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
49003 @@ -0,0 +1,211 @@
49004 +#include <linux/kernel.h>
49005 +#include <linux/mm.h>
49006 +#include <linux/sched.h>
49007 +#include <linux/poll.h>
49008 +#include <linux/smp_lock.h>
49009 +#include <linux/string.h>
49010 +#include <linux/file.h>
49011 +#include <linux/types.h>
49012 +#include <linux/vmalloc.h>
49013 +#include <linux/grinternal.h>
49014 +
49015 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49016 + size_t count, loff_t *ppos);
49017 +extern int gr_acl_is_enabled(void);
49018 +
49019 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49020 +static int gr_learn_attached;
49021 +
49022 +/* use a 512k buffer */
49023 +#define LEARN_BUFFER_SIZE (512 * 1024)
49024 +
49025 +static DEFINE_SPINLOCK(gr_learn_lock);
49026 +static DEFINE_MUTEX(gr_learn_user_mutex);
49027 +
49028 +/* we need to maintain two buffers, so that the kernel context of grlearn
49029 +   uses a mutex around the userspace copying, and the other kernel contexts
49030 +   use a spinlock when copying into the buffer, since they cannot sleep
49031 +*/
49032 +static char *learn_buffer;
49033 +static char *learn_buffer_user;
49034 +static int learn_buffer_len;
49035 +static int learn_buffer_user_len;
49036 +
49037 +static ssize_t
49038 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49039 +{
49040 + DECLARE_WAITQUEUE(wait, current);
49041 + ssize_t retval = 0;
49042 +
49043 + add_wait_queue(&learn_wait, &wait);
49044 + set_current_state(TASK_INTERRUPTIBLE);
49045 + do {
49046 + mutex_lock(&gr_learn_user_mutex);
49047 + spin_lock(&gr_learn_lock);
49048 + if (learn_buffer_len)
49049 + break;
49050 + spin_unlock(&gr_learn_lock);
49051 + mutex_unlock(&gr_learn_user_mutex);
49052 + if (file->f_flags & O_NONBLOCK) {
49053 + retval = -EAGAIN;
49054 + goto out;
49055 + }
49056 + if (signal_pending(current)) {
49057 + retval = -ERESTARTSYS;
49058 + goto out;
49059 + }
49060 +
49061 + schedule();
49062 + } while (1);
49063 +
49064 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49065 + learn_buffer_user_len = learn_buffer_len;
49066 + retval = learn_buffer_len;
49067 + learn_buffer_len = 0;
49068 +
49069 + spin_unlock(&gr_learn_lock);
49070 +
49071 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49072 + retval = -EFAULT;
49073 +
49074 + mutex_unlock(&gr_learn_user_mutex);
49075 +out:
49076 + set_current_state(TASK_RUNNING);
49077 + remove_wait_queue(&learn_wait, &wait);
49078 + return retval;
49079 +}
49080 +
49081 +static unsigned int
49082 +poll_learn(struct file * file, poll_table * wait)
49083 +{
49084 + poll_wait(file, &learn_wait, wait);
49085 +
49086 + if (learn_buffer_len)
49087 + return (POLLIN | POLLRDNORM);
49088 +
49089 + return 0;
49090 +}
49091 +
49092 +void
49093 +gr_clear_learn_entries(void)
49094 +{
49095 + char *tmp;
49096 +
49097 + mutex_lock(&gr_learn_user_mutex);
49098 + if (learn_buffer != NULL) {
49099 + spin_lock(&gr_learn_lock);
49100 + tmp = learn_buffer;
49101 + learn_buffer = NULL;
49102 + spin_unlock(&gr_learn_lock);
49103 +		vfree(tmp);	/* free the detached buffer; learn_buffer is NULL at this point */
49104 + }
49105 + if (learn_buffer_user != NULL) {
49106 + vfree(learn_buffer_user);
49107 + learn_buffer_user = NULL;
49108 + }
49109 + learn_buffer_len = 0;
49110 + mutex_unlock(&gr_learn_user_mutex);
49111 +
49112 + return;
49113 +}
49114 +
49115 +void
49116 +gr_add_learn_entry(const char *fmt, ...)
49117 +{
49118 + va_list args;
49119 + unsigned int len;
49120 +
49121 + if (!gr_learn_attached)
49122 + return;
49123 +
49124 + spin_lock(&gr_learn_lock);
49125 +
49126 + /* leave a gap at the end so we know when it's "full" but don't have to
49127 + compute the exact length of the string we're trying to append
49128 + */
49129 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49130 + spin_unlock(&gr_learn_lock);
49131 + wake_up_interruptible(&learn_wait);
49132 + return;
49133 + }
49134 + if (learn_buffer == NULL) {
49135 + spin_unlock(&gr_learn_lock);
49136 + return;
49137 + }
49138 +
49139 + va_start(args, fmt);
49140 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49141 + va_end(args);
49142 +
49143 + learn_buffer_len += len + 1;
49144 +
49145 + spin_unlock(&gr_learn_lock);
49146 + wake_up_interruptible(&learn_wait);
49147 +
49148 + return;
49149 +}
49150 +
49151 +static int
49152 +open_learn(struct inode *inode, struct file *file)
49153 +{
49154 + if (file->f_mode & FMODE_READ && gr_learn_attached)
49155 + return -EBUSY;
49156 + if (file->f_mode & FMODE_READ) {
49157 + int retval = 0;
49158 + mutex_lock(&gr_learn_user_mutex);
49159 + if (learn_buffer == NULL)
49160 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49161 + if (learn_buffer_user == NULL)
49162 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49163 + if (learn_buffer == NULL) {
49164 + retval = -ENOMEM;
49165 + goto out_error;
49166 + }
49167 + if (learn_buffer_user == NULL) {
49168 + retval = -ENOMEM;
49169 + goto out_error;
49170 + }
49171 + learn_buffer_len = 0;
49172 + learn_buffer_user_len = 0;
49173 + gr_learn_attached = 1;
49174 +out_error:
49175 + mutex_unlock(&gr_learn_user_mutex);
49176 + return retval;
49177 + }
49178 + return 0;
49179 +}
49180 +
49181 +static int
49182 +close_learn(struct inode *inode, struct file *file)
49183 +{
49184 + char *tmp;
49185 +
49186 + if (file->f_mode & FMODE_READ) {
49187 + mutex_lock(&gr_learn_user_mutex);
49188 + if (learn_buffer != NULL) {
49189 + spin_lock(&gr_learn_lock);
49190 + tmp = learn_buffer;
49191 + learn_buffer = NULL;
49192 + spin_unlock(&gr_learn_lock);
49193 + vfree(tmp);
49194 + }
49195 + if (learn_buffer_user != NULL) {
49196 + vfree(learn_buffer_user);
49197 + learn_buffer_user = NULL;
49198 + }
49199 + learn_buffer_len = 0;
49200 + learn_buffer_user_len = 0;
49201 + gr_learn_attached = 0;
49202 + mutex_unlock(&gr_learn_user_mutex);
49203 + }
49204 +
49205 + return 0;
49206 +}
49207 +
49208 +const struct file_operations grsec_fops = {
49209 + .read = read_learn,
49210 + .write = write_grsec_handler,
49211 + .open = open_learn,
49212 + .release = close_learn,
49213 + .poll = poll_learn,
49214 +};
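
The learn interface keeps two buffers so that in-kernel writers, which may not sleep, only ever take a spinlock while appending, while the single reader snapshots the buffer under that spinlock and performs the slow copy to userspace with only a mutex held. The pthread program below is an editorial sketch of the same split; the buffer size, names, and the use of stdout in place of copy_to_user() are all illustrative.

/* Editorial sketch of the two-buffer learn-log scheme: producers append
 * under a spinlock only; the consumer snapshots into a private staging
 * buffer under the spinlock and does the slow delivery outside it,
 * serialized against other readers by a mutex.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

static char log_buf[BUF_SZ];	/* written by producers under the spinlock */
static size_t log_len;
static char stage_buf[BUF_SZ];	/* consumer-private copy, guarded by mutex */

static pthread_spinlock_t log_lock;
static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

static void append_entry(const char *s)	/* producer: must not block */
{
	size_t n = strlen(s);

	pthread_spin_lock(&log_lock);
	if (log_len + n <= BUF_SZ) {
		memcpy(log_buf + log_len, s, n);
		log_len += n;
	}
	pthread_spin_unlock(&log_lock);
}

static size_t drain_entries(void)	/* consumer: may block in the slow path */
{
	size_t n;

	pthread_mutex_lock(&reader_lock);
	pthread_spin_lock(&log_lock);
	n = log_len;
	memcpy(stage_buf, log_buf, n);	/* fast snapshot under the spinlock */
	log_len = 0;
	pthread_spin_unlock(&log_lock);

	/* the slow delivery (copy_to_user in the kernel) happens here,
	 * with only the reader mutex held */
	fwrite(stage_buf, 1, n, stdout);
	pthread_mutex_unlock(&reader_lock);
	return n;
}

int main(void)
{
	pthread_spin_init(&log_lock, PTHREAD_PROCESS_PRIVATE);
	append_entry("role admin /usr/bin/foo\n");
	append_entry("role admin /usr/bin/bar\n");
	printf("drained %zu bytes\n", drain_entries());
	return 0;
}
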
49215 diff -urNp linux-2.6.32.42/grsecurity/gracl_res.c linux-2.6.32.42/grsecurity/gracl_res.c
49216 --- linux-2.6.32.42/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49217 +++ linux-2.6.32.42/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
49218 @@ -0,0 +1,67 @@
49219 +#include <linux/kernel.h>
49220 +#include <linux/sched.h>
49221 +#include <linux/gracl.h>
49222 +#include <linux/grinternal.h>
49223 +
49224 +static const char *restab_log[] = {
49225 + [RLIMIT_CPU] = "RLIMIT_CPU",
49226 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49227 + [RLIMIT_DATA] = "RLIMIT_DATA",
49228 + [RLIMIT_STACK] = "RLIMIT_STACK",
49229 + [RLIMIT_CORE] = "RLIMIT_CORE",
49230 + [RLIMIT_RSS] = "RLIMIT_RSS",
49231 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
49232 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49233 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49234 + [RLIMIT_AS] = "RLIMIT_AS",
49235 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49236 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49237 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49238 + [RLIMIT_NICE] = "RLIMIT_NICE",
49239 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49240 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49241 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49242 +};
49243 +
49244 +void
49245 +gr_log_resource(const struct task_struct *task,
49246 + const int res, const unsigned long wanted, const int gt)
49247 +{
49248 + const struct cred *cred;
49249 + unsigned long rlim;
49250 +
49251 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49252 + return;
49253 +
49254 + // not yet supported resource
49255 + if (unlikely(!restab_log[res]))
49256 + return;
49257 +
49258 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49259 + rlim = task->signal->rlim[res].rlim_max;
49260 + else
49261 + rlim = task->signal->rlim[res].rlim_cur;
49262 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49263 + return;
49264 +
49265 + rcu_read_lock();
49266 + cred = __task_cred(task);
49267 +
49268 + if (res == RLIMIT_NPROC &&
49269 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49270 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49271 + goto out_rcu_unlock;
49272 + else if (res == RLIMIT_MEMLOCK &&
49273 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49274 + goto out_rcu_unlock;
49275 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49276 + goto out_rcu_unlock;
49277 + rcu_read_unlock();
49278 +
49279 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49280 +
49281 + return;
49282 +out_rcu_unlock:
49283 + rcu_read_unlock();
49284 + return;
49285 +}
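
gr_log_resource() only reports a request once it actually exceeds the applicable limit: the hard limit for RLIMIT_CPU and RLIMIT_RTTIME, the soft limit otherwise, never for RLIM_INFINITY, with the gt flag selecting a strict versus non-strict comparison. The short userspace check below (editorial; exceeds_limit() is an invented helper built on getrlimit(2)) mirrors that comparison.

/* Editorial sketch of the "does this request exceed the limit?" test used
 * by gr_log_resource(); getrlimit(2) stands in for the task's rlim table.
 */
#include <stdio.h>
#include <sys/resource.h>

/* gt != 0: report only when wanted is strictly greater than the limit;
 * gt == 0: report as soon as wanted reaches the limit. */
static int exceeds_limit(int resource, rlim_t wanted, int gt)
{
	struct rlimit rl;

	if (getrlimit(resource, &rl) != 0)
		return 0;
	if (rl.rlim_cur == RLIM_INFINITY)
		return 0;
	return gt ? wanted > rl.rlim_cur : wanted >= rl.rlim_cur;
}

int main(void)
{
	struct rlimit rl;

	getrlimit(RLIMIT_NOFILE, &rl);
	printf("soft RLIMIT_NOFILE = %lu\n", (unsigned long)rl.rlim_cur);
	printf("wanted = limit,   gt=1 -> %d\n", exceeds_limit(RLIMIT_NOFILE, rl.rlim_cur, 1));
	printf("wanted = limit+1, gt=1 -> %d\n", exceeds_limit(RLIMIT_NOFILE, rl.rlim_cur + 1, 1));
	return 0;
}
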
49286 diff -urNp linux-2.6.32.42/grsecurity/gracl_segv.c linux-2.6.32.42/grsecurity/gracl_segv.c
49287 --- linux-2.6.32.42/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49288 +++ linux-2.6.32.42/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49289 @@ -0,0 +1,284 @@
49290 +#include <linux/kernel.h>
49291 +#include <linux/mm.h>
49292 +#include <asm/uaccess.h>
49293 +#include <asm/errno.h>
49294 +#include <asm/mman.h>
49295 +#include <net/sock.h>
49296 +#include <linux/file.h>
49297 +#include <linux/fs.h>
49298 +#include <linux/net.h>
49299 +#include <linux/in.h>
49300 +#include <linux/smp_lock.h>
49301 +#include <linux/slab.h>
49302 +#include <linux/types.h>
49303 +#include <linux/sched.h>
49304 +#include <linux/timer.h>
49305 +#include <linux/gracl.h>
49306 +#include <linux/grsecurity.h>
49307 +#include <linux/grinternal.h>
49308 +
49309 +static struct crash_uid *uid_set;
49310 +static unsigned short uid_used;
49311 +static DEFINE_SPINLOCK(gr_uid_lock);
49312 +extern rwlock_t gr_inode_lock;
49313 +extern struct acl_subject_label *
49314 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49315 + struct acl_role_label *role);
49316 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49317 +
49318 +int
49319 +gr_init_uidset(void)
49320 +{
49321 + uid_set =
49322 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49323 + uid_used = 0;
49324 +
49325 + return uid_set ? 1 : 0;
49326 +}
49327 +
49328 +void
49329 +gr_free_uidset(void)
49330 +{
49331 + if (uid_set)
49332 + kfree(uid_set);
49333 +
49334 + return;
49335 +}
49336 +
49337 +int
49338 +gr_find_uid(const uid_t uid)
49339 +{
49340 + struct crash_uid *tmp = uid_set;
49341 + uid_t buid;
49342 + int low = 0, high = uid_used - 1, mid;
49343 +
49344 + while (high >= low) {
49345 + mid = (low + high) >> 1;
49346 + buid = tmp[mid].uid;
49347 + if (buid == uid)
49348 + return mid;
49349 + if (buid > uid)
49350 + high = mid - 1;
49351 + if (buid < uid)
49352 + low = mid + 1;
49353 + }
49354 +
49355 + return -1;
49356 +}
49357 +
49358 +static __inline__ void
49359 +gr_insertsort(void)
49360 +{
49361 + unsigned short i, j;
49362 + struct crash_uid index;
49363 +
49364 + for (i = 1; i < uid_used; i++) {
49365 + index = uid_set[i];
49366 + j = i;
49367 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49368 + uid_set[j] = uid_set[j - 1];
49369 + j--;
49370 + }
49371 + uid_set[j] = index;
49372 + }
49373 +
49374 + return;
49375 +}
49376 +
49377 +static __inline__ void
49378 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49379 +{
49380 + int loc;
49381 +
49382 + if (uid_used == GR_UIDTABLE_MAX)
49383 + return;
49384 +
49385 + loc = gr_find_uid(uid);
49386 +
49387 + if (loc >= 0) {
49388 + uid_set[loc].expires = expires;
49389 + return;
49390 + }
49391 +
49392 + uid_set[uid_used].uid = uid;
49393 + uid_set[uid_used].expires = expires;
49394 + uid_used++;
49395 +
49396 + gr_insertsort();
49397 +
49398 + return;
49399 +}
49400 +
49401 +void
49402 +gr_remove_uid(const unsigned short loc)
49403 +{
49404 + unsigned short i;
49405 +
49406 + for (i = loc + 1; i < uid_used; i++)
49407 + uid_set[i - 1] = uid_set[i];
49408 +
49409 + uid_used--;
49410 +
49411 + return;
49412 +}
49413 +
49414 +int
49415 +gr_check_crash_uid(const uid_t uid)
49416 +{
49417 + int loc;
49418 + int ret = 0;
49419 +
49420 + if (unlikely(!gr_acl_is_enabled()))
49421 + return 0;
49422 +
49423 + spin_lock(&gr_uid_lock);
49424 + loc = gr_find_uid(uid);
49425 +
49426 + if (loc < 0)
49427 + goto out_unlock;
49428 +
49429 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49430 + gr_remove_uid(loc);
49431 + else
49432 + ret = 1;
49433 +
49434 +out_unlock:
49435 + spin_unlock(&gr_uid_lock);
49436 + return ret;
49437 +}
49438 +
49439 +static __inline__ int
49440 +proc_is_setxid(const struct cred *cred)
49441 +{
49442 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49443 + cred->uid != cred->fsuid)
49444 + return 1;
49445 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49446 + cred->gid != cred->fsgid)
49447 + return 1;
49448 +
49449 + return 0;
49450 +}
49451 +
49452 +void
49453 +gr_handle_crash(struct task_struct *task, const int sig)
49454 +{
49455 + struct acl_subject_label *curr;
49456 + struct acl_subject_label *curr2;
49457 + struct task_struct *tsk, *tsk2;
49458 + const struct cred *cred;
49459 + const struct cred *cred2;
49460 +
49461 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49462 + return;
49463 +
49464 + if (unlikely(!gr_acl_is_enabled()))
49465 + return;
49466 +
49467 + curr = task->acl;
49468 +
49469 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49470 + return;
49471 +
49472 + if (time_before_eq(curr->expires, get_seconds())) {
49473 + curr->expires = 0;
49474 + curr->crashes = 0;
49475 + }
49476 +
49477 + curr->crashes++;
49478 +
49479 + if (!curr->expires)
49480 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49481 +
49482 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49483 + time_after(curr->expires, get_seconds())) {
49484 + rcu_read_lock();
49485 + cred = __task_cred(task);
49486 + if (cred->uid && proc_is_setxid(cred)) {
49487 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49488 + spin_lock(&gr_uid_lock);
49489 + gr_insert_uid(cred->uid, curr->expires);
49490 + spin_unlock(&gr_uid_lock);
49491 + curr->expires = 0;
49492 + curr->crashes = 0;
49493 + read_lock(&tasklist_lock);
49494 + do_each_thread(tsk2, tsk) {
49495 + cred2 = __task_cred(tsk);
49496 + if (tsk != task && cred2->uid == cred->uid)
49497 + gr_fake_force_sig(SIGKILL, tsk);
49498 + } while_each_thread(tsk2, tsk);
49499 + read_unlock(&tasklist_lock);
49500 + } else {
49501 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49502 + read_lock(&tasklist_lock);
49503 + do_each_thread(tsk2, tsk) {
49504 + if (likely(tsk != task)) {
49505 + curr2 = tsk->acl;
49506 +
49507 + if (curr2->device == curr->device &&
49508 + curr2->inode == curr->inode)
49509 + gr_fake_force_sig(SIGKILL, tsk);
49510 + }
49511 + } while_each_thread(tsk2, tsk);
49512 + read_unlock(&tasklist_lock);
49513 + }
49514 + rcu_read_unlock();
49515 + }
49516 +
49517 + return;
49518 +}
49519 +
49520 +int
49521 +gr_check_crash_exec(const struct file *filp)
49522 +{
49523 + struct acl_subject_label *curr;
49524 +
49525 + if (unlikely(!gr_acl_is_enabled()))
49526 + return 0;
49527 +
49528 + read_lock(&gr_inode_lock);
49529 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49530 + filp->f_path.dentry->d_inode->i_sb->s_dev,
49531 + current->role);
49532 + read_unlock(&gr_inode_lock);
49533 +
49534 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49535 + (!curr->crashes && !curr->expires))
49536 + return 0;
49537 +
49538 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49539 + time_after(curr->expires, get_seconds()))
49540 + return 1;
49541 + else if (time_before_eq(curr->expires, get_seconds())) {
49542 + curr->crashes = 0;
49543 + curr->expires = 0;
49544 + }
49545 +
49546 + return 0;
49547 +}
49548 +
49549 +void
49550 +gr_handle_alertkill(struct task_struct *task)
49551 +{
49552 + struct acl_subject_label *curracl;
49553 + __u32 curr_ip;
49554 + struct task_struct *p, *p2;
49555 +
49556 + if (unlikely(!gr_acl_is_enabled()))
49557 + return;
49558 +
49559 + curracl = task->acl;
49560 + curr_ip = task->signal->curr_ip;
49561 +
49562 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49563 + read_lock(&tasklist_lock);
49564 + do_each_thread(p2, p) {
49565 + if (p->signal->curr_ip == curr_ip)
49566 + gr_fake_force_sig(SIGKILL, p);
49567 + } while_each_thread(p2, p);
49568 + read_unlock(&tasklist_lock);
49569 + } else if (curracl->mode & GR_KILLPROC)
49570 + gr_fake_force_sig(SIGKILL, task);
49571 +
49572 + return;
49573 +}
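
The crash-UID table above stays sorted so gr_check_crash_uid() can binary-search it, and gr_insert_uid() restores order with one insertion-sort pass after appending. The standalone sketch below (editorial; expiry timestamps and the gr_uid_lock are omitted) demonstrates the same lookup/insert pair on a plain array.

/* Editorial sketch of the sorted crash-uid table: append plus insertion
 * sort on insert, binary search on lookup.
 */
#include <stdio.h>

#define TABLE_MAX 16

static unsigned int uids[TABLE_MAX];
static int used;

static int find_uid(unsigned int uid)	/* binary search, -1 if absent */
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (uids[mid] == uid)
			return mid;
		if (uids[mid] > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid)
{
	int j;

	if (used == TABLE_MAX || find_uid(uid) >= 0)
		return;
	/* append, then bubble the new entry left until the array is sorted again */
	uids[used++] = uid;
	for (j = used - 1; j > 0 && uids[j - 1] > uids[j]; j--) {
		unsigned int tmp = uids[j];

		uids[j] = uids[j - 1];
		uids[j - 1] = tmp;
	}
}

int main(void)
{
	insert_uid(1000);
	insert_uid(33);
	insert_uid(65534);
	printf("index of 33: %d\n", find_uid(33));	/* 0 after sorting */
	printf("index of 42: %d\n", find_uid(42));	/* -1              */
	return 0;
}
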
49574 diff -urNp linux-2.6.32.42/grsecurity/gracl_shm.c linux-2.6.32.42/grsecurity/gracl_shm.c
49575 --- linux-2.6.32.42/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49576 +++ linux-2.6.32.42/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49577 @@ -0,0 +1,40 @@
49578 +#include <linux/kernel.h>
49579 +#include <linux/mm.h>
49580 +#include <linux/sched.h>
49581 +#include <linux/file.h>
49582 +#include <linux/ipc.h>
49583 +#include <linux/gracl.h>
49584 +#include <linux/grsecurity.h>
49585 +#include <linux/grinternal.h>
49586 +
49587 +int
49588 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49589 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49590 +{
49591 + struct task_struct *task;
49592 +
49593 + if (!gr_acl_is_enabled())
49594 + return 1;
49595 +
49596 + rcu_read_lock();
49597 + read_lock(&tasklist_lock);
49598 +
49599 + task = find_task_by_vpid(shm_cprid);
49600 +
49601 + if (unlikely(!task))
49602 + task = find_task_by_vpid(shm_lapid);
49603 +
49604 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49605 + (task->pid == shm_lapid)) &&
49606 + (task->acl->mode & GR_PROTSHM) &&
49607 + (task->acl != current->acl))) {
49608 + read_unlock(&tasklist_lock);
49609 + rcu_read_unlock();
49610 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49611 + return 0;
49612 + }
49613 + read_unlock(&tasklist_lock);
49614 + rcu_read_unlock();
49615 +
49616 + return 1;
49617 +}
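
gr_handle_shmat() only treats a task found under the recorded PID as the segment's creator if it was already running when the segment was created, so a later process that merely reuses the creator's PID cannot inherit its standing. The tiny sketch below (editorial; plain time_t values stand in for task start times) isolates that pid-reuse test.

/* Editorial sketch of the pid-reuse guard in gr_handle_shmat(): a task
 * found under the creator's pid only counts as the creator if it started
 * no later than the segment was created.
 */
#include <stdio.h>
#include <time.h>

static int could_be_creator(time_t task_start, time_t shm_create)
{
	return task_start <= shm_create;
}

int main(void)
{
	time_t created = 1000000;

	printf("%d\n", could_be_creator(999000, created));  /* 1: plausible creator */
	printf("%d\n", could_be_creator(1000500, created)); /* 0: pid was reused    */
	return 0;
}
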
49618 diff -urNp linux-2.6.32.42/grsecurity/grsec_chdir.c linux-2.6.32.42/grsecurity/grsec_chdir.c
49619 --- linux-2.6.32.42/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49620 +++ linux-2.6.32.42/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49621 @@ -0,0 +1,19 @@
49622 +#include <linux/kernel.h>
49623 +#include <linux/sched.h>
49624 +#include <linux/fs.h>
49625 +#include <linux/file.h>
49626 +#include <linux/grsecurity.h>
49627 +#include <linux/grinternal.h>
49628 +
49629 +void
49630 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49631 +{
49632 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49633 + if ((grsec_enable_chdir && grsec_enable_group &&
49634 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49635 + !grsec_enable_group)) {
49636 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49637 + }
49638 +#endif
49639 + return;
49640 +}
49641 diff -urNp linux-2.6.32.42/grsecurity/grsec_chroot.c linux-2.6.32.42/grsecurity/grsec_chroot.c
49642 --- linux-2.6.32.42/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49643 +++ linux-2.6.32.42/grsecurity/grsec_chroot.c 2011-06-20 19:44:00.000000000 -0400
49644 @@ -0,0 +1,395 @@
49645 +#include <linux/kernel.h>
49646 +#include <linux/module.h>
49647 +#include <linux/sched.h>
49648 +#include <linux/file.h>
49649 +#include <linux/fs.h>
49650 +#include <linux/mount.h>
49651 +#include <linux/types.h>
49652 +#include <linux/pid_namespace.h>
49653 +#include <linux/grsecurity.h>
49654 +#include <linux/grinternal.h>
49655 +
49656 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49657 +{
49658 +#ifdef CONFIG_GRKERNSEC
49659 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49660 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49661 + task->gr_is_chrooted = 1;
49662 + else
49663 + task->gr_is_chrooted = 0;
49664 +
49665 + task->gr_chroot_dentry = path->dentry;
49666 +#endif
49667 + return;
49668 +}
49669 +
49670 +void gr_clear_chroot_entries(struct task_struct *task)
49671 +{
49672 +#ifdef CONFIG_GRKERNSEC
49673 + task->gr_is_chrooted = 0;
49674 + task->gr_chroot_dentry = NULL;
49675 +#endif
49676 + return;
49677 +}
49678 +
49679 +int
49680 +gr_handle_chroot_unix(const pid_t pid)
49681 +{
49682 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49683 + struct pid *spid = NULL;
49684 +
49685 + if (unlikely(!grsec_enable_chroot_unix))
49686 + return 1;
49687 +
49688 + if (likely(!proc_is_chrooted(current)))
49689 + return 1;
49690 +
49691 + rcu_read_lock();
49692 + read_lock(&tasklist_lock);
49693 +
49694 + spid = find_vpid(pid);
49695 + if (spid) {
49696 + struct task_struct *p;
49697 + p = pid_task(spid, PIDTYPE_PID);
49698 + if (unlikely(p && !have_same_root(current, p))) {
49699 + read_unlock(&tasklist_lock);
49700 + rcu_read_unlock();
49701 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49702 + return 0;
49703 + }
49704 + }
49705 + read_unlock(&tasklist_lock);
49706 + rcu_read_unlock();
49707 +#endif
49708 + return 1;
49709 +}
49710 +
49711 +int
49712 +gr_handle_chroot_nice(void)
49713 +{
49714 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49715 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49716 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49717 + return -EPERM;
49718 + }
49719 +#endif
49720 + return 0;
49721 +}
49722 +
49723 +int
49724 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49725 +{
49726 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49727 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49728 + && proc_is_chrooted(current)) {
49729 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49730 + return -EACCES;
49731 + }
49732 +#endif
49733 + return 0;
49734 +}
49735 +
49736 +int
49737 +gr_handle_chroot_rawio(const struct inode *inode)
49738 +{
49739 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49740 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49741 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49742 + return 1;
49743 +#endif
49744 + return 0;
49745 +}
49746 +
49747 +int
49748 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49749 +{
49750 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49751 + struct task_struct *p;
49752 + int ret = 0;
49753 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49754 + return ret;
49755 +
49756 + read_lock(&tasklist_lock);
49757 + do_each_pid_task(pid, type, p) {
49758 + if (!have_same_root(current, p)) {
49759 + ret = 1;
49760 + goto out;
49761 + }
49762 + } while_each_pid_task(pid, type, p);
49763 +out:
49764 + read_unlock(&tasklist_lock);
49765 + return ret;
49766 +#endif
49767 + return 0;
49768 +}
49769 +
49770 +int
49771 +gr_pid_is_chrooted(struct task_struct *p)
49772 +{
49773 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49774 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49775 + return 0;
49776 +
49777 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49778 + !have_same_root(current, p)) {
49779 + return 1;
49780 + }
49781 +#endif
49782 + return 0;
49783 +}
49784 +
49785 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49786 +
49787 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49788 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49789 +{
49790 + struct dentry *dentry = (struct dentry *)u_dentry;
49791 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
49792 + struct dentry *realroot;
49793 + struct vfsmount *realrootmnt;
49794 + struct dentry *currentroot;
49795 + struct vfsmount *currentmnt;
49796 + struct task_struct *reaper = &init_task;
49797 + int ret = 1;
49798 +
49799 + read_lock(&reaper->fs->lock);
49800 + realrootmnt = mntget(reaper->fs->root.mnt);
49801 + realroot = dget(reaper->fs->root.dentry);
49802 + read_unlock(&reaper->fs->lock);
49803 +
49804 + read_lock(&current->fs->lock);
49805 + currentmnt = mntget(current->fs->root.mnt);
49806 + currentroot = dget(current->fs->root.dentry);
49807 + read_unlock(&current->fs->lock);
49808 +
49809 + spin_lock(&dcache_lock);
49810 + for (;;) {
49811 + if (unlikely((dentry == realroot && mnt == realrootmnt)
49812 + || (dentry == currentroot && mnt == currentmnt)))
49813 + break;
49814 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
49815 + if (mnt->mnt_parent == mnt)
49816 + break;
49817 + dentry = mnt->mnt_mountpoint;
49818 + mnt = mnt->mnt_parent;
49819 + continue;
49820 + }
49821 + dentry = dentry->d_parent;
49822 + }
49823 + spin_unlock(&dcache_lock);
49824 +
49825 + dput(currentroot);
49826 + mntput(currentmnt);
49827 +
49828 + /* access is outside of chroot */
49829 + if (dentry == realroot && mnt == realrootmnt)
49830 + ret = 0;
49831 +
49832 + dput(realroot);
49833 + mntput(realrootmnt);
49834 + return ret;
49835 +}
49836 +#endif
49837 +
49838 +int
49839 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49840 +{
49841 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49842 + if (!grsec_enable_chroot_fchdir)
49843 + return 1;
49844 +
49845 + if (!proc_is_chrooted(current))
49846 + return 1;
49847 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49848 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49849 + return 0;
49850 + }
49851 +#endif
49852 + return 1;
49853 +}
49854 +
49855 +int
49856 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49857 + const time_t shm_createtime)
49858 +{
49859 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49860 + struct pid *pid = NULL;
49861 + time_t starttime;
49862 +
49863 + if (unlikely(!grsec_enable_chroot_shmat))
49864 + return 1;
49865 +
49866 + if (likely(!proc_is_chrooted(current)))
49867 + return 1;
49868 +
49869 + rcu_read_lock();
49870 + read_lock(&tasklist_lock);
49871 +
49872 + pid = find_vpid(shm_cprid);
49873 + if (pid) {
49874 + struct task_struct *p;
49875 + p = pid_task(pid, PIDTYPE_PID);
49876 + if (p == NULL)
49877 + goto unlock;
49878 + starttime = p->start_time.tv_sec;
49879 + if (unlikely(!have_same_root(current, p) &&
49880 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
49881 + read_unlock(&tasklist_lock);
49882 + rcu_read_unlock();
49883 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49884 + return 0;
49885 + }
49886 + } else {
49887 + pid = find_vpid(shm_lapid);
49888 + if (pid) {
49889 + struct task_struct *p;
49890 + p = pid_task(pid, PIDTYPE_PID);
49891 + if (p == NULL)
49892 + goto unlock;
49893 + if (unlikely(!have_same_root(current, p))) {
49894 + read_unlock(&tasklist_lock);
49895 + rcu_read_unlock();
49896 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49897 + return 0;
49898 + }
49899 + }
49900 + }
49901 +
49902 +unlock:
49903 + read_unlock(&tasklist_lock);
49904 + rcu_read_unlock();
49905 +#endif
49906 + return 1;
49907 +}
49908 +
49909 +void
49910 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
49911 +{
49912 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49913 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
49914 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
49915 +#endif
49916 + return;
49917 +}
49918 +
49919 +int
49920 +gr_handle_chroot_mknod(const struct dentry *dentry,
49921 + const struct vfsmount *mnt, const int mode)
49922 +{
49923 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49924 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
49925 + proc_is_chrooted(current)) {
49926 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
49927 + return -EPERM;
49928 + }
49929 +#endif
49930 + return 0;
49931 +}
49932 +
49933 +int
49934 +gr_handle_chroot_mount(const struct dentry *dentry,
49935 + const struct vfsmount *mnt, const char *dev_name)
49936 +{
49937 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49938 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
49939 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
49940 + return -EPERM;
49941 + }
49942 +#endif
49943 + return 0;
49944 +}
49945 +
49946 +int
49947 +gr_handle_chroot_pivot(void)
49948 +{
49949 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49950 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
49951 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
49952 + return -EPERM;
49953 + }
49954 +#endif
49955 + return 0;
49956 +}
49957 +
49958 +int
49959 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
49960 +{
49961 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49962 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
49963 + !gr_is_outside_chroot(dentry, mnt)) {
49964 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
49965 + return -EPERM;
49966 + }
49967 +#endif
49968 + return 0;
49969 +}
49970 +
49971 +int
49972 +gr_handle_chroot_caps(struct path *path)
49973 +{
49974 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49975 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
49976 + (init_task.fs->root.dentry != path->dentry) &&
49977 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
49978 +
49979 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
49980 + const struct cred *old = current_cred();
49981 + struct cred *new = prepare_creds();
49982 + if (new == NULL)
49983 + return 1;
49984 +
49985 + new->cap_permitted = cap_drop(old->cap_permitted,
49986 + chroot_caps);
49987 + new->cap_inheritable = cap_drop(old->cap_inheritable,
49988 + chroot_caps);
49989 + new->cap_effective = cap_drop(old->cap_effective,
49990 + chroot_caps);
49991 +
49992 + commit_creds(new);
49993 +
49994 + return 0;
49995 + }
49996 +#endif
49997 + return 0;
49998 +}
49999 +
50000 +int
50001 +gr_handle_chroot_sysctl(const int op)
50002 +{
50003 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50004 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
50005 + && (op & MAY_WRITE))
50006 + return -EACCES;
50007 +#endif
50008 + return 0;
50009 +}
50010 +
50011 +void
50012 +gr_handle_chroot_chdir(struct path *path)
50013 +{
50014 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50015 + if (grsec_enable_chroot_chdir)
50016 + set_fs_pwd(current->fs, path);
50017 +#endif
50018 + return;
50019 +}
50020 +
50021 +int
50022 +gr_handle_chroot_chmod(const struct dentry *dentry,
50023 + const struct vfsmount *mnt, const int mode)
50024 +{
50025 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50026 + /* allow chmod +s on directories, but not on files */
50027 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50028 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50029 + proc_is_chrooted(current)) {
50030 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50031 + return -EPERM;
50032 + }
50033 +#endif
50034 + return 0;
50035 +}
50036 +
50037 +#ifdef CONFIG_SECURITY
50038 +EXPORT_SYMBOL(gr_handle_chroot_caps);
50039 +#endif
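
The chroot hooks above all share one shape: when the matching grsec_enable_chroot_* toggle is set and the calling task is chrooted, the operation is refused with -EPERM or -EACCES and an alert is logged. The userspace probe below is a minimal sketch for one of them, gr_handle_chroot_mknod(); it is not part of the patch, must run as root, uses a hypothetical /tmp/jail directory, and is only expected to be denied on a kernel built with CONFIG_GRKERNSEC_CHROOT_MKNOD and the feature enabled.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <unistd.h>

int main(void)
{
        /* illustrative probe, not part of the patch; needs root */
        if (mkdir("/tmp/jail", 0700) && errno != EEXIST) {
                perror("mkdir");
                return 1;
        }
        if (chroot("/tmp/jail") || chdir("/")) {
                perror("chroot");
                return 1;
        }
        /* a character device node is exactly what the hook refuses for
           chrooted tasks; FIFOs and regular files remain allowed */
        if (mknod("/null", S_IFCHR | 0600, makedev(1, 3)))
                printf("mknod denied: %s\n", strerror(errno));
        else
                printf("mknod succeeded (restriction not active here)\n");
        return 0;
}
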
50040 diff -urNp linux-2.6.32.42/grsecurity/grsec_disabled.c linux-2.6.32.42/grsecurity/grsec_disabled.c
50041 --- linux-2.6.32.42/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50042 +++ linux-2.6.32.42/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
50043 @@ -0,0 +1,447 @@
50044 +#include <linux/kernel.h>
50045 +#include <linux/module.h>
50046 +#include <linux/sched.h>
50047 +#include <linux/file.h>
50048 +#include <linux/fs.h>
50049 +#include <linux/kdev_t.h>
50050 +#include <linux/net.h>
50051 +#include <linux/in.h>
50052 +#include <linux/ip.h>
50053 +#include <linux/skbuff.h>
50054 +#include <linux/sysctl.h>
50055 +
50056 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50057 +void
50058 +pax_set_initial_flags(struct linux_binprm *bprm)
50059 +{
50060 + return;
50061 +}
50062 +#endif
50063 +
50064 +#ifdef CONFIG_SYSCTL
50065 +__u32
50066 +gr_handle_sysctl(const struct ctl_table * table, const int op)
50067 +{
50068 + return 0;
50069 +}
50070 +#endif
50071 +
50072 +#ifdef CONFIG_TASKSTATS
50073 +int gr_is_taskstats_denied(int pid)
50074 +{
50075 + return 0;
50076 +}
50077 +#endif
50078 +
50079 +int
50080 +gr_acl_is_enabled(void)
50081 +{
50082 + return 0;
50083 +}
50084 +
50085 +int
50086 +gr_handle_rawio(const struct inode *inode)
50087 +{
50088 + return 0;
50089 +}
50090 +
50091 +void
50092 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50093 +{
50094 + return;
50095 +}
50096 +
50097 +int
50098 +gr_handle_ptrace(struct task_struct *task, const long request)
50099 +{
50100 + return 0;
50101 +}
50102 +
50103 +int
50104 +gr_handle_proc_ptrace(struct task_struct *task)
50105 +{
50106 + return 0;
50107 +}
50108 +
50109 +void
50110 +gr_learn_resource(const struct task_struct *task,
50111 + const int res, const unsigned long wanted, const int gt)
50112 +{
50113 + return;
50114 +}
50115 +
50116 +int
50117 +gr_set_acls(const int type)
50118 +{
50119 + return 0;
50120 +}
50121 +
50122 +int
50123 +gr_check_hidden_task(const struct task_struct *tsk)
50124 +{
50125 + return 0;
50126 +}
50127 +
50128 +int
50129 +gr_check_protected_task(const struct task_struct *task)
50130 +{
50131 + return 0;
50132 +}
50133 +
50134 +int
50135 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50136 +{
50137 + return 0;
50138 +}
50139 +
50140 +void
50141 +gr_copy_label(struct task_struct *tsk)
50142 +{
50143 + return;
50144 +}
50145 +
50146 +void
50147 +gr_set_pax_flags(struct task_struct *task)
50148 +{
50149 + return;
50150 +}
50151 +
50152 +int
50153 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50154 + const int unsafe_share)
50155 +{
50156 + return 0;
50157 +}
50158 +
50159 +void
50160 +gr_handle_delete(const ino_t ino, const dev_t dev)
50161 +{
50162 + return;
50163 +}
50164 +
50165 +void
50166 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50167 +{
50168 + return;
50169 +}
50170 +
50171 +void
50172 +gr_handle_crash(struct task_struct *task, const int sig)
50173 +{
50174 + return;
50175 +}
50176 +
50177 +int
50178 +gr_check_crash_exec(const struct file *filp)
50179 +{
50180 + return 0;
50181 +}
50182 +
50183 +int
50184 +gr_check_crash_uid(const uid_t uid)
50185 +{
50186 + return 0;
50187 +}
50188 +
50189 +void
50190 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50191 + struct dentry *old_dentry,
50192 + struct dentry *new_dentry,
50193 + struct vfsmount *mnt, const __u8 replace)
50194 +{
50195 + return;
50196 +}
50197 +
50198 +int
50199 +gr_search_socket(const int family, const int type, const int protocol)
50200 +{
50201 + return 1;
50202 +}
50203 +
50204 +int
50205 +gr_search_connectbind(const int mode, const struct socket *sock,
50206 + const struct sockaddr_in *addr)
50207 +{
50208 + return 0;
50209 +}
50210 +
50211 +int
50212 +gr_is_capable(const int cap)
50213 +{
50214 + return 1;
50215 +}
50216 +
50217 +int
50218 +gr_is_capable_nolog(const int cap)
50219 +{
50220 + return 1;
50221 +}
50222 +
50223 +void
50224 +gr_handle_alertkill(struct task_struct *task)
50225 +{
50226 + return;
50227 +}
50228 +
50229 +__u32
50230 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50231 +{
50232 + return 1;
50233 +}
50234 +
50235 +__u32
50236 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50237 + const struct vfsmount * mnt)
50238 +{
50239 + return 1;
50240 +}
50241 +
50242 +__u32
50243 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50244 + const int fmode)
50245 +{
50246 + return 1;
50247 +}
50248 +
50249 +__u32
50250 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50251 +{
50252 + return 1;
50253 +}
50254 +
50255 +__u32
50256 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50257 +{
50258 + return 1;
50259 +}
50260 +
50261 +int
50262 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50263 + unsigned int *vm_flags)
50264 +{
50265 + return 1;
50266 +}
50267 +
50268 +__u32
50269 +gr_acl_handle_truncate(const struct dentry * dentry,
50270 + const struct vfsmount * mnt)
50271 +{
50272 + return 1;
50273 +}
50274 +
50275 +__u32
50276 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50277 +{
50278 + return 1;
50279 +}
50280 +
50281 +__u32
50282 +gr_acl_handle_access(const struct dentry * dentry,
50283 + const struct vfsmount * mnt, const int fmode)
50284 +{
50285 + return 1;
50286 +}
50287 +
50288 +__u32
50289 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50290 + mode_t mode)
50291 +{
50292 + return 1;
50293 +}
50294 +
50295 +__u32
50296 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50297 + mode_t mode)
50298 +{
50299 + return 1;
50300 +}
50301 +
50302 +__u32
50303 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50304 +{
50305 + return 1;
50306 +}
50307 +
50308 +__u32
50309 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50310 +{
50311 + return 1;
50312 +}
50313 +
50314 +void
50315 +grsecurity_init(void)
50316 +{
50317 + return;
50318 +}
50319 +
50320 +__u32
50321 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50322 + const struct dentry * parent_dentry,
50323 + const struct vfsmount * parent_mnt,
50324 + const int mode)
50325 +{
50326 + return 1;
50327 +}
50328 +
50329 +__u32
50330 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50331 + const struct dentry * parent_dentry,
50332 + const struct vfsmount * parent_mnt)
50333 +{
50334 + return 1;
50335 +}
50336 +
50337 +__u32
50338 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50339 + const struct dentry * parent_dentry,
50340 + const struct vfsmount * parent_mnt, const char *from)
50341 +{
50342 + return 1;
50343 +}
50344 +
50345 +__u32
50346 +gr_acl_handle_link(const struct dentry * new_dentry,
50347 + const struct dentry * parent_dentry,
50348 + const struct vfsmount * parent_mnt,
50349 + const struct dentry * old_dentry,
50350 + const struct vfsmount * old_mnt, const char *to)
50351 +{
50352 + return 1;
50353 +}
50354 +
50355 +int
50356 +gr_acl_handle_rename(const struct dentry *new_dentry,
50357 + const struct dentry *parent_dentry,
50358 + const struct vfsmount *parent_mnt,
50359 + const struct dentry *old_dentry,
50360 + const struct inode *old_parent_inode,
50361 + const struct vfsmount *old_mnt, const char *newname)
50362 +{
50363 + return 0;
50364 +}
50365 +
50366 +int
50367 +gr_acl_handle_filldir(const struct file *file, const char *name,
50368 + const int namelen, const ino_t ino)
50369 +{
50370 + return 1;
50371 +}
50372 +
50373 +int
50374 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50375 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50376 +{
50377 + return 1;
50378 +}
50379 +
50380 +int
50381 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50382 +{
50383 + return 0;
50384 +}
50385 +
50386 +int
50387 +gr_search_accept(const struct socket *sock)
50388 +{
50389 + return 0;
50390 +}
50391 +
50392 +int
50393 +gr_search_listen(const struct socket *sock)
50394 +{
50395 + return 0;
50396 +}
50397 +
50398 +int
50399 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50400 +{
50401 + return 0;
50402 +}
50403 +
50404 +__u32
50405 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50406 +{
50407 + return 1;
50408 +}
50409 +
50410 +__u32
50411 +gr_acl_handle_creat(const struct dentry * dentry,
50412 + const struct dentry * p_dentry,
50413 + const struct vfsmount * p_mnt, const int fmode,
50414 + const int imode)
50415 +{
50416 + return 1;
50417 +}
50418 +
50419 +void
50420 +gr_acl_handle_exit(void)
50421 +{
50422 + return;
50423 +}
50424 +
50425 +int
50426 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50427 +{
50428 + return 1;
50429 +}
50430 +
50431 +void
50432 +gr_set_role_label(const uid_t uid, const gid_t gid)
50433 +{
50434 + return;
50435 +}
50436 +
50437 +int
50438 +gr_acl_handle_procpidmem(const struct task_struct *task)
50439 +{
50440 + return 0;
50441 +}
50442 +
50443 +int
50444 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50445 +{
50446 + return 0;
50447 +}
50448 +
50449 +int
50450 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50451 +{
50452 + return 0;
50453 +}
50454 +
50455 +void
50456 +gr_set_kernel_label(struct task_struct *task)
50457 +{
50458 + return;
50459 +}
50460 +
50461 +int
50462 +gr_check_user_change(int real, int effective, int fs)
50463 +{
50464 + return 0;
50465 +}
50466 +
50467 +int
50468 +gr_check_group_change(int real, int effective, int fs)
50469 +{
50470 + return 0;
50471 +}
50472 +
50473 +int gr_acl_enable_at_secure(void)
50474 +{
50475 + return 0;
50476 +}
50477 +
50478 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50479 +{
50480 + return dentry->d_inode->i_sb->s_dev;
50481 +}
50482 +
50483 +EXPORT_SYMBOL(gr_is_capable);
50484 +EXPORT_SYMBOL(gr_is_capable_nolog);
50485 +EXPORT_SYMBOL(gr_learn_resource);
50486 +EXPORT_SYMBOL(gr_set_kernel_label);
50487 +#ifdef CONFIG_SECURITY
50488 +EXPORT_SYMBOL(gr_check_user_change);
50489 +EXPORT_SYMBOL(gr_check_group_change);
50490 +#endif
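
grsec_disabled.c exists so that, with grsecurity compiled out, every gr_* hook still links: each stub keeps the real signature and returns the permissive value (1 for the __u32 allow/deny hooks, 0 for the error-code hooks), so call sites need no conditional compilation of their own. The self-contained sketch below restates that pattern with an invented hook name; it is illustrative only and not part of the patch.

#include <stdio.h>

/* invented example hook, not from the patch; build with or without
   -DFEATURE_ENABLED to pick the real check or the permissive stub */
#ifdef FEATURE_ENABLED
static int hook_allow_open(const char *path)
{
        /* the real policy check would live here */
        return path[0] == '/';
}
#else
/* stub build: same signature, always allow, so callers never change */
static int hook_allow_open(const char *path)
{
        (void)path;
        return 1;
}
#endif

int main(void)
{
        printf("open allowed: %d\n", hook_allow_open("relative/path"));
        return 0;
}
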
50491 diff -urNp linux-2.6.32.42/grsecurity/grsec_exec.c linux-2.6.32.42/grsecurity/grsec_exec.c
50492 --- linux-2.6.32.42/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50493 +++ linux-2.6.32.42/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50494 @@ -0,0 +1,148 @@
50495 +#include <linux/kernel.h>
50496 +#include <linux/sched.h>
50497 +#include <linux/file.h>
50498 +#include <linux/binfmts.h>
50499 +#include <linux/smp_lock.h>
50500 +#include <linux/fs.h>
50501 +#include <linux/types.h>
50502 +#include <linux/grdefs.h>
50503 +#include <linux/grinternal.h>
50504 +#include <linux/capability.h>
50505 +#include <linux/compat.h>
50506 +
50507 +#include <asm/uaccess.h>
50508 +
50509 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50510 +static char gr_exec_arg_buf[132];
50511 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50512 +#endif
50513 +
50514 +int
50515 +gr_handle_nproc(void)
50516 +{
50517 +#ifdef CONFIG_GRKERNSEC_EXECVE
50518 + const struct cred *cred = current_cred();
50519 + if (grsec_enable_execve && cred->user &&
50520 + (atomic_read(&cred->user->processes) >
50521 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50522 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50523 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50524 + return -EAGAIN;
50525 + }
50526 +#endif
50527 + return 0;
50528 +}
50529 +
50530 +void
50531 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50532 +{
50533 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50534 + char *grarg = gr_exec_arg_buf;
50535 + unsigned int i, x, execlen = 0;
50536 + char c;
50537 +
50538 + if (!((grsec_enable_execlog && grsec_enable_group &&
50539 + in_group_p(grsec_audit_gid))
50540 + || (grsec_enable_execlog && !grsec_enable_group)))
50541 + return;
50542 +
50543 + mutex_lock(&gr_exec_arg_mutex);
50544 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50545 +
50546 + if (unlikely(argv == NULL))
50547 + goto log;
50548 +
50549 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50550 + const char __user *p;
50551 + unsigned int len;
50552 +
50553 + if (copy_from_user(&p, argv + i, sizeof(p)))
50554 + goto log;
50555 + if (!p)
50556 + goto log;
50557 + len = strnlen_user(p, 128 - execlen);
50558 + if (len > 128 - execlen)
50559 + len = 128 - execlen;
50560 + else if (len > 0)
50561 + len--;
50562 + if (copy_from_user(grarg + execlen, p, len))
50563 + goto log;
50564 +
50565 + /* rewrite unprintable characters */
50566 + for (x = 0; x < len; x++) {
50567 + c = *(grarg + execlen + x);
50568 + if (c < 32 || c > 126)
50569 + *(grarg + execlen + x) = ' ';
50570 + }
50571 +
50572 + execlen += len;
50573 + *(grarg + execlen) = ' ';
50574 + *(grarg + execlen + 1) = '\0';
50575 + execlen++;
50576 + }
50577 +
50578 + log:
50579 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50580 + bprm->file->f_path.mnt, grarg);
50581 + mutex_unlock(&gr_exec_arg_mutex);
50582 +#endif
50583 + return;
50584 +}
50585 +
50586 +#ifdef CONFIG_COMPAT
50587 +void
50588 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50589 +{
50590 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50591 + char *grarg = gr_exec_arg_buf;
50592 + unsigned int i, x, execlen = 0;
50593 + char c;
50594 +
50595 + if (!((grsec_enable_execlog && grsec_enable_group &&
50596 + in_group_p(grsec_audit_gid))
50597 + || (grsec_enable_execlog && !grsec_enable_group)))
50598 + return;
50599 +
50600 + mutex_lock(&gr_exec_arg_mutex);
50601 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50602 +
50603 + if (unlikely(argv == NULL))
50604 + goto log;
50605 +
50606 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50607 + compat_uptr_t p;
50608 + unsigned int len;
50609 +
50610 + if (get_user(p, argv + i))
50611 + goto log;
50612 + len = strnlen_user(compat_ptr(p), 128 - execlen);
50613 + if (len > 128 - execlen)
50614 + len = 128 - execlen;
50615 + else if (len > 0)
50616 + len--;
50617 + else
50618 + goto log;
50619 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50620 + goto log;
50621 +
50622 + /* rewrite unprintable characters */
50623 + for (x = 0; x < len; x++) {
50624 + c = *(grarg + execlen + x);
50625 + if (c < 32 || c > 126)
50626 + *(grarg + execlen + x) = ' ';
50627 + }
50628 +
50629 + execlen += len;
50630 + *(grarg + execlen) = ' ';
50631 + *(grarg + execlen + 1) = '\0';
50632 + execlen++;
50633 + }
50634 +
50635 + log:
50636 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50637 + bprm->file->f_path.mnt, grarg);
50638 + mutex_unlock(&gr_exec_arg_mutex);
50639 +#endif
50640 + return;
50641 +}
50642 +#endif
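
The exec-logging helpers above gather at most about 128 bytes of the new program's argv, rewrite anything outside printable ASCII to a space, and separate arguments with single spaces before handing the buffer to gr_log_fs_str(). The standalone program below, not part of the patch, applies the same sanitisation to its own argv so the transformation can be observed directly.

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        char buf[132] = { 0 };
        unsigned int execlen = 0, i, x;

        for (i = 0; i < (unsigned int)argc && execlen < 128; i++) {
                unsigned int len = (unsigned int)strlen(argv[i]);

                if (len > 128 - execlen)
                        len = 128 - execlen;
                memcpy(buf + execlen, argv[i], len);

                /* rewrite unprintable characters, as the hook does */
                for (x = 0; x < len; x++)
                        if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
                                buf[execlen + x] = ' ';

                execlen += len;
                buf[execlen++] = ' ';   /* one space between arguments */
        }
        buf[execlen] = '\0';
        printf("sanitised command line: %s\n", buf);
        return 0;
}
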
50643 diff -urNp linux-2.6.32.42/grsecurity/grsec_fifo.c linux-2.6.32.42/grsecurity/grsec_fifo.c
50644 --- linux-2.6.32.42/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50645 +++ linux-2.6.32.42/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
50646 @@ -0,0 +1,24 @@
50647 +#include <linux/kernel.h>
50648 +#include <linux/sched.h>
50649 +#include <linux/fs.h>
50650 +#include <linux/file.h>
50651 +#include <linux/grinternal.h>
50652 +
50653 +int
50654 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50655 + const struct dentry *dir, const int flag, const int acc_mode)
50656 +{
50657 +#ifdef CONFIG_GRKERNSEC_FIFO
50658 + const struct cred *cred = current_cred();
50659 +
50660 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50661 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50662 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50663 + (cred->fsuid != dentry->d_inode->i_uid)) {
50664 + if (!inode_permission(dentry->d_inode, acc_mode))
50665 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50666 + return -EACCES;
50667 + }
50668 +#endif
50669 + return 0;
50670 +}
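
gr_handle_fifo() refuses to open a FIFO that sits in a sticky directory when the FIFO belongs to neither the directory owner nor the opener (O_EXCL opens are exempt). Below is a sketch, not part of the patch, that restates that predicate over stat(2) data and reports what the kernel check would be expected to decide for the current user on a kernel with CONFIG_GRKERNSEC_FIFO enabled; it approximates the fsuid with the effective uid.

#include <stdio.h>
#include <libgen.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        struct stat fifo_st, dir_st;
        char dirbuf[4096];

        if (argc != 2) {
                fprintf(stderr, "usage: %s /path/to/fifo\n", argv[0]);
                return 1;
        }
        if (lstat(argv[1], &fifo_st)) {
                perror("lstat fifo");
                return 1;
        }
        snprintf(dirbuf, sizeof(dirbuf), "%s", argv[1]);
        if (stat(dirname(dirbuf), &dir_st)) {
                perror("stat parent dir");
                return 1;
        }

        /* same conditions as the hook: a FIFO, in a sticky directory,
           owned by neither the directory owner nor the opening user */
        if (S_ISFIFO(fifo_st.st_mode) &&
            (dir_st.st_mode & S_ISVTX) &&
            fifo_st.st_uid != dir_st.st_uid &&
            fifo_st.st_uid != geteuid())
                printf("grsec FIFO check would deny this open (EACCES)\n");
        else
                printf("grsec FIFO check would allow this open\n");
        return 0;
}
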
50671 diff -urNp linux-2.6.32.42/grsecurity/grsec_fork.c linux-2.6.32.42/grsecurity/grsec_fork.c
50672 --- linux-2.6.32.42/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50673 +++ linux-2.6.32.42/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
50674 @@ -0,0 +1,23 @@
50675 +#include <linux/kernel.h>
50676 +#include <linux/sched.h>
50677 +#include <linux/grsecurity.h>
50678 +#include <linux/grinternal.h>
50679 +#include <linux/errno.h>
50680 +
50681 +void
50682 +gr_log_forkfail(const int retval)
50683 +{
50684 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50685 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50686 + switch (retval) {
50687 + case -EAGAIN:
50688 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50689 + break;
50690 + case -ENOMEM:
50691 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50692 + break;
50693 + }
50694 + }
50695 +#endif
50696 + return;
50697 +}
50698 diff -urNp linux-2.6.32.42/grsecurity/grsec_init.c linux-2.6.32.42/grsecurity/grsec_init.c
50699 --- linux-2.6.32.42/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50700 +++ linux-2.6.32.42/grsecurity/grsec_init.c 2011-06-29 19:35:26.000000000 -0400
50701 @@ -0,0 +1,274 @@
50702 +#include <linux/kernel.h>
50703 +#include <linux/sched.h>
50704 +#include <linux/mm.h>
50705 +#include <linux/smp_lock.h>
50706 +#include <linux/gracl.h>
50707 +#include <linux/slab.h>
50708 +#include <linux/vmalloc.h>
50709 +#include <linux/percpu.h>
50710 +#include <linux/module.h>
50711 +
50712 +int grsec_enable_brute;
50713 +int grsec_enable_link;
50714 +int grsec_enable_dmesg;
50715 +int grsec_enable_harden_ptrace;
50716 +int grsec_enable_fifo;
50717 +int grsec_enable_execve;
50718 +int grsec_enable_execlog;
50719 +int grsec_enable_signal;
50720 +int grsec_enable_forkfail;
50721 +int grsec_enable_audit_ptrace;
50722 +int grsec_enable_time;
50723 +int grsec_enable_audit_textrel;
50724 +int grsec_enable_group;
50725 +int grsec_audit_gid;
50726 +int grsec_enable_chdir;
50727 +int grsec_enable_mount;
50728 +int grsec_enable_rofs;
50729 +int grsec_enable_chroot_findtask;
50730 +int grsec_enable_chroot_mount;
50731 +int grsec_enable_chroot_shmat;
50732 +int grsec_enable_chroot_fchdir;
50733 +int grsec_enable_chroot_double;
50734 +int grsec_enable_chroot_pivot;
50735 +int grsec_enable_chroot_chdir;
50736 +int grsec_enable_chroot_chmod;
50737 +int grsec_enable_chroot_mknod;
50738 +int grsec_enable_chroot_nice;
50739 +int grsec_enable_chroot_execlog;
50740 +int grsec_enable_chroot_caps;
50741 +int grsec_enable_chroot_sysctl;
50742 +int grsec_enable_chroot_unix;
50743 +int grsec_enable_tpe;
50744 +int grsec_tpe_gid;
50745 +int grsec_enable_blackhole;
50746 +#ifdef CONFIG_IPV6_MODULE
50747 +EXPORT_SYMBOL(grsec_enable_blackhole);
50748 +#endif
50749 +int grsec_lastack_retries;
50750 +int grsec_enable_tpe_all;
50751 +int grsec_enable_tpe_invert;
50752 +int grsec_enable_socket_all;
50753 +int grsec_socket_all_gid;
50754 +int grsec_enable_socket_client;
50755 +int grsec_socket_client_gid;
50756 +int grsec_enable_socket_server;
50757 +int grsec_socket_server_gid;
50758 +int grsec_resource_logging;
50759 +int grsec_disable_privio;
50760 +int grsec_enable_log_rwxmaps;
50761 +int grsec_lock;
50762 +
50763 +DEFINE_SPINLOCK(grsec_alert_lock);
50764 +unsigned long grsec_alert_wtime = 0;
50765 +unsigned long grsec_alert_fyet = 0;
50766 +
50767 +DEFINE_SPINLOCK(grsec_audit_lock);
50768 +
50769 +DEFINE_RWLOCK(grsec_exec_file_lock);
50770 +
50771 +char *gr_shared_page[4];
50772 +
50773 +char *gr_alert_log_fmt;
50774 +char *gr_audit_log_fmt;
50775 +char *gr_alert_log_buf;
50776 +char *gr_audit_log_buf;
50777 +
50778 +extern struct gr_arg *gr_usermode;
50779 +extern unsigned char *gr_system_salt;
50780 +extern unsigned char *gr_system_sum;
50781 +
50782 +void __init
50783 +grsecurity_init(void)
50784 +{
50785 + int j;
50786 + /* create the per-cpu shared pages */
50787 +
50788 +#ifdef CONFIG_X86
50789 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50790 +#endif
50791 +
50792 + for (j = 0; j < 4; j++) {
50793 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50794 + if (gr_shared_page[j] == NULL) {
50795 + panic("Unable to allocate grsecurity shared page");
50796 + return;
50797 + }
50798 + }
50799 +
50800 + /* allocate log buffers */
50801 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50802 + if (!gr_alert_log_fmt) {
50803 + panic("Unable to allocate grsecurity alert log format buffer");
50804 + return;
50805 + }
50806 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50807 + if (!gr_audit_log_fmt) {
50808 + panic("Unable to allocate grsecurity audit log format buffer");
50809 + return;
50810 + }
50811 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50812 + if (!gr_alert_log_buf) {
50813 + panic("Unable to allocate grsecurity alert log buffer");
50814 + return;
50815 + }
50816 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50817 + if (!gr_audit_log_buf) {
50818 + panic("Unable to allocate grsecurity audit log buffer");
50819 + return;
50820 + }
50821 +
50822 + /* allocate memory for authentication structure */
50823 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50824 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50825 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50826 +
50827 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50828 + panic("Unable to allocate grsecurity authentication structure");
50829 + return;
50830 + }
50831 +
50832 +
50833 +#ifdef CONFIG_GRKERNSEC_IO
50834 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50835 + grsec_disable_privio = 1;
50836 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50837 + grsec_disable_privio = 1;
50838 +#else
50839 + grsec_disable_privio = 0;
50840 +#endif
50841 +#endif
50842 +
50843 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50844 + /* for backward compatibility, tpe_invert always defaults to on if
50845 + enabled in the kernel
50846 + */
50847 + grsec_enable_tpe_invert = 1;
50848 +#endif
50849 +
50850 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50851 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50852 + grsec_lock = 1;
50853 +#endif
50854 +
50855 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50856 + grsec_enable_audit_textrel = 1;
50857 +#endif
50858 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50859 + grsec_enable_log_rwxmaps = 1;
50860 +#endif
50861 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50862 + grsec_enable_group = 1;
50863 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50864 +#endif
50865 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50866 + grsec_enable_chdir = 1;
50867 +#endif
50868 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50869 + grsec_enable_harden_ptrace = 1;
50870 +#endif
50871 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50872 + grsec_enable_mount = 1;
50873 +#endif
50874 +#ifdef CONFIG_GRKERNSEC_LINK
50875 + grsec_enable_link = 1;
50876 +#endif
50877 +#ifdef CONFIG_GRKERNSEC_BRUTE
50878 + grsec_enable_brute = 1;
50879 +#endif
50880 +#ifdef CONFIG_GRKERNSEC_DMESG
50881 + grsec_enable_dmesg = 1;
50882 +#endif
50883 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50884 + grsec_enable_blackhole = 1;
50885 + grsec_lastack_retries = 4;
50886 +#endif
50887 +#ifdef CONFIG_GRKERNSEC_FIFO
50888 + grsec_enable_fifo = 1;
50889 +#endif
50890 +#ifdef CONFIG_GRKERNSEC_EXECVE
50891 + grsec_enable_execve = 1;
50892 +#endif
50893 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50894 + grsec_enable_execlog = 1;
50895 +#endif
50896 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50897 + grsec_enable_signal = 1;
50898 +#endif
50899 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50900 + grsec_enable_forkfail = 1;
50901 +#endif
50902 +#ifdef CONFIG_GRKERNSEC_TIME
50903 + grsec_enable_time = 1;
50904 +#endif
50905 +#ifdef CONFIG_GRKERNSEC_RESLOG
50906 + grsec_resource_logging = 1;
50907 +#endif
50908 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50909 + grsec_enable_chroot_findtask = 1;
50910 +#endif
50911 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50912 + grsec_enable_chroot_unix = 1;
50913 +#endif
50914 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50915 + grsec_enable_chroot_mount = 1;
50916 +#endif
50917 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50918 + grsec_enable_chroot_fchdir = 1;
50919 +#endif
50920 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50921 + grsec_enable_chroot_shmat = 1;
50922 +#endif
50923 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50924 + grsec_enable_audit_ptrace = 1;
50925 +#endif
50926 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50927 + grsec_enable_chroot_double = 1;
50928 +#endif
50929 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50930 + grsec_enable_chroot_pivot = 1;
50931 +#endif
50932 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50933 + grsec_enable_chroot_chdir = 1;
50934 +#endif
50935 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50936 + grsec_enable_chroot_chmod = 1;
50937 +#endif
50938 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50939 + grsec_enable_chroot_mknod = 1;
50940 +#endif
50941 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50942 + grsec_enable_chroot_nice = 1;
50943 +#endif
50944 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50945 + grsec_enable_chroot_execlog = 1;
50946 +#endif
50947 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50948 + grsec_enable_chroot_caps = 1;
50949 +#endif
50950 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50951 + grsec_enable_chroot_sysctl = 1;
50952 +#endif
50953 +#ifdef CONFIG_GRKERNSEC_TPE
50954 + grsec_enable_tpe = 1;
50955 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
50956 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
50957 + grsec_enable_tpe_all = 1;
50958 +#endif
50959 +#endif
50960 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
50961 + grsec_enable_socket_all = 1;
50962 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
50963 +#endif
50964 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
50965 + grsec_enable_socket_client = 1;
50966 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
50967 +#endif
50968 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
50969 + grsec_enable_socket_server = 1;
50970 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
50971 +#endif
50972 +#endif
50973 +
50974 + return;
50975 +}
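
grsecurity_init() only bakes in the Kconfig defaults; on kernels that also enable the sysctl interface the same toggles are exported as runtime tunables (conventionally under /proc/sys/kernel/grsecurity/) and stay writable until grsec_lock is set to 1. The reader below is an illustrative sketch, not part of the patch, and assumes that path layout.

#include <stdio.h>

int main(void)
{
        /* path assumes the grsecurity sysctl interface is compiled in */
        FILE *f = fopen("/proc/sys/kernel/grsecurity/grsec_lock", "r");
        int val;

        if (!f) {
                perror("open grsec_lock");
                return 1;
        }
        if (fscanf(f, "%d", &val) == 1)
                printf("grsec_lock = %d (%s)\n", val,
                       val ? "tunables locked" : "tunables still writable");
        fclose(f);
        return 0;
}
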
50976 diff -urNp linux-2.6.32.42/grsecurity/grsec_link.c linux-2.6.32.42/grsecurity/grsec_link.c
50977 --- linux-2.6.32.42/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
50978 +++ linux-2.6.32.42/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
50979 @@ -0,0 +1,43 @@
50980 +#include <linux/kernel.h>
50981 +#include <linux/sched.h>
50982 +#include <linux/fs.h>
50983 +#include <linux/file.h>
50984 +#include <linux/grinternal.h>
50985 +
50986 +int
50987 +gr_handle_follow_link(const struct inode *parent,
50988 + const struct inode *inode,
50989 + const struct dentry *dentry, const struct vfsmount *mnt)
50990 +{
50991 +#ifdef CONFIG_GRKERNSEC_LINK
50992 + const struct cred *cred = current_cred();
50993 +
50994 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
50995 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
50996 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
50997 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
50998 + return -EACCES;
50999 + }
51000 +#endif
51001 + return 0;
51002 +}
51003 +
51004 +int
51005 +gr_handle_hardlink(const struct dentry *dentry,
51006 + const struct vfsmount *mnt,
51007 + struct inode *inode, const int mode, const char *to)
51008 +{
51009 +#ifdef CONFIG_GRKERNSEC_LINK
51010 + const struct cred *cred = current_cred();
51011 +
51012 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51013 + (!S_ISREG(mode) || (mode & S_ISUID) ||
51014 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51015 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51016 + !capable(CAP_FOWNER) && cred->uid) {
51017 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51018 + return -EPERM;
51019 + }
51020 +#endif
51021 + return 0;
51022 +}
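
gr_handle_follow_link() and gr_handle_hardlink() implement, kernel-side, the classic defence against link attacks in shared sticky directories such as /tmp. Application code has traditionally had to protect itself with O_EXCL (and O_NOFOLLOW); the sketch below shows that userspace counterpart for comparison only and is not part of the patch.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* O_CREAT|O_EXCL fails with EEXIST if /tmp/demo.lock already
           exists, including when it was pre-created as a symlink */
        int fd = open("/tmp/demo.lock",
                      O_CREAT | O_EXCL | O_WRONLY | O_NOFOLLOW, 0600);

        if (fd < 0) {
                fprintf(stderr, "refusing to reuse existing path: %s\n",
                        strerror(errno));
                return 1;
        }
        if (write(fd, "ok\n", 3) != 3)
                perror("write");
        close(fd);
        unlink("/tmp/demo.lock");
        return 0;
}
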
51023 diff -urNp linux-2.6.32.42/grsecurity/grsec_log.c linux-2.6.32.42/grsecurity/grsec_log.c
51024 --- linux-2.6.32.42/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51025 +++ linux-2.6.32.42/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
51026 @@ -0,0 +1,310 @@
51027 +#include <linux/kernel.h>
51028 +#include <linux/sched.h>
51029 +#include <linux/file.h>
51030 +#include <linux/tty.h>
51031 +#include <linux/fs.h>
51032 +#include <linux/grinternal.h>
51033 +
51034 +#ifdef CONFIG_TREE_PREEMPT_RCU
51035 +#define DISABLE_PREEMPT() preempt_disable()
51036 +#define ENABLE_PREEMPT() preempt_enable()
51037 +#else
51038 +#define DISABLE_PREEMPT()
51039 +#define ENABLE_PREEMPT()
51040 +#endif
51041 +
51042 +#define BEGIN_LOCKS(x) \
51043 + DISABLE_PREEMPT(); \
51044 + rcu_read_lock(); \
51045 + read_lock(&tasklist_lock); \
51046 + read_lock(&grsec_exec_file_lock); \
51047 + if (x != GR_DO_AUDIT) \
51048 + spin_lock(&grsec_alert_lock); \
51049 + else \
51050 + spin_lock(&grsec_audit_lock)
51051 +
51052 +#define END_LOCKS(x) \
51053 + if (x != GR_DO_AUDIT) \
51054 + spin_unlock(&grsec_alert_lock); \
51055 + else \
51056 + spin_unlock(&grsec_audit_lock); \
51057 + read_unlock(&grsec_exec_file_lock); \
51058 + read_unlock(&tasklist_lock); \
51059 + rcu_read_unlock(); \
51060 + ENABLE_PREEMPT(); \
51061 + if (x == GR_DONT_AUDIT) \
51062 + gr_handle_alertkill(current)
51063 +
51064 +enum {
51065 + FLOODING,
51066 + NO_FLOODING
51067 +};
51068 +
51069 +extern char *gr_alert_log_fmt;
51070 +extern char *gr_audit_log_fmt;
51071 +extern char *gr_alert_log_buf;
51072 +extern char *gr_audit_log_buf;
51073 +
51074 +static int gr_log_start(int audit)
51075 +{
51076 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51077 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51078 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51079 +
51080 + if (audit == GR_DO_AUDIT)
51081 + goto set_fmt;
51082 +
51083 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51084 + grsec_alert_wtime = jiffies;
51085 + grsec_alert_fyet = 0;
51086 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51087 + grsec_alert_fyet++;
51088 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51089 + grsec_alert_wtime = jiffies;
51090 + grsec_alert_fyet++;
51091 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51092 + return FLOODING;
51093 + } else return FLOODING;
51094 +
51095 +set_fmt:
51096 + memset(buf, 0, PAGE_SIZE);
51097 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
51098 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51099 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51100 + } else if (current->signal->curr_ip) {
51101 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51102 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51103 + } else if (gr_acl_is_enabled()) {
51104 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51105 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51106 + } else {
51107 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
51108 + strcpy(buf, fmt);
51109 + }
51110 +
51111 + return NO_FLOODING;
51112 +}
51113 +
51114 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51115 + __attribute__ ((format (printf, 2, 0)));
51116 +
51117 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51118 +{
51119 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51120 + unsigned int len = strlen(buf);
51121 +
51122 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51123 +
51124 + return;
51125 +}
51126 +
51127 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51128 + __attribute__ ((format (printf, 2, 3)));
51129 +
51130 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51131 +{
51132 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51133 + unsigned int len = strlen(buf);
51134 + va_list ap;
51135 +
51136 + va_start(ap, msg);
51137 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51138 + va_end(ap);
51139 +
51140 + return;
51141 +}
51142 +
51143 +static void gr_log_end(int audit)
51144 +{
51145 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51146 + unsigned int len = strlen(buf);
51147 +
51148 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51149 + printk("%s\n", buf);
51150 +
51151 + return;
51152 +}
51153 +
51154 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51155 +{
51156 + int logtype;
51157 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51158 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51159 + void *voidptr = NULL;
51160 + int num1 = 0, num2 = 0;
51161 + unsigned long ulong1 = 0, ulong2 = 0;
51162 + struct dentry *dentry = NULL;
51163 + struct vfsmount *mnt = NULL;
51164 + struct file *file = NULL;
51165 + struct task_struct *task = NULL;
51166 + const struct cred *cred, *pcred;
51167 + va_list ap;
51168 +
51169 + BEGIN_LOCKS(audit);
51170 + logtype = gr_log_start(audit);
51171 + if (logtype == FLOODING) {
51172 + END_LOCKS(audit);
51173 + return;
51174 + }
51175 + va_start(ap, argtypes);
51176 + switch (argtypes) {
51177 + case GR_TTYSNIFF:
51178 + task = va_arg(ap, struct task_struct *);
51179 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51180 + break;
51181 + case GR_SYSCTL_HIDDEN:
51182 + str1 = va_arg(ap, char *);
51183 + gr_log_middle_varargs(audit, msg, result, str1);
51184 + break;
51185 + case GR_RBAC:
51186 + dentry = va_arg(ap, struct dentry *);
51187 + mnt = va_arg(ap, struct vfsmount *);
51188 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51189 + break;
51190 + case GR_RBAC_STR:
51191 + dentry = va_arg(ap, struct dentry *);
51192 + mnt = va_arg(ap, struct vfsmount *);
51193 + str1 = va_arg(ap, char *);
51194 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51195 + break;
51196 + case GR_STR_RBAC:
51197 + str1 = va_arg(ap, char *);
51198 + dentry = va_arg(ap, struct dentry *);
51199 + mnt = va_arg(ap, struct vfsmount *);
51200 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51201 + break;
51202 + case GR_RBAC_MODE2:
51203 + dentry = va_arg(ap, struct dentry *);
51204 + mnt = va_arg(ap, struct vfsmount *);
51205 + str1 = va_arg(ap, char *);
51206 + str2 = va_arg(ap, char *);
51207 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51208 + break;
51209 + case GR_RBAC_MODE3:
51210 + dentry = va_arg(ap, struct dentry *);
51211 + mnt = va_arg(ap, struct vfsmount *);
51212 + str1 = va_arg(ap, char *);
51213 + str2 = va_arg(ap, char *);
51214 + str3 = va_arg(ap, char *);
51215 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51216 + break;
51217 + case GR_FILENAME:
51218 + dentry = va_arg(ap, struct dentry *);
51219 + mnt = va_arg(ap, struct vfsmount *);
51220 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51221 + break;
51222 + case GR_STR_FILENAME:
51223 + str1 = va_arg(ap, char *);
51224 + dentry = va_arg(ap, struct dentry *);
51225 + mnt = va_arg(ap, struct vfsmount *);
51226 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51227 + break;
51228 + case GR_FILENAME_STR:
51229 + dentry = va_arg(ap, struct dentry *);
51230 + mnt = va_arg(ap, struct vfsmount *);
51231 + str1 = va_arg(ap, char *);
51232 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51233 + break;
51234 + case GR_FILENAME_TWO_INT:
51235 + dentry = va_arg(ap, struct dentry *);
51236 + mnt = va_arg(ap, struct vfsmount *);
51237 + num1 = va_arg(ap, int);
51238 + num2 = va_arg(ap, int);
51239 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51240 + break;
51241 + case GR_FILENAME_TWO_INT_STR:
51242 + dentry = va_arg(ap, struct dentry *);
51243 + mnt = va_arg(ap, struct vfsmount *);
51244 + num1 = va_arg(ap, int);
51245 + num2 = va_arg(ap, int);
51246 + str1 = va_arg(ap, char *);
51247 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51248 + break;
51249 + case GR_TEXTREL:
51250 + file = va_arg(ap, struct file *);
51251 + ulong1 = va_arg(ap, unsigned long);
51252 + ulong2 = va_arg(ap, unsigned long);
51253 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51254 + break;
51255 + case GR_PTRACE:
51256 + task = va_arg(ap, struct task_struct *);
51257 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51258 + break;
51259 + case GR_RESOURCE:
51260 + task = va_arg(ap, struct task_struct *);
51261 + cred = __task_cred(task);
51262 + pcred = __task_cred(task->real_parent);
51263 + ulong1 = va_arg(ap, unsigned long);
51264 + str1 = va_arg(ap, char *);
51265 + ulong2 = va_arg(ap, unsigned long);
51266 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51267 + break;
51268 + case GR_CAP:
51269 + task = va_arg(ap, struct task_struct *);
51270 + cred = __task_cred(task);
51271 + pcred = __task_cred(task->real_parent);
51272 + str1 = va_arg(ap, char *);
51273 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51274 + break;
51275 + case GR_SIG:
51276 + str1 = va_arg(ap, char *);
51277 + voidptr = va_arg(ap, void *);
51278 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51279 + break;
51280 + case GR_SIG2:
51281 + task = va_arg(ap, struct task_struct *);
51282 + cred = __task_cred(task);
51283 + pcred = __task_cred(task->real_parent);
51284 + num1 = va_arg(ap, int);
51285 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51286 + break;
51287 + case GR_CRASH1:
51288 + task = va_arg(ap, struct task_struct *);
51289 + cred = __task_cred(task);
51290 + pcred = __task_cred(task->real_parent);
51291 + ulong1 = va_arg(ap, unsigned long);
51292 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51293 + break;
51294 + case GR_CRASH2:
51295 + task = va_arg(ap, struct task_struct *);
51296 + cred = __task_cred(task);
51297 + pcred = __task_cred(task->real_parent);
51298 + ulong1 = va_arg(ap, unsigned long);
51299 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51300 + break;
51301 + case GR_RWXMAP:
51302 + file = va_arg(ap, struct file *);
51303 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51304 + break;
51305 + case GR_PSACCT:
51306 + {
51307 + unsigned int wday, cday;
51308 + __u8 whr, chr;
51309 + __u8 wmin, cmin;
51310 + __u8 wsec, csec;
51311 + char cur_tty[64] = { 0 };
51312 + char parent_tty[64] = { 0 };
51313 +
51314 + task = va_arg(ap, struct task_struct *);
51315 + wday = va_arg(ap, unsigned int);
51316 + cday = va_arg(ap, unsigned int);
51317 + whr = va_arg(ap, int);
51318 + chr = va_arg(ap, int);
51319 + wmin = va_arg(ap, int);
51320 + cmin = va_arg(ap, int);
51321 + wsec = va_arg(ap, int);
51322 + csec = va_arg(ap, int);
51323 + ulong1 = va_arg(ap, unsigned long);
51324 + cred = __task_cred(task);
51325 + pcred = __task_cred(task->real_parent);
51326 +
51327 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51328 + }
51329 + break;
51330 + default:
51331 + gr_log_middle(audit, msg, ap);
51332 + }
51333 + va_end(ap);
51334 + gr_log_end(audit);
51335 + END_LOCKS(audit);
51336 +}
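
gr_log_start() rate-limits alerts: roughly CONFIG_GRKERNSEC_FLOODBURST messages are accepted per CONFIG_GRKERNSEC_FLOODTIME-second window, a single "logging disabled" notice is printed when the burst is exhausted, and everything after that returns FLOODING and is dropped until the window restarts. The userspace restatement below is a simplified sketch of that windowed burst limiter, not part of the patch, with stand-in constants and without the one-time notice.

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window (stand-in value) */
#define FLOODBURST 6    /* messages allowed per window (stand-in value) */

static time_t window_start;
static unsigned int sent_in_window;

static int may_log(void)
{
        time_t now = time(NULL);

        if (!window_start || now - window_start > FLOODTIME) {
                window_start = now;             /* start a new window */
                sent_in_window = 0;
        }
        if (sent_in_window < FLOODBURST) {
                sent_in_window++;
                return 1;
        }
        return 0;                               /* flooding: drop the message */
}

int main(void)
{
        int i;

        for (i = 0; i < 20; i++)
                printf("message %2d: %s\n", i,
                       may_log() ? "logged" : "suppressed");
        return 0;
}
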
51337 diff -urNp linux-2.6.32.42/grsecurity/grsec_mem.c linux-2.6.32.42/grsecurity/grsec_mem.c
51338 --- linux-2.6.32.42/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51339 +++ linux-2.6.32.42/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51340 @@ -0,0 +1,33 @@
51341 +#include <linux/kernel.h>
51342 +#include <linux/sched.h>
51343 +#include <linux/mm.h>
51344 +#include <linux/mman.h>
51345 +#include <linux/grinternal.h>
51346 +
51347 +void
51348 +gr_handle_ioperm(void)
51349 +{
51350 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51351 + return;
51352 +}
51353 +
51354 +void
51355 +gr_handle_iopl(void)
51356 +{
51357 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51358 + return;
51359 +}
51360 +
51361 +void
51362 +gr_handle_mem_readwrite(u64 from, u64 to)
51363 +{
51364 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51365 + return;
51366 +}
51367 +
51368 +void
51369 +gr_handle_vm86(void)
51370 +{
51371 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51372 + return;
51373 +}
51374 diff -urNp linux-2.6.32.42/grsecurity/grsec_mount.c linux-2.6.32.42/grsecurity/grsec_mount.c
51375 --- linux-2.6.32.42/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51376 +++ linux-2.6.32.42/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
51377 @@ -0,0 +1,62 @@
51378 +#include <linux/kernel.h>
51379 +#include <linux/sched.h>
51380 +#include <linux/mount.h>
51381 +#include <linux/grsecurity.h>
51382 +#include <linux/grinternal.h>
51383 +
51384 +void
51385 +gr_log_remount(const char *devname, const int retval)
51386 +{
51387 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51388 + if (grsec_enable_mount && (retval >= 0))
51389 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51390 +#endif
51391 + return;
51392 +}
51393 +
51394 +void
51395 +gr_log_unmount(const char *devname, const int retval)
51396 +{
51397 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51398 + if (grsec_enable_mount && (retval >= 0))
51399 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51400 +#endif
51401 + return;
51402 +}
51403 +
51404 +void
51405 +gr_log_mount(const char *from, const char *to, const int retval)
51406 +{
51407 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51408 + if (grsec_enable_mount && (retval >= 0))
51409 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51410 +#endif
51411 + return;
51412 +}
51413 +
51414 +int
51415 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51416 +{
51417 +#ifdef CONFIG_GRKERNSEC_ROFS
51418 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51419 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51420 + return -EPERM;
51421 + } else
51422 + return 0;
51423 +#endif
51424 + return 0;
51425 +}
51426 +
51427 +int
51428 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51429 +{
51430 +#ifdef CONFIG_GRKERNSEC_ROFS
51431 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51432 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51433 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51434 + return -EPERM;
51435 + } else
51436 + return 0;
51437 +#endif
51438 + return 0;
51439 +}
51440 diff -urNp linux-2.6.32.42/grsecurity/grsec_pax.c linux-2.6.32.42/grsecurity/grsec_pax.c
51441 --- linux-2.6.32.42/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51442 +++ linux-2.6.32.42/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51443 @@ -0,0 +1,36 @@
51444 +#include <linux/kernel.h>
51445 +#include <linux/sched.h>
51446 +#include <linux/mm.h>
51447 +#include <linux/file.h>
51448 +#include <linux/grinternal.h>
51449 +#include <linux/grsecurity.h>
51450 +
51451 +void
51452 +gr_log_textrel(struct vm_area_struct * vma)
51453 +{
51454 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51455 + if (grsec_enable_audit_textrel)
51456 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51457 +#endif
51458 + return;
51459 +}
51460 +
51461 +void
51462 +gr_log_rwxmmap(struct file *file)
51463 +{
51464 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51465 + if (grsec_enable_log_rwxmaps)
51466 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51467 +#endif
51468 + return;
51469 +}
51470 +
51471 +void
51472 +gr_log_rwxmprotect(struct file *file)
51473 +{
51474 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51475 + if (grsec_enable_log_rwxmaps)
51476 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51477 +#endif
51478 + return;
51479 +}
51480 diff -urNp linux-2.6.32.42/grsecurity/grsec_ptrace.c linux-2.6.32.42/grsecurity/grsec_ptrace.c
51481 --- linux-2.6.32.42/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51482 +++ linux-2.6.32.42/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51483 @@ -0,0 +1,14 @@
51484 +#include <linux/kernel.h>
51485 +#include <linux/sched.h>
51486 +#include <linux/grinternal.h>
51487 +#include <linux/grsecurity.h>
51488 +
51489 +void
51490 +gr_audit_ptrace(struct task_struct *task)
51491 +{
51492 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51493 + if (grsec_enable_audit_ptrace)
51494 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51495 +#endif
51496 + return;
51497 +}
51498 diff -urNp linux-2.6.32.42/grsecurity/grsec_sig.c linux-2.6.32.42/grsecurity/grsec_sig.c
51499 --- linux-2.6.32.42/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51500 +++ linux-2.6.32.42/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
51501 @@ -0,0 +1,205 @@
51502 +#include <linux/kernel.h>
51503 +#include <linux/sched.h>
51504 +#include <linux/delay.h>
51505 +#include <linux/grsecurity.h>
51506 +#include <linux/grinternal.h>
51507 +#include <linux/hardirq.h>
51508 +
51509 +char *signames[] = {
51510 + [SIGSEGV] = "Segmentation fault",
51511 + [SIGILL] = "Illegal instruction",
51512 + [SIGABRT] = "Abort",
51513 + [SIGBUS] = "Invalid alignment/Bus error"
51514 +};
51515 +
51516 +void
51517 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51518 +{
51519 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51520 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51521 + (sig == SIGABRT) || (sig == SIGBUS))) {
51522 + if (t->pid == current->pid) {
51523 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51524 + } else {
51525 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51526 + }
51527 + }
51528 +#endif
51529 + return;
51530 +}
51531 +
51532 +int
51533 +gr_handle_signal(const struct task_struct *p, const int sig)
51534 +{
51535 +#ifdef CONFIG_GRKERNSEC
51536 + if (current->pid > 1 && gr_check_protected_task(p)) {
51537 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51538 + return -EPERM;
51539 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51540 + return -EPERM;
51541 + }
51542 +#endif
51543 + return 0;
51544 +}
51545 +
51546 +#ifdef CONFIG_GRKERNSEC
51547 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51548 +
51549 +int gr_fake_force_sig(int sig, struct task_struct *t)
51550 +{
51551 + unsigned long int flags;
51552 + int ret, blocked, ignored;
51553 + struct k_sigaction *action;
51554 +
51555 + spin_lock_irqsave(&t->sighand->siglock, flags);
51556 + action = &t->sighand->action[sig-1];
51557 + ignored = action->sa.sa_handler == SIG_IGN;
51558 + blocked = sigismember(&t->blocked, sig);
51559 + if (blocked || ignored) {
51560 + action->sa.sa_handler = SIG_DFL;
51561 + if (blocked) {
51562 + sigdelset(&t->blocked, sig);
51563 + recalc_sigpending_and_wake(t);
51564 + }
51565 + }
51566 + if (action->sa.sa_handler == SIG_DFL)
51567 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51568 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51569 +
51570 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51571 +
51572 + return ret;
51573 +}
51574 +#endif
51575 +
51576 +#ifdef CONFIG_GRKERNSEC_BRUTE
51577 +#define GR_USER_BAN_TIME (15 * 60)
51578 +
51579 +static int __get_dumpable(unsigned long mm_flags)
51580 +{
51581 + int ret;
51582 +
51583 + ret = mm_flags & MMF_DUMPABLE_MASK;
51584 + return (ret >= 2) ? 2 : ret;
51585 +}
51586 +#endif
51587 +
51588 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51589 +{
51590 +#ifdef CONFIG_GRKERNSEC_BRUTE
51591 + uid_t uid = 0;
51592 +
51593 + if (!grsec_enable_brute)
51594 + return;
51595 +
51596 + rcu_read_lock();
51597 + read_lock(&tasklist_lock);
51598 + read_lock(&grsec_exec_file_lock);
51599 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51600 + p->real_parent->brute = 1;
51601 + else {
51602 + const struct cred *cred = __task_cred(p), *cred2;
51603 + struct task_struct *tsk, *tsk2;
51604 +
51605 + if (!__get_dumpable(mm_flags) && cred->uid) {
51606 + struct user_struct *user;
51607 +
51608 + uid = cred->uid;
51609 +
51610 + /* this is put upon execution past expiration */
51611 + user = find_user(uid);
51612 + if (user == NULL)
51613 + goto unlock;
51614 + user->banned = 1;
51615 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51616 + if (user->ban_expires == ~0UL)
51617 + user->ban_expires--;
51618 +
51619 + do_each_thread(tsk2, tsk) {
51620 + cred2 = __task_cred(tsk);
51621 + if (tsk != p && cred2->uid == uid)
51622 + gr_fake_force_sig(SIGKILL, tsk);
51623 + } while_each_thread(tsk2, tsk);
51624 + }
51625 + }
51626 +unlock:
51627 + read_unlock(&grsec_exec_file_lock);
51628 + read_unlock(&tasklist_lock);
51629 + rcu_read_unlock();
51630 +
51631 + if (uid)
51632 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51633 +#endif
51634 + return;
51635 +}
51636 +
51637 +void gr_handle_brute_check(void)
51638 +{
51639 +#ifdef CONFIG_GRKERNSEC_BRUTE
51640 + if (current->brute)
51641 + msleep(30 * 1000);
51642 +#endif
51643 + return;
51644 +}
51645 +
51646 +void gr_handle_kernel_exploit(void)
51647 +{
51648 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51649 + const struct cred *cred;
51650 + struct task_struct *tsk, *tsk2;
51651 + struct user_struct *user;
51652 + uid_t uid;
51653 +
51654 + if (in_irq() || in_serving_softirq() || in_nmi())
51655 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51656 +
51657 + uid = current_uid();
51658 +
51659 + if (uid == 0)
51660 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
51661 + else {
51662 + /* kill all the processes of this user, hold a reference
51663 + to their creds struct, and prevent them from creating
51664 + another process until system reset
51665 + */
51666 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51667 + /* we intentionally leak this ref */
51668 + user = get_uid(current->cred->user);
51669 + if (user) {
51670 + user->banned = 1;
51671 + user->ban_expires = ~0UL;
51672 + }
51673 +
51674 + read_lock(&tasklist_lock);
51675 + do_each_thread(tsk2, tsk) {
51676 + cred = __task_cred(tsk);
51677 + if (cred->uid == uid)
51678 + gr_fake_force_sig(SIGKILL, tsk);
51679 + } while_each_thread(tsk2, tsk);
51680 + read_unlock(&tasklist_lock);
51681 + }
51682 +#endif
51683 +}
51684 +
51685 +int __gr_process_user_ban(struct user_struct *user)
51686 +{
51687 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51688 + if (unlikely(user->banned)) {
51689 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51690 + user->banned = 0;
51691 + user->ban_expires = 0;
51692 + free_uid(user);
51693 + } else
51694 + return -EPERM;
51695 + }
51696 +#endif
51697 + return 0;
51698 +}
51699 +
51700 +int gr_process_user_ban(void)
51701 +{
51702 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51703 + return __gr_process_user_ban(current->cred->user);
51704 +#endif
51705 + return 0;
51706 +}
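
The brute-force handling above bans an offending user's uid for GR_USER_BAN_TIME seconds and then lets __gr_process_user_ban() lift the ban lazily the next time it is checked. Below is a minimal userspace model of just that bookkeeping (the uid lookup, the SIGKILL fan-out and the free_uid() reference drop are omitted); ban_time_after_eq() mirrors the kernel's wrap-safe time_after_eq() macro, and all names here are illustrative only.

    #include <stdio.h>

    #define GR_USER_BAN_TIME (15 * 60)

    /* wrap-safe "now >= expiry" check, equivalent to time_after_eq(a, b) */
    static int ban_time_after_eq(unsigned long a, unsigned long b)
    {
            return (long)(a - b) >= 0;
    }

    struct fake_user {
            int banned;
            unsigned long ban_expires;      /* absolute time in seconds */
    };

    static void ban_user(struct fake_user *u, unsigned long now)
    {
            u->banned = 1;
            u->ban_expires = now + GR_USER_BAN_TIME;
            if (u->ban_expires == ~0UL)     /* ~0UL is reserved for permanent bans */
                    u->ban_expires--;
    }

    /* returns -1 (EPERM-like) while the ban is active, 0 once it has lapsed */
    static int process_user_ban(struct fake_user *u, unsigned long now)
    {
            if (u->banned) {
                    if (u->ban_expires != ~0UL &&
                        ban_time_after_eq(now, u->ban_expires)) {
                            u->banned = 0;
                            u->ban_expires = 0;
                    } else {
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            struct fake_user u = { 0, 0 };
            unsigned long now = 1000000;

            ban_user(&u, now);
            printf("right after ban:  %d\n", process_user_ban(&u, now));           /* -1 */
            printf("14 minutes later: %d\n", process_user_ban(&u, now + 14 * 60)); /* -1 */
            printf("15 minutes later: %d\n", process_user_ban(&u, now + 15 * 60)); /* 0  */
            return 0;
    }
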
51707 diff -urNp linux-2.6.32.42/grsecurity/grsec_sock.c linux-2.6.32.42/grsecurity/grsec_sock.c
51708 --- linux-2.6.32.42/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51709 +++ linux-2.6.32.42/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
51710 @@ -0,0 +1,275 @@
51711 +#include <linux/kernel.h>
51712 +#include <linux/module.h>
51713 +#include <linux/sched.h>
51714 +#include <linux/file.h>
51715 +#include <linux/net.h>
51716 +#include <linux/in.h>
51717 +#include <linux/ip.h>
51718 +#include <net/sock.h>
51719 +#include <net/inet_sock.h>
51720 +#include <linux/grsecurity.h>
51721 +#include <linux/grinternal.h>
51722 +#include <linux/gracl.h>
51723 +
51724 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
51725 +EXPORT_SYMBOL(gr_cap_rtnetlink);
51726 +
51727 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51728 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51729 +
51730 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51731 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51732 +
51733 +#ifdef CONFIG_UNIX_MODULE
51734 +EXPORT_SYMBOL(gr_acl_handle_unix);
51735 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51736 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51737 +EXPORT_SYMBOL(gr_handle_create);
51738 +#endif
51739 +
51740 +#ifdef CONFIG_GRKERNSEC
51741 +#define gr_conn_table_size 32749
51742 +struct conn_table_entry {
51743 + struct conn_table_entry *next;
51744 + struct signal_struct *sig;
51745 +};
51746 +
51747 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51748 +DEFINE_SPINLOCK(gr_conn_table_lock);
51749 +
51750 +extern const char * gr_socktype_to_name(unsigned char type);
51751 +extern const char * gr_proto_to_name(unsigned char proto);
51752 +extern const char * gr_sockfamily_to_name(unsigned char family);
51753 +
51754 +static __inline__ int
51755 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51756 +{
51757 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51758 +}
51759 +
51760 +static __inline__ int
51761 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51762 + __u16 sport, __u16 dport)
51763 +{
51764 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51765 + sig->gr_sport == sport && sig->gr_dport == dport))
51766 + return 1;
51767 + else
51768 + return 0;
51769 +}
51770 +
51771 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51772 +{
51773 + struct conn_table_entry **match;
51774 + unsigned int index;
51775 +
51776 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51777 + sig->gr_sport, sig->gr_dport,
51778 + gr_conn_table_size);
51779 +
51780 + newent->sig = sig;
51781 +
51782 + match = &gr_conn_table[index];
51783 + newent->next = *match;
51784 + *match = newent;
51785 +
51786 + return;
51787 +}
51788 +
51789 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51790 +{
51791 + struct conn_table_entry *match, *last = NULL;
51792 + unsigned int index;
51793 +
51794 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51795 + sig->gr_sport, sig->gr_dport,
51796 + gr_conn_table_size);
51797 +
51798 + match = gr_conn_table[index];
51799 + while (match && !conn_match(match->sig,
51800 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51801 + sig->gr_dport)) {
51802 + last = match;
51803 + match = match->next;
51804 + }
51805 +
51806 + if (match) {
51807 + if (last)
51808 + last->next = match->next;
51809 + else
51810 + gr_conn_table[index] = match->next;
51811 + kfree(match);
51812 + }
51813 +
51814 + return;
51815 +}
51816 +
51817 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51818 + __u16 sport, __u16 dport)
51819 +{
51820 + struct conn_table_entry *match;
51821 + unsigned int index;
51822 +
51823 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51824 +
51825 + match = gr_conn_table[index];
51826 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51827 + match = match->next;
51828 +
51829 + if (match)
51830 + return match->sig;
51831 + else
51832 + return NULL;
51833 +}
51834 +
51835 +#endif
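
The helpers above implement a small chained hash table keyed on the (saddr, daddr, sport, dport) 4-tuple, so that gr_attach_curr_ip() can later recover, at accept() time, which task originally owned the connection. A standalone sketch of the same structure, using the identical hash expression but illustrative names and a tiny table size:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define TABLE_SIZE 31

    struct entry {
            struct entry *next;
            uint32_t saddr, daddr;
            uint16_t sport, dport;
            int task_id;                    /* stands in for the signal_struct */
    };

    static struct entry *table[TABLE_SIZE];

    static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                                  uint16_t sport, uint16_t dport)
    {
            /* same mixing as the patch: daddr + saddr + (sport << 8) + (dport << 16) */
            return (daddr + saddr + ((uint32_t)sport << 8) +
                    ((uint32_t)dport << 16)) % TABLE_SIZE;
    }

    static void add_conn(uint32_t saddr, uint32_t daddr,
                         uint16_t sport, uint16_t dport, int task_id)
    {
            unsigned int i = conn_hash(saddr, daddr, sport, dport);
            struct entry *e = malloc(sizeof(*e));

            if (!e)
                    return;
            e->saddr = saddr; e->daddr = daddr;
            e->sport = sport; e->dport = dport;
            e->task_id = task_id;
            e->next = table[i];             /* push onto the bucket's chain */
            table[i] = e;
    }

    static struct entry *lookup_conn(uint32_t saddr, uint32_t daddr,
                                     uint16_t sport, uint16_t dport)
    {
            struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

            while (e && !(e->saddr == saddr && e->daddr == daddr &&
                          e->sport == sport && e->dport == dport))
                    e = e->next;
            return e;
    }

    int main(void)
    {
            struct entry *e;

            add_conn(0x0a000001, 0x0a000002, 12345, 80, 42);
            e = lookup_conn(0x0a000001, 0x0a000002, 12345, 80);
            printf("found task %d\n", e ? e->task_id : -1);    /* found task 42 */
            return 0;
    }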
51836 +
51837 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51838 +{
51839 +#ifdef CONFIG_GRKERNSEC
51840 + struct signal_struct *sig = task->signal;
51841 + struct conn_table_entry *newent;
51842 +
51843 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51844 + if (newent == NULL)
51845 + return;
51846 + /* no bh lock needed since we are called with bh disabled */
51847 + spin_lock(&gr_conn_table_lock);
51848 + gr_del_task_from_ip_table_nolock(sig);
51849 + sig->gr_saddr = inet->rcv_saddr;
51850 + sig->gr_daddr = inet->daddr;
51851 + sig->gr_sport = inet->sport;
51852 + sig->gr_dport = inet->dport;
51853 + gr_add_to_task_ip_table_nolock(sig, newent);
51854 + spin_unlock(&gr_conn_table_lock);
51855 +#endif
51856 + return;
51857 +}
51858 +
51859 +void gr_del_task_from_ip_table(struct task_struct *task)
51860 +{
51861 +#ifdef CONFIG_GRKERNSEC
51862 + spin_lock_bh(&gr_conn_table_lock);
51863 + gr_del_task_from_ip_table_nolock(task->signal);
51864 + spin_unlock_bh(&gr_conn_table_lock);
51865 +#endif
51866 + return;
51867 +}
51868 +
51869 +void
51870 +gr_attach_curr_ip(const struct sock *sk)
51871 +{
51872 +#ifdef CONFIG_GRKERNSEC
51873 + struct signal_struct *p, *set;
51874 + const struct inet_sock *inet = inet_sk(sk);
51875 +
51876 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51877 + return;
51878 +
51879 + set = current->signal;
51880 +
51881 + spin_lock_bh(&gr_conn_table_lock);
51882 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
51883 + inet->dport, inet->sport);
51884 + if (unlikely(p != NULL)) {
51885 + set->curr_ip = p->curr_ip;
51886 + set->used_accept = 1;
51887 + gr_del_task_from_ip_table_nolock(p);
51888 + spin_unlock_bh(&gr_conn_table_lock);
51889 + return;
51890 + }
51891 + spin_unlock_bh(&gr_conn_table_lock);
51892 +
51893 + set->curr_ip = inet->daddr;
51894 + set->used_accept = 1;
51895 +#endif
51896 + return;
51897 +}
51898 +
51899 +int
51900 +gr_handle_sock_all(const int family, const int type, const int protocol)
51901 +{
51902 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51903 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51904 + (family != AF_UNIX)) {
51905 + if (family == AF_INET)
51906 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51907 + else
51908 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51909 + return -EACCES;
51910 + }
51911 +#endif
51912 + return 0;
51913 +}
51914 +
51915 +int
51916 +gr_handle_sock_server(const struct sockaddr *sck)
51917 +{
51918 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51919 + if (grsec_enable_socket_server &&
51920 + in_group_p(grsec_socket_server_gid) &&
51921 + sck && (sck->sa_family != AF_UNIX) &&
51922 + (sck->sa_family != AF_LOCAL)) {
51923 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51924 + return -EACCES;
51925 + }
51926 +#endif
51927 + return 0;
51928 +}
51929 +
51930 +int
51931 +gr_handle_sock_server_other(const struct sock *sck)
51932 +{
51933 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51934 + if (grsec_enable_socket_server &&
51935 + in_group_p(grsec_socket_server_gid) &&
51936 + sck && (sck->sk_family != AF_UNIX) &&
51937 + (sck->sk_family != AF_LOCAL)) {
51938 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51939 + return -EACCES;
51940 + }
51941 +#endif
51942 + return 0;
51943 +}
51944 +
51945 +int
51946 +gr_handle_sock_client(const struct sockaddr *sck)
51947 +{
51948 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51949 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
51950 + sck && (sck->sa_family != AF_UNIX) &&
51951 + (sck->sa_family != AF_LOCAL)) {
51952 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
51953 + return -EACCES;
51954 + }
51955 +#endif
51956 + return 0;
51957 +}
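
Seen from userspace, these hooks simply turn socket creation, bind or connect into an EACCES failure for processes in the configured group whenever the socket is not AF_UNIX/AF_LOCAL. A minimal probe under that assumption (exactly where each hook is wired into the syscall paths is defined elsewhere in the patch):

    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0) {
                    /* EACCES here is the gr_handle_sock_all() denial path */
                    printf("socket(AF_INET) denied: %s\n", strerror(errno));
                    return 1;
            }
            printf("socket(AF_INET) allowed (fd=%d)\n", fd);
            close(fd);
            return 0;
    }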
51958 +
51959 +kernel_cap_t
51960 +gr_cap_rtnetlink(struct sock *sock)
51961 +{
51962 +#ifdef CONFIG_GRKERNSEC
51963 + if (!gr_acl_is_enabled())
51964 + return current_cap();
51965 + else if (sock->sk_protocol == NETLINK_ISCSI &&
51966 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
51967 + gr_is_capable(CAP_SYS_ADMIN))
51968 + return current_cap();
51969 + else if (sock->sk_protocol == NETLINK_AUDIT &&
51970 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
51971 + gr_is_capable(CAP_AUDIT_WRITE) &&
51972 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
51973 + gr_is_capable(CAP_AUDIT_CONTROL))
51974 + return current_cap();
51975 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
51976 + ((sock->sk_protocol == NETLINK_ROUTE) ?
51977 + gr_is_capable_nolog(CAP_NET_ADMIN) :
51978 + gr_is_capable(CAP_NET_ADMIN)))
51979 + return current_cap();
51980 + else
51981 + return __cap_empty_set;
51982 +#else
51983 + return current_cap();
51984 +#endif
51985 +}
51986 diff -urNp linux-2.6.32.42/grsecurity/grsec_sysctl.c linux-2.6.32.42/grsecurity/grsec_sysctl.c
51987 --- linux-2.6.32.42/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
51988 +++ linux-2.6.32.42/grsecurity/grsec_sysctl.c 2011-06-29 19:37:19.000000000 -0400
51989 @@ -0,0 +1,489 @@
51990 +#include <linux/kernel.h>
51991 +#include <linux/sched.h>
51992 +#include <linux/sysctl.h>
51993 +#include <linux/grsecurity.h>
51994 +#include <linux/grinternal.h>
51995 +
51996 +int
51997 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
51998 +{
51999 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52000 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52001 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52002 + return -EACCES;
52003 + }
52004 +#endif
52005 + return 0;
52006 +}
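
gr_handle_sysctl_mod() is what makes the grsec_lock entry defined later in this file sticky: once grsec_lock is non-zero, any further write under the "grsecurity" sysctl directory is refused with EACCES. A userspace sketch of that behaviour; the /proc/sys/kernel/grsecurity path is an assumption about where the table is attached (that wiring lives elsewhere in the patch), and the audit_mount entry only exists when its config option is enabled, so adjust both to the system at hand:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    static int write_sysctl(const char *path, const char *val)
    {
            int err = 0;
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return -errno;
            if (write(fd, val, strlen(val)) < 0)
                    err = -errno;
            close(fd);
            return err;
    }

    int main(void)
    {
            const char *dir = "/proc/sys/kernel/grsecurity";   /* assumed location */
            char lock[256], toggle[256];

            snprintf(lock, sizeof(lock), "%s/grsec_lock", dir);
            snprintf(toggle, sizeof(toggle), "%s/audit_mount", dir);

            printf("locking: %d\n", write_sysctl(lock, "1"));
            /* expected to fail with -EACCES once the lock is in place */
            printf("toggle after lock: %d\n", write_sysctl(toggle, "1"));
            return 0;
    }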
52007 +
52008 +#ifdef CONFIG_GRKERNSEC_ROFS
52009 +static int __maybe_unused one = 1;
52010 +#endif
52011 +
52012 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52013 +ctl_table grsecurity_table[] = {
52014 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52015 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52016 +#ifdef CONFIG_GRKERNSEC_IO
52017 + {
52018 + .ctl_name = CTL_UNNUMBERED,
52019 + .procname = "disable_priv_io",
52020 + .data = &grsec_disable_privio,
52021 + .maxlen = sizeof(int),
52022 + .mode = 0600,
52023 + .proc_handler = &proc_dointvec,
52024 + },
52025 +#endif
52026 +#endif
52027 +#ifdef CONFIG_GRKERNSEC_LINK
52028 + {
52029 + .ctl_name = CTL_UNNUMBERED,
52030 + .procname = "linking_restrictions",
52031 + .data = &grsec_enable_link,
52032 + .maxlen = sizeof(int),
52033 + .mode = 0600,
52034 + .proc_handler = &proc_dointvec,
52035 + },
52036 +#endif
52037 +#ifdef CONFIG_GRKERNSEC_BRUTE
52038 + {
52039 + .ctl_name = CTL_UNNUMBERED,
52040 + .procname = "deter_bruteforce",
52041 + .data = &grsec_enable_brute,
52042 + .maxlen = sizeof(int),
52043 + .mode = 0600,
52044 + .proc_handler = &proc_dointvec,
52045 + },
52046 +#endif
52047 +#ifdef CONFIG_GRKERNSEC_FIFO
52048 + {
52049 + .ctl_name = CTL_UNNUMBERED,
52050 + .procname = "fifo_restrictions",
52051 + .data = &grsec_enable_fifo,
52052 + .maxlen = sizeof(int),
52053 + .mode = 0600,
52054 + .proc_handler = &proc_dointvec,
52055 + },
52056 +#endif
52057 +#ifdef CONFIG_GRKERNSEC_EXECVE
52058 + {
52059 + .ctl_name = CTL_UNNUMBERED,
52060 + .procname = "execve_limiting",
52061 + .data = &grsec_enable_execve,
52062 + .maxlen = sizeof(int),
52063 + .mode = 0600,
52064 + .proc_handler = &proc_dointvec,
52065 + },
52066 +#endif
52067 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52068 + {
52069 + .ctl_name = CTL_UNNUMBERED,
52070 + .procname = "ip_blackhole",
52071 + .data = &grsec_enable_blackhole,
52072 + .maxlen = sizeof(int),
52073 + .mode = 0600,
52074 + .proc_handler = &proc_dointvec,
52075 + },
52076 + {
52077 + .ctl_name = CTL_UNNUMBERED,
52078 + .procname = "lastack_retries",
52079 + .data = &grsec_lastack_retries,
52080 + .maxlen = sizeof(int),
52081 + .mode = 0600,
52082 + .proc_handler = &proc_dointvec,
52083 + },
52084 +#endif
52085 +#ifdef CONFIG_GRKERNSEC_EXECLOG
52086 + {
52087 + .ctl_name = CTL_UNNUMBERED,
52088 + .procname = "exec_logging",
52089 + .data = &grsec_enable_execlog,
52090 + .maxlen = sizeof(int),
52091 + .mode = 0600,
52092 + .proc_handler = &proc_dointvec,
52093 + },
52094 +#endif
52095 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52096 + {
52097 + .ctl_name = CTL_UNNUMBERED,
52098 + .procname = "rwxmap_logging",
52099 + .data = &grsec_enable_log_rwxmaps,
52100 + .maxlen = sizeof(int),
52101 + .mode = 0600,
52102 + .proc_handler = &proc_dointvec,
52103 + },
52104 +#endif
52105 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52106 + {
52107 + .ctl_name = CTL_UNNUMBERED,
52108 + .procname = "signal_logging",
52109 + .data = &grsec_enable_signal,
52110 + .maxlen = sizeof(int),
52111 + .mode = 0600,
52112 + .proc_handler = &proc_dointvec,
52113 + },
52114 +#endif
52115 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
52116 + {
52117 + .ctl_name = CTL_UNNUMBERED,
52118 + .procname = "forkfail_logging",
52119 + .data = &grsec_enable_forkfail,
52120 + .maxlen = sizeof(int),
52121 + .mode = 0600,
52122 + .proc_handler = &proc_dointvec,
52123 + },
52124 +#endif
52125 +#ifdef CONFIG_GRKERNSEC_TIME
52126 + {
52127 + .ctl_name = CTL_UNNUMBERED,
52128 + .procname = "timechange_logging",
52129 + .data = &grsec_enable_time,
52130 + .maxlen = sizeof(int),
52131 + .mode = 0600,
52132 + .proc_handler = &proc_dointvec,
52133 + },
52134 +#endif
52135 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52136 + {
52137 + .ctl_name = CTL_UNNUMBERED,
52138 + .procname = "chroot_deny_shmat",
52139 + .data = &grsec_enable_chroot_shmat,
52140 + .maxlen = sizeof(int),
52141 + .mode = 0600,
52142 + .proc_handler = &proc_dointvec,
52143 + },
52144 +#endif
52145 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52146 + {
52147 + .ctl_name = CTL_UNNUMBERED,
52148 + .procname = "chroot_deny_unix",
52149 + .data = &grsec_enable_chroot_unix,
52150 + .maxlen = sizeof(int),
52151 + .mode = 0600,
52152 + .proc_handler = &proc_dointvec,
52153 + },
52154 +#endif
52155 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52156 + {
52157 + .ctl_name = CTL_UNNUMBERED,
52158 + .procname = "chroot_deny_mount",
52159 + .data = &grsec_enable_chroot_mount,
52160 + .maxlen = sizeof(int),
52161 + .mode = 0600,
52162 + .proc_handler = &proc_dointvec,
52163 + },
52164 +#endif
52165 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52166 + {
52167 + .ctl_name = CTL_UNNUMBERED,
52168 + .procname = "chroot_deny_fchdir",
52169 + .data = &grsec_enable_chroot_fchdir,
52170 + .maxlen = sizeof(int),
52171 + .mode = 0600,
52172 + .proc_handler = &proc_dointvec,
52173 + },
52174 +#endif
52175 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52176 + {
52177 + .ctl_name = CTL_UNNUMBERED,
52178 + .procname = "chroot_deny_chroot",
52179 + .data = &grsec_enable_chroot_double,
52180 + .maxlen = sizeof(int),
52181 + .mode = 0600,
52182 + .proc_handler = &proc_dointvec,
52183 + },
52184 +#endif
52185 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52186 + {
52187 + .ctl_name = CTL_UNNUMBERED,
52188 + .procname = "chroot_deny_pivot",
52189 + .data = &grsec_enable_chroot_pivot,
52190 + .maxlen = sizeof(int),
52191 + .mode = 0600,
52192 + .proc_handler = &proc_dointvec,
52193 + },
52194 +#endif
52195 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52196 + {
52197 + .ctl_name = CTL_UNNUMBERED,
52198 + .procname = "chroot_enforce_chdir",
52199 + .data = &grsec_enable_chroot_chdir,
52200 + .maxlen = sizeof(int),
52201 + .mode = 0600,
52202 + .proc_handler = &proc_dointvec,
52203 + },
52204 +#endif
52205 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52206 + {
52207 + .ctl_name = CTL_UNNUMBERED,
52208 + .procname = "chroot_deny_chmod",
52209 + .data = &grsec_enable_chroot_chmod,
52210 + .maxlen = sizeof(int),
52211 + .mode = 0600,
52212 + .proc_handler = &proc_dointvec,
52213 + },
52214 +#endif
52215 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52216 + {
52217 + .ctl_name = CTL_UNNUMBERED,
52218 + .procname = "chroot_deny_mknod",
52219 + .data = &grsec_enable_chroot_mknod,
52220 + .maxlen = sizeof(int),
52221 + .mode = 0600,
52222 + .proc_handler = &proc_dointvec,
52223 + },
52224 +#endif
52225 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52226 + {
52227 + .ctl_name = CTL_UNNUMBERED,
52228 + .procname = "chroot_restrict_nice",
52229 + .data = &grsec_enable_chroot_nice,
52230 + .maxlen = sizeof(int),
52231 + .mode = 0600,
52232 + .proc_handler = &proc_dointvec,
52233 + },
52234 +#endif
52235 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52236 + {
52237 + .ctl_name = CTL_UNNUMBERED,
52238 + .procname = "chroot_execlog",
52239 + .data = &grsec_enable_chroot_execlog,
52240 + .maxlen = sizeof(int),
52241 + .mode = 0600,
52242 + .proc_handler = &proc_dointvec,
52243 + },
52244 +#endif
52245 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52246 + {
52247 + .ctl_name = CTL_UNNUMBERED,
52248 + .procname = "chroot_caps",
52249 + .data = &grsec_enable_chroot_caps,
52250 + .maxlen = sizeof(int),
52251 + .mode = 0600,
52252 + .proc_handler = &proc_dointvec,
52253 + },
52254 +#endif
52255 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52256 + {
52257 + .ctl_name = CTL_UNNUMBERED,
52258 + .procname = "chroot_deny_sysctl",
52259 + .data = &grsec_enable_chroot_sysctl,
52260 + .maxlen = sizeof(int),
52261 + .mode = 0600,
52262 + .proc_handler = &proc_dointvec,
52263 + },
52264 +#endif
52265 +#ifdef CONFIG_GRKERNSEC_TPE
52266 + {
52267 + .ctl_name = CTL_UNNUMBERED,
52268 + .procname = "tpe",
52269 + .data = &grsec_enable_tpe,
52270 + .maxlen = sizeof(int),
52271 + .mode = 0600,
52272 + .proc_handler = &proc_dointvec,
52273 + },
52274 + {
52275 + .ctl_name = CTL_UNNUMBERED,
52276 + .procname = "tpe_gid",
52277 + .data = &grsec_tpe_gid,
52278 + .maxlen = sizeof(int),
52279 + .mode = 0600,
52280 + .proc_handler = &proc_dointvec,
52281 + },
52282 +#endif
52283 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52284 + {
52285 + .ctl_name = CTL_UNNUMBERED,
52286 + .procname = "tpe_invert",
52287 + .data = &grsec_enable_tpe_invert,
52288 + .maxlen = sizeof(int),
52289 + .mode = 0600,
52290 + .proc_handler = &proc_dointvec,
52291 + },
52292 +#endif
52293 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52294 + {
52295 + .ctl_name = CTL_UNNUMBERED,
52296 + .procname = "tpe_restrict_all",
52297 + .data = &grsec_enable_tpe_all,
52298 + .maxlen = sizeof(int),
52299 + .mode = 0600,
52300 + .proc_handler = &proc_dointvec,
52301 + },
52302 +#endif
52303 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52304 + {
52305 + .ctl_name = CTL_UNNUMBERED,
52306 + .procname = "socket_all",
52307 + .data = &grsec_enable_socket_all,
52308 + .maxlen = sizeof(int),
52309 + .mode = 0600,
52310 + .proc_handler = &proc_dointvec,
52311 + },
52312 + {
52313 + .ctl_name = CTL_UNNUMBERED,
52314 + .procname = "socket_all_gid",
52315 + .data = &grsec_socket_all_gid,
52316 + .maxlen = sizeof(int),
52317 + .mode = 0600,
52318 + .proc_handler = &proc_dointvec,
52319 + },
52320 +#endif
52321 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52322 + {
52323 + .ctl_name = CTL_UNNUMBERED,
52324 + .procname = "socket_client",
52325 + .data = &grsec_enable_socket_client,
52326 + .maxlen = sizeof(int),
52327 + .mode = 0600,
52328 + .proc_handler = &proc_dointvec,
52329 + },
52330 + {
52331 + .ctl_name = CTL_UNNUMBERED,
52332 + .procname = "socket_client_gid",
52333 + .data = &grsec_socket_client_gid,
52334 + .maxlen = sizeof(int),
52335 + .mode = 0600,
52336 + .proc_handler = &proc_dointvec,
52337 + },
52338 +#endif
52339 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52340 + {
52341 + .ctl_name = CTL_UNNUMBERED,
52342 + .procname = "socket_server",
52343 + .data = &grsec_enable_socket_server,
52344 + .maxlen = sizeof(int),
52345 + .mode = 0600,
52346 + .proc_handler = &proc_dointvec,
52347 + },
52348 + {
52349 + .ctl_name = CTL_UNNUMBERED,
52350 + .procname = "socket_server_gid",
52351 + .data = &grsec_socket_server_gid,
52352 + .maxlen = sizeof(int),
52353 + .mode = 0600,
52354 + .proc_handler = &proc_dointvec,
52355 + },
52356 +#endif
52357 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52358 + {
52359 + .ctl_name = CTL_UNNUMBERED,
52360 + .procname = "audit_group",
52361 + .data = &grsec_enable_group,
52362 + .maxlen = sizeof(int),
52363 + .mode = 0600,
52364 + .proc_handler = &proc_dointvec,
52365 + },
52366 + {
52367 + .ctl_name = CTL_UNNUMBERED,
52368 + .procname = "audit_gid",
52369 + .data = &grsec_audit_gid,
52370 + .maxlen = sizeof(int),
52371 + .mode = 0600,
52372 + .proc_handler = &proc_dointvec,
52373 + },
52374 +#endif
52375 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52376 + {
52377 + .ctl_name = CTL_UNNUMBERED,
52378 + .procname = "audit_chdir",
52379 + .data = &grsec_enable_chdir,
52380 + .maxlen = sizeof(int),
52381 + .mode = 0600,
52382 + .proc_handler = &proc_dointvec,
52383 + },
52384 +#endif
52385 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52386 + {
52387 + .ctl_name = CTL_UNNUMBERED,
52388 + .procname = "audit_mount",
52389 + .data = &grsec_enable_mount,
52390 + .maxlen = sizeof(int),
52391 + .mode = 0600,
52392 + .proc_handler = &proc_dointvec,
52393 + },
52394 +#endif
52395 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52396 + {
52397 + .ctl_name = CTL_UNNUMBERED,
52398 + .procname = "audit_textrel",
52399 + .data = &grsec_enable_audit_textrel,
52400 + .maxlen = sizeof(int),
52401 + .mode = 0600,
52402 + .proc_handler = &proc_dointvec,
52403 + },
52404 +#endif
52405 +#ifdef CONFIG_GRKERNSEC_DMESG
52406 + {
52407 + .ctl_name = CTL_UNNUMBERED,
52408 + .procname = "dmesg",
52409 + .data = &grsec_enable_dmesg,
52410 + .maxlen = sizeof(int),
52411 + .mode = 0600,
52412 + .proc_handler = &proc_dointvec,
52413 + },
52414 +#endif
52415 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52416 + {
52417 + .ctl_name = CTL_UNNUMBERED,
52418 + .procname = "chroot_findtask",
52419 + .data = &grsec_enable_chroot_findtask,
52420 + .maxlen = sizeof(int),
52421 + .mode = 0600,
52422 + .proc_handler = &proc_dointvec,
52423 + },
52424 +#endif
52425 +#ifdef CONFIG_GRKERNSEC_RESLOG
52426 + {
52427 + .ctl_name = CTL_UNNUMBERED,
52428 + .procname = "resource_logging",
52429 + .data = &grsec_resource_logging,
52430 + .maxlen = sizeof(int),
52431 + .mode = 0600,
52432 + .proc_handler = &proc_dointvec,
52433 + },
52434 +#endif
52435 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52436 + {
52437 + .ctl_name = CTL_UNNUMBERED,
52438 + .procname = "audit_ptrace",
52439 + .data = &grsec_enable_audit_ptrace,
52440 + .maxlen = sizeof(int),
52441 + .mode = 0600,
52442 + .proc_handler = &proc_dointvec,
52443 + },
52444 +#endif
52445 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52446 + {
52447 + .ctl_name = CTL_UNNUMBERED,
52448 + .procname = "harden_ptrace",
52449 + .data = &grsec_enable_harden_ptrace,
52450 + .maxlen = sizeof(int),
52451 + .mode = 0600,
52452 + .proc_handler = &proc_dointvec,
52453 + },
52454 +#endif
52455 + {
52456 + .ctl_name = CTL_UNNUMBERED,
52457 + .procname = "grsec_lock",
52458 + .data = &grsec_lock,
52459 + .maxlen = sizeof(int),
52460 + .mode = 0600,
52461 + .proc_handler = &proc_dointvec,
52462 + },
52463 +#endif
52464 +#ifdef CONFIG_GRKERNSEC_ROFS
52465 + {
52466 + .ctl_name = CTL_UNNUMBERED,
52467 + .procname = "romount_protect",
52468 + .data = &grsec_enable_rofs,
52469 + .maxlen = sizeof(int),
52470 + .mode = 0600,
52471 + .proc_handler = &proc_dointvec_minmax,
52472 + .extra1 = &one,
52473 + .extra2 = &one,
52474 + },
52475 +#endif
52476 + { .ctl_name = 0 }
52477 +};
52478 +#endif
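
Every entry above exports one integer toggle with mode 0600 under a single "grsecurity" sysctl directory. The small userspace reader below lists whichever of these toggles a given kernel was built with; the /proc/sys/kernel/grsecurity location is again an assumption, since the attachment of this table to the sysctl tree is done elsewhere in the patch.

    #include <stdio.h>
    #include <dirent.h>

    int main(void)
    {
            const char *dir = "/proc/sys/kernel/grsecurity";   /* assumed location */
            DIR *d = opendir(dir);
            struct dirent *de;
            char path[512], buf[64];

            if (!d) {
                    perror("opendir");
                    return 1;
            }
            while ((de = readdir(d)) != NULL) {
                    if (de->d_name[0] == '.')
                            continue;
                    snprintf(path, sizeof(path), "%s/%s", dir, de->d_name);
                    FILE *f = fopen(path, "r");
                    if (!f)
                            continue;       /* mode 0600: unreadable unless root */
                    if (fgets(buf, sizeof(buf), f))
                            printf("%-24s %s", de->d_name, buf);
                    fclose(f);
            }
            closedir(d);
            return 0;
    }
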
52479 diff -urNp linux-2.6.32.42/grsecurity/grsec_time.c linux-2.6.32.42/grsecurity/grsec_time.c
52480 --- linux-2.6.32.42/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52481 +++ linux-2.6.32.42/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52482 @@ -0,0 +1,16 @@
52483 +#include <linux/kernel.h>
52484 +#include <linux/sched.h>
52485 +#include <linux/grinternal.h>
52486 +#include <linux/module.h>
52487 +
52488 +void
52489 +gr_log_timechange(void)
52490 +{
52491 +#ifdef CONFIG_GRKERNSEC_TIME
52492 + if (grsec_enable_time)
52493 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52494 +#endif
52495 + return;
52496 +}
52497 +
52498 +EXPORT_SYMBOL(gr_log_timechange);
52499 diff -urNp linux-2.6.32.42/grsecurity/grsec_tpe.c linux-2.6.32.42/grsecurity/grsec_tpe.c
52500 --- linux-2.6.32.42/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52501 +++ linux-2.6.32.42/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52502 @@ -0,0 +1,39 @@
52503 +#include <linux/kernel.h>
52504 +#include <linux/sched.h>
52505 +#include <linux/file.h>
52506 +#include <linux/fs.h>
52507 +#include <linux/grinternal.h>
52508 +
52509 +extern int gr_acl_tpe_check(void);
52510 +
52511 +int
52512 +gr_tpe_allow(const struct file *file)
52513 +{
52514 +#ifdef CONFIG_GRKERNSEC
52515 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52516 + const struct cred *cred = current_cred();
52517 +
52518 + if (cred->uid && ((grsec_enable_tpe &&
52519 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52520 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52521 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52522 +#else
52523 + in_group_p(grsec_tpe_gid)
52524 +#endif
52525 + ) || gr_acl_tpe_check()) &&
52526 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52527 + (inode->i_mode & S_IWOTH))))) {
52528 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52529 + return 0;
52530 + }
52531 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52532 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52533 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52534 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52535 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52536 + return 0;
52537 + }
52538 +#endif
52539 +#endif
52540 + return 1;
52541 +}
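
Stripped of the grsec/RBAC plumbing, the decision in gr_tpe_allow() is: refuse execution for a non-root user to whom TPE applies when the parent directory of the binary is either not owned by root or is group/world-writable (the TPE_ALL branch adds a similar check against the user's own uid). A purely illustrative restatement, modelling only a uid and the directory's owner and mode bits:

    #include <stdio.h>
    #include <sys/stat.h>

    /* directory metadata that the real check reads from the parent inode */
    struct dir_info {
            unsigned int owner_uid;
            unsigned int mode;              /* permission bits, e.g. 0755 */
    };

    static int dir_is_untrusted(const struct dir_info *d)
    {
            return d->owner_uid != 0 || (d->mode & (S_IWGRP | S_IWOTH));
    }

    /* 1 = allow exec, 0 = deny (mirrors gr_tpe_allow()'s return convention) */
    static int tpe_allow(unsigned int uid, int tpe_applies_to_user,
                         const struct dir_info *d)
    {
            if (uid == 0)
                    return 1;               /* root is never restricted */
            if (tpe_applies_to_user && dir_is_untrusted(d))
                    return 0;
            return 1;
    }

    int main(void)
    {
            struct dir_info world_writable = { .owner_uid = 0, .mode = 01777 }; /* like /tmp */
            struct dir_info root_bin       = { .owner_uid = 0, .mode = 0755 };

            printf("/tmp binary, TPE user:     %d\n",
                   tpe_allow(1000, 1, &world_writable));        /* 0: denied  */
            printf("/usr/bin binary, TPE user: %d\n",
                   tpe_allow(1000, 1, &root_bin));               /* 1: allowed */
            return 0;
    }
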
52542 diff -urNp linux-2.6.32.42/grsecurity/grsum.c linux-2.6.32.42/grsecurity/grsum.c
52543 --- linux-2.6.32.42/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52544 +++ linux-2.6.32.42/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52545 @@ -0,0 +1,61 @@
52546 +#include <linux/err.h>
52547 +#include <linux/kernel.h>
52548 +#include <linux/sched.h>
52549 +#include <linux/mm.h>
52550 +#include <linux/scatterlist.h>
52551 +#include <linux/crypto.h>
52552 +#include <linux/gracl.h>
52553 +
52554 +
52555 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52556 +#error "crypto and sha256 must be built into the kernel"
52557 +#endif
52558 +
52559 +int
52560 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52561 +{
52562 + char *p;
52563 + struct crypto_hash *tfm;
52564 + struct hash_desc desc;
52565 + struct scatterlist sg;
52566 + unsigned char temp_sum[GR_SHA_LEN];
52567 + volatile int retval = 0;
52568 + volatile int dummy = 0;
52569 + unsigned int i;
52570 +
52571 + sg_init_table(&sg, 1);
52572 +
52573 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52574 + if (IS_ERR(tfm)) {
52575 + /* should never happen, since sha256 should be built in */
52576 + return 1;
52577 + }
52578 +
52579 + desc.tfm = tfm;
52580 + desc.flags = 0;
52581 +
52582 + crypto_hash_init(&desc);
52583 +
52584 + p = salt;
52585 + sg_set_buf(&sg, p, GR_SALT_LEN);
52586 + crypto_hash_update(&desc, &sg, sg.length);
52587 +
52588 + p = entry->pw;
52589 + sg_set_buf(&sg, p, strlen(p));
52590 +
52591 + crypto_hash_update(&desc, &sg, sg.length);
52592 +
52593 + crypto_hash_final(&desc, temp_sum);
52594 +
52595 + memset(entry->pw, 0, GR_PW_LEN);
52596 +
52597 + for (i = 0; i < GR_SHA_LEN; i++)
52598 + if (sum[i] != temp_sum[i])
52599 + retval = 1;
52600 + else
52601 + dummy = 1; // waste a cycle
52602 +
52603 + crypto_free_hash(tfm);
52604 +
52605 + return retval;
52606 +}
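
The final loop above compares every byte of the two digests and keeps a volatile dummy assignment in the non-matching branch so that the comparison takes the same time regardless of where the first mismatch occurs. The same idea is more commonly written as an accumulating OR of XORed bytes; a minimal userspace version, assuming nothing beyond standard C:

    #include <stdio.h>
    #include <stddef.h>

    /* returns 0 when equal, non-zero otherwise, in time independent of the data */
    static int const_time_diff(const unsigned char *a, const unsigned char *b,
                               size_t len)
    {
            unsigned char acc = 0;
            size_t i;

            for (i = 0; i < len; i++)
                    acc |= a[i] ^ b[i];     /* no early exit on mismatch */
            return acc;
    }

    int main(void)
    {
            unsigned char x[4] = { 1, 2, 3, 4 };
            unsigned char y[4] = { 1, 2, 3, 5 };

            printf("equal:    %d\n", const_time_diff(x, x, sizeof(x)) == 0);  /* 1 */
            printf("mismatch: %d\n", const_time_diff(x, y, sizeof(y)) == 0);  /* 0 */
            return 0;
    }
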
52607 diff -urNp linux-2.6.32.42/grsecurity/Kconfig linux-2.6.32.42/grsecurity/Kconfig
52608 --- linux-2.6.32.42/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52609 +++ linux-2.6.32.42/grsecurity/Kconfig 2011-06-29 20:55:13.000000000 -0400
52610 @@ -0,0 +1,1047 @@
52611 +#
52612 +# grsecurity configuration
52613 +#
52614 +
52615 +menu "Grsecurity"
52616 +
52617 +config GRKERNSEC
52618 + bool "Grsecurity"
52619 + select CRYPTO
52620 + select CRYPTO_SHA256
52621 + help
52622 + If you say Y here, you will be able to configure many features
52623 + that will enhance the security of your system. It is highly
52624 + recommended that you say Y here and read through the help
52625 + for each option so that you fully understand the features and
52626 + can evaluate their usefulness for your machine.
52627 +
52628 +choice
52629 + prompt "Security Level"
52630 + depends on GRKERNSEC
52631 + default GRKERNSEC_CUSTOM
52632 +
52633 +config GRKERNSEC_LOW
52634 + bool "Low"
52635 + select GRKERNSEC_LINK
52636 + select GRKERNSEC_FIFO
52637 + select GRKERNSEC_EXECVE
52638 + select GRKERNSEC_RANDNET
52639 + select GRKERNSEC_DMESG
52640 + select GRKERNSEC_CHROOT
52641 + select GRKERNSEC_CHROOT_CHDIR
52642 +
52643 + help
52644 + If you choose this option, several of the grsecurity options will
52645 + be enabled that will give you greater protection against a number
52646 + of attacks, while ensuring that none of your software will have any
52647 + conflicts with the additional security measures. If you run a lot
52648 + of unusual software, or you are having problems with the higher
52649 + security levels, you should say Y here. With this option, the
52650 + following features are enabled:
52651 +
52652 + - Linking restrictions
52653 + - FIFO restrictions
52654 + - Enforcing RLIMIT_NPROC on execve
52655 + - Restricted dmesg
52656 + - Enforced chdir("/") on chroot
52657 + - Runtime module disabling
52658 +
52659 +config GRKERNSEC_MEDIUM
52660 + bool "Medium"
52661 + select PAX
52662 + select PAX_EI_PAX
52663 + select PAX_PT_PAX_FLAGS
52664 + select PAX_HAVE_ACL_FLAGS
52665 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52666 + select GRKERNSEC_CHROOT
52667 + select GRKERNSEC_CHROOT_SYSCTL
52668 + select GRKERNSEC_LINK
52669 + select GRKERNSEC_FIFO
52670 + select GRKERNSEC_EXECVE
52671 + select GRKERNSEC_DMESG
52672 + select GRKERNSEC_RANDNET
52673 + select GRKERNSEC_FORKFAIL
52674 + select GRKERNSEC_TIME
52675 + select GRKERNSEC_SIGNAL
52676 + select GRKERNSEC_CHROOT
52677 + select GRKERNSEC_CHROOT_UNIX
52678 + select GRKERNSEC_CHROOT_MOUNT
52679 + select GRKERNSEC_CHROOT_PIVOT
52680 + select GRKERNSEC_CHROOT_DOUBLE
52681 + select GRKERNSEC_CHROOT_CHDIR
52682 + select GRKERNSEC_CHROOT_MKNOD
52683 + select GRKERNSEC_PROC
52684 + select GRKERNSEC_PROC_USERGROUP
52685 + select PAX_RANDUSTACK
52686 + select PAX_ASLR
52687 + select PAX_RANDMMAP
52688 + select PAX_REFCOUNT if (X86 || SPARC64)
52689 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB))
52690 +
52691 + help
52692 + If you say Y here, several features in addition to those included
52693 + in the low additional security level will be enabled. These
52694 + features provide even more security to your system, though in rare
52695 + cases they may be incompatible with very old or poorly written
52696 + software. If you enable this option, make sure that your auth
52697 + service (identd) is running as gid 1001. With this option,
52698 + the following features (in addition to those provided in the
52699 + low additional security level) will be enabled:
52700 +
52701 + - Failed fork logging
52702 + - Time change logging
52703 + - Signal logging
52704 + - Deny mounts in chroot
52705 + - Deny double chrooting
52706 + - Deny sysctl writes in chroot
52707 + - Deny mknod in chroot
52708 + - Deny access to abstract AF_UNIX sockets out of chroot
52709 + - Deny pivot_root in chroot
52710 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52711 + - /proc restrictions with special GID set to 10 (usually wheel)
52712 + - Address Space Layout Randomization (ASLR)
52713 + - Prevent exploitation of most refcount overflows
52714 + - Bounds checking of copying between the kernel and userland
52715 +
52716 +config GRKERNSEC_HIGH
52717 + bool "High"
52718 + select GRKERNSEC_LINK
52719 + select GRKERNSEC_FIFO
52720 + select GRKERNSEC_EXECVE
52721 + select GRKERNSEC_DMESG
52722 + select GRKERNSEC_FORKFAIL
52723 + select GRKERNSEC_TIME
52724 + select GRKERNSEC_SIGNAL
52725 + select GRKERNSEC_CHROOT
52726 + select GRKERNSEC_CHROOT_SHMAT
52727 + select GRKERNSEC_CHROOT_UNIX
52728 + select GRKERNSEC_CHROOT_MOUNT
52729 + select GRKERNSEC_CHROOT_FCHDIR
52730 + select GRKERNSEC_CHROOT_PIVOT
52731 + select GRKERNSEC_CHROOT_DOUBLE
52732 + select GRKERNSEC_CHROOT_CHDIR
52733 + select GRKERNSEC_CHROOT_MKNOD
52734 + select GRKERNSEC_CHROOT_CAPS
52735 + select GRKERNSEC_CHROOT_SYSCTL
52736 + select GRKERNSEC_CHROOT_FINDTASK
52737 + select GRKERNSEC_SYSFS_RESTRICT
52738 + select GRKERNSEC_PROC
52739 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52740 + select GRKERNSEC_HIDESYM
52741 + select GRKERNSEC_BRUTE
52742 + select GRKERNSEC_PROC_USERGROUP
52743 + select GRKERNSEC_KMEM
52744 + select GRKERNSEC_RESLOG
52745 + select GRKERNSEC_RANDNET
52746 + select GRKERNSEC_PROC_ADD
52747 + select GRKERNSEC_CHROOT_CHMOD
52748 + select GRKERNSEC_CHROOT_NICE
52749 + select GRKERNSEC_AUDIT_MOUNT
52750 + select GRKERNSEC_MODHARDEN if (MODULES)
52751 + select GRKERNSEC_HARDEN_PTRACE
52752 + select GRKERNSEC_VM86 if (X86_32)
52753 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
52754 + select PAX
52755 + select PAX_RANDUSTACK
52756 + select PAX_ASLR
52757 + select PAX_RANDMMAP
52758 + select PAX_NOEXEC
52759 + select PAX_MPROTECT
52760 + select PAX_EI_PAX
52761 + select PAX_PT_PAX_FLAGS
52762 + select PAX_HAVE_ACL_FLAGS
52763 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52764 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
52765 + select PAX_RANDKSTACK if (X86_TSC && X86)
52766 + select PAX_SEGMEXEC if (X86_32)
52767 + select PAX_PAGEEXEC
52768 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
52769 + select PAX_EMUTRAMP if (PARISC)
52770 + select PAX_EMUSIGRT if (PARISC)
52771 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52772 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52773 + select PAX_REFCOUNT if (X86 || SPARC64)
52774 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB))
52775 + help
52776 + If you say Y here, many of the features of grsecurity will be
52777 + enabled, which will protect you against many kinds of attacks
52778 + against your system. The heightened security comes at a cost
52779 + of an increased chance of incompatibilities with rare software
52780 + on your machine. Since this security level enables PaX, you should
52781 + view <http://pax.grsecurity.net> and read about the PaX
52782 + project. While you are there, download chpax and run it on
52783 + binaries that cause problems with PaX. Also remember that
52784 + since the /proc restrictions are enabled, you must run your
52785 + identd as gid 1001. This security level enables the following
52786 + features in addition to those listed in the low and medium
52787 + security levels:
52788 +
52789 + - Additional /proc restrictions
52790 + - Chmod restrictions in chroot
52791 + - No signals, ptrace, or viewing of processes outside of chroot
52792 + - Capability restrictions in chroot
52793 + - Deny fchdir out of chroot
52794 + - Priority restrictions in chroot
52795 + - Segmentation-based implementation of PaX
52796 + - Mprotect restrictions
52797 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52798 + - Kernel stack randomization
52799 + - Mount/unmount/remount logging
52800 + - Kernel symbol hiding
52801 + - Prevention of memory exhaustion-based exploits
52802 + - Hardening of module auto-loading
52803 + - Ptrace restrictions
52804 + - Restricted vm86 mode
52805 + - Restricted sysfs/debugfs
52806 + - Active kernel exploit response
52807 +
52808 +config GRKERNSEC_CUSTOM
52809 + bool "Custom"
52810 + help
52811 + If you say Y here, you will be able to configure every grsecurity
52812 + option, which allows you to enable many more features that aren't
52813 + covered in the basic security levels. These additional features
52814 + include TPE, socket restrictions, and the sysctl system for
52815 + grsecurity. It is advised that you read through the help for
52816 + each option to determine its usefulness in your situation.
52817 +
52818 +endchoice
52819 +
52820 +menu "Address Space Protection"
52821 +depends on GRKERNSEC
52822 +
52823 +config GRKERNSEC_KMEM
52824 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52825 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52826 + help
52827 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52828 + be written to via mmap or otherwise to modify the running kernel.
52829 + /dev/port will also not be allowed to be opened. If you have module
52830 + support disabled, enabling this will close up four ways that are
52831 + currently used to insert malicious code into the running kernel.
52832 + Even with all these features enabled, we still highly recommend that
52833 + you use the RBAC system, as it is still possible for an attacker to
52834 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52835 + If you are not using XFree86, you may be able to stop this additional
52836 + case by enabling the 'Disable privileged I/O' option. Though nothing
52837 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52838 + but only to video memory, which is the only writing we allow in this
52839 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
52840 + mapping will not be allowed to be mprotected with PROT_WRITE later.
52841 + It is highly recommended that you say Y here if you meet all the
52842 + conditions above.
52843 +
52844 +config GRKERNSEC_VM86
52845 + bool "Restrict VM86 mode"
52846 + depends on X86_32
52847 +
52848 + help
52849 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52850 + make use of a special execution mode on 32bit x86 processors called
52851 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52852 + video cards and will still work with this option enabled. The purpose
52853 + of the option is to prevent exploitation of emulation errors in
52854 + virtualization of vm86 mode, like the one discovered in VMware in 2009.
52855 + Nearly all users should be able to enable this option.
52856 +
52857 +config GRKERNSEC_IO
52858 + bool "Disable privileged I/O"
52859 + depends on X86
52860 + select RTC_CLASS
52861 + select RTC_INTF_DEV
52862 + select RTC_DRV_CMOS
52863 +
52864 + help
52865 + If you say Y here, all ioperm and iopl calls will return an error.
52866 + Ioperm and iopl can be used to modify the running kernel.
52867 + Unfortunately, some programs need this access to operate properly,
52868 + the most notable of which are XFree86 and hwclock. hwclock can be
52869 + remedied by having RTC support in the kernel, so real-time
52870 + clock support is enabled if this option is enabled, to ensure
52871 + that hwclock operates correctly. XFree86 still will not
52872 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52873 + IF YOU USE XFree86. If you use XFree86 and you still want to
52874 + protect your kernel against modification, use the RBAC system.
52875 +
52876 +config GRKERNSEC_PROC_MEMMAP
52877 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52878 + default y if (PAX_NOEXEC || PAX_ASLR)
52879 + depends on PAX_NOEXEC || PAX_ASLR
52880 + help
52881 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52882 + give no information about the addresses of a task's mappings if
52883 + PaX features that rely on random addresses are enabled on the task.
52884 + If you use PaX it is greatly recommended that you say Y here as it
52885 + closes up a hole that makes the full ASLR useless for suid
52886 + binaries.
52887 +
52888 +config GRKERNSEC_BRUTE
52889 + bool "Deter exploit bruteforcing"
52890 + help
52891 + If you say Y here, attempts to bruteforce exploits against forking
52892 + daemons such as apache or sshd, as well as against suid/sgid binaries,
52893 + will be deterred. When a child of a forking daemon is killed by PaX
52894 + or crashes due to an illegal instruction or other suspicious signal,
52895 + the parent process will be delayed 30 seconds upon every subsequent
52896 + fork until the administrator is able to assess the situation and
52897 + restart the daemon.
52898 + In the suid/sgid case, the attempt is logged, the user has all their
52899 + processes terminated, and they are prevented from executing any further
52900 + processes for 15 minutes.
52901 + It is recommended that you also enable signal logging in the auditing
52902 + section so that logs are generated when a process triggers a suspicious
52903 + signal.
52904 + If the sysctl option is enabled, a sysctl option with name
52905 + "deter_bruteforce" is created.
52906 +
52907 +config GRKERNSEC_MODHARDEN
52908 + bool "Harden module auto-loading"
52909 + depends on MODULES
52910 + help
52911 + If you say Y here, module auto-loading in response to use of some
52912 + feature implemented by an unloaded module will be restricted to
52913 + root users. Enabling this option helps defend against attacks
52914 + by unprivileged users who abuse the auto-loading behavior to
52915 + cause a vulnerable module to load that is then exploited.
52916 +
52917 + If this option prevents a legitimate use of auto-loading for a
52918 + non-root user, the administrator can execute modprobe manually
52919 + with the exact name of the module mentioned in the alert log.
52920 + Alternatively, the administrator can add the module to the list
52921 + of modules loaded at boot by modifying init scripts.
52922 +
52923 + Modification of init scripts will most likely be needed on
52924 + Ubuntu servers with encrypted home directory support enabled,
52925 + as the first non-root user logging in will cause the ecb(aes),
52926 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52927 +
52928 +config GRKERNSEC_HIDESYM
52929 + bool "Hide kernel symbols"
52930 + help
52931 + If you say Y here, getting information on loaded modules, and
52932 + displaying all kernel symbols through a syscall will be restricted
52933 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52934 + /proc/kallsyms will be restricted to the root user. The RBAC
52935 + system can hide that entry even from root.
52936 +
52937 + This option also prevents leaking of kernel addresses through
52938 + several /proc entries.
52939 +
52940 + Note that this option is only effective provided the following
52941 + conditions are met:
52942 + 1) The kernel using grsecurity is not precompiled by some distribution
52943 + 2) You have also enabled GRKERNSEC_DMESG
52944 + 3) You are using the RBAC system and hiding other files such as your
52945 + kernel image and System.map. Alternatively, enabling this option
52946 + causes the permissions on /boot, /lib/modules, and the kernel
52947 + source directory to change at compile time to prevent
52948 + reading by non-root users.
52949 + If the above conditions are met, this option will aid in providing a
52950 + useful protection against local kernel exploitation of overflows
52951 + and arbitrary read/write vulnerabilities.
52952 +
52953 +config GRKERNSEC_KERN_LOCKOUT
52954 + bool "Active kernel exploit response"
52955 + depends on X86 || ARM || PPC || SPARC
52956 + help
52957 + If you say Y here, when a PaX alert is triggered due to suspicious
52958 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52959 + or an OOPs occurs due to bad memory accesses, instead of just
52960 + terminating the offending process (and potentially allowing
52961 + a subsequent exploit from the same user), we will take one of two
52962 + actions:
52963 + If the user was root, we will panic the system
52964 + If the user was non-root, we will log the attempt, terminate
52965 + all processes owned by the user, then prevent them from creating
52966 + any new processes until the system is restarted
52967 + This deters repeated kernel exploitation/bruteforcing attempts
52968 + and is useful for later forensics.
52969 +
52970 +endmenu
52971 +menu "Role Based Access Control Options"
52972 +depends on GRKERNSEC
52973 +
52974 +config GRKERNSEC_RBAC_DEBUG
52975 + bool
52976 +
52977 +config GRKERNSEC_NO_RBAC
52978 + bool "Disable RBAC system"
52979 + help
52980 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52981 + preventing the RBAC system from being enabled. You should only say Y
52982 + here if you have no intention of using the RBAC system, so as to prevent
52983 + an attacker with root access from misusing the RBAC system to hide files
52984 + and processes when loadable module support and /dev/[k]mem have been
52985 + locked down.
52986 +
52987 +config GRKERNSEC_ACL_HIDEKERN
52988 + bool "Hide kernel processes"
52989 + help
52990 + If you say Y here, all kernel threads will be hidden to all
52991 + processes but those whose subject has the "view hidden processes"
52992 + flag.
52993 +
52994 +config GRKERNSEC_ACL_MAXTRIES
52995 + int "Maximum tries before password lockout"
52996 + default 3
52997 + help
52998 + This option enforces the maximum number of times a user can attempt
52999 + to authorize themselves with the grsecurity RBAC system before being
53000 + denied the ability to attempt authorization again for a specified time.
53001 + The lower the number, the harder it will be to brute-force a password.
53002 +
53003 +config GRKERNSEC_ACL_TIMEOUT
53004 + int "Time to wait after max password tries, in seconds"
53005 + default 30
53006 + help
53007 + This option specifies the time the user must wait after attempting to
53008 + authorize to the RBAC system with the maximum number of invalid
53009 + passwords. The higher the number, the harder it will be to brute-force
53010 + a password.
53011 +
53012 +endmenu
53013 +menu "Filesystem Protections"
53014 +depends on GRKERNSEC
53015 +
53016 +config GRKERNSEC_PROC
53017 + bool "Proc restrictions"
53018 + help
53019 + If you say Y here, the permissions of the /proc filesystem
53020 + will be altered to enhance system security and privacy. You MUST
53021 + choose either a user only restriction or a user and group restriction.
53022 + Depending upon the option you choose, you can either restrict users to
53023 + see only the processes they themselves run, or choose a group that can
53024 + view all processes and files normally restricted to root if you choose
53025 + the "restrict to user only" option. NOTE: If you're running identd as
53026 + a non-root user, you will have to run it as the group you specify here.
53027 +
53028 +config GRKERNSEC_PROC_USER
53029 + bool "Restrict /proc to user only"
53030 + depends on GRKERNSEC_PROC
53031 + help
53032 + If you say Y here, non-root users will only be able to view their own
53033 + processes, and will be restricted from viewing network-related
53034 + information and kernel symbol and module information.
53035 +
53036 +config GRKERNSEC_PROC_USERGROUP
53037 + bool "Allow special group"
53038 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53039 + help
53040 + If you say Y here, you will be able to select a group that will be
53041 + able to view all processes and network-related information. If you've
53042 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53043 + remain hidden. This option is useful if you want to run identd as
53044 + a non-root user.
53045 +
53046 +config GRKERNSEC_PROC_GID
53047 + int "GID for special group"
53048 + depends on GRKERNSEC_PROC_USERGROUP
53049 + default 1001
53050 +
53051 +config GRKERNSEC_PROC_ADD
53052 + bool "Additional restrictions"
53053 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53054 + help
53055 + If you say Y here, additional restrictions will be placed on
53056 + /proc that keep normal users from viewing device information and
53057 + slabinfo information that could be useful for exploits.
53058 +
53059 +config GRKERNSEC_LINK
53060 + bool "Linking restrictions"
53061 + help
53062 + If you say Y here, /tmp race exploits will be prevented, since users
53063 + will no longer be able to follow symlinks owned by other users in
53064 + world-writable +t directories (e.g. /tmp), unless the owner of the
53065 + symlink is the owner of the directory. Users will also not be
53066 + able to hardlink to files they do not own. If the sysctl option is
53067 + enabled, a sysctl option with name "linking_restrictions" is created.
53068 +
53069 +config GRKERNSEC_FIFO
53070 + bool "FIFO restrictions"
53071 + help
53072 + If you say Y here, users will not be able to write to FIFOs they don't
53073 + own in world-writable +t directories (e.g. /tmp), unless the owner of
53074 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
53075 + option is enabled, a sysctl option with name "fifo_restrictions" is
53076 + created.
53077 +
53078 +config GRKERNSEC_SYSFS_RESTRICT
53079 + bool "Sysfs/debugfs restriction"
53080 + depends on SYSFS
53081 + help
53082 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53083 + any filesystem normally mounted under it (e.g. debugfs) will only
53084 + be accessible by root. These filesystems generally provide access
53085 + to hardware and debug information that isn't appropriate for unprivileged
53086 + users of the system. Sysfs and debugfs have also become a large source
53087 + of new vulnerabilities, ranging from infoleaks to local compromise.
53088 + There has been very little oversight with an eye toward security involved
53089 + in adding new exporters of information to these filesystems, so their
53090 + use is discouraged.
53091 + This option is equivalent to a chmod 0700 of the mount paths.
53092 +
53093 +config GRKERNSEC_ROFS
53094 + bool "Runtime read-only mount protection"
53095 + help
53096 + If you say Y here, a sysctl option with name "romount_protect" will
53097 + be created. By setting this option to 1 at runtime, filesystems
53098 + will be protected in the following ways:
53099 + * No new writable mounts will be allowed
53100 + * Existing read-only mounts won't be able to be remounted read/write
53101 + * Write operations will be denied on all block devices
53102 + This option acts independently of grsec_lock: once it is set to 1,
53103 + it cannot be turned off. Therefore, please be mindful of the resulting
53104 + behavior if this option is enabled in an init script on a read-only
53105 + filesystem. This feature is mainly intended for secure embedded systems.
53106 +
53107 +config GRKERNSEC_CHROOT
53108 + bool "Chroot jail restrictions"
53109 + help
53110 + If you say Y here, you will be able to choose several options that will
53111 + make breaking out of a chrooted jail much more difficult. If you
53112 + encounter no software incompatibilities with the following options, it
53113 + is recommended that you enable each one.
53114 +
53115 +config GRKERNSEC_CHROOT_MOUNT
53116 + bool "Deny mounts"
53117 + depends on GRKERNSEC_CHROOT
53118 + help
53119 + If you say Y here, processes inside a chroot will not be able to
53120 + mount or remount filesystems. If the sysctl option is enabled, a
53121 + sysctl option with name "chroot_deny_mount" is created.
53122 +
53123 +config GRKERNSEC_CHROOT_DOUBLE
53124 + bool "Deny double-chroots"
53125 + depends on GRKERNSEC_CHROOT
53126 + help
53127 + If you say Y here, processes inside a chroot will not be able to chroot
53128 + again outside the chroot. This is a widely used method of breaking
53129 + out of a chroot jail and should not be allowed. If the sysctl
53130 + option is enabled, a sysctl option with name
53131 + "chroot_deny_chroot" is created.
53132 +
53133 +config GRKERNSEC_CHROOT_PIVOT
53134 + bool "Deny pivot_root in chroot"
53135 + depends on GRKERNSEC_CHROOT
53136 + help
53137 + If you say Y here, processes inside a chroot will not be able to use
53138 + a function called pivot_root() that was introduced in Linux 2.3.41. It
53139 + works similarly to chroot in that it changes the root filesystem. This
53140 + function could be misused in a chrooted process to attempt to break out
53141 + of the chroot, and therefore should not be allowed. If the sysctl
53142 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
53143 + created.
53144 +
53145 +config GRKERNSEC_CHROOT_CHDIR
53146 + bool "Enforce chdir(\"/\") on all chroots"
53147 + depends on GRKERNSEC_CHROOT
53148 + help
53149 + If you say Y here, the current working directory of all newly-chrooted
53150 + applications will be set to the root directory of the chroot.
53151 + The man page on chroot(2) states:
53152 + Note that this call does not change the current working
53153 + directory, so that `.' can be outside the tree rooted at
53154 + `/'. In particular, the super-user can escape from a
53155 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53156 +
53157 + It is recommended that you say Y here, since it's not known to break
53158 + any software. If the sysctl option is enabled, a sysctl option with
53159 + name "chroot_enforce_chdir" is created.
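
For reference, the escape described in the quoted man-page text can be compressed into a few lines of C: a root process whose working directory is still outside the new root can chroot() into a subdirectory and then chdir("..") its way back to the real root. This is the long-documented sequence that forcing chdir("/") at chroot time is meant to cut off; it requires root privileges and a kernel without this option to do anything.

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            int i;

            mkdir("foo", 0700);             /* mkdir foo                                   */
            if (chroot("foo") != 0) {       /* chroot foo: cwd is now outside the new root */
                    perror("chroot");
                    return 1;
            }
            for (i = 0; i < 64; i++)        /* cd .. until the real / is reached           */
                    chdir("..");
            chroot(".");                    /* re-root at the real filesystem root         */
            return execl("/bin/sh", "sh", (char *)NULL);
    }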
53160 +
53161 +config GRKERNSEC_CHROOT_CHMOD
53162 + bool "Deny (f)chmod +s"
53163 + depends on GRKERNSEC_CHROOT
53164 + help
53165 + If you say Y here, processes inside a chroot will not be able to chmod
53166 + or fchmod files to make them have suid or sgid bits. This protects
53167 + against another published method of breaking a chroot. If the sysctl
53168 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
53169 + created.
53170 +
53171 +config GRKERNSEC_CHROOT_FCHDIR
53172 + bool "Deny fchdir out of chroot"
53173 + depends on GRKERNSEC_CHROOT
53174 + help
53175 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53176 + to a file descriptor of the chrooting process that points to a directory
53177 + outside the filesystem will be stopped. If the sysctl option
53178 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53179 +
53180 +config GRKERNSEC_CHROOT_MKNOD
53181 + bool "Deny mknod"
53182 + depends on GRKERNSEC_CHROOT
53183 + help
53184 + If you say Y here, processes inside a chroot will not be allowed to
53185 + mknod. The problem with using mknod inside a chroot is that it
53186 + would allow an attacker to create a device entry that is the same
53187 +	  as one on the physical root of your system, which could be anything
53188 +	  from the console device to a device for your hard drive (which
53189 + they could then use to wipe the drive or steal data). It is recommended
53190 + that you say Y here, unless you run into software incompatibilities.
53191 + If the sysctl option is enabled, a sysctl option with name
53192 + "chroot_deny_mknod" is created.
53193 +
53194 +config GRKERNSEC_CHROOT_SHMAT
53195 + bool "Deny shmat() out of chroot"
53196 + depends on GRKERNSEC_CHROOT
53197 + help
53198 + If you say Y here, processes inside a chroot will not be able to attach
53199 + to shared memory segments that were created outside of the chroot jail.
53200 + It is recommended that you say Y here. If the sysctl option is enabled,
53201 + a sysctl option with name "chroot_deny_shmat" is created.
53202 +
53203 +config GRKERNSEC_CHROOT_UNIX
53204 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53205 + depends on GRKERNSEC_CHROOT
53206 + help
53207 + If you say Y here, processes inside a chroot will not be able to
53208 + connect to abstract (meaning not belonging to a filesystem) Unix
53209 + domain sockets that were bound outside of a chroot. It is recommended
53210 + that you say Y here. If the sysctl option is enabled, a sysctl option
53211 + with name "chroot_deny_unix" is created.
53212 +
53213 +config GRKERNSEC_CHROOT_FINDTASK
53214 + bool "Protect outside processes"
53215 + depends on GRKERNSEC_CHROOT
53216 + help
53217 + If you say Y here, processes inside a chroot will not be able to
53218 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53219 + getsid, or view any process outside of the chroot. If the sysctl
53220 + option is enabled, a sysctl option with name "chroot_findtask" is
53221 + created.
53222 +
53223 +config GRKERNSEC_CHROOT_NICE
53224 + bool "Restrict priority changes"
53225 + depends on GRKERNSEC_CHROOT
53226 + help
53227 + If you say Y here, processes inside a chroot will not be able to raise
53228 + the priority of processes in the chroot, or alter the priority of
53229 + processes outside the chroot. This provides more security than simply
53230 + removing CAP_SYS_NICE from the process' capability set. If the
53231 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53232 + is created.
53233 +
53234 +config GRKERNSEC_CHROOT_SYSCTL
53235 + bool "Deny sysctl writes"
53236 + depends on GRKERNSEC_CHROOT
53237 + help
53238 + If you say Y here, an attacker in a chroot will not be able to
53239 + write to sysctl entries, either by sysctl(2) or through a /proc
53240 + interface. It is strongly recommended that you say Y here. If the
53241 + sysctl option is enabled, a sysctl option with name
53242 + "chroot_deny_sysctl" is created.
53243 +
53244 +config GRKERNSEC_CHROOT_CAPS
53245 + bool "Capability restrictions"
53246 + depends on GRKERNSEC_CHROOT
53247 + help
53248 + If you say Y here, the capabilities on all root processes within a
53249 + chroot jail will be lowered to stop module insertion, raw i/o,
53250 + system and net admin tasks, rebooting the system, modifying immutable
53251 + files, modifying IPC owned by another, and changing the system time.
53252 + This is left an option because it can break some apps. Disable this
53253 + if your chrooted apps are having problems performing those kinds of
53254 + tasks. If the sysctl option is enabled, a sysctl option with
53255 + name "chroot_caps" is created.
53256 +
53257 +endmenu
53258 +menu "Kernel Auditing"
53259 +depends on GRKERNSEC
53260 +
53261 +config GRKERNSEC_AUDIT_GROUP
53262 + bool "Single group for auditing"
53263 + help
53264 + If you say Y here, the exec, chdir, and (un)mount logging features
53265 + will only operate on a group you specify. This option is recommended
53266 + if you only want to watch certain users instead of having a large
53267 +	  number of logs from the entire system.  If the sysctl option is enabled,
53268 + a sysctl option with name "audit_group" is created.
53269 +
53270 +config GRKERNSEC_AUDIT_GID
53271 + int "GID for auditing"
53272 + depends on GRKERNSEC_AUDIT_GROUP
53273 + default 1007
53274 +
53275 +config GRKERNSEC_EXECLOG
53276 + bool "Exec logging"
53277 + help
53278 + If you say Y here, all execve() calls will be logged (since the
53279 + other exec*() calls are frontends to execve(), all execution
53280 + will be logged). Useful for shell-servers that like to keep track
53281 + of their users. If the sysctl option is enabled, a sysctl option with
53282 + name "exec_logging" is created.
53283 + WARNING: This option when enabled will produce a LOT of logs, especially
53284 + on an active system.
53285 +
53286 +config GRKERNSEC_RESLOG
53287 + bool "Resource logging"
53288 + help
53289 + If you say Y here, all attempts to overstep resource limits will
53290 + be logged with the resource name, the requested size, and the current
53291 + limit. It is highly recommended that you say Y here. If the sysctl
53292 + option is enabled, a sysctl option with name "resource_logging" is
53293 + created. If the RBAC system is enabled, the sysctl value is ignored.
53294 +
53295 +config GRKERNSEC_CHROOT_EXECLOG
53296 + bool "Log execs within chroot"
53297 + help
53298 + If you say Y here, all executions inside a chroot jail will be logged
53299 +	  to syslog.  This can cause a large number of logs if certain
53300 + applications (eg. djb's daemontools) are installed on the system, and
53301 + is therefore left as an option. If the sysctl option is enabled, a
53302 + sysctl option with name "chroot_execlog" is created.
53303 +
53304 +config GRKERNSEC_AUDIT_PTRACE
53305 + bool "Ptrace logging"
53306 + help
53307 + If you say Y here, all attempts to attach to a process via ptrace
53308 + will be logged. If the sysctl option is enabled, a sysctl option
53309 + with name "audit_ptrace" is created.
53310 +
53311 +config GRKERNSEC_AUDIT_CHDIR
53312 + bool "Chdir logging"
53313 + help
53314 + If you say Y here, all chdir() calls will be logged. If the sysctl
53315 + option is enabled, a sysctl option with name "audit_chdir" is created.
53316 +
53317 +config GRKERNSEC_AUDIT_MOUNT
53318 + bool "(Un)Mount logging"
53319 + help
53320 + If you say Y here, all mounts and unmounts will be logged. If the
53321 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53322 + created.
53323 +
53324 +config GRKERNSEC_SIGNAL
53325 + bool "Signal logging"
53326 + help
53327 + If you say Y here, certain important signals will be logged, such as
53328 +	  SIGSEGV, which will, as a result, inform you when an error in a program
53329 + occurred, which in some cases could mean a possible exploit attempt.
53330 + If the sysctl option is enabled, a sysctl option with name
53331 + "signal_logging" is created.
53332 +
53333 +config GRKERNSEC_FORKFAIL
53334 + bool "Fork failure logging"
53335 + help
53336 + If you say Y here, all failed fork() attempts will be logged.
53337 + This could suggest a fork bomb, or someone attempting to overstep
53338 + their process limit. If the sysctl option is enabled, a sysctl option
53339 + with name "forkfail_logging" is created.
53340 +
53341 +config GRKERNSEC_TIME
53342 + bool "Time change logging"
53343 + help
53344 + If you say Y here, any changes of the system clock will be logged.
53345 + If the sysctl option is enabled, a sysctl option with name
53346 + "timechange_logging" is created.
53347 +
53348 +config GRKERNSEC_PROC_IPADDR
53349 + bool "/proc/<pid>/ipaddr support"
53350 + help
53351 + If you say Y here, a new entry will be added to each /proc/<pid>
53352 + directory that contains the IP address of the person using the task.
53353 + The IP is carried across local TCP and AF_UNIX stream sockets.
53354 + This information can be useful for IDS/IPSes to perform remote response
53355 + to a local attack. The entry is readable by only the owner of the
53356 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53357 + the RBAC system), and thus does not create privacy concerns.
53358 +
53359 +config GRKERNSEC_RWXMAP_LOG
53360 + bool 'Denied RWX mmap/mprotect logging'
53361 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53362 + help
53363 + If you say Y here, calls to mmap() and mprotect() with explicit
53364 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53365 + denied by the PAX_MPROTECT feature. If the sysctl option is
53366 + enabled, a sysctl option with name "rwxmap_logging" is created.
53367 +
53368 +config GRKERNSEC_AUDIT_TEXTREL
53369 + bool 'ELF text relocations logging (READ HELP)'
53370 + depends on PAX_MPROTECT
53371 + help
53372 + If you say Y here, text relocations will be logged with the filename
53373 + of the offending library or binary. The purpose of the feature is
53374 + to help Linux distribution developers get rid of libraries and
53375 + binaries that need text relocations which hinder the future progress
53376 + of PaX. Only Linux distribution developers should say Y here, and
53377 + never on a production machine, as this option creates an information
53378 + leak that could aid an attacker in defeating the randomization of
53379 + a single memory region. If the sysctl option is enabled, a sysctl
53380 + option with name "audit_textrel" is created.
53381 +
53382 +endmenu
53383 +
53384 +menu "Executable Protections"
53385 +depends on GRKERNSEC
53386 +
53387 +config GRKERNSEC_EXECVE
53388 + bool "Enforce RLIMIT_NPROC on execs"
53389 + help
53390 + If you say Y here, users with a resource limit on processes will
53391 + have the value checked during execve() calls. The current system
53392 + only checks the system limit during fork() calls. If the sysctl option
53393 + is enabled, a sysctl option with name "execve_limiting" is created.
53394 +
53395 +config GRKERNSEC_DMESG
53396 + bool "Dmesg(8) restriction"
53397 + help
53398 + If you say Y here, non-root users will not be able to use dmesg(8)
53399 + to view up to the last 4kb of messages in the kernel's log buffer.
53400 + The kernel's log buffer often contains kernel addresses and other
53401 + identifying information useful to an attacker in fingerprinting a
53402 + system for a targeted exploit.
53403 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53404 + created.
53405 +
53406 +config GRKERNSEC_HARDEN_PTRACE
53407 + bool "Deter ptrace-based process snooping"
53408 + help
53409 + If you say Y here, TTY sniffers and other malicious monitoring
53410 + programs implemented through ptrace will be defeated. If you
53411 + have been using the RBAC system, this option has already been
53412 + enabled for several years for all users, with the ability to make
53413 + fine-grained exceptions.
53414 +
53415 + This option only affects the ability of non-root users to ptrace
53416 +	  processes that are not a descendant of the ptracing process.
53417 + This means that strace ./binary and gdb ./binary will still work,
53418 + but attaching to arbitrary processes will not. If the sysctl
53419 + option is enabled, a sysctl option with name "harden_ptrace" is
53420 + created.
53421 +
53422 +config GRKERNSEC_TPE
53423 + bool "Trusted Path Execution (TPE)"
53424 + help
53425 + If you say Y here, you will be able to choose a gid to add to the
53426 + supplementary groups of users you want to mark as "untrusted."
53427 + These users will not be able to execute any files that are not in
53428 + root-owned directories writable only by root. If the sysctl option
53429 + is enabled, a sysctl option with name "tpe" is created.
53430 +
53431 +config GRKERNSEC_TPE_ALL
53432 + bool "Partially restrict all non-root users"
53433 + depends on GRKERNSEC_TPE
53434 + help
53435 + If you say Y here, all non-root users will be covered under
53436 + a weaker TPE restriction. This is separate from, and in addition to,
53437 + the main TPE options that you have selected elsewhere. Thus, if a
53438 + "trusted" GID is chosen, this restriction applies to even that GID.
53439 + Under this restriction, all non-root users will only be allowed to
53440 + execute files in directories they own that are not group or
53441 + world-writable, or in directories owned by root and writable only by
53442 + root. If the sysctl option is enabled, a sysctl option with name
53443 + "tpe_restrict_all" is created.
53444 +
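	  As a rough illustration of the directory-trust rule described by the two TPE
	  options above, the sketch below checks whether the directory holding a candidate
	  binary would count as trusted: root-owned and writable only by root, or, under the
	  weaker GRKERNSEC_TPE_ALL rule, owned by the invoking user and not group- or
	  world-writable.  This is only a userspace approximation written for this text
	  (the function name tpe_dir_trusted is invented); the real check is done in the
	  kernel and differs in detail.

	      #include <sys/types.h>
	      #include <sys/stat.h>
	      #include <unistd.h>
	      #include <stdio.h>

	      /* 1 if the directory would be considered trusted under the rules above */
	      static int tpe_dir_trusted(const char *dir, uid_t uid)
	      {
	          struct stat st;

	          if (stat(dir, &st) || !S_ISDIR(st.st_mode))
	              return 0;

	          /* root-owned directory writable only by root */
	          if (st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH)))
	              return 1;

	          /* weaker rule: owned by the user, not group- or world-writable */
	          if (st.st_uid == uid && !(st.st_mode & (S_IWGRP | S_IWOTH)))
	              return 1;

	          return 0;
	      }

	      int main(int argc, char **argv)
	      {
	          if (argc > 1)
	              printf("%s: %s\n", argv[1],
	                     tpe_dir_trusted(argv[1], getuid()) ? "trusted" : "untrusted");
	          return 0;
	      }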
53445 +config GRKERNSEC_TPE_INVERT
53446 + bool "Invert GID option"
53447 + depends on GRKERNSEC_TPE
53448 + help
53449 + If you say Y here, the group you specify in the TPE configuration will
53450 + decide what group TPE restrictions will be *disabled* for. This
53451 + option is useful if you want TPE restrictions to be applied to most
53452 + users on the system. If the sysctl option is enabled, a sysctl option
53453 + with name "tpe_invert" is created. Unlike other sysctl options, this
53454 + entry will default to on for backward-compatibility.
53455 +
53456 +config GRKERNSEC_TPE_GID
53457 + int "GID for untrusted users"
53458 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53459 + default 1005
53460 + help
53461 + Setting this GID determines what group TPE restrictions will be
53462 + *enabled* for. If the sysctl option is enabled, a sysctl option
53463 + with name "tpe_gid" is created.
53464 +
53465 +config GRKERNSEC_TPE_GID
53466 + int "GID for trusted users"
53467 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53468 + default 1005
53469 + help
53470 + Setting this GID determines what group TPE restrictions will be
53471 + *disabled* for. If the sysctl option is enabled, a sysctl option
53472 + with name "tpe_gid" is created.
53473 +
53474 +endmenu
53475 +menu "Network Protections"
53476 +depends on GRKERNSEC
53477 +
53478 +config GRKERNSEC_RANDNET
53479 + bool "Larger entropy pools"
53480 + help
53481 + If you say Y here, the entropy pools used for many features of Linux
53482 + and grsecurity will be doubled in size. Since several grsecurity
53483 + features use additional randomness, it is recommended that you say Y
53484 + here. Saying Y here has a similar effect as modifying
53485 + /proc/sys/kernel/random/poolsize.
53486 +
53487 +config GRKERNSEC_BLACKHOLE
53488 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53489 + help
53490 + If you say Y here, neither TCP resets nor ICMP
53491 + destination-unreachable packets will be sent in response to packets
53492 + sent to ports for which no associated listening process exists.
53493 +	  This feature supports both IPv4 and IPv6 and exempts the
53494 + loopback interface from blackholing. Enabling this feature
53495 + makes a host more resilient to DoS attacks and reduces network
53496 + visibility against scanners.
53497 +
53498 + The blackhole feature as-implemented is equivalent to the FreeBSD
53499 + blackhole feature, as it prevents RST responses to all packets, not
53500 + just SYNs. Under most application behavior this causes no
53501 + problems, but applications (like haproxy) may not close certain
53502 + connections in a way that cleanly terminates them on the remote
53503 + end, leaving the remote host in LAST_ACK state. Because of this
53504 + side-effect and to prevent intentional LAST_ACK DoSes, this
53505 + feature also adds automatic mitigation against such attacks.
53506 + The mitigation drastically reduces the amount of time a socket
53507 + can spend in LAST_ACK state. If you're using haproxy and not
53508 + all servers it connects to have this option enabled, consider
53509 + disabling this feature on the haproxy host.
53510 +
53511 + If the sysctl option is enabled, two sysctl options with names
53512 + "ip_blackhole" and "lastack_retries" will be created.
53513 + While "ip_blackhole" takes the standard zero/non-zero on/off
53514 + toggle, "lastack_retries" uses the same kinds of values as
53515 + "tcp_retries1" and "tcp_retries2". The default value of 4
53516 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53517 + state.
53518 +
53519 +config GRKERNSEC_SOCKET
53520 + bool "Socket restrictions"
53521 + help
53522 + If you say Y here, you will be able to choose from several options.
53523 + If you assign a GID on your system and add it to the supplementary
53524 + groups of users you want to restrict socket access to, this patch
53525 + will perform up to three things, based on the option(s) you choose.
53526 +
53527 +config GRKERNSEC_SOCKET_ALL
53528 + bool "Deny any sockets to group"
53529 + depends on GRKERNSEC_SOCKET
53530 + help
53531 +	  If you say Y here, you will be able to choose a GID whose users will
53532 + be unable to connect to other hosts from your machine or run server
53533 + applications from your machine. If the sysctl option is enabled, a
53534 + sysctl option with name "socket_all" is created.
53535 +
53536 +config GRKERNSEC_SOCKET_ALL_GID
53537 + int "GID to deny all sockets for"
53538 + depends on GRKERNSEC_SOCKET_ALL
53539 + default 1004
53540 + help
53541 + Here you can choose the GID to disable socket access for. Remember to
53542 + add the users you want socket access disabled for to the GID
53543 + specified here. If the sysctl option is enabled, a sysctl option
53544 + with name "socket_all_gid" is created.
53545 +
53546 +config GRKERNSEC_SOCKET_CLIENT
53547 + bool "Deny client sockets to group"
53548 + depends on GRKERNSEC_SOCKET
53549 + help
53550 +	  If you say Y here, you will be able to choose a GID whose users will
53551 + be unable to connect to other hosts from your machine, but will be
53552 + able to run servers. If this option is enabled, all users in the group
53553 + you specify will have to use passive mode when initiating ftp transfers
53554 + from the shell on your machine. If the sysctl option is enabled, a
53555 + sysctl option with name "socket_client" is created.
53556 +
53557 +config GRKERNSEC_SOCKET_CLIENT_GID
53558 + int "GID to deny client sockets for"
53559 + depends on GRKERNSEC_SOCKET_CLIENT
53560 + default 1003
53561 + help
53562 + Here you can choose the GID to disable client socket access for.
53563 + Remember to add the users you want client socket access disabled for to
53564 + the GID specified here. If the sysctl option is enabled, a sysctl
53565 + option with name "socket_client_gid" is created.
53566 +
53567 +config GRKERNSEC_SOCKET_SERVER
53568 + bool "Deny server sockets to group"
53569 + depends on GRKERNSEC_SOCKET
53570 + help
53571 +	  If you say Y here, you will be able to choose a GID whose users will
53572 + be unable to run server applications from your machine. If the sysctl
53573 + option is enabled, a sysctl option with name "socket_server" is created.
53574 +
53575 +config GRKERNSEC_SOCKET_SERVER_GID
53576 + int "GID to deny server sockets for"
53577 + depends on GRKERNSEC_SOCKET_SERVER
53578 + default 1002
53579 + help
53580 + Here you can choose the GID to disable server socket access for.
53581 + Remember to add the users you want server socket access disabled for to
53582 + the GID specified here. If the sysctl option is enabled, a sysctl
53583 + option with name "socket_server_gid" is created.
53584 +
53585 +endmenu
53586 +menu "Sysctl support"
53587 +depends on GRKERNSEC && SYSCTL
53588 +
53589 +config GRKERNSEC_SYSCTL
53590 + bool "Sysctl support"
53591 + help
53592 + If you say Y here, you will be able to change the options that
53593 + grsecurity runs with at bootup, without having to recompile your
53594 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53595 + to enable (1) or disable (0) various features. All the sysctl entries
53596 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53597 + All features enabled in the kernel configuration are disabled at boot
53598 + if you do not say Y to the "Turn on features by default" option.
53599 + All options should be set at startup, and the grsec_lock entry should
53600 + be set to a non-zero value after all the options are set.
53601 + *THIS IS EXTREMELY IMPORTANT*
53602 +
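	  As a concrete illustration of the boot-time procedure recommended above, the
	  sketch below is a hypothetical init-time helper, not part of the patch.  It
	  enables two entries whose names appear in this file (chroot_deny_mount and
	  audit_mount), then sets grsec_lock last so the settings can no longer be changed;
	  adjust the entry names to the features actually built into your kernel.

	      #include <stdio.h>

	      static int grsec_set(const char *name, int val)
	      {
	          char path[256];
	          FILE *f;

	          snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	          f = fopen(path, "w");
	          if (!f)
	              return -1;
	          fprintf(f, "%d\n", val);
	          return fclose(f);
	      }

	      int main(void)
	      {
	          grsec_set("chroot_deny_mount", 1);
	          grsec_set("audit_mount", 1);
	          /* must come last: once grsec_lock is non-zero the entries
	           * above can no longer be changed */
	          grsec_set("grsec_lock", 1);
	          return 0;
	      }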
53603 +config GRKERNSEC_SYSCTL_DISTRO
53604 + bool "Extra sysctl support for distro makers (READ HELP)"
53605 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53606 + help
53607 + If you say Y here, additional sysctl options will be created
53608 + for features that affect processes running as root. Therefore,
53609 + it is critical when using this option that the grsec_lock entry be
53610 +	  enabled after boot.  Only distros that ship prebuilt kernel packages
53611 +	  with this option enabled and that can ensure grsec_lock is enabled
53612 + after boot should use this option.
53613 + *Failure to set grsec_lock after boot makes all grsec features
53614 + this option covers useless*
53615 +
53616 + Currently this option creates the following sysctl entries:
53617 + "Disable Privileged I/O": "disable_priv_io"
53618 +
53619 +config GRKERNSEC_SYSCTL_ON
53620 + bool "Turn on features by default"
53621 + depends on GRKERNSEC_SYSCTL
53622 + help
53623 +	  If you say Y here, all features enabled in the kernel configuration
53624 +	  will also be enabled at boot time, rather than starting out disabled.
53625 +	  It is recommended you say Y here unless
53626 + there is some reason you would want all sysctl-tunable features to
53627 + be disabled by default. As mentioned elsewhere, it is important
53628 + to enable the grsec_lock entry once you have finished modifying
53629 + the sysctl entries.
53630 +
53631 +endmenu
53632 +menu "Logging Options"
53633 +depends on GRKERNSEC
53634 +
53635 +config GRKERNSEC_FLOODTIME
53636 + int "Seconds in between log messages (minimum)"
53637 + default 10
53638 + help
53639 + This option allows you to enforce the number of seconds between
53640 + grsecurity log messages. The default should be suitable for most
53641 +	  people; however, if you choose to change it, choose a value small enough
53642 + to allow informative logs to be produced, but large enough to
53643 + prevent flooding.
53644 +
53645 +config GRKERNSEC_FLOODBURST
53646 + int "Number of messages in a burst (maximum)"
53647 + default 4
53648 + help
53649 + This option allows you to choose the maximum number of messages allowed
53650 + within the flood time interval you chose in a separate option. The
53651 +	  default should be suitable for most people; however, if you find that
53652 + many of your logs are being interpreted as flooding, you may want to
53653 + raise this value.
53654 +
53655 +endmenu
53656 +
53657 +endmenu
53658 diff -urNp linux-2.6.32.42/grsecurity/Makefile linux-2.6.32.42/grsecurity/Makefile
53659 --- linux-2.6.32.42/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53660 +++ linux-2.6.32.42/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
53661 @@ -0,0 +1,33 @@
53662 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53663 +# during 2001-2009 it has been completely redesigned by Brad Spengler
53664 +# into an RBAC system
53665 +#
53666 +# All code in this directory and various hooks inserted throughout the kernel
53667 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53668 +# under the GPL v2 or higher
53669 +
53670 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53671 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
53672 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53673 +
53674 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53675 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53676 + gracl_learn.o grsec_log.o
53677 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53678 +
53679 +ifdef CONFIG_NET
53680 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53681 +endif
53682 +
53683 +ifndef CONFIG_GRKERNSEC
53684 +obj-y += grsec_disabled.o
53685 +endif
53686 +
53687 +ifdef CONFIG_GRKERNSEC_HIDESYM
53688 +extra-y := grsec_hidesym.o
53689 +$(obj)/grsec_hidesym.o:
53690 + @-chmod -f 500 /boot
53691 + @-chmod -f 500 /lib/modules
53692 + @-chmod -f 700 .
53693 + @echo ' grsec: protected kernel image paths'
53694 +endif
53695 diff -urNp linux-2.6.32.42/include/acpi/acpi_drivers.h linux-2.6.32.42/include/acpi/acpi_drivers.h
53696 --- linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
53697 +++ linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
53698 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
53699 Dock Station
53700 -------------------------------------------------------------------------- */
53701 struct acpi_dock_ops {
53702 - acpi_notify_handler handler;
53703 - acpi_notify_handler uevent;
53704 + const acpi_notify_handler handler;
53705 + const acpi_notify_handler uevent;
53706 };
53707
53708 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
53709 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
53710 extern int register_dock_notifier(struct notifier_block *nb);
53711 extern void unregister_dock_notifier(struct notifier_block *nb);
53712 extern int register_hotplug_dock_device(acpi_handle handle,
53713 - struct acpi_dock_ops *ops,
53714 + const struct acpi_dock_ops *ops,
53715 void *context);
53716 extern void unregister_hotplug_dock_device(acpi_handle handle);
53717 #else
53718 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
53719 {
53720 }
53721 static inline int register_hotplug_dock_device(acpi_handle handle,
53722 - struct acpi_dock_ops *ops,
53723 + const struct acpi_dock_ops *ops,
53724 void *context)
53725 {
53726 return -ENODEV;
53727 diff -urNp linux-2.6.32.42/include/asm-generic/atomic-long.h linux-2.6.32.42/include/asm-generic/atomic-long.h
53728 --- linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
53729 +++ linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
53730 @@ -22,6 +22,12 @@
53731
53732 typedef atomic64_t atomic_long_t;
53733
53734 +#ifdef CONFIG_PAX_REFCOUNT
53735 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53736 +#else
53737 +typedef atomic64_t atomic_long_unchecked_t;
53738 +#endif
53739 +
53740 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53741
53742 static inline long atomic_long_read(atomic_long_t *l)
53743 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53744 return (long)atomic64_read(v);
53745 }
53746
53747 +#ifdef CONFIG_PAX_REFCOUNT
53748 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53749 +{
53750 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53751 +
53752 + return (long)atomic64_read_unchecked(v);
53753 +}
53754 +#endif
53755 +
53756 static inline void atomic_long_set(atomic_long_t *l, long i)
53757 {
53758 atomic64_t *v = (atomic64_t *)l;
53759 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53760 atomic64_set(v, i);
53761 }
53762
53763 +#ifdef CONFIG_PAX_REFCOUNT
53764 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53765 +{
53766 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53767 +
53768 + atomic64_set_unchecked(v, i);
53769 +}
53770 +#endif
53771 +
53772 static inline void atomic_long_inc(atomic_long_t *l)
53773 {
53774 atomic64_t *v = (atomic64_t *)l;
53775 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53776 atomic64_inc(v);
53777 }
53778
53779 +#ifdef CONFIG_PAX_REFCOUNT
53780 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53781 +{
53782 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53783 +
53784 + atomic64_inc_unchecked(v);
53785 +}
53786 +#endif
53787 +
53788 static inline void atomic_long_dec(atomic_long_t *l)
53789 {
53790 atomic64_t *v = (atomic64_t *)l;
53791 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53792 atomic64_dec(v);
53793 }
53794
53795 +#ifdef CONFIG_PAX_REFCOUNT
53796 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53797 +{
53798 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53799 +
53800 + atomic64_dec_unchecked(v);
53801 +}
53802 +#endif
53803 +
53804 static inline void atomic_long_add(long i, atomic_long_t *l)
53805 {
53806 atomic64_t *v = (atomic64_t *)l;
53807 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53808 atomic64_add(i, v);
53809 }
53810
53811 +#ifdef CONFIG_PAX_REFCOUNT
53812 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53813 +{
53814 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53815 +
53816 + atomic64_add_unchecked(i, v);
53817 +}
53818 +#endif
53819 +
53820 static inline void atomic_long_sub(long i, atomic_long_t *l)
53821 {
53822 atomic64_t *v = (atomic64_t *)l;
53823 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
53824 return (long)atomic64_inc_return(v);
53825 }
53826
53827 +#ifdef CONFIG_PAX_REFCOUNT
53828 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53829 +{
53830 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53831 +
53832 + return (long)atomic64_inc_return_unchecked(v);
53833 +}
53834 +#endif
53835 +
53836 static inline long atomic_long_dec_return(atomic_long_t *l)
53837 {
53838 atomic64_t *v = (atomic64_t *)l;
53839 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
53840
53841 typedef atomic_t atomic_long_t;
53842
53843 +#ifdef CONFIG_PAX_REFCOUNT
53844 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53845 +#else
53846 +typedef atomic_t atomic_long_unchecked_t;
53847 +#endif
53848 +
53849 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53850 static inline long atomic_long_read(atomic_long_t *l)
53851 {
53852 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
53853 return (long)atomic_read(v);
53854 }
53855
53856 +#ifdef CONFIG_PAX_REFCOUNT
53857 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53858 +{
53859 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53860 +
53861 + return (long)atomic_read_unchecked(v);
53862 +}
53863 +#endif
53864 +
53865 static inline void atomic_long_set(atomic_long_t *l, long i)
53866 {
53867 atomic_t *v = (atomic_t *)l;
53868 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
53869 atomic_set(v, i);
53870 }
53871
53872 +#ifdef CONFIG_PAX_REFCOUNT
53873 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53874 +{
53875 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53876 +
53877 + atomic_set_unchecked(v, i);
53878 +}
53879 +#endif
53880 +
53881 static inline void atomic_long_inc(atomic_long_t *l)
53882 {
53883 atomic_t *v = (atomic_t *)l;
53884 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
53885 atomic_inc(v);
53886 }
53887
53888 +#ifdef CONFIG_PAX_REFCOUNT
53889 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53890 +{
53891 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53892 +
53893 + atomic_inc_unchecked(v);
53894 +}
53895 +#endif
53896 +
53897 static inline void atomic_long_dec(atomic_long_t *l)
53898 {
53899 atomic_t *v = (atomic_t *)l;
53900 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
53901 atomic_dec(v);
53902 }
53903
53904 +#ifdef CONFIG_PAX_REFCOUNT
53905 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53906 +{
53907 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53908 +
53909 + atomic_dec_unchecked(v);
53910 +}
53911 +#endif
53912 +
53913 static inline void atomic_long_add(long i, atomic_long_t *l)
53914 {
53915 atomic_t *v = (atomic_t *)l;
53916 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
53917 atomic_add(i, v);
53918 }
53919
53920 +#ifdef CONFIG_PAX_REFCOUNT
53921 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53922 +{
53923 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53924 +
53925 + atomic_add_unchecked(i, v);
53926 +}
53927 +#endif
53928 +
53929 static inline void atomic_long_sub(long i, atomic_long_t *l)
53930 {
53931 atomic_t *v = (atomic_t *)l;
53932 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
53933 return (long)atomic_inc_return(v);
53934 }
53935
53936 +#ifdef CONFIG_PAX_REFCOUNT
53937 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53938 +{
53939 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53940 +
53941 + return (long)atomic_inc_return_unchecked(v);
53942 +}
53943 +#endif
53944 +
53945 static inline long atomic_long_dec_return(atomic_long_t *l)
53946 {
53947 atomic_t *v = (atomic_t *)l;
53948 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
53949
53950 #endif /* BITS_PER_LONG == 64 */
53951
53952 +#ifdef CONFIG_PAX_REFCOUNT
53953 +static inline void pax_refcount_needs_these_functions(void)
53954 +{
53955 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
53956 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53957 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53958 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53959 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53960 + atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53961 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53962 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53963 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53964 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53965 + atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53966 +
53967 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53968 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53969 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53970 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53971 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53972 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53973 +}
53974 +#else
53975 +#define atomic_read_unchecked(v) atomic_read(v)
53976 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53977 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53978 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53979 +#define atomic_inc_unchecked(v) atomic_inc(v)
53980 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53981 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53982 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53983 +#define atomic_dec_unchecked(v) atomic_dec(v)
53984 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53985 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53986 +
53987 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53988 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53989 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53990 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53991 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53992 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53993 +#endif
53994 +
53995 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
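The *_unchecked types and operations added in the hunk above exist so that counters which may legitimately wrap (statistics, allocation totals) can opt out of PAX_REFCOUNT overflow detection, while ordinary reference counts keep it, and so that everything collapses to the plain atomics when the option is off. The sketch below is a userspace analogue of that fallback pattern, not kernel code; the my_* names are invented for illustration and the checked variant here does not actually add overflow detection.

    #include <stdio.h>

    typedef struct { long counter; } my_atomic_long_t;

    static inline void my_atomic_long_inc(my_atomic_long_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
    }

    #ifdef CONFIG_PAX_REFCOUNT
    /* distinct type: these counters are exempt from the overflow
     * detection a hardened build would add to the checked ops above */
    typedef struct { long counter; } my_atomic_long_unchecked_t;

    static inline void my_atomic_long_inc_unchecked(my_atomic_long_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
    }
    #else
    /* hardening off: the unchecked flavour collapses to the plain one,
     * mirroring the #else branch at the end of the hunk above */
    typedef my_atomic_long_t my_atomic_long_unchecked_t;
    #define my_atomic_long_inc_unchecked(v) my_atomic_long_inc(v)
    #endif

    int main(void)
    {
        my_atomic_long_unchecked_t dropped = { 0 };   /* wrap-tolerant statistic */

        my_atomic_long_inc_unchecked(&dropped);
        printf("dropped = %ld\n", dropped.counter);
        return 0;
    }

Call sites annotated this way (for example the "dropped" counter in blk_trace above) compile identically whether or not the hardening is configured in.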
53996 diff -urNp linux-2.6.32.42/include/asm-generic/cache.h linux-2.6.32.42/include/asm-generic/cache.h
53997 --- linux-2.6.32.42/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
53998 +++ linux-2.6.32.42/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
53999 @@ -6,7 +6,7 @@
54000 * cache lines need to provide their own cache.h.
54001 */
54002
54003 -#define L1_CACHE_SHIFT 5
54004 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54005 +#define L1_CACHE_SHIFT 5U
54006 +#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
54007
54008 #endif /* __ASM_GENERIC_CACHE_H */
54009 diff -urNp linux-2.6.32.42/include/asm-generic/dma-mapping-common.h linux-2.6.32.42/include/asm-generic/dma-mapping-common.h
54010 --- linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
54011 +++ linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
54012 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
54013 enum dma_data_direction dir,
54014 struct dma_attrs *attrs)
54015 {
54016 - struct dma_map_ops *ops = get_dma_ops(dev);
54017 + const struct dma_map_ops *ops = get_dma_ops(dev);
54018 dma_addr_t addr;
54019
54020 kmemcheck_mark_initialized(ptr, size);
54021 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
54022 enum dma_data_direction dir,
54023 struct dma_attrs *attrs)
54024 {
54025 - struct dma_map_ops *ops = get_dma_ops(dev);
54026 + const struct dma_map_ops *ops = get_dma_ops(dev);
54027
54028 BUG_ON(!valid_dma_direction(dir));
54029 if (ops->unmap_page)
54030 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
54031 int nents, enum dma_data_direction dir,
54032 struct dma_attrs *attrs)
54033 {
54034 - struct dma_map_ops *ops = get_dma_ops(dev);
54035 + const struct dma_map_ops *ops = get_dma_ops(dev);
54036 int i, ents;
54037 struct scatterlist *s;
54038
54039 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
54040 int nents, enum dma_data_direction dir,
54041 struct dma_attrs *attrs)
54042 {
54043 - struct dma_map_ops *ops = get_dma_ops(dev);
54044 + const struct dma_map_ops *ops = get_dma_ops(dev);
54045
54046 BUG_ON(!valid_dma_direction(dir));
54047 debug_dma_unmap_sg(dev, sg, nents, dir);
54048 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
54049 size_t offset, size_t size,
54050 enum dma_data_direction dir)
54051 {
54052 - struct dma_map_ops *ops = get_dma_ops(dev);
54053 + const struct dma_map_ops *ops = get_dma_ops(dev);
54054 dma_addr_t addr;
54055
54056 kmemcheck_mark_initialized(page_address(page) + offset, size);
54057 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
54058 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
54059 size_t size, enum dma_data_direction dir)
54060 {
54061 - struct dma_map_ops *ops = get_dma_ops(dev);
54062 + const struct dma_map_ops *ops = get_dma_ops(dev);
54063
54064 BUG_ON(!valid_dma_direction(dir));
54065 if (ops->unmap_page)
54066 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
54067 size_t size,
54068 enum dma_data_direction dir)
54069 {
54070 - struct dma_map_ops *ops = get_dma_ops(dev);
54071 + const struct dma_map_ops *ops = get_dma_ops(dev);
54072
54073 BUG_ON(!valid_dma_direction(dir));
54074 if (ops->sync_single_for_cpu)
54075 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
54076 dma_addr_t addr, size_t size,
54077 enum dma_data_direction dir)
54078 {
54079 - struct dma_map_ops *ops = get_dma_ops(dev);
54080 + const struct dma_map_ops *ops = get_dma_ops(dev);
54081
54082 BUG_ON(!valid_dma_direction(dir));
54083 if (ops->sync_single_for_device)
54084 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
54085 size_t size,
54086 enum dma_data_direction dir)
54087 {
54088 - struct dma_map_ops *ops = get_dma_ops(dev);
54089 + const struct dma_map_ops *ops = get_dma_ops(dev);
54090
54091 BUG_ON(!valid_dma_direction(dir));
54092 if (ops->sync_single_range_for_cpu) {
54093 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
54094 size_t size,
54095 enum dma_data_direction dir)
54096 {
54097 - struct dma_map_ops *ops = get_dma_ops(dev);
54098 + const struct dma_map_ops *ops = get_dma_ops(dev);
54099
54100 BUG_ON(!valid_dma_direction(dir));
54101 if (ops->sync_single_range_for_device) {
54102 @@ -155,7 +155,7 @@ static inline void
54103 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
54104 int nelems, enum dma_data_direction dir)
54105 {
54106 - struct dma_map_ops *ops = get_dma_ops(dev);
54107 + const struct dma_map_ops *ops = get_dma_ops(dev);
54108
54109 BUG_ON(!valid_dma_direction(dir));
54110 if (ops->sync_sg_for_cpu)
54111 @@ -167,7 +167,7 @@ static inline void
54112 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
54113 int nelems, enum dma_data_direction dir)
54114 {
54115 - struct dma_map_ops *ops = get_dma_ops(dev);
54116 + const struct dma_map_ops *ops = get_dma_ops(dev);
54117
54118 BUG_ON(!valid_dma_direction(dir));
54119 if (ops->sync_sg_for_device)
54120 diff -urNp linux-2.6.32.42/include/asm-generic/futex.h linux-2.6.32.42/include/asm-generic/futex.h
54121 --- linux-2.6.32.42/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
54122 +++ linux-2.6.32.42/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
54123 @@ -6,7 +6,7 @@
54124 #include <asm/errno.h>
54125
54126 static inline int
54127 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
54128 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
54129 {
54130 int op = (encoded_op >> 28) & 7;
54131 int cmp = (encoded_op >> 24) & 15;
54132 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
54133 }
54134
54135 static inline int
54136 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
54137 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
54138 {
54139 return -ENOSYS;
54140 }
54141 diff -urNp linux-2.6.32.42/include/asm-generic/int-l64.h linux-2.6.32.42/include/asm-generic/int-l64.h
54142 --- linux-2.6.32.42/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
54143 +++ linux-2.6.32.42/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
54144 @@ -46,6 +46,8 @@ typedef unsigned int u32;
54145 typedef signed long s64;
54146 typedef unsigned long u64;
54147
54148 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54149 +
54150 #define S8_C(x) x
54151 #define U8_C(x) x ## U
54152 #define S16_C(x) x
54153 diff -urNp linux-2.6.32.42/include/asm-generic/int-ll64.h linux-2.6.32.42/include/asm-generic/int-ll64.h
54154 --- linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
54155 +++ linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
54156 @@ -51,6 +51,8 @@ typedef unsigned int u32;
54157 typedef signed long long s64;
54158 typedef unsigned long long u64;
54159
54160 +typedef unsigned long long intoverflow_t;
54161 +
54162 #define S8_C(x) x
54163 #define U8_C(x) x ## U
54164 #define S16_C(x) x
54165 diff -urNp linux-2.6.32.42/include/asm-generic/kmap_types.h linux-2.6.32.42/include/asm-generic/kmap_types.h
54166 --- linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
54167 +++ linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
54168 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
54169 KMAP_D(16) KM_IRQ_PTE,
54170 KMAP_D(17) KM_NMI,
54171 KMAP_D(18) KM_NMI_PTE,
54172 -KMAP_D(19) KM_TYPE_NR
54173 +KMAP_D(19) KM_CLEARPAGE,
54174 +KMAP_D(20) KM_TYPE_NR
54175 };
54176
54177 #undef KMAP_D
54178 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable.h linux-2.6.32.42/include/asm-generic/pgtable.h
54179 --- linux-2.6.32.42/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
54180 +++ linux-2.6.32.42/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
54181 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
54182 unsigned long size);
54183 #endif
54184
54185 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54186 +static inline unsigned long pax_open_kernel(void) { return 0; }
54187 +#endif
54188 +
54189 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54190 +static inline unsigned long pax_close_kernel(void) { return 0; }
54191 +#endif
54192 +
54193 #endif /* !__ASSEMBLY__ */
54194
54195 #endif /* _ASM_GENERIC_PGTABLE_H */
54196 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h
54197 --- linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
54198 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
54199 @@ -1,14 +1,19 @@
54200 #ifndef _PGTABLE_NOPMD_H
54201 #define _PGTABLE_NOPMD_H
54202
54203 -#ifndef __ASSEMBLY__
54204 -
54205 #include <asm-generic/pgtable-nopud.h>
54206
54207 -struct mm_struct;
54208 -
54209 #define __PAGETABLE_PMD_FOLDED
54210
54211 +#define PMD_SHIFT PUD_SHIFT
54212 +#define PTRS_PER_PMD 1
54213 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54214 +#define PMD_MASK (~(PMD_SIZE-1))
54215 +
54216 +#ifndef __ASSEMBLY__
54217 +
54218 +struct mm_struct;
54219 +
54220 /*
54221 * Having the pmd type consist of a pud gets the size right, and allows
54222 * us to conceptually access the pud entry that this pmd is folded into
54223 @@ -16,11 +21,6 @@ struct mm_struct;
54224 */
54225 typedef struct { pud_t pud; } pmd_t;
54226
54227 -#define PMD_SHIFT PUD_SHIFT
54228 -#define PTRS_PER_PMD 1
54229 -#define PMD_SIZE (1UL << PMD_SHIFT)
54230 -#define PMD_MASK (~(PMD_SIZE-1))
54231 -
54232 /*
54233 * The "pud_xxx()" functions here are trivial for a folded two-level
54234 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54235 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopud.h linux-2.6.32.42/include/asm-generic/pgtable-nopud.h
54236 --- linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
54237 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
54238 @@ -1,10 +1,15 @@
54239 #ifndef _PGTABLE_NOPUD_H
54240 #define _PGTABLE_NOPUD_H
54241
54242 -#ifndef __ASSEMBLY__
54243 -
54244 #define __PAGETABLE_PUD_FOLDED
54245
54246 +#define PUD_SHIFT PGDIR_SHIFT
54247 +#define PTRS_PER_PUD 1
54248 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54249 +#define PUD_MASK (~(PUD_SIZE-1))
54250 +
54251 +#ifndef __ASSEMBLY__
54252 +
54253 /*
54254 * Having the pud type consist of a pgd gets the size right, and allows
54255 * us to conceptually access the pgd entry that this pud is folded into
54256 @@ -12,11 +17,6 @@
54257 */
54258 typedef struct { pgd_t pgd; } pud_t;
54259
54260 -#define PUD_SHIFT PGDIR_SHIFT
54261 -#define PTRS_PER_PUD 1
54262 -#define PUD_SIZE (1UL << PUD_SHIFT)
54263 -#define PUD_MASK (~(PUD_SIZE-1))
54264 -
54265 /*
54266 * The "pgd_xxx()" functions here are trivial for a folded two-level
54267 * setup: the pud is never bad, and a pud always exists (as it's folded
54268 diff -urNp linux-2.6.32.42/include/asm-generic/vmlinux.lds.h linux-2.6.32.42/include/asm-generic/vmlinux.lds.h
54269 --- linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54270 +++ linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54271 @@ -199,6 +199,7 @@
54272 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54273 VMLINUX_SYMBOL(__start_rodata) = .; \
54274 *(.rodata) *(.rodata.*) \
54275 + *(.data.read_only) \
54276 *(__vermagic) /* Kernel version magic */ \
54277 *(__markers_strings) /* Markers: strings */ \
54278 *(__tracepoints_strings)/* Tracepoints: strings */ \
54279 @@ -656,22 +657,24 @@
54280 * section in the linker script will go there too. @phdr should have
54281 * a leading colon.
54282 *
54283 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54284 + * Note that this macros defines per_cpu_load as an absolute symbol.
54285 * If there is no need to put the percpu section at a predetermined
54286 * address, use PERCPU().
54287 */
54288 #define PERCPU_VADDR(vaddr, phdr) \
54289 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54290 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54291 + per_cpu_load = .; \
54292 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54293 - LOAD_OFFSET) { \
54294 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54295 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54296 *(.data.percpu.first) \
54297 - *(.data.percpu.page_aligned) \
54298 *(.data.percpu) \
54299 + . = ALIGN(PAGE_SIZE); \
54300 + *(.data.percpu.page_aligned) \
54301 *(.data.percpu.shared_aligned) \
54302 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54303 } phdr \
54304 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54305 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54306
54307 /**
54308 * PERCPU - define output section for percpu area, simple version
54309 diff -urNp linux-2.6.32.42/include/drm/drmP.h linux-2.6.32.42/include/drm/drmP.h
54310 --- linux-2.6.32.42/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54311 +++ linux-2.6.32.42/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54312 @@ -71,6 +71,7 @@
54313 #include <linux/workqueue.h>
54314 #include <linux/poll.h>
54315 #include <asm/pgalloc.h>
54316 +#include <asm/local.h>
54317 #include "drm.h"
54318
54319 #include <linux/idr.h>
54320 @@ -814,7 +815,7 @@ struct drm_driver {
54321 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54322
54323 /* Driver private ops for this object */
54324 - struct vm_operations_struct *gem_vm_ops;
54325 + const struct vm_operations_struct *gem_vm_ops;
54326
54327 int major;
54328 int minor;
54329 @@ -917,7 +918,7 @@ struct drm_device {
54330
54331 /** \name Usage Counters */
54332 /*@{ */
54333 - int open_count; /**< Outstanding files open */
54334 + local_t open_count; /**< Outstanding files open */
54335 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54336 atomic_t vma_count; /**< Outstanding vma areas open */
54337 int buf_use; /**< Buffers in use -- cannot alloc */
54338 @@ -928,7 +929,7 @@ struct drm_device {
54339 /*@{ */
54340 unsigned long counters;
54341 enum drm_stat_type types[15];
54342 - atomic_t counts[15];
54343 + atomic_unchecked_t counts[15];
54344 /*@} */
54345
54346 struct list_head filelist;
54347 @@ -1016,7 +1017,7 @@ struct drm_device {
54348 struct pci_controller *hose;
54349 #endif
54350 struct drm_sg_mem *sg; /**< Scatter gather memory */
54351 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
54352 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
54353 void *dev_private; /**< device private data */
54354 void *mm_private;
54355 struct address_space *dev_mapping;
54356 @@ -1042,11 +1043,11 @@ struct drm_device {
54357 spinlock_t object_name_lock;
54358 struct idr object_name_idr;
54359 atomic_t object_count;
54360 - atomic_t object_memory;
54361 + atomic_unchecked_t object_memory;
54362 atomic_t pin_count;
54363 - atomic_t pin_memory;
54364 + atomic_unchecked_t pin_memory;
54365 atomic_t gtt_count;
54366 - atomic_t gtt_memory;
54367 + atomic_unchecked_t gtt_memory;
54368 uint32_t gtt_total;
54369 uint32_t invalidate_domains; /* domains pending invalidation */
54370 uint32_t flush_domains; /* domains pending flush */
54371 diff -urNp linux-2.6.32.42/include/linux/a.out.h linux-2.6.32.42/include/linux/a.out.h
54372 --- linux-2.6.32.42/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54373 +++ linux-2.6.32.42/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54374 @@ -39,6 +39,14 @@ enum machine_type {
54375 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54376 };
54377
54378 +/* Constants for the N_FLAGS field */
54379 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54380 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54381 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54382 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54383 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54384 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54385 +
54386 #if !defined (N_MAGIC)
54387 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54388 #endif
54389 diff -urNp linux-2.6.32.42/include/linux/atmdev.h linux-2.6.32.42/include/linux/atmdev.h
54390 --- linux-2.6.32.42/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54391 +++ linux-2.6.32.42/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54392 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54393 #endif
54394
54395 struct k_atm_aal_stats {
54396 -#define __HANDLE_ITEM(i) atomic_t i
54397 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54398 __AAL_STAT_ITEMS
54399 #undef __HANDLE_ITEM
54400 };
54401 diff -urNp linux-2.6.32.42/include/linux/backlight.h linux-2.6.32.42/include/linux/backlight.h
54402 --- linux-2.6.32.42/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54403 +++ linux-2.6.32.42/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54404 @@ -36,18 +36,18 @@ struct backlight_device;
54405 struct fb_info;
54406
54407 struct backlight_ops {
54408 - unsigned int options;
54409 + const unsigned int options;
54410
54411 #define BL_CORE_SUSPENDRESUME (1 << 0)
54412
54413 /* Notify the backlight driver some property has changed */
54414 - int (*update_status)(struct backlight_device *);
54415 + int (* const update_status)(struct backlight_device *);
54416 /* Return the current backlight brightness (accounting for power,
54417 fb_blank etc.) */
54418 - int (*get_brightness)(struct backlight_device *);
54419 + int (* const get_brightness)(struct backlight_device *);
54420 /* Check if given framebuffer device is the one bound to this backlight;
54421 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54422 - int (*check_fb)(struct fb_info *);
54423 + int (* const check_fb)(struct fb_info *);
54424 };
54425
54426 /* This structure defines all the properties of a backlight */
54427 @@ -86,7 +86,7 @@ struct backlight_device {
54428 registered this device has been unloaded, and if class_get_devdata()
54429 points to something in the body of that driver, it is also invalid. */
54430 struct mutex ops_lock;
54431 - struct backlight_ops *ops;
54432 + const struct backlight_ops *ops;
54433
54434 /* The framebuffer notifier block */
54435 struct notifier_block fb_notif;
54436 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54437 }
54438
54439 extern struct backlight_device *backlight_device_register(const char *name,
54440 - struct device *dev, void *devdata, struct backlight_ops *ops);
54441 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54442 extern void backlight_device_unregister(struct backlight_device *bd);
54443 extern void backlight_force_update(struct backlight_device *bd,
54444 enum backlight_update_reason reason);
54445 diff -urNp linux-2.6.32.42/include/linux/binfmts.h linux-2.6.32.42/include/linux/binfmts.h
54446 --- linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54447 +++ linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54448 @@ -83,6 +83,7 @@ struct linux_binfmt {
54449 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54450 int (*load_shlib)(struct file *);
54451 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54452 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54453 unsigned long min_coredump; /* minimal dump size */
54454 int hasvdso;
54455 };
54456 diff -urNp linux-2.6.32.42/include/linux/blkdev.h linux-2.6.32.42/include/linux/blkdev.h
54457 --- linux-2.6.32.42/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54458 +++ linux-2.6.32.42/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54459 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54460 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54461
54462 struct block_device_operations {
54463 - int (*open) (struct block_device *, fmode_t);
54464 - int (*release) (struct gendisk *, fmode_t);
54465 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54466 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54467 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54468 - int (*direct_access) (struct block_device *, sector_t,
54469 + int (* const open) (struct block_device *, fmode_t);
54470 + int (* const release) (struct gendisk *, fmode_t);
54471 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54472 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54473 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54474 + int (* const direct_access) (struct block_device *, sector_t,
54475 void **, unsigned long *);
54476 - int (*media_changed) (struct gendisk *);
54477 - unsigned long long (*set_capacity) (struct gendisk *,
54478 + int (* const media_changed) (struct gendisk *);
54479 + unsigned long long (* const set_capacity) (struct gendisk *,
54480 unsigned long long);
54481 - int (*revalidate_disk) (struct gendisk *);
54482 - int (*getgeo)(struct block_device *, struct hd_geometry *);
54483 - struct module *owner;
54484 + int (* const revalidate_disk) (struct gendisk *);
54485 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
54486 + struct module * const owner;
54487 };
54488
54489 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54490 diff -urNp linux-2.6.32.42/include/linux/blktrace_api.h linux-2.6.32.42/include/linux/blktrace_api.h
54491 --- linux-2.6.32.42/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54492 +++ linux-2.6.32.42/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54493 @@ -160,7 +160,7 @@ struct blk_trace {
54494 struct dentry *dir;
54495 struct dentry *dropped_file;
54496 struct dentry *msg_file;
54497 - atomic_t dropped;
54498 + atomic_unchecked_t dropped;
54499 };
54500
54501 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54502 diff -urNp linux-2.6.32.42/include/linux/byteorder/little_endian.h linux-2.6.32.42/include/linux/byteorder/little_endian.h
54503 --- linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54504 +++ linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54505 @@ -42,51 +42,51 @@
54506
54507 static inline __le64 __cpu_to_le64p(const __u64 *p)
54508 {
54509 - return (__force __le64)*p;
54510 + return (__force const __le64)*p;
54511 }
54512 static inline __u64 __le64_to_cpup(const __le64 *p)
54513 {
54514 - return (__force __u64)*p;
54515 + return (__force const __u64)*p;
54516 }
54517 static inline __le32 __cpu_to_le32p(const __u32 *p)
54518 {
54519 - return (__force __le32)*p;
54520 + return (__force const __le32)*p;
54521 }
54522 static inline __u32 __le32_to_cpup(const __le32 *p)
54523 {
54524 - return (__force __u32)*p;
54525 + return (__force const __u32)*p;
54526 }
54527 static inline __le16 __cpu_to_le16p(const __u16 *p)
54528 {
54529 - return (__force __le16)*p;
54530 + return (__force const __le16)*p;
54531 }
54532 static inline __u16 __le16_to_cpup(const __le16 *p)
54533 {
54534 - return (__force __u16)*p;
54535 + return (__force const __u16)*p;
54536 }
54537 static inline __be64 __cpu_to_be64p(const __u64 *p)
54538 {
54539 - return (__force __be64)__swab64p(p);
54540 + return (__force const __be64)__swab64p(p);
54541 }
54542 static inline __u64 __be64_to_cpup(const __be64 *p)
54543 {
54544 - return __swab64p((__u64 *)p);
54545 + return __swab64p((const __u64 *)p);
54546 }
54547 static inline __be32 __cpu_to_be32p(const __u32 *p)
54548 {
54549 - return (__force __be32)__swab32p(p);
54550 + return (__force const __be32)__swab32p(p);
54551 }
54552 static inline __u32 __be32_to_cpup(const __be32 *p)
54553 {
54554 - return __swab32p((__u32 *)p);
54555 + return __swab32p((const __u32 *)p);
54556 }
54557 static inline __be16 __cpu_to_be16p(const __u16 *p)
54558 {
54559 - return (__force __be16)__swab16p(p);
54560 + return (__force const __be16)__swab16p(p);
54561 }
54562 static inline __u16 __be16_to_cpup(const __be16 *p)
54563 {
54564 - return __swab16p((__u16 *)p);
54565 + return __swab16p((const __u16 *)p);
54566 }
54567 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54568 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54569 diff -urNp linux-2.6.32.42/include/linux/cache.h linux-2.6.32.42/include/linux/cache.h
54570 --- linux-2.6.32.42/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54571 +++ linux-2.6.32.42/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54572 @@ -16,6 +16,10 @@
54573 #define __read_mostly
54574 #endif
54575
54576 +#ifndef __read_only
54577 +#define __read_only __read_mostly
54578 +#endif
54579 +
54580 #ifndef ____cacheline_aligned
54581 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54582 #endif
54583 diff -urNp linux-2.6.32.42/include/linux/capability.h linux-2.6.32.42/include/linux/capability.h
54584 --- linux-2.6.32.42/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54585 +++ linux-2.6.32.42/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54586 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54587 (security_real_capable_noaudit((t), (cap)) == 0)
54588
54589 extern int capable(int cap);
54590 +int capable_nolog(int cap);
54591
54592 /* audit system wants to get cap info from files as well */
54593 struct dentry;
54594 diff -urNp linux-2.6.32.42/include/linux/compiler-gcc4.h linux-2.6.32.42/include/linux/compiler-gcc4.h
54595 --- linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54596 +++ linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54597 @@ -36,4 +36,8 @@
54598 the kernel context */
54599 #define __cold __attribute__((__cold__))
54600
54601 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54602 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54603 +#define __bos0(ptr) __bos((ptr), 0)
54604 +#define __bos1(ptr) __bos((ptr), 1)
54605 #endif
54606 diff -urNp linux-2.6.32.42/include/linux/compiler.h linux-2.6.32.42/include/linux/compiler.h
54607 --- linux-2.6.32.42/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54608 +++ linux-2.6.32.42/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54609 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54610 #define __cold
54611 #endif
54612
54613 +#ifndef __alloc_size
54614 +#define __alloc_size
54615 +#endif
54616 +
54617 +#ifndef __bos
54618 +#define __bos
54619 +#endif
54620 +
54621 +#ifndef __bos0
54622 +#define __bos0
54623 +#endif
54624 +
54625 +#ifndef __bos1
54626 +#define __bos1
54627 +#endif
54628 +
54629 /* Simple shorthand for a section definition */
54630 #ifndef __section
54631 # define __section(S) __attribute__ ((__section__(#S)))
54632 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
54633 * use is to mediate communication between process-level code and irq/NMI
54634 * handlers, all running on the same CPU.
54635 */
54636 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54637 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54638 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54639
54640 #endif /* __LINUX_COMPILER_H */
54641 diff -urNp linux-2.6.32.42/include/linux/dcache.h linux-2.6.32.42/include/linux/dcache.h
54642 --- linux-2.6.32.42/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
54643 +++ linux-2.6.32.42/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
54644 @@ -119,6 +119,8 @@ struct dentry {
54645 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
54646 };
54647
54648 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
54649 +
54650 /*
54651 * dentry->d_lock spinlock nesting subclasses:
54652 *
54653 diff -urNp linux-2.6.32.42/include/linux/decompress/mm.h linux-2.6.32.42/include/linux/decompress/mm.h
54654 --- linux-2.6.32.42/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
54655 +++ linux-2.6.32.42/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
54656 @@ -78,7 +78,7 @@ static void free(void *where)
54657 * warnings when not needed (indeed large_malloc / large_free are not
54658 * needed by inflate */
54659
54660 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54661 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54662 #define free(a) kfree(a)
54663
54664 #define large_malloc(a) vmalloc(a)
54665 diff -urNp linux-2.6.32.42/include/linux/dma-mapping.h linux-2.6.32.42/include/linux/dma-mapping.h
54666 --- linux-2.6.32.42/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
54667 +++ linux-2.6.32.42/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
54668 @@ -16,50 +16,50 @@ enum dma_data_direction {
54669 };
54670
54671 struct dma_map_ops {
54672 - void* (*alloc_coherent)(struct device *dev, size_t size,
54673 + void* (* const alloc_coherent)(struct device *dev, size_t size,
54674 dma_addr_t *dma_handle, gfp_t gfp);
54675 - void (*free_coherent)(struct device *dev, size_t size,
54676 + void (* const free_coherent)(struct device *dev, size_t size,
54677 void *vaddr, dma_addr_t dma_handle);
54678 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
54679 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
54680 unsigned long offset, size_t size,
54681 enum dma_data_direction dir,
54682 struct dma_attrs *attrs);
54683 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
54684 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
54685 size_t size, enum dma_data_direction dir,
54686 struct dma_attrs *attrs);
54687 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
54688 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
54689 int nents, enum dma_data_direction dir,
54690 struct dma_attrs *attrs);
54691 - void (*unmap_sg)(struct device *dev,
54692 + void (* const unmap_sg)(struct device *dev,
54693 struct scatterlist *sg, int nents,
54694 enum dma_data_direction dir,
54695 struct dma_attrs *attrs);
54696 - void (*sync_single_for_cpu)(struct device *dev,
54697 + void (* const sync_single_for_cpu)(struct device *dev,
54698 dma_addr_t dma_handle, size_t size,
54699 enum dma_data_direction dir);
54700 - void (*sync_single_for_device)(struct device *dev,
54701 + void (* const sync_single_for_device)(struct device *dev,
54702 dma_addr_t dma_handle, size_t size,
54703 enum dma_data_direction dir);
54704 - void (*sync_single_range_for_cpu)(struct device *dev,
54705 + void (* const sync_single_range_for_cpu)(struct device *dev,
54706 dma_addr_t dma_handle,
54707 unsigned long offset,
54708 size_t size,
54709 enum dma_data_direction dir);
54710 - void (*sync_single_range_for_device)(struct device *dev,
54711 + void (* const sync_single_range_for_device)(struct device *dev,
54712 dma_addr_t dma_handle,
54713 unsigned long offset,
54714 size_t size,
54715 enum dma_data_direction dir);
54716 - void (*sync_sg_for_cpu)(struct device *dev,
54717 + void (* const sync_sg_for_cpu)(struct device *dev,
54718 struct scatterlist *sg, int nents,
54719 enum dma_data_direction dir);
54720 - void (*sync_sg_for_device)(struct device *dev,
54721 + void (* const sync_sg_for_device)(struct device *dev,
54722 struct scatterlist *sg, int nents,
54723 enum dma_data_direction dir);
54724 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
54725 - int (*dma_supported)(struct device *dev, u64 mask);
54726 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
54727 + int (* const dma_supported)(struct device *dev, u64 mask);
54728 int (*set_dma_mask)(struct device *dev, u64 mask);
54729 - int is_phys;
54730 + const int is_phys;
54731 };
54732
54733 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54734 diff -urNp linux-2.6.32.42/include/linux/dst.h linux-2.6.32.42/include/linux/dst.h
54735 --- linux-2.6.32.42/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
54736 +++ linux-2.6.32.42/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
54737 @@ -380,7 +380,7 @@ struct dst_node
54738 struct thread_pool *pool;
54739
54740 /* Transaction IDs live here */
54741 - atomic_long_t gen;
54742 + atomic_long_unchecked_t gen;
54743
54744 /*
54745 * How frequently and how many times transaction
54746 diff -urNp linux-2.6.32.42/include/linux/elf.h linux-2.6.32.42/include/linux/elf.h
54747 --- linux-2.6.32.42/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
54748 +++ linux-2.6.32.42/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
54749 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54750 #define PT_GNU_EH_FRAME 0x6474e550
54751
54752 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54753 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54754 +
54755 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54756 +
54757 +/* Constants for the e_flags field */
54758 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54759 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54760 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54761 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54762 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54763 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54764
54765 /* These constants define the different elf file types */
54766 #define ET_NONE 0
54767 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
54768 #define DT_DEBUG 21
54769 #define DT_TEXTREL 22
54770 #define DT_JMPREL 23
54771 +#define DT_FLAGS 30
54772 + #define DF_TEXTREL 0x00000004
54773 #define DT_ENCODING 32
54774 #define OLD_DT_LOOS 0x60000000
54775 #define DT_LOOS 0x6000000d
54776 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
54777 #define PF_W 0x2
54778 #define PF_X 0x1
54779
54780 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54781 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54782 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54783 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54784 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54785 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54786 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54787 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54788 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54789 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54790 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54791 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54792 +
54793 typedef struct elf32_phdr{
54794 Elf32_Word p_type;
54795 Elf32_Off p_offset;
54796 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
54797 #define EI_OSABI 7
54798 #define EI_PAD 8
54799
54800 +#define EI_PAX 14
54801 +
54802 #define ELFMAG0 0x7f /* EI_MAG */
54803 #define ELFMAG1 'E'
54804 #define ELFMAG2 'L'
54805 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
54806 #define elf_phdr elf32_phdr
54807 #define elf_note elf32_note
54808 #define elf_addr_t Elf32_Off
54809 +#define elf_dyn Elf32_Dyn
54810
54811 #else
54812
54813 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
54814 #define elf_phdr elf64_phdr
54815 #define elf_note elf64_note
54816 #define elf_addr_t Elf64_Off
54817 +#define elf_dyn Elf64_Dyn
54818
54819 #endif
54820
54821 diff -urNp linux-2.6.32.42/include/linux/fscache-cache.h linux-2.6.32.42/include/linux/fscache-cache.h
54822 --- linux-2.6.32.42/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
54823 +++ linux-2.6.32.42/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
54824 @@ -116,7 +116,7 @@ struct fscache_operation {
54825 #endif
54826 };
54827
54828 -extern atomic_t fscache_op_debug_id;
54829 +extern atomic_unchecked_t fscache_op_debug_id;
54830 extern const struct slow_work_ops fscache_op_slow_work_ops;
54831
54832 extern void fscache_enqueue_operation(struct fscache_operation *);
54833 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
54834 fscache_operation_release_t release)
54835 {
54836 atomic_set(&op->usage, 1);
54837 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54838 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54839 op->release = release;
54840 INIT_LIST_HEAD(&op->pend_link);
54841 fscache_set_op_state(op, "Init");
54842 diff -urNp linux-2.6.32.42/include/linux/fs.h linux-2.6.32.42/include/linux/fs.h
54843 --- linux-2.6.32.42/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
54844 +++ linux-2.6.32.42/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
54845 @@ -90,6 +90,11 @@ struct inodes_stat_t {
54846 /* Expect random access pattern */
54847 #define FMODE_RANDOM ((__force fmode_t)4096)
54848
54849 +/* Hack for grsec so as not to require read permission simply to execute
54850 + * a binary
54851 + */
54852 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54853 +
54854 /*
54855 * The below are the various read and write types that we support. Some of
54856 * them include behavioral modifiers that send information down to the
54857 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
54858 unsigned long, unsigned long);
54859
54860 struct address_space_operations {
54861 - int (*writepage)(struct page *page, struct writeback_control *wbc);
54862 - int (*readpage)(struct file *, struct page *);
54863 - void (*sync_page)(struct page *);
54864 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
54865 + int (* const readpage)(struct file *, struct page *);
54866 + void (* const sync_page)(struct page *);
54867
54868 /* Write back some dirty pages from this mapping. */
54869 - int (*writepages)(struct address_space *, struct writeback_control *);
54870 + int (* const writepages)(struct address_space *, struct writeback_control *);
54871
54872 /* Set a page dirty. Return true if this dirtied it */
54873 - int (*set_page_dirty)(struct page *page);
54874 + int (* const set_page_dirty)(struct page *page);
54875
54876 - int (*readpages)(struct file *filp, struct address_space *mapping,
54877 + int (* const readpages)(struct file *filp, struct address_space *mapping,
54878 struct list_head *pages, unsigned nr_pages);
54879
54880 - int (*write_begin)(struct file *, struct address_space *mapping,
54881 + int (* const write_begin)(struct file *, struct address_space *mapping,
54882 loff_t pos, unsigned len, unsigned flags,
54883 struct page **pagep, void **fsdata);
54884 - int (*write_end)(struct file *, struct address_space *mapping,
54885 + int (* const write_end)(struct file *, struct address_space *mapping,
54886 loff_t pos, unsigned len, unsigned copied,
54887 struct page *page, void *fsdata);
54888
54889 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
54890 - sector_t (*bmap)(struct address_space *, sector_t);
54891 - void (*invalidatepage) (struct page *, unsigned long);
54892 - int (*releasepage) (struct page *, gfp_t);
54893 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
54894 + sector_t (* const bmap)(struct address_space *, sector_t);
54895 + void (* const invalidatepage) (struct page *, unsigned long);
54896 + int (* const releasepage) (struct page *, gfp_t);
54897 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
54898 loff_t offset, unsigned long nr_segs);
54899 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
54900 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
54901 void **, unsigned long *);
54902 /* migrate the contents of a page to the specified target */
54903 - int (*migratepage) (struct address_space *,
54904 + int (* const migratepage) (struct address_space *,
54905 struct page *, struct page *);
54906 - int (*launder_page) (struct page *);
54907 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
54908 + int (* const launder_page) (struct page *);
54909 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
54910 unsigned long);
54911 - int (*error_remove_page)(struct address_space *, struct page *);
54912 + int (* const error_remove_page)(struct address_space *, struct page *);
54913 };
54914
54915 /*
54916 @@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
54917 typedef struct files_struct *fl_owner_t;
54918
54919 struct file_lock_operations {
54920 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54921 - void (*fl_release_private)(struct file_lock *);
54922 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54923 + void (* const fl_release_private)(struct file_lock *);
54924 };
54925
54926 struct lock_manager_operations {
54927 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
54928 - void (*fl_notify)(struct file_lock *); /* unblock callback */
54929 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
54930 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54931 - void (*fl_release_private)(struct file_lock *);
54932 - void (*fl_break)(struct file_lock *);
54933 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
54934 - int (*fl_change)(struct file_lock **, int);
54935 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
54936 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
54937 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
54938 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54939 + void (* const fl_release_private)(struct file_lock *);
54940 + void (* const fl_break)(struct file_lock *);
54941 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
54942 + int (* const fl_change)(struct file_lock **, int);
54943 };
54944
54945 struct lock_manager {
54946 @@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
54947 unsigned int fi_flags; /* Flags as passed from user */
54948 unsigned int fi_extents_mapped; /* Number of mapped extents */
54949 unsigned int fi_extents_max; /* Size of fiemap_extent array */
54950 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
54951 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
54952 * array */
54953 };
54954 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
54955 @@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
54956 unsigned long, loff_t *);
54957
54958 struct super_operations {
54959 - struct inode *(*alloc_inode)(struct super_block *sb);
54960 - void (*destroy_inode)(struct inode *);
54961 + struct inode *(* const alloc_inode)(struct super_block *sb);
54962 + void (* const destroy_inode)(struct inode *);
54963
54964 - void (*dirty_inode) (struct inode *);
54965 - int (*write_inode) (struct inode *, int);
54966 - void (*drop_inode) (struct inode *);
54967 - void (*delete_inode) (struct inode *);
54968 - void (*put_super) (struct super_block *);
54969 - void (*write_super) (struct super_block *);
54970 - int (*sync_fs)(struct super_block *sb, int wait);
54971 - int (*freeze_fs) (struct super_block *);
54972 - int (*unfreeze_fs) (struct super_block *);
54973 - int (*statfs) (struct dentry *, struct kstatfs *);
54974 - int (*remount_fs) (struct super_block *, int *, char *);
54975 - void (*clear_inode) (struct inode *);
54976 - void (*umount_begin) (struct super_block *);
54977 + void (* const dirty_inode) (struct inode *);
54978 + int (* const write_inode) (struct inode *, int);
54979 + void (* const drop_inode) (struct inode *);
54980 + void (* const delete_inode) (struct inode *);
54981 + void (* const put_super) (struct super_block *);
54982 + void (* const write_super) (struct super_block *);
54983 + int (* const sync_fs)(struct super_block *sb, int wait);
54984 + int (* const freeze_fs) (struct super_block *);
54985 + int (* const unfreeze_fs) (struct super_block *);
54986 + int (* const statfs) (struct dentry *, struct kstatfs *);
54987 + int (* const remount_fs) (struct super_block *, int *, char *);
54988 + void (* const clear_inode) (struct inode *);
54989 + void (* const umount_begin) (struct super_block *);
54990
54991 - int (*show_options)(struct seq_file *, struct vfsmount *);
54992 - int (*show_stats)(struct seq_file *, struct vfsmount *);
54993 + int (* const show_options)(struct seq_file *, struct vfsmount *);
54994 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
54995 #ifdef CONFIG_QUOTA
54996 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
54997 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54998 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
54999 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55000 #endif
55001 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55002 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55003 };
55004
55005 /*
55006 diff -urNp linux-2.6.32.42/include/linux/fs_struct.h linux-2.6.32.42/include/linux/fs_struct.h
55007 --- linux-2.6.32.42/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
55008 +++ linux-2.6.32.42/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
55009 @@ -4,7 +4,7 @@
55010 #include <linux/path.h>
55011
55012 struct fs_struct {
55013 - int users;
55014 + atomic_t users;
55015 rwlock_t lock;
55016 int umask;
55017 int in_exec;
55018 diff -urNp linux-2.6.32.42/include/linux/ftrace_event.h linux-2.6.32.42/include/linux/ftrace_event.h
55019 --- linux-2.6.32.42/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
55020 +++ linux-2.6.32.42/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
55021 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
55022 int filter_type);
55023 extern int trace_define_common_fields(struct ftrace_event_call *call);
55024
55025 -#define is_signed_type(type) (((type)(-1)) < 0)
55026 +#define is_signed_type(type) (((type)(-1)) < (type)1)
55027
55028 int trace_set_clr_event(const char *system, const char *event, int set);
55029
55030 diff -urNp linux-2.6.32.42/include/linux/genhd.h linux-2.6.32.42/include/linux/genhd.h
55031 --- linux-2.6.32.42/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
55032 +++ linux-2.6.32.42/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
55033 @@ -161,7 +161,7 @@ struct gendisk {
55034
55035 struct timer_rand_state *random;
55036
55037 - atomic_t sync_io; /* RAID */
55038 + atomic_unchecked_t sync_io; /* RAID */
55039 struct work_struct async_notify;
55040 #ifdef CONFIG_BLK_DEV_INTEGRITY
55041 struct blk_integrity *integrity;
55042 diff -urNp linux-2.6.32.42/include/linux/gracl.h linux-2.6.32.42/include/linux/gracl.h
55043 --- linux-2.6.32.42/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
55044 +++ linux-2.6.32.42/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
55045 @@ -0,0 +1,317 @@
55046 +#ifndef GR_ACL_H
55047 +#define GR_ACL_H
55048 +
55049 +#include <linux/grdefs.h>
55050 +#include <linux/resource.h>
55051 +#include <linux/capability.h>
55052 +#include <linux/dcache.h>
55053 +#include <asm/resource.h>
55054 +
55055 +/* Major status information */
55056 +
55057 +#define GR_VERSION "grsecurity 2.2.2"
55058 +#define GRSECURITY_VERSION 0x2202
55059 +
55060 +enum {
55061 + GR_SHUTDOWN = 0,
55062 + GR_ENABLE = 1,
55063 + GR_SPROLE = 2,
55064 + GR_RELOAD = 3,
55065 + GR_SEGVMOD = 4,
55066 + GR_STATUS = 5,
55067 + GR_UNSPROLE = 6,
55068 + GR_PASSSET = 7,
55069 + GR_SPROLEPAM = 8,
55070 +};
55071 +
55072 +/* Password setup definitions
55073 + * kernel/grhash.c */
55074 +enum {
55075 + GR_PW_LEN = 128,
55076 + GR_SALT_LEN = 16,
55077 + GR_SHA_LEN = 32,
55078 +};
55079 +
55080 +enum {
55081 + GR_SPROLE_LEN = 64,
55082 +};
55083 +
55084 +enum {
55085 + GR_NO_GLOB = 0,
55086 + GR_REG_GLOB,
55087 + GR_CREATE_GLOB
55088 +};
55089 +
55090 +#define GR_NLIMITS 32
55091 +
55092 +/* Begin Data Structures */
55093 +
55094 +struct sprole_pw {
55095 + unsigned char *rolename;
55096 + unsigned char salt[GR_SALT_LEN];
55097 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
55098 +};
55099 +
55100 +struct name_entry {
55101 + __u32 key;
55102 + ino_t inode;
55103 + dev_t device;
55104 + char *name;
55105 + __u16 len;
55106 + __u8 deleted;
55107 + struct name_entry *prev;
55108 + struct name_entry *next;
55109 +};
55110 +
55111 +struct inodev_entry {
55112 + struct name_entry *nentry;
55113 + struct inodev_entry *prev;
55114 + struct inodev_entry *next;
55115 +};
55116 +
55117 +struct acl_role_db {
55118 + struct acl_role_label **r_hash;
55119 + __u32 r_size;
55120 +};
55121 +
55122 +struct inodev_db {
55123 + struct inodev_entry **i_hash;
55124 + __u32 i_size;
55125 +};
55126 +
55127 +struct name_db {
55128 + struct name_entry **n_hash;
55129 + __u32 n_size;
55130 +};
55131 +
55132 +struct crash_uid {
55133 + uid_t uid;
55134 + unsigned long expires;
55135 +};
55136 +
55137 +struct gr_hash_struct {
55138 + void **table;
55139 + void **nametable;
55140 + void *first;
55141 + __u32 table_size;
55142 + __u32 used_size;
55143 + int type;
55144 +};
55145 +
55146 +/* Userspace Grsecurity ACL data structures */
55147 +
55148 +struct acl_subject_label {
55149 + char *filename;
55150 + ino_t inode;
55151 + dev_t device;
55152 + __u32 mode;
55153 + kernel_cap_t cap_mask;
55154 + kernel_cap_t cap_lower;
55155 + kernel_cap_t cap_invert_audit;
55156 +
55157 + struct rlimit res[GR_NLIMITS];
55158 + __u32 resmask;
55159 +
55160 + __u8 user_trans_type;
55161 + __u8 group_trans_type;
55162 + uid_t *user_transitions;
55163 + gid_t *group_transitions;
55164 + __u16 user_trans_num;
55165 + __u16 group_trans_num;
55166 +
55167 + __u32 sock_families[2];
55168 + __u32 ip_proto[8];
55169 + __u32 ip_type;
55170 + struct acl_ip_label **ips;
55171 + __u32 ip_num;
55172 + __u32 inaddr_any_override;
55173 +
55174 + __u32 crashes;
55175 + unsigned long expires;
55176 +
55177 + struct acl_subject_label *parent_subject;
55178 + struct gr_hash_struct *hash;
55179 + struct acl_subject_label *prev;
55180 + struct acl_subject_label *next;
55181 +
55182 + struct acl_object_label **obj_hash;
55183 + __u32 obj_hash_size;
55184 + __u16 pax_flags;
55185 +};
55186 +
55187 +struct role_allowed_ip {
55188 + __u32 addr;
55189 + __u32 netmask;
55190 +
55191 + struct role_allowed_ip *prev;
55192 + struct role_allowed_ip *next;
55193 +};
55194 +
55195 +struct role_transition {
55196 + char *rolename;
55197 +
55198 + struct role_transition *prev;
55199 + struct role_transition *next;
55200 +};
55201 +
55202 +struct acl_role_label {
55203 + char *rolename;
55204 + uid_t uidgid;
55205 + __u16 roletype;
55206 +
55207 + __u16 auth_attempts;
55208 + unsigned long expires;
55209 +
55210 + struct acl_subject_label *root_label;
55211 + struct gr_hash_struct *hash;
55212 +
55213 + struct acl_role_label *prev;
55214 + struct acl_role_label *next;
55215 +
55216 + struct role_transition *transitions;
55217 + struct role_allowed_ip *allowed_ips;
55218 + uid_t *domain_children;
55219 + __u16 domain_child_num;
55220 +
55221 + struct acl_subject_label **subj_hash;
55222 + __u32 subj_hash_size;
55223 +};
55224 +
55225 +struct user_acl_role_db {
55226 + struct acl_role_label **r_table;
55227 + __u32 num_pointers; /* Number of allocations to track */
55228 + __u32 num_roles; /* Number of roles */
55229 + __u32 num_domain_children; /* Number of domain children */
55230 + __u32 num_subjects; /* Number of subjects */
55231 + __u32 num_objects; /* Number of objects */
55232 +};
55233 +
55234 +struct acl_object_label {
55235 + char *filename;
55236 + ino_t inode;
55237 + dev_t device;
55238 + __u32 mode;
55239 +
55240 + struct acl_subject_label *nested;
55241 + struct acl_object_label *globbed;
55242 +
55243 + /* next two structures not used */
55244 +
55245 + struct acl_object_label *prev;
55246 + struct acl_object_label *next;
55247 +};
55248 +
55249 +struct acl_ip_label {
55250 + char *iface;
55251 + __u32 addr;
55252 + __u32 netmask;
55253 + __u16 low, high;
55254 + __u8 mode;
55255 + __u32 type;
55256 + __u32 proto[8];
55257 +
55258 + /* next two structures not used */
55259 +
55260 + struct acl_ip_label *prev;
55261 + struct acl_ip_label *next;
55262 +};
55263 +
55264 +struct gr_arg {
55265 + struct user_acl_role_db role_db;
55266 + unsigned char pw[GR_PW_LEN];
55267 + unsigned char salt[GR_SALT_LEN];
55268 + unsigned char sum[GR_SHA_LEN];
55269 + unsigned char sp_role[GR_SPROLE_LEN];
55270 + struct sprole_pw *sprole_pws;
55271 + dev_t segv_device;
55272 + ino_t segv_inode;
55273 + uid_t segv_uid;
55274 + __u16 num_sprole_pws;
55275 + __u16 mode;
55276 +};
55277 +
55278 +struct gr_arg_wrapper {
55279 + struct gr_arg *arg;
55280 + __u32 version;
55281 + __u32 size;
55282 +};
55283 +
55284 +struct subject_map {
55285 + struct acl_subject_label *user;
55286 + struct acl_subject_label *kernel;
55287 + struct subject_map *prev;
55288 + struct subject_map *next;
55289 +};
55290 +
55291 +struct acl_subj_map_db {
55292 + struct subject_map **s_hash;
55293 + __u32 s_size;
55294 +};
55295 +
55296 +/* End Data Structures Section */
55297 +
55298 +/* Hash functions generated by empirical testing by Brad Spengler
55299 + Makes good use of the low bits of the inode. Generally 0-1 times
55300 + in loop for successful match. 0-3 for unsuccessful match.
55301 + Shift/add algorithm with modulus of table size and an XOR*/
55302 +
55303 +static __inline__ unsigned int
55304 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55305 +{
55306 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55307 +}
55308 +
55309 + static __inline__ unsigned int
55310 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55311 +{
55312 + return ((const unsigned long)userp % sz);
55313 +}
55314 +
55315 +static __inline__ unsigned int
55316 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55317 +{
55318 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55319 +}
55320 +
55321 +static __inline__ unsigned int
55322 +nhash(const char *name, const __u16 len, const unsigned int sz)
55323 +{
55324 + return full_name_hash((const unsigned char *)name, len) % sz;
55325 +}
55326 +
55327 +#define FOR_EACH_ROLE_START(role) \
55328 + role = role_list; \
55329 + while (role) {
55330 +
55331 +#define FOR_EACH_ROLE_END(role) \
55332 + role = role->prev; \
55333 + }
55334 +
55335 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55336 + subj = NULL; \
55337 + iter = 0; \
55338 + while (iter < role->subj_hash_size) { \
55339 + if (subj == NULL) \
55340 + subj = role->subj_hash[iter]; \
55341 + if (subj == NULL) { \
55342 + iter++; \
55343 + continue; \
55344 + }
55345 +
55346 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55347 + subj = subj->next; \
55348 + if (subj == NULL) \
55349 + iter++; \
55350 + }
55351 +
55352 +
55353 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55354 + subj = role->hash->first; \
55355 + while (subj != NULL) {
55356 +
55357 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55358 + subj = subj->next; \
55359 + }
55360 +
55361 +#endif
55362 +
55363 diff -urNp linux-2.6.32.42/include/linux/gralloc.h linux-2.6.32.42/include/linux/gralloc.h
55364 --- linux-2.6.32.42/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55365 +++ linux-2.6.32.42/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55366 @@ -0,0 +1,9 @@
55367 +#ifndef __GRALLOC_H
55368 +#define __GRALLOC_H
55369 +
55370 +void acl_free_all(void);
55371 +int acl_alloc_stack_init(unsigned long size);
55372 +void *acl_alloc(unsigned long len);
55373 +void *acl_alloc_num(unsigned long num, unsigned long len);
55374 +
55375 +#endif
55376 diff -urNp linux-2.6.32.42/include/linux/grdefs.h linux-2.6.32.42/include/linux/grdefs.h
55377 --- linux-2.6.32.42/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55378 +++ linux-2.6.32.42/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55379 @@ -0,0 +1,140 @@
55380 +#ifndef GRDEFS_H
55381 +#define GRDEFS_H
55382 +
55383 +/* Begin grsecurity status declarations */
55384 +
55385 +enum {
55386 + GR_READY = 0x01,
55387 + GR_STATUS_INIT = 0x00 // disabled state
55388 +};
55389 +
55390 +/* Begin ACL declarations */
55391 +
55392 +/* Role flags */
55393 +
55394 +enum {
55395 + GR_ROLE_USER = 0x0001,
55396 + GR_ROLE_GROUP = 0x0002,
55397 + GR_ROLE_DEFAULT = 0x0004,
55398 + GR_ROLE_SPECIAL = 0x0008,
55399 + GR_ROLE_AUTH = 0x0010,
55400 + GR_ROLE_NOPW = 0x0020,
55401 + GR_ROLE_GOD = 0x0040,
55402 + GR_ROLE_LEARN = 0x0080,
55403 + GR_ROLE_TPE = 0x0100,
55404 + GR_ROLE_DOMAIN = 0x0200,
55405 + GR_ROLE_PAM = 0x0400,
55406 + GR_ROLE_PERSIST = 0x800
55407 +};
55408 +
55409 +/* ACL Subject and Object mode flags */
55410 +enum {
55411 + GR_DELETED = 0x80000000
55412 +};
55413 +
55414 +/* ACL Object-only mode flags */
55415 +enum {
55416 + GR_READ = 0x00000001,
55417 + GR_APPEND = 0x00000002,
55418 + GR_WRITE = 0x00000004,
55419 + GR_EXEC = 0x00000008,
55420 + GR_FIND = 0x00000010,
55421 + GR_INHERIT = 0x00000020,
55422 + GR_SETID = 0x00000040,
55423 + GR_CREATE = 0x00000080,
55424 + GR_DELETE = 0x00000100,
55425 + GR_LINK = 0x00000200,
55426 + GR_AUDIT_READ = 0x00000400,
55427 + GR_AUDIT_APPEND = 0x00000800,
55428 + GR_AUDIT_WRITE = 0x00001000,
55429 + GR_AUDIT_EXEC = 0x00002000,
55430 + GR_AUDIT_FIND = 0x00004000,
55431 + GR_AUDIT_INHERIT= 0x00008000,
55432 + GR_AUDIT_SETID = 0x00010000,
55433 + GR_AUDIT_CREATE = 0x00020000,
55434 + GR_AUDIT_DELETE = 0x00040000,
55435 + GR_AUDIT_LINK = 0x00080000,
55436 + GR_PTRACERD = 0x00100000,
55437 + GR_NOPTRACE = 0x00200000,
55438 + GR_SUPPRESS = 0x00400000,
55439 + GR_NOLEARN = 0x00800000,
55440 + GR_INIT_TRANSFER= 0x01000000
55441 +};
55442 +
55443 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55444 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55445 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55446 +
55447 +/* ACL subject-only mode flags */
55448 +enum {
55449 + GR_KILL = 0x00000001,
55450 + GR_VIEW = 0x00000002,
55451 + GR_PROTECTED = 0x00000004,
55452 + GR_LEARN = 0x00000008,
55453 + GR_OVERRIDE = 0x00000010,
55454 + /* just a placeholder, this mode is only used in userspace */
55455 + GR_DUMMY = 0x00000020,
55456 + GR_PROTSHM = 0x00000040,
55457 + GR_KILLPROC = 0x00000080,
55458 + GR_KILLIPPROC = 0x00000100,
55459 + /* just a placeholder, this mode is only used in userspace */
55460 + GR_NOTROJAN = 0x00000200,
55461 + GR_PROTPROCFD = 0x00000400,
55462 + GR_PROCACCT = 0x00000800,
55463 + GR_RELAXPTRACE = 0x00001000,
55464 + GR_NESTED = 0x00002000,
55465 + GR_INHERITLEARN = 0x00004000,
55466 + GR_PROCFIND = 0x00008000,
55467 + GR_POVERRIDE = 0x00010000,
55468 + GR_KERNELAUTH = 0x00020000,
55469 + GR_ATSECURE = 0x00040000,
55470 + GR_SHMEXEC = 0x00080000
55471 +};
55472 +
55473 +enum {
55474 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55475 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55476 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55477 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55478 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55479 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55480 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55481 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55482 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55483 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55484 +};
55485 +
55486 +enum {
55487 + GR_ID_USER = 0x01,
55488 + GR_ID_GROUP = 0x02,
55489 +};
55490 +
55491 +enum {
55492 + GR_ID_ALLOW = 0x01,
55493 + GR_ID_DENY = 0x02,
55494 +};
55495 +
55496 +#define GR_CRASH_RES 31
55497 +#define GR_UIDTABLE_MAX 500
55498 +
55499 +/* begin resource learning section */
55500 +enum {
55501 + GR_RLIM_CPU_BUMP = 60,
55502 + GR_RLIM_FSIZE_BUMP = 50000,
55503 + GR_RLIM_DATA_BUMP = 10000,
55504 + GR_RLIM_STACK_BUMP = 1000,
55505 + GR_RLIM_CORE_BUMP = 10000,
55506 + GR_RLIM_RSS_BUMP = 500000,
55507 + GR_RLIM_NPROC_BUMP = 1,
55508 + GR_RLIM_NOFILE_BUMP = 5,
55509 + GR_RLIM_MEMLOCK_BUMP = 50000,
55510 + GR_RLIM_AS_BUMP = 500000,
55511 + GR_RLIM_LOCKS_BUMP = 2,
55512 + GR_RLIM_SIGPENDING_BUMP = 5,
55513 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55514 + GR_RLIM_NICE_BUMP = 1,
55515 + GR_RLIM_RTPRIO_BUMP = 1,
55516 + GR_RLIM_RTTIME_BUMP = 1000000
55517 +};
55518 +
55519 +#endif
55520 diff -urNp linux-2.6.32.42/include/linux/grinternal.h linux-2.6.32.42/include/linux/grinternal.h
55521 --- linux-2.6.32.42/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55522 +++ linux-2.6.32.42/include/linux/grinternal.h 2011-06-29 19:41:14.000000000 -0400
55523 @@ -0,0 +1,219 @@
55524 +#ifndef __GRINTERNAL_H
55525 +#define __GRINTERNAL_H
55526 +
55527 +#ifdef CONFIG_GRKERNSEC
55528 +
55529 +#include <linux/fs.h>
55530 +#include <linux/mnt_namespace.h>
55531 +#include <linux/nsproxy.h>
55532 +#include <linux/gracl.h>
55533 +#include <linux/grdefs.h>
55534 +#include <linux/grmsg.h>
55535 +
55536 +void gr_add_learn_entry(const char *fmt, ...)
55537 + __attribute__ ((format (printf, 1, 2)));
55538 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55539 + const struct vfsmount *mnt);
55540 +__u32 gr_check_create(const struct dentry *new_dentry,
55541 + const struct dentry *parent,
55542 + const struct vfsmount *mnt, const __u32 mode);
55543 +int gr_check_protected_task(const struct task_struct *task);
55544 +__u32 to_gr_audit(const __u32 reqmode);
55545 +int gr_set_acls(const int type);
55546 +int gr_apply_subject_to_task(struct task_struct *task);
55547 +int gr_acl_is_enabled(void);
55548 +char gr_roletype_to_char(void);
55549 +
55550 +void gr_handle_alertkill(struct task_struct *task);
55551 +char *gr_to_filename(const struct dentry *dentry,
55552 + const struct vfsmount *mnt);
55553 +char *gr_to_filename1(const struct dentry *dentry,
55554 + const struct vfsmount *mnt);
55555 +char *gr_to_filename2(const struct dentry *dentry,
55556 + const struct vfsmount *mnt);
55557 +char *gr_to_filename3(const struct dentry *dentry,
55558 + const struct vfsmount *mnt);
55559 +
55560 +extern int grsec_enable_harden_ptrace;
55561 +extern int grsec_enable_link;
55562 +extern int grsec_enable_fifo;
55563 +extern int grsec_enable_execve;
55564 +extern int grsec_enable_shm;
55565 +extern int grsec_enable_execlog;
55566 +extern int grsec_enable_signal;
55567 +extern int grsec_enable_audit_ptrace;
55568 +extern int grsec_enable_forkfail;
55569 +extern int grsec_enable_time;
55570 +extern int grsec_enable_rofs;
55571 +extern int grsec_enable_chroot_shmat;
55572 +extern int grsec_enable_chroot_findtask;
55573 +extern int grsec_enable_chroot_mount;
55574 +extern int grsec_enable_chroot_double;
55575 +extern int grsec_enable_chroot_pivot;
55576 +extern int grsec_enable_chroot_chdir;
55577 +extern int grsec_enable_chroot_chmod;
55578 +extern int grsec_enable_chroot_mknod;
55579 +extern int grsec_enable_chroot_fchdir;
55580 +extern int grsec_enable_chroot_nice;
55581 +extern int grsec_enable_chroot_execlog;
55582 +extern int grsec_enable_chroot_caps;
55583 +extern int grsec_enable_chroot_sysctl;
55584 +extern int grsec_enable_chroot_unix;
55585 +extern int grsec_enable_tpe;
55586 +extern int grsec_tpe_gid;
55587 +extern int grsec_enable_tpe_all;
55588 +extern int grsec_enable_tpe_invert;
55589 +extern int grsec_enable_socket_all;
55590 +extern int grsec_socket_all_gid;
55591 +extern int grsec_enable_socket_client;
55592 +extern int grsec_socket_client_gid;
55593 +extern int grsec_enable_socket_server;
55594 +extern int grsec_socket_server_gid;
55595 +extern int grsec_audit_gid;
55596 +extern int grsec_enable_group;
55597 +extern int grsec_enable_audit_textrel;
55598 +extern int grsec_enable_log_rwxmaps;
55599 +extern int grsec_enable_mount;
55600 +extern int grsec_enable_chdir;
55601 +extern int grsec_resource_logging;
55602 +extern int grsec_enable_blackhole;
55603 +extern int grsec_lastack_retries;
55604 +extern int grsec_enable_brute;
55605 +extern int grsec_lock;
55606 +
55607 +extern spinlock_t grsec_alert_lock;
55608 +extern unsigned long grsec_alert_wtime;
55609 +extern unsigned long grsec_alert_fyet;
55610 +
55611 +extern spinlock_t grsec_audit_lock;
55612 +
55613 +extern rwlock_t grsec_exec_file_lock;
55614 +
55615 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55616 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55617 + (tsk)->exec_file->f_vfsmnt) : "/")
55618 +
55619 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55620 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55621 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55622 +
55623 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55624 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55625 + (tsk)->exec_file->f_vfsmnt) : "/")
55626 +
55627 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55628 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55629 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55630 +
55631 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55632 +
55633 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55634 +
55635 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55636 + (task)->pid, (cred)->uid, \
55637 + (cred)->euid, (cred)->gid, (cred)->egid, \
55638 + gr_parent_task_fullpath(task), \
55639 + (task)->real_parent->comm, (task)->real_parent->pid, \
55640 + (pcred)->uid, (pcred)->euid, \
55641 + (pcred)->gid, (pcred)->egid
55642 +
55643 +#define GR_CHROOT_CAPS {{ \
55644 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55645 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55646 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55647 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55648 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55649 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55650 +
55651 +#define security_learn(normal_msg,args...) \
55652 +({ \
55653 + read_lock(&grsec_exec_file_lock); \
55654 + gr_add_learn_entry(normal_msg "\n", ## args); \
55655 + read_unlock(&grsec_exec_file_lock); \
55656 +})
55657 +
55658 +enum {
55659 + GR_DO_AUDIT,
55660 + GR_DONT_AUDIT,
55661 + GR_DONT_AUDIT_GOOD
55662 +};
55663 +
55664 +enum {
55665 + GR_TTYSNIFF,
55666 + GR_RBAC,
55667 + GR_RBAC_STR,
55668 + GR_STR_RBAC,
55669 + GR_RBAC_MODE2,
55670 + GR_RBAC_MODE3,
55671 + GR_FILENAME,
55672 + GR_SYSCTL_HIDDEN,
55673 + GR_NOARGS,
55674 + GR_ONE_INT,
55675 + GR_ONE_INT_TWO_STR,
55676 + GR_ONE_STR,
55677 + GR_STR_INT,
55678 + GR_TWO_STR_INT,
55679 + GR_TWO_INT,
55680 + GR_TWO_U64,
55681 + GR_THREE_INT,
55682 + GR_FIVE_INT_TWO_STR,
55683 + GR_TWO_STR,
55684 + GR_THREE_STR,
55685 + GR_FOUR_STR,
55686 + GR_STR_FILENAME,
55687 + GR_FILENAME_STR,
55688 + GR_FILENAME_TWO_INT,
55689 + GR_FILENAME_TWO_INT_STR,
55690 + GR_TEXTREL,
55691 + GR_PTRACE,
55692 + GR_RESOURCE,
55693 + GR_CAP,
55694 + GR_SIG,
55695 + GR_SIG2,
55696 + GR_CRASH1,
55697 + GR_CRASH2,
55698 + GR_PSACCT,
55699 + GR_RWXMAP
55700 +};
55701 +
55702 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55703 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55704 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55705 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55706 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55707 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55708 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55709 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55710 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55711 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55712 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55713 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55714 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55715 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55716 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55717 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55718 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55719 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55720 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55721 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55722 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55723 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55724 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55725 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55726 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55727 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55728 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55729 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55730 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55731 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55732 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55733 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55734 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55735 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55736 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55737 +
55738 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55739 +
55740 +#endif
55741 +
55742 +#endif
55743 diff -urNp linux-2.6.32.42/include/linux/grmsg.h linux-2.6.32.42/include/linux/grmsg.h
55744 --- linux-2.6.32.42/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55745 +++ linux-2.6.32.42/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
55746 @@ -0,0 +1,108 @@
55747 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55748 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55749 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55750 +#define GR_STOPMOD_MSG "denied modification of module state by "
55751 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55752 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55753 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55754 +#define GR_IOPL_MSG "denied use of iopl() by "
55755 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55756 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55757 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55758 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55759 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55760 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55761 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55762 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55763 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55764 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55765 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55766 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55767 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55768 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55769 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55770 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55771 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55772 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55773 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55774 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55775 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55776 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55777 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55778 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55779 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55780 +#define GR_NPROC_MSG "denied overstep of process limit by "
55781 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55782 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55783 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55784 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55785 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55786 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55787 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55788 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55789 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55790 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55791 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55792 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55793 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55794 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55795 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55796 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55797 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55798 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55799 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55800 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55801 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55802 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55803 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55804 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55805 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55806 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55807 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55808 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55809 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55810 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55811 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55812 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55813 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55814 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55815 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55816 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55817 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55818 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55819 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55820 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55821 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55822 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55823 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55824 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55825 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55826 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55827 +#define GR_TIME_MSG "time set by "
55828 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55829 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55830 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55831 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55832 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55833 +#define GR_BIND_MSG "denied bind() by "
55834 +#define GR_CONNECT_MSG "denied connect() by "
55835 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55836 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55837 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55838 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55839 +#define GR_CAP_ACL_MSG "use of %s denied for "
55840 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55841 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55842 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55843 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55844 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55845 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55846 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55847 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55848 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55849 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55850 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55851 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55852 +#define GR_VM86_MSG "denied use of vm86 by "
55853 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55854 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55855 diff -urNp linux-2.6.32.42/include/linux/grsecurity.h linux-2.6.32.42/include/linux/grsecurity.h
55856 --- linux-2.6.32.42/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55857 +++ linux-2.6.32.42/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
55858 @@ -0,0 +1,212 @@
55859 +#ifndef GR_SECURITY_H
55860 +#define GR_SECURITY_H
55861 +#include <linux/fs.h>
55862 +#include <linux/fs_struct.h>
55863 +#include <linux/binfmts.h>
55864 +#include <linux/gracl.h>
55865 +#include <linux/compat.h>
55866 +
55867 +/* notify of brain-dead configs */
55868 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55869 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55870 +#endif
55871 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55872 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55873 +#endif
55874 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55875 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55876 +#endif
55877 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55878 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55879 +#endif
55880 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55881 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55882 +#endif
55883 +
55884 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55885 +void gr_handle_brute_check(void);
55886 +void gr_handle_kernel_exploit(void);
55887 +int gr_process_user_ban(void);
55888 +
55889 +char gr_roletype_to_char(void);
55890 +
55891 +int gr_acl_enable_at_secure(void);
55892 +
55893 +int gr_check_user_change(int real, int effective, int fs);
55894 +int gr_check_group_change(int real, int effective, int fs);
55895 +
55896 +void gr_del_task_from_ip_table(struct task_struct *p);
55897 +
55898 +int gr_pid_is_chrooted(struct task_struct *p);
55899 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55900 +int gr_handle_chroot_nice(void);
55901 +int gr_handle_chroot_sysctl(const int op);
55902 +int gr_handle_chroot_setpriority(struct task_struct *p,
55903 + const int niceval);
55904 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55905 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55906 + const struct vfsmount *mnt);
55907 +int gr_handle_chroot_caps(struct path *path);
55908 +void gr_handle_chroot_chdir(struct path *path);
55909 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55910 + const struct vfsmount *mnt, const int mode);
55911 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55912 + const struct vfsmount *mnt, const int mode);
55913 +int gr_handle_chroot_mount(const struct dentry *dentry,
55914 + const struct vfsmount *mnt,
55915 + const char *dev_name);
55916 +int gr_handle_chroot_pivot(void);
55917 +int gr_handle_chroot_unix(const pid_t pid);
55918 +
55919 +int gr_handle_rawio(const struct inode *inode);
55920 +int gr_handle_nproc(void);
55921 +
55922 +void gr_handle_ioperm(void);
55923 +void gr_handle_iopl(void);
55924 +
55925 +int gr_tpe_allow(const struct file *file);
55926 +
55927 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55928 +void gr_clear_chroot_entries(struct task_struct *task);
55929 +
55930 +void gr_log_forkfail(const int retval);
55931 +void gr_log_timechange(void);
55932 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55933 +void gr_log_chdir(const struct dentry *dentry,
55934 + const struct vfsmount *mnt);
55935 +void gr_log_chroot_exec(const struct dentry *dentry,
55936 + const struct vfsmount *mnt);
55937 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
55938 +#ifdef CONFIG_COMPAT
55939 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
55940 +#endif
55941 +void gr_log_remount(const char *devname, const int retval);
55942 +void gr_log_unmount(const char *devname, const int retval);
55943 +void gr_log_mount(const char *from, const char *to, const int retval);
55944 +void gr_log_textrel(struct vm_area_struct *vma);
55945 +void gr_log_rwxmmap(struct file *file);
55946 +void gr_log_rwxmprotect(struct file *file);
55947 +
55948 +int gr_handle_follow_link(const struct inode *parent,
55949 + const struct inode *inode,
55950 + const struct dentry *dentry,
55951 + const struct vfsmount *mnt);
55952 +int gr_handle_fifo(const struct dentry *dentry,
55953 + const struct vfsmount *mnt,
55954 + const struct dentry *dir, const int flag,
55955 + const int acc_mode);
55956 +int gr_handle_hardlink(const struct dentry *dentry,
55957 + const struct vfsmount *mnt,
55958 + struct inode *inode,
55959 + const int mode, const char *to);
55960 +
55961 +int gr_is_capable(const int cap);
55962 +int gr_is_capable_nolog(const int cap);
55963 +void gr_learn_resource(const struct task_struct *task, const int limit,
55964 + const unsigned long wanted, const int gt);
55965 +void gr_copy_label(struct task_struct *tsk);
55966 +void gr_handle_crash(struct task_struct *task, const int sig);
55967 +int gr_handle_signal(const struct task_struct *p, const int sig);
55968 +int gr_check_crash_uid(const uid_t uid);
55969 +int gr_check_protected_task(const struct task_struct *task);
55970 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55971 +int gr_acl_handle_mmap(const struct file *file,
55972 + const unsigned long prot);
55973 +int gr_acl_handle_mprotect(const struct file *file,
55974 + const unsigned long prot);
55975 +int gr_check_hidden_task(const struct task_struct *tsk);
55976 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55977 + const struct vfsmount *mnt);
55978 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55979 + const struct vfsmount *mnt);
55980 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55981 + const struct vfsmount *mnt, const int fmode);
55982 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55983 + const struct vfsmount *mnt, mode_t mode);
55984 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55985 + const struct vfsmount *mnt, mode_t mode);
55986 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55987 + const struct vfsmount *mnt);
55988 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55989 + const struct vfsmount *mnt);
55990 +int gr_handle_ptrace(struct task_struct *task, const long request);
55991 +int gr_handle_proc_ptrace(struct task_struct *task);
55992 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55993 + const struct vfsmount *mnt);
55994 +int gr_check_crash_exec(const struct file *filp);
55995 +int gr_acl_is_enabled(void);
55996 +void gr_set_kernel_label(struct task_struct *task);
55997 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55998 + const gid_t gid);
55999 +int gr_set_proc_label(const struct dentry *dentry,
56000 + const struct vfsmount *mnt,
56001 + const int unsafe_share);
56002 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
56003 + const struct vfsmount *mnt);
56004 +__u32 gr_acl_handle_open(const struct dentry *dentry,
56005 + const struct vfsmount *mnt, const int fmode);
56006 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
56007 + const struct dentry *p_dentry,
56008 + const struct vfsmount *p_mnt, const int fmode,
56009 + const int imode);
56010 +void gr_handle_create(const struct dentry *dentry,
56011 + const struct vfsmount *mnt);
56012 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
56013 + const struct dentry *parent_dentry,
56014 + const struct vfsmount *parent_mnt,
56015 + const int mode);
56016 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
56017 + const struct dentry *parent_dentry,
56018 + const struct vfsmount *parent_mnt);
56019 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
56020 + const struct vfsmount *mnt);
56021 +void gr_handle_delete(const ino_t ino, const dev_t dev);
56022 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
56023 + const struct vfsmount *mnt);
56024 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
56025 + const struct dentry *parent_dentry,
56026 + const struct vfsmount *parent_mnt,
56027 + const char *from);
56028 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
56029 + const struct dentry *parent_dentry,
56030 + const struct vfsmount *parent_mnt,
56031 + const struct dentry *old_dentry,
56032 + const struct vfsmount *old_mnt, const char *to);
56033 +int gr_acl_handle_rename(struct dentry *new_dentry,
56034 + struct dentry *parent_dentry,
56035 + const struct vfsmount *parent_mnt,
56036 + struct dentry *old_dentry,
56037 + struct inode *old_parent_inode,
56038 + struct vfsmount *old_mnt, const char *newname);
56039 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56040 + struct dentry *old_dentry,
56041 + struct dentry *new_dentry,
56042 + struct vfsmount *mnt, const __u8 replace);
56043 +__u32 gr_check_link(const struct dentry *new_dentry,
56044 + const struct dentry *parent_dentry,
56045 + const struct vfsmount *parent_mnt,
56046 + const struct dentry *old_dentry,
56047 + const struct vfsmount *old_mnt);
56048 +int gr_acl_handle_filldir(const struct file *file, const char *name,
56049 + const unsigned int namelen, const ino_t ino);
56050 +
56051 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
56052 + const struct vfsmount *mnt);
56053 +void gr_acl_handle_exit(void);
56054 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
56055 +int gr_acl_handle_procpidmem(const struct task_struct *task);
56056 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
56057 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
56058 +void gr_audit_ptrace(struct task_struct *task);
56059 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
56060 +
56061 +#ifdef CONFIG_GRKERNSEC
56062 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
56063 +void gr_handle_vm86(void);
56064 +void gr_handle_mem_readwrite(u64 from, u64 to);
56065 +
56066 +extern int grsec_enable_dmesg;
56067 +extern int grsec_disable_privio;
56068 +#endif
56069 +
56070 +#endif
56071 diff -urNp linux-2.6.32.42/include/linux/hdpu_features.h linux-2.6.32.42/include/linux/hdpu_features.h
56072 --- linux-2.6.32.42/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
56073 +++ linux-2.6.32.42/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
56074 @@ -3,7 +3,7 @@
56075 struct cpustate_t {
56076 spinlock_t lock;
56077 int excl;
56078 - int open_count;
56079 + atomic_t open_count;
56080 unsigned char cached_val;
56081 int inited;
56082 unsigned long *set_addr;
56083 diff -urNp linux-2.6.32.42/include/linux/highmem.h linux-2.6.32.42/include/linux/highmem.h
56084 --- linux-2.6.32.42/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
56085 +++ linux-2.6.32.42/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
56086 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
56087 kunmap_atomic(kaddr, KM_USER0);
56088 }
56089
56090 +static inline void sanitize_highpage(struct page *page)
56091 +{
56092 + void *kaddr;
56093 + unsigned long flags;
56094 +
56095 + local_irq_save(flags);
56096 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
56097 + clear_page(kaddr);
56098 + kunmap_atomic(kaddr, KM_CLEARPAGE);
56099 + local_irq_restore(flags);
56100 +}
56101 +
56102 static inline void zero_user_segments(struct page *page,
56103 unsigned start1, unsigned end1,
56104 unsigned start2, unsigned end2)
56105 diff -urNp linux-2.6.32.42/include/linux/i2o.h linux-2.6.32.42/include/linux/i2o.h
56106 --- linux-2.6.32.42/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
56107 +++ linux-2.6.32.42/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
56108 @@ -564,7 +564,7 @@ struct i2o_controller {
56109 struct i2o_device *exec; /* Executive */
56110 #if BITS_PER_LONG == 64
56111 spinlock_t context_list_lock; /* lock for context_list */
56112 - atomic_t context_list_counter; /* needed for unique contexts */
56113 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
56114 struct list_head context_list; /* list of context id's
56115 and pointers */
56116 #endif
56117 diff -urNp linux-2.6.32.42/include/linux/init_task.h linux-2.6.32.42/include/linux/init_task.h
56118 --- linux-2.6.32.42/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
56119 +++ linux-2.6.32.42/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
56120 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
56121 #define INIT_IDS
56122 #endif
56123
56124 +#ifdef CONFIG_X86
56125 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
56126 +#else
56127 +#define INIT_TASK_THREAD_INFO
56128 +#endif
56129 +
56130 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
56131 /*
56132 * Because of the reduced scope of CAP_SETPCAP when filesystem
56133 @@ -156,6 +162,7 @@ extern struct cred init_cred;
56134 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
56135 .comm = "swapper", \
56136 .thread = INIT_THREAD, \
56137 + INIT_TASK_THREAD_INFO \
56138 .fs = &init_fs, \
56139 .files = &init_files, \
56140 .signal = &init_signals, \
56141 diff -urNp linux-2.6.32.42/include/linux/interrupt.h linux-2.6.32.42/include/linux/interrupt.h
56142 --- linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
56143 +++ linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
56144 @@ -363,7 +363,7 @@ enum
56145 /* map softirq index to softirq name. update 'softirq_to_name' in
56146 * kernel/softirq.c when adding a new softirq.
56147 */
56148 -extern char *softirq_to_name[NR_SOFTIRQS];
56149 +extern const char * const softirq_to_name[NR_SOFTIRQS];
56150
56151 /* softirq mask and active fields moved to irq_cpustat_t in
56152 * asm/hardirq.h to get better cache usage. KAO
56153 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56154
56155 struct softirq_action
56156 {
56157 - void (*action)(struct softirq_action *);
56158 + void (*action)(void);
56159 };
56160
56161 asmlinkage void do_softirq(void);
56162 asmlinkage void __do_softirq(void);
56163 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56164 +extern void open_softirq(int nr, void (*action)(void));
56165 extern void softirq_init(void);
56166 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
56167 extern void raise_softirq_irqoff(unsigned int nr);
56168 diff -urNp linux-2.6.32.42/include/linux/irq.h linux-2.6.32.42/include/linux/irq.h
56169 --- linux-2.6.32.42/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
56170 +++ linux-2.6.32.42/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
56171 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
56172 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
56173 bool boot)
56174 {
56175 +#ifdef CONFIG_CPUMASK_OFFSTACK
56176 gfp_t gfp = GFP_ATOMIC;
56177
56178 if (boot)
56179 gfp = GFP_NOWAIT;
56180
56181 -#ifdef CONFIG_CPUMASK_OFFSTACK
56182 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
56183 return false;
56184
56185 diff -urNp linux-2.6.32.42/include/linux/kallsyms.h linux-2.6.32.42/include/linux/kallsyms.h
56186 --- linux-2.6.32.42/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
56187 +++ linux-2.6.32.42/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
56188 @@ -15,7 +15,8 @@
56189
56190 struct module;
56191
56192 -#ifdef CONFIG_KALLSYMS
56193 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56194 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56195 /* Lookup the address for a symbol. Returns 0 if not found. */
56196 unsigned long kallsyms_lookup_name(const char *name);
56197
56198 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
56199 /* Stupid that this does nothing, but I didn't create this mess. */
56200 #define __print_symbol(fmt, addr)
56201 #endif /*CONFIG_KALLSYMS*/
56202 +#else /* when included by kallsyms.c, vsnprintf.c, or
56203 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56204 +extern void __print_symbol(const char *fmt, unsigned long address);
56205 +extern int sprint_symbol(char *buffer, unsigned long address);
56206 +const char *kallsyms_lookup(unsigned long addr,
56207 + unsigned long *symbolsize,
56208 + unsigned long *offset,
56209 + char **modname, char *namebuf);
56210 +#endif
56211
56212 /* This macro allows us to keep printk typechecking */
56213 static void __check_printsym_format(const char *fmt, ...)
56214 diff -urNp linux-2.6.32.42/include/linux/kgdb.h linux-2.6.32.42/include/linux/kgdb.h
56215 --- linux-2.6.32.42/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
56216 +++ linux-2.6.32.42/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
56217 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
56218
56219 extern int kgdb_connected;
56220
56221 -extern atomic_t kgdb_setting_breakpoint;
56222 -extern atomic_t kgdb_cpu_doing_single_step;
56223 +extern atomic_unchecked_t kgdb_setting_breakpoint;
56224 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56225
56226 extern struct task_struct *kgdb_usethread;
56227 extern struct task_struct *kgdb_contthread;
56228 @@ -251,20 +251,20 @@ struct kgdb_arch {
56229 */
56230 struct kgdb_io {
56231 const char *name;
56232 - int (*read_char) (void);
56233 - void (*write_char) (u8);
56234 - void (*flush) (void);
56235 - int (*init) (void);
56236 - void (*pre_exception) (void);
56237 - void (*post_exception) (void);
56238 + int (* const read_char) (void);
56239 + void (* const write_char) (u8);
56240 + void (* const flush) (void);
56241 + int (* const init) (void);
56242 + void (* const pre_exception) (void);
56243 + void (* const post_exception) (void);
56244 };
56245
56246 -extern struct kgdb_arch arch_kgdb_ops;
56247 +extern const struct kgdb_arch arch_kgdb_ops;
56248
56249 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
56250
56251 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
56252 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
56253 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
56254 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
56255
56256 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
56257 extern int kgdb_mem2hex(char *mem, char *buf, int count);
56258 diff -urNp linux-2.6.32.42/include/linux/kmod.h linux-2.6.32.42/include/linux/kmod.h
56259 --- linux-2.6.32.42/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56260 +++ linux-2.6.32.42/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56261 @@ -31,6 +31,8 @@
56262 * usually useless though. */
56263 extern int __request_module(bool wait, const char *name, ...) \
56264 __attribute__((format(printf, 2, 3)));
56265 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56266 + __attribute__((format(printf, 3, 4)));
56267 #define request_module(mod...) __request_module(true, mod)
56268 #define request_module_nowait(mod...) __request_module(false, mod)
56269 #define try_then_request_module(x, mod...) \
56270 diff -urNp linux-2.6.32.42/include/linux/kobject.h linux-2.6.32.42/include/linux/kobject.h
56271 --- linux-2.6.32.42/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56272 +++ linux-2.6.32.42/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56273 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56274
56275 struct kobj_type {
56276 void (*release)(struct kobject *kobj);
56277 - struct sysfs_ops *sysfs_ops;
56278 + const struct sysfs_ops *sysfs_ops;
56279 struct attribute **default_attrs;
56280 };
56281
56282 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
56283 };
56284
56285 struct kset_uevent_ops {
56286 - int (*filter)(struct kset *kset, struct kobject *kobj);
56287 - const char *(*name)(struct kset *kset, struct kobject *kobj);
56288 - int (*uevent)(struct kset *kset, struct kobject *kobj,
56289 + int (* const filter)(struct kset *kset, struct kobject *kobj);
56290 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
56291 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
56292 struct kobj_uevent_env *env);
56293 };
56294
56295 @@ -132,7 +132,7 @@ struct kobj_attribute {
56296 const char *buf, size_t count);
56297 };
56298
56299 -extern struct sysfs_ops kobj_sysfs_ops;
56300 +extern const struct sysfs_ops kobj_sysfs_ops;
56301
56302 /**
56303 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56304 @@ -155,14 +155,14 @@ struct kset {
56305 struct list_head list;
56306 spinlock_t list_lock;
56307 struct kobject kobj;
56308 - struct kset_uevent_ops *uevent_ops;
56309 + const struct kset_uevent_ops *uevent_ops;
56310 };
56311
56312 extern void kset_init(struct kset *kset);
56313 extern int __must_check kset_register(struct kset *kset);
56314 extern void kset_unregister(struct kset *kset);
56315 extern struct kset * __must_check kset_create_and_add(const char *name,
56316 - struct kset_uevent_ops *u,
56317 + const struct kset_uevent_ops *u,
56318 struct kobject *parent_kobj);
56319
56320 static inline struct kset *to_kset(struct kobject *kobj)
56321 diff -urNp linux-2.6.32.42/include/linux/kvm_host.h linux-2.6.32.42/include/linux/kvm_host.h
56322 --- linux-2.6.32.42/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56323 +++ linux-2.6.32.42/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56324 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56325 void vcpu_load(struct kvm_vcpu *vcpu);
56326 void vcpu_put(struct kvm_vcpu *vcpu);
56327
56328 -int kvm_init(void *opaque, unsigned int vcpu_size,
56329 +int kvm_init(const void *opaque, unsigned int vcpu_size,
56330 struct module *module);
56331 void kvm_exit(void);
56332
56333 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56334 struct kvm_guest_debug *dbg);
56335 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56336
56337 -int kvm_arch_init(void *opaque);
56338 +int kvm_arch_init(const void *opaque);
56339 void kvm_arch_exit(void);
56340
56341 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56342 diff -urNp linux-2.6.32.42/include/linux/libata.h linux-2.6.32.42/include/linux/libata.h
56343 --- linux-2.6.32.42/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56344 +++ linux-2.6.32.42/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56345 @@ -525,11 +525,11 @@ struct ata_ioports {
56346
56347 struct ata_host {
56348 spinlock_t lock;
56349 - struct device *dev;
56350 + struct device *dev;
56351 void __iomem * const *iomap;
56352 unsigned int n_ports;
56353 void *private_data;
56354 - struct ata_port_operations *ops;
56355 + const struct ata_port_operations *ops;
56356 unsigned long flags;
56357 #ifdef CONFIG_ATA_ACPI
56358 acpi_handle acpi_handle;
56359 @@ -710,7 +710,7 @@ struct ata_link {
56360
56361 struct ata_port {
56362 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56363 - struct ata_port_operations *ops;
56364 + const struct ata_port_operations *ops;
56365 spinlock_t *lock;
56366 /* Flags owned by the EH context. Only EH should touch these once the
56367 port is active */
56368 @@ -892,7 +892,7 @@ struct ata_port_info {
56369 unsigned long pio_mask;
56370 unsigned long mwdma_mask;
56371 unsigned long udma_mask;
56372 - struct ata_port_operations *port_ops;
56373 + const struct ata_port_operations *port_ops;
56374 void *private_data;
56375 };
56376
56377 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56378 extern const unsigned long sata_deb_timing_hotplug[];
56379 extern const unsigned long sata_deb_timing_long[];
56380
56381 -extern struct ata_port_operations ata_dummy_port_ops;
56382 +extern const struct ata_port_operations ata_dummy_port_ops;
56383 extern const struct ata_port_info ata_dummy_port_info;
56384
56385 static inline const unsigned long *
56386 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56387 struct scsi_host_template *sht);
56388 extern void ata_host_detach(struct ata_host *host);
56389 extern void ata_host_init(struct ata_host *, struct device *,
56390 - unsigned long, struct ata_port_operations *);
56391 + unsigned long, const struct ata_port_operations *);
56392 extern int ata_scsi_detect(struct scsi_host_template *sht);
56393 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56394 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56395 diff -urNp linux-2.6.32.42/include/linux/lockd/bind.h linux-2.6.32.42/include/linux/lockd/bind.h
56396 --- linux-2.6.32.42/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56397 +++ linux-2.6.32.42/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56398 @@ -23,13 +23,13 @@ struct svc_rqst;
56399 * This is the set of functions for lockd->nfsd communication
56400 */
56401 struct nlmsvc_binding {
56402 - __be32 (*fopen)(struct svc_rqst *,
56403 + __be32 (* const fopen)(struct svc_rqst *,
56404 struct nfs_fh *,
56405 struct file **);
56406 - void (*fclose)(struct file *);
56407 + void (* const fclose)(struct file *);
56408 };
56409
56410 -extern struct nlmsvc_binding * nlmsvc_ops;
56411 +extern const struct nlmsvc_binding * nlmsvc_ops;
56412
56413 /*
56414 * Similar to nfs_client_initdata, but without the NFS-specific
56415 diff -urNp linux-2.6.32.42/include/linux/mm.h linux-2.6.32.42/include/linux/mm.h
56416 --- linux-2.6.32.42/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56417 +++ linux-2.6.32.42/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56418 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56419
56420 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56421 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56422 +
56423 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56424 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56425 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56426 +#else
56427 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56428 +#endif
56429 +
56430 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56431 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56432
56433 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56434 int set_page_dirty_lock(struct page *page);
56435 int clear_page_dirty_for_io(struct page *page);
56436
56437 -/* Is the vma a continuation of the stack vma above it? */
56438 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56439 -{
56440 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56441 -}
56442 -
56443 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56444 unsigned long old_addr, struct vm_area_struct *new_vma,
56445 unsigned long new_addr, unsigned long len);
56446 @@ -890,6 +891,8 @@ struct shrinker {
56447 extern void register_shrinker(struct shrinker *);
56448 extern void unregister_shrinker(struct shrinker *);
56449
56450 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56451 +
56452 int vma_wants_writenotify(struct vm_area_struct *vma);
56453
56454 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56455 @@ -1162,6 +1165,7 @@ out:
56456 }
56457
56458 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56459 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56460
56461 extern unsigned long do_brk(unsigned long, unsigned long);
56462
56463 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56464 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56465 struct vm_area_struct **pprev);
56466
56467 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56468 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56469 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56470 +
56471 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56472 NULL if none. Assume start_addr < end_addr. */
56473 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56474 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56475 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56476 }
56477
56478 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56479 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56480 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56481 unsigned long pfn, unsigned long size, pgprot_t);
56482 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56483 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56484 extern int sysctl_memory_failure_early_kill;
56485 extern int sysctl_memory_failure_recovery;
56486 -extern atomic_long_t mce_bad_pages;
56487 +extern atomic_long_unchecked_t mce_bad_pages;
56488 +
56489 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56490 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56491 +#else
56492 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56493 +#endif
56494
56495 #endif /* __KERNEL__ */
56496 #endif /* _LINUX_MM_H */
56497 diff -urNp linux-2.6.32.42/include/linux/mm_types.h linux-2.6.32.42/include/linux/mm_types.h
56498 --- linux-2.6.32.42/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56499 +++ linux-2.6.32.42/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56500 @@ -186,6 +186,8 @@ struct vm_area_struct {
56501 #ifdef CONFIG_NUMA
56502 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56503 #endif
56504 +
56505 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56506 };
56507
56508 struct core_thread {
56509 @@ -287,6 +289,24 @@ struct mm_struct {
56510 #ifdef CONFIG_MMU_NOTIFIER
56511 struct mmu_notifier_mm *mmu_notifier_mm;
56512 #endif
56513 +
56514 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56515 + unsigned long pax_flags;
56516 +#endif
56517 +
56518 +#ifdef CONFIG_PAX_DLRESOLVE
56519 + unsigned long call_dl_resolve;
56520 +#endif
56521 +
56522 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56523 + unsigned long call_syscall;
56524 +#endif
56525 +
56526 +#ifdef CONFIG_PAX_ASLR
56527 + unsigned long delta_mmap; /* randomized offset */
56528 + unsigned long delta_stack; /* randomized offset */
56529 +#endif
56530 +
56531 };
56532
56533 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56534 diff -urNp linux-2.6.32.42/include/linux/mmu_notifier.h linux-2.6.32.42/include/linux/mmu_notifier.h
56535 --- linux-2.6.32.42/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56536 +++ linux-2.6.32.42/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56537 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56538 */
56539 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56540 ({ \
56541 - pte_t __pte; \
56542 + pte_t ___pte; \
56543 struct vm_area_struct *___vma = __vma; \
56544 unsigned long ___address = __address; \
56545 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56546 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56547 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56548 - __pte; \
56549 + ___pte; \
56550 })
56551
56552 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56553 diff -urNp linux-2.6.32.42/include/linux/mmzone.h linux-2.6.32.42/include/linux/mmzone.h
56554 --- linux-2.6.32.42/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56555 +++ linux-2.6.32.42/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56556 @@ -350,7 +350,7 @@ struct zone {
56557 unsigned long flags; /* zone flags, see below */
56558
56559 /* Zone statistics */
56560 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56561 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56562
56563 /*
56564 * prev_priority holds the scanning priority for this zone. It is
56565 diff -urNp linux-2.6.32.42/include/linux/mod_devicetable.h linux-2.6.32.42/include/linux/mod_devicetable.h
56566 --- linux-2.6.32.42/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56567 +++ linux-2.6.32.42/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56568 @@ -12,7 +12,7 @@
56569 typedef unsigned long kernel_ulong_t;
56570 #endif
56571
56572 -#define PCI_ANY_ID (~0)
56573 +#define PCI_ANY_ID ((__u16)~0)
56574
56575 struct pci_device_id {
56576 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56577 @@ -131,7 +131,7 @@ struct usb_device_id {
56578 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56579 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56580
56581 -#define HID_ANY_ID (~0)
56582 +#define HID_ANY_ID (~0U)
56583
56584 struct hid_device_id {
56585 __u16 bus;
56586 diff -urNp linux-2.6.32.42/include/linux/module.h linux-2.6.32.42/include/linux/module.h
56587 --- linux-2.6.32.42/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56588 +++ linux-2.6.32.42/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56589 @@ -287,16 +287,16 @@ struct module
56590 int (*init)(void);
56591
56592 /* If this is non-NULL, vfree after init() returns */
56593 - void *module_init;
56594 + void *module_init_rx, *module_init_rw;
56595
56596 /* Here is the actual code + data, vfree'd on unload. */
56597 - void *module_core;
56598 + void *module_core_rx, *module_core_rw;
56599
56600 /* Here are the sizes of the init and core sections */
56601 - unsigned int init_size, core_size;
56602 + unsigned int init_size_rw, core_size_rw;
56603
56604 /* The size of the executable code in each section. */
56605 - unsigned int init_text_size, core_text_size;
56606 + unsigned int init_size_rx, core_size_rx;
56607
56608 /* Arch-specific module values */
56609 struct mod_arch_specific arch;
56610 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56611 bool is_module_address(unsigned long addr);
56612 bool is_module_text_address(unsigned long addr);
56613
56614 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56615 +{
56616 +
56617 +#ifdef CONFIG_PAX_KERNEXEC
56618 + if (ktla_ktva(addr) >= (unsigned long)start &&
56619 + ktla_ktva(addr) < (unsigned long)start + size)
56620 + return 1;
56621 +#endif
56622 +
56623 + return ((void *)addr >= start && (void *)addr < start + size);
56624 +}
56625 +
56626 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56627 +{
56628 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56629 +}
56630 +
56631 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56632 +{
56633 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56634 +}
56635 +
56636 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56637 +{
56638 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56639 +}
56640 +
56641 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56642 +{
56643 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56644 +}
56645 +
56646 static inline int within_module_core(unsigned long addr, struct module *mod)
56647 {
56648 - return (unsigned long)mod->module_core <= addr &&
56649 - addr < (unsigned long)mod->module_core + mod->core_size;
56650 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56651 }
56652
56653 static inline int within_module_init(unsigned long addr, struct module *mod)
56654 {
56655 - return (unsigned long)mod->module_init <= addr &&
56656 - addr < (unsigned long)mod->module_init + mod->init_size;
56657 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56658 }
56659
56660 /* Search for module by name: must hold module_mutex. */
56661 diff -urNp linux-2.6.32.42/include/linux/moduleloader.h linux-2.6.32.42/include/linux/moduleloader.h
56662 --- linux-2.6.32.42/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
56663 +++ linux-2.6.32.42/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
56664 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56665 sections. Returns NULL on failure. */
56666 void *module_alloc(unsigned long size);
56667
56668 +#ifdef CONFIG_PAX_KERNEXEC
56669 +void *module_alloc_exec(unsigned long size);
56670 +#else
56671 +#define module_alloc_exec(x) module_alloc(x)
56672 +#endif
56673 +
56674 /* Free memory returned from module_alloc. */
56675 void module_free(struct module *mod, void *module_region);
56676
56677 +#ifdef CONFIG_PAX_KERNEXEC
56678 +void module_free_exec(struct module *mod, void *module_region);
56679 +#else
56680 +#define module_free_exec(x, y) module_free((x), (y))
56681 +#endif
56682 +
56683 /* Apply the given relocation to the (simplified) ELF. Return -error
56684 or 0. */
56685 int apply_relocate(Elf_Shdr *sechdrs,
56686 diff -urNp linux-2.6.32.42/include/linux/moduleparam.h linux-2.6.32.42/include/linux/moduleparam.h
56687 --- linux-2.6.32.42/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
56688 +++ linux-2.6.32.42/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
56689 @@ -132,7 +132,7 @@ struct kparam_array
56690
56691 /* Actually copy string: maxlen param is usually sizeof(string). */
56692 #define module_param_string(name, string, len, perm) \
56693 - static const struct kparam_string __param_string_##name \
56694 + static const struct kparam_string __param_string_##name __used \
56695 = { len, string }; \
56696 __module_param_call(MODULE_PARAM_PREFIX, name, \
56697 param_set_copystring, param_get_string, \
56698 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
56699
56700 /* Comma-separated array: *nump is set to number they actually specified. */
56701 #define module_param_array_named(name, array, type, nump, perm) \
56702 - static const struct kparam_array __param_arr_##name \
56703 + static const struct kparam_array __param_arr_##name __used \
56704 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
56705 sizeof(array[0]), array }; \
56706 __module_param_call(MODULE_PARAM_PREFIX, name, \
56707 diff -urNp linux-2.6.32.42/include/linux/mutex.h linux-2.6.32.42/include/linux/mutex.h
56708 --- linux-2.6.32.42/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
56709 +++ linux-2.6.32.42/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
56710 @@ -51,7 +51,7 @@ struct mutex {
56711 spinlock_t wait_lock;
56712 struct list_head wait_list;
56713 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
56714 - struct thread_info *owner;
56715 + struct task_struct *owner;
56716 #endif
56717 #ifdef CONFIG_DEBUG_MUTEXES
56718 const char *name;
56719 diff -urNp linux-2.6.32.42/include/linux/namei.h linux-2.6.32.42/include/linux/namei.h
56720 --- linux-2.6.32.42/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
56721 +++ linux-2.6.32.42/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
56722 @@ -22,7 +22,7 @@ struct nameidata {
56723 unsigned int flags;
56724 int last_type;
56725 unsigned depth;
56726 - char *saved_names[MAX_NESTED_LINKS + 1];
56727 + const char *saved_names[MAX_NESTED_LINKS + 1];
56728
56729 /* Intent data */
56730 union {
56731 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
56732 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56733 extern void unlock_rename(struct dentry *, struct dentry *);
56734
56735 -static inline void nd_set_link(struct nameidata *nd, char *path)
56736 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56737 {
56738 nd->saved_names[nd->depth] = path;
56739 }
56740
56741 -static inline char *nd_get_link(struct nameidata *nd)
56742 +static inline const char *nd_get_link(const struct nameidata *nd)
56743 {
56744 return nd->saved_names[nd->depth];
56745 }
56746 diff -urNp linux-2.6.32.42/include/linux/netfilter/xt_gradm.h linux-2.6.32.42/include/linux/netfilter/xt_gradm.h
56747 --- linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56748 +++ linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
56749 @@ -0,0 +1,9 @@
56750 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56751 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56752 +
56753 +struct xt_gradm_mtinfo {
56754 + __u16 flags;
56755 + __u16 invflags;
56756 +};
56757 +
56758 +#endif
56759 diff -urNp linux-2.6.32.42/include/linux/nodemask.h linux-2.6.32.42/include/linux/nodemask.h
56760 --- linux-2.6.32.42/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
56761 +++ linux-2.6.32.42/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
56762 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
56763
56764 #define any_online_node(mask) \
56765 ({ \
56766 - int node; \
56767 - for_each_node_mask(node, (mask)) \
56768 - if (node_online(node)) \
56769 + int __node; \
56770 + for_each_node_mask(__node, (mask)) \
56771 + if (node_online(__node)) \
56772 break; \
56773 - node; \
56774 + __node; \
56775 })
56776
56777 #define num_online_nodes() num_node_state(N_ONLINE)
56778 diff -urNp linux-2.6.32.42/include/linux/oprofile.h linux-2.6.32.42/include/linux/oprofile.h
56779 --- linux-2.6.32.42/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
56780 +++ linux-2.6.32.42/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
56781 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
56782 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56783 char const * name, ulong * val);
56784
56785 -/** Create a file for read-only access to an atomic_t. */
56786 +/** Create a file for read-only access to an atomic_unchecked_t. */
56787 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56788 - char const * name, atomic_t * val);
56789 + char const * name, atomic_unchecked_t * val);
56790
56791 /** create a directory */
56792 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56793 diff -urNp linux-2.6.32.42/include/linux/perf_event.h linux-2.6.32.42/include/linux/perf_event.h
56794 --- linux-2.6.32.42/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
56795 +++ linux-2.6.32.42/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
56796 @@ -476,7 +476,7 @@ struct hw_perf_event {
56797 struct hrtimer hrtimer;
56798 };
56799 };
56800 - atomic64_t prev_count;
56801 + atomic64_unchecked_t prev_count;
56802 u64 sample_period;
56803 u64 last_period;
56804 atomic64_t period_left;
56805 @@ -557,7 +557,7 @@ struct perf_event {
56806 const struct pmu *pmu;
56807
56808 enum perf_event_active_state state;
56809 - atomic64_t count;
56810 + atomic64_unchecked_t count;
56811
56812 /*
56813 * These are the total time in nanoseconds that the event
56814 @@ -595,8 +595,8 @@ struct perf_event {
56815 * These accumulate total time (in nanoseconds) that children
56816 * events have been enabled and running, respectively.
56817 */
56818 - atomic64_t child_total_time_enabled;
56819 - atomic64_t child_total_time_running;
56820 + atomic64_unchecked_t child_total_time_enabled;
56821 + atomic64_unchecked_t child_total_time_running;
56822
56823 /*
56824 * Protect attach/detach and child_list:
56825 diff -urNp linux-2.6.32.42/include/linux/pipe_fs_i.h linux-2.6.32.42/include/linux/pipe_fs_i.h
56826 --- linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
56827 +++ linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
56828 @@ -46,9 +46,9 @@ struct pipe_inode_info {
56829 wait_queue_head_t wait;
56830 unsigned int nrbufs, curbuf;
56831 struct page *tmp_page;
56832 - unsigned int readers;
56833 - unsigned int writers;
56834 - unsigned int waiting_writers;
56835 + atomic_t readers;
56836 + atomic_t writers;
56837 + atomic_t waiting_writers;
56838 unsigned int r_counter;
56839 unsigned int w_counter;
56840 struct fasync_struct *fasync_readers;
56841 diff -urNp linux-2.6.32.42/include/linux/poison.h linux-2.6.32.42/include/linux/poison.h
56842 --- linux-2.6.32.42/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
56843 +++ linux-2.6.32.42/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
56844 @@ -19,8 +19,8 @@
56845 * under normal circumstances, used to verify that nobody uses
56846 * non-initialized list entries.
56847 */
56848 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56849 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56850 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56851 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56852
56853 /********** include/linux/timer.h **********/
56854 /*
56855 diff -urNp linux-2.6.32.42/include/linux/proc_fs.h linux-2.6.32.42/include/linux/proc_fs.h
56856 --- linux-2.6.32.42/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
56857 +++ linux-2.6.32.42/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
56858 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56859 return proc_create_data(name, mode, parent, proc_fops, NULL);
56860 }
56861
56862 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56863 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56864 +{
56865 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56866 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56867 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56868 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56869 +#else
56870 + return proc_create_data(name, mode, parent, proc_fops, NULL);
56871 +#endif
56872 +}
56873 +
56874 +
56875 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56876 mode_t mode, struct proc_dir_entry *base,
56877 read_proc_t *read_proc, void * data)
56878 diff -urNp linux-2.6.32.42/include/linux/ptrace.h linux-2.6.32.42/include/linux/ptrace.h
56879 --- linux-2.6.32.42/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
56880 +++ linux-2.6.32.42/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
56881 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
56882 extern void exit_ptrace(struct task_struct *tracer);
56883 #define PTRACE_MODE_READ 1
56884 #define PTRACE_MODE_ATTACH 2
56885 -/* Returns 0 on success, -errno on denial. */
56886 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56887 /* Returns true on success, false on denial. */
56888 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56889 +/* Returns true on success, false on denial. */
56890 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56891
56892 static inline int ptrace_reparented(struct task_struct *child)
56893 {
56894 diff -urNp linux-2.6.32.42/include/linux/random.h linux-2.6.32.42/include/linux/random.h
56895 --- linux-2.6.32.42/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
56896 +++ linux-2.6.32.42/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
56897 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
56898 u32 random32(void);
56899 void srandom32(u32 seed);
56900
56901 +static inline unsigned long pax_get_random_long(void)
56902 +{
56903 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56904 +}
56905 +
56906 #endif /* __KERNEL___ */
56907
56908 #endif /* _LINUX_RANDOM_H */
56909 diff -urNp linux-2.6.32.42/include/linux/reboot.h linux-2.6.32.42/include/linux/reboot.h
56910 --- linux-2.6.32.42/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
56911 +++ linux-2.6.32.42/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
56912 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56913 * Architecture-specific implementations of sys_reboot commands.
56914 */
56915
56916 -extern void machine_restart(char *cmd);
56917 -extern void machine_halt(void);
56918 -extern void machine_power_off(void);
56919 +extern void machine_restart(char *cmd) __noreturn;
56920 +extern void machine_halt(void) __noreturn;
56921 +extern void machine_power_off(void) __noreturn;
56922
56923 extern void machine_shutdown(void);
56924 struct pt_regs;
56925 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56926 */
56927
56928 extern void kernel_restart_prepare(char *cmd);
56929 -extern void kernel_restart(char *cmd);
56930 -extern void kernel_halt(void);
56931 -extern void kernel_power_off(void);
56932 +extern void kernel_restart(char *cmd) __noreturn;
56933 +extern void kernel_halt(void) __noreturn;
56934 +extern void kernel_power_off(void) __noreturn;
56935
56936 void ctrl_alt_del(void);
56937
56938 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
56939 * Emergency restart, callable from an interrupt handler.
56940 */
56941
56942 -extern void emergency_restart(void);
56943 +extern void emergency_restart(void) __noreturn;
56944 #include <asm/emergency-restart.h>
56945
56946 #endif
56947 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs.h linux-2.6.32.42/include/linux/reiserfs_fs.h
56948 --- linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
56949 +++ linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
56950 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
56951 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56952
56953 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56954 -#define get_generation(s) atomic_read (&fs_generation(s))
56955 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56956 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56957 #define __fs_changed(gen,s) (gen != get_generation (s))
56958 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
56959 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
56960 */
56961
56962 struct item_operations {
56963 - int (*bytes_number) (struct item_head * ih, int block_size);
56964 - void (*decrement_key) (struct cpu_key *);
56965 - int (*is_left_mergeable) (struct reiserfs_key * ih,
56966 + int (* const bytes_number) (struct item_head * ih, int block_size);
56967 + void (* const decrement_key) (struct cpu_key *);
56968 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
56969 unsigned long bsize);
56970 - void (*print_item) (struct item_head *, char *item);
56971 - void (*check_item) (struct item_head *, char *item);
56972 + void (* const print_item) (struct item_head *, char *item);
56973 + void (* const check_item) (struct item_head *, char *item);
56974
56975 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56976 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56977 int is_affected, int insert_size);
56978 - int (*check_left) (struct virtual_item * vi, int free,
56979 + int (* const check_left) (struct virtual_item * vi, int free,
56980 int start_skip, int end_skip);
56981 - int (*check_right) (struct virtual_item * vi, int free);
56982 - int (*part_size) (struct virtual_item * vi, int from, int to);
56983 - int (*unit_num) (struct virtual_item * vi);
56984 - void (*print_vi) (struct virtual_item * vi);
56985 + int (* const check_right) (struct virtual_item * vi, int free);
56986 + int (* const part_size) (struct virtual_item * vi, int from, int to);
56987 + int (* const unit_num) (struct virtual_item * vi);
56988 + void (* const print_vi) (struct virtual_item * vi);
56989 };
56990
56991 -extern struct item_operations *item_ops[TYPE_ANY + 1];
56992 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
56993
56994 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
56995 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
56996 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs_sb.h linux-2.6.32.42/include/linux/reiserfs_fs_sb.h
56997 --- linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
56998 +++ linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
56999 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
57000 /* Comment? -Hans */
57001 wait_queue_head_t s_wait;
57002 /* To be obsoleted soon by per buffer seals.. -Hans */
57003 - atomic_t s_generation_counter; // increased by one every time the
57004 + atomic_unchecked_t s_generation_counter; // increased by one every time the
57005 // tree gets re-balanced
57006 unsigned long s_properties; /* File system properties. Currently holds
57007 on-disk FS format */
57008 diff -urNp linux-2.6.32.42/include/linux/sched.h linux-2.6.32.42/include/linux/sched.h
57009 --- linux-2.6.32.42/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
57010 +++ linux-2.6.32.42/include/linux/sched.h 2011-06-04 20:42:54.000000000 -0400
57011 @@ -101,6 +101,7 @@ struct bio;
57012 struct fs_struct;
57013 struct bts_context;
57014 struct perf_event_context;
57015 +struct linux_binprm;
57016
57017 /*
57018 * List of flags we want to share for kernel threads,
57019 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
57020 extern signed long schedule_timeout_uninterruptible(signed long timeout);
57021 asmlinkage void __schedule(void);
57022 asmlinkage void schedule(void);
57023 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
57024 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
57025
57026 struct nsproxy;
57027 struct user_namespace;
57028 @@ -371,9 +372,12 @@ struct user_namespace;
57029 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
57030
57031 extern int sysctl_max_map_count;
57032 +extern unsigned long sysctl_heap_stack_gap;
57033
57034 #include <linux/aio.h>
57035
57036 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
57037 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57038 extern unsigned long
57039 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
57040 unsigned long, unsigned long);
57041 @@ -666,6 +670,16 @@ struct signal_struct {
57042 struct tty_audit_buf *tty_audit_buf;
57043 #endif
57044
57045 +#ifdef CONFIG_GRKERNSEC
57046 + u32 curr_ip;
57047 + u32 saved_ip;
57048 + u32 gr_saddr;
57049 + u32 gr_daddr;
57050 + u16 gr_sport;
57051 + u16 gr_dport;
57052 + u8 used_accept:1;
57053 +#endif
57054 +
57055 int oom_adj; /* OOM kill score adjustment (bit shift) */
57056 };
57057
57058 @@ -723,6 +737,11 @@ struct user_struct {
57059 struct key *session_keyring; /* UID's default session keyring */
57060 #endif
57061
57062 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57063 + unsigned int banned;
57064 + unsigned long ban_expires;
57065 +#endif
57066 +
57067 /* Hash table maintenance information */
57068 struct hlist_node uidhash_node;
57069 uid_t uid;
57070 @@ -1328,8 +1347,8 @@ struct task_struct {
57071 struct list_head thread_group;
57072
57073 struct completion *vfork_done; /* for vfork() */
57074 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
57075 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57076 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
57077 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57078
57079 cputime_t utime, stime, utimescaled, stimescaled;
57080 cputime_t gtime;
57081 @@ -1343,16 +1362,6 @@ struct task_struct {
57082 struct task_cputime cputime_expires;
57083 struct list_head cpu_timers[3];
57084
57085 -/* process credentials */
57086 - const struct cred *real_cred; /* objective and real subjective task
57087 - * credentials (COW) */
57088 - const struct cred *cred; /* effective (overridable) subjective task
57089 - * credentials (COW) */
57090 - struct mutex cred_guard_mutex; /* guard against foreign influences on
57091 - * credential calculations
57092 - * (notably. ptrace) */
57093 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57094 -
57095 char comm[TASK_COMM_LEN]; /* executable name excluding path
57096 - access with [gs]et_task_comm (which lock
57097 it with task_lock())
57098 @@ -1369,6 +1378,10 @@ struct task_struct {
57099 #endif
57100 /* CPU-specific state of this task */
57101 struct thread_struct thread;
57102 +/* thread_info moved to task_struct */
57103 +#ifdef CONFIG_X86
57104 + struct thread_info tinfo;
57105 +#endif
57106 /* filesystem information */
57107 struct fs_struct *fs;
57108 /* open file information */
57109 @@ -1436,6 +1449,15 @@ struct task_struct {
57110 int hardirq_context;
57111 int softirq_context;
57112 #endif
57113 +
57114 +/* process credentials */
57115 + const struct cred *real_cred; /* objective and real subjective task
57116 + * credentials (COW) */
57117 + struct mutex cred_guard_mutex; /* guard against foreign influences on
57118 + * credential calculations
57119 + * (notably. ptrace) */
57120 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57121 +
57122 #ifdef CONFIG_LOCKDEP
57123 # define MAX_LOCK_DEPTH 48UL
57124 u64 curr_chain_key;
57125 @@ -1456,6 +1478,9 @@ struct task_struct {
57126
57127 struct backing_dev_info *backing_dev_info;
57128
57129 + const struct cred *cred; /* effective (overridable) subjective task
57130 + * credentials (COW) */
57131 +
57132 struct io_context *io_context;
57133
57134 unsigned long ptrace_message;
57135 @@ -1519,6 +1544,21 @@ struct task_struct {
57136 unsigned long default_timer_slack_ns;
57137
57138 struct list_head *scm_work_list;
57139 +
57140 +#ifdef CONFIG_GRKERNSEC
57141 + /* grsecurity */
57142 + struct dentry *gr_chroot_dentry;
57143 + struct acl_subject_label *acl;
57144 + struct acl_role_label *role;
57145 + struct file *exec_file;
57146 + u16 acl_role_id;
57147 + /* is this the task that authenticated to the special role */
57148 + u8 acl_sp_role;
57149 + u8 is_writable;
57150 + u8 brute;
57151 + u8 gr_is_chrooted;
57152 +#endif
57153 +
57154 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57155 /* Index of current stored adress in ret_stack */
57156 int curr_ret_stack;
57157 @@ -1542,6 +1582,57 @@ struct task_struct {
57158 #endif /* CONFIG_TRACING */
57159 };
57160
57161 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57162 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57163 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57164 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57165 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57166 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57167 +
57168 +#ifdef CONFIG_PAX_SOFTMODE
57169 +extern unsigned int pax_softmode;
57170 +#endif
57171 +
57172 +extern int pax_check_flags(unsigned long *);
57173 +
57174 +/* if tsk != current then task_lock must be held on it */
57175 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57176 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
57177 +{
57178 + if (likely(tsk->mm))
57179 + return tsk->mm->pax_flags;
57180 + else
57181 + return 0UL;
57182 +}
57183 +
57184 +/* if tsk != current then task_lock must be held on it */
57185 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57186 +{
57187 + if (likely(tsk->mm)) {
57188 + tsk->mm->pax_flags = flags;
57189 + return 0;
57190 + }
57191 + return -EINVAL;
57192 +}
57193 +#endif
57194 +
57195 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57196 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
57197 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57198 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57199 +#endif
57200 +
57201 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57202 +void pax_report_insns(void *pc, void *sp);
57203 +void pax_report_refcount_overflow(struct pt_regs *regs);
57204 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
57205 +
57206 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57207 +extern void pax_track_stack(void);
57208 +#else
57209 +static inline void pax_track_stack(void) {}
57210 +#endif
57211 +
57212 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57213 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
57214
57215 @@ -1978,7 +2069,9 @@ void yield(void);
57216 extern struct exec_domain default_exec_domain;
57217
57218 union thread_union {
57219 +#ifndef CONFIG_X86
57220 struct thread_info thread_info;
57221 +#endif
57222 unsigned long stack[THREAD_SIZE/sizeof(long)];
57223 };
57224
57225 @@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
57226 extern void exit_itimers(struct signal_struct *);
57227 extern void flush_itimer_signals(void);
57228
57229 -extern NORET_TYPE void do_group_exit(int);
57230 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57231
57232 extern void daemonize(const char *, ...);
57233 extern int allow_signal(int);
57234 @@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
57235
57236 #endif
57237
57238 -static inline int object_is_on_stack(void *obj)
57239 +static inline int object_starts_on_stack(void *obj)
57240 {
57241 - void *stack = task_stack_page(current);
57242 + const void *stack = task_stack_page(current);
57243
57244 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57245 }
57246
57247 +#ifdef CONFIG_PAX_USERCOPY
57248 +extern int object_is_on_stack(const void *obj, unsigned long len);
57249 +#endif
57250 +
57251 extern void thread_info_cache_init(void);
57252
57253 #ifdef CONFIG_DEBUG_STACK_USAGE
57254 diff -urNp linux-2.6.32.42/include/linux/screen_info.h linux-2.6.32.42/include/linux/screen_info.h
57255 --- linux-2.6.32.42/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
57256 +++ linux-2.6.32.42/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
57257 @@ -42,7 +42,8 @@ struct screen_info {
57258 __u16 pages; /* 0x32 */
57259 __u16 vesa_attributes; /* 0x34 */
57260 __u32 capabilities; /* 0x36 */
57261 - __u8 _reserved[6]; /* 0x3a */
57262 + __u16 vesapm_size; /* 0x3a */
57263 + __u8 _reserved[4]; /* 0x3c */
57264 } __attribute__((packed));
57265
57266 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57267 diff -urNp linux-2.6.32.42/include/linux/security.h linux-2.6.32.42/include/linux/security.h
57268 --- linux-2.6.32.42/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57269 +++ linux-2.6.32.42/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57270 @@ -34,6 +34,7 @@
57271 #include <linux/key.h>
57272 #include <linux/xfrm.h>
57273 #include <linux/gfp.h>
57274 +#include <linux/grsecurity.h>
57275 #include <net/flow.h>
57276
57277 /* Maximum number of letters for an LSM name string */
57278 diff -urNp linux-2.6.32.42/include/linux/shm.h linux-2.6.32.42/include/linux/shm.h
57279 --- linux-2.6.32.42/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57280 +++ linux-2.6.32.42/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57281 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57282 pid_t shm_cprid;
57283 pid_t shm_lprid;
57284 struct user_struct *mlock_user;
57285 +#ifdef CONFIG_GRKERNSEC
57286 + time_t shm_createtime;
57287 + pid_t shm_lapid;
57288 +#endif
57289 };
57290
57291 /* shm_mode upper byte flags */
57292 diff -urNp linux-2.6.32.42/include/linux/skbuff.h linux-2.6.32.42/include/linux/skbuff.h
57293 --- linux-2.6.32.42/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57294 +++ linux-2.6.32.42/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
57295 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57296 */
57297 static inline int skb_queue_empty(const struct sk_buff_head *list)
57298 {
57299 - return list->next == (struct sk_buff *)list;
57300 + return list->next == (const struct sk_buff *)list;
57301 }
57302
57303 /**
57304 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57305 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57306 const struct sk_buff *skb)
57307 {
57308 - return (skb->next == (struct sk_buff *) list);
57309 + return (skb->next == (const struct sk_buff *) list);
57310 }
57311
57312 /**
57313 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57314 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57315 const struct sk_buff *skb)
57316 {
57317 - return (skb->prev == (struct sk_buff *) list);
57318 + return (skb->prev == (const struct sk_buff *) list);
57319 }
57320
57321 /**
57322 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57323 * headroom, you should not reduce this.
57324 */
57325 #ifndef NET_SKB_PAD
57326 -#define NET_SKB_PAD 32
57327 +#define NET_SKB_PAD (_AC(32,U))
57328 #endif
57329
57330 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57331 diff -urNp linux-2.6.32.42/include/linux/slab_def.h linux-2.6.32.42/include/linux/slab_def.h
57332 --- linux-2.6.32.42/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57333 +++ linux-2.6.32.42/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57334 @@ -69,10 +69,10 @@ struct kmem_cache {
57335 unsigned long node_allocs;
57336 unsigned long node_frees;
57337 unsigned long node_overflow;
57338 - atomic_t allochit;
57339 - atomic_t allocmiss;
57340 - atomic_t freehit;
57341 - atomic_t freemiss;
57342 + atomic_unchecked_t allochit;
57343 + atomic_unchecked_t allocmiss;
57344 + atomic_unchecked_t freehit;
57345 + atomic_unchecked_t freemiss;
57346
57347 /*
57348 * If debugging is enabled, then the allocator can add additional
57349 diff -urNp linux-2.6.32.42/include/linux/slab.h linux-2.6.32.42/include/linux/slab.h
57350 --- linux-2.6.32.42/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57351 +++ linux-2.6.32.42/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57352 @@ -11,12 +11,20 @@
57353
57354 #include <linux/gfp.h>
57355 #include <linux/types.h>
57356 +#include <linux/err.h>
57357
57358 /*
57359 * Flags to pass to kmem_cache_create().
57360 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57361 */
57362 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57363 +
57364 +#ifdef CONFIG_PAX_USERCOPY
57365 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57366 +#else
57367 +#define SLAB_USERCOPY 0x00000000UL
57368 +#endif
57369 +
57370 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57371 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57372 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57373 @@ -82,10 +90,13 @@
57374 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57375 * Both make kfree a no-op.
57376 */
57377 -#define ZERO_SIZE_PTR ((void *)16)
57378 +#define ZERO_SIZE_PTR \
57379 +({ \
57380 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57381 + (void *)(-MAX_ERRNO-1L); \
57382 +})
57383
57384 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57385 - (unsigned long)ZERO_SIZE_PTR)
57386 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57387
57388 /*
57389 * struct kmem_cache related prototypes
57390 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57391 void kfree(const void *);
57392 void kzfree(const void *);
57393 size_t ksize(const void *);
57394 +void check_object_size(const void *ptr, unsigned long n, bool to);
57395
57396 /*
57397 * Allocator specific definitions. These are mainly used to establish optimized
57398 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57399
57400 void __init kmem_cache_init_late(void);
57401
57402 +#define kmalloc(x, y) \
57403 +({ \
57404 + void *___retval; \
57405 + intoverflow_t ___x = (intoverflow_t)x; \
57406 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57407 + ___retval = NULL; \
57408 + else \
57409 + ___retval = kmalloc((size_t)___x, (y)); \
57410 + ___retval; \
57411 +})
57412 +
57413 +#define kmalloc_node(x, y, z) \
57414 +({ \
57415 + void *___retval; \
57416 + intoverflow_t ___x = (intoverflow_t)x; \
57417 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57418 + ___retval = NULL; \
57419 + else \
57420 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57421 + ___retval; \
57422 +})
57423 +
57424 +#define kzalloc(x, y) \
57425 +({ \
57426 + void *___retval; \
57427 + intoverflow_t ___x = (intoverflow_t)x; \
57428 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57429 + ___retval = NULL; \
57430 + else \
57431 + ___retval = kzalloc((size_t)___x, (y)); \
57432 + ___retval; \
57433 +})
57434 +
57435 #endif /* _LINUX_SLAB_H */
57436 diff -urNp linux-2.6.32.42/include/linux/slub_def.h linux-2.6.32.42/include/linux/slub_def.h
57437 --- linux-2.6.32.42/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57438 +++ linux-2.6.32.42/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57439 @@ -86,7 +86,7 @@ struct kmem_cache {
57440 struct kmem_cache_order_objects max;
57441 struct kmem_cache_order_objects min;
57442 gfp_t allocflags; /* gfp flags to use on each alloc */
57443 - int refcount; /* Refcount for slab cache destroy */
57444 + atomic_t refcount; /* Refcount for slab cache destroy */
57445 void (*ctor)(void *);
57446 int inuse; /* Offset to metadata */
57447 int align; /* Alignment */
57448 diff -urNp linux-2.6.32.42/include/linux/sonet.h linux-2.6.32.42/include/linux/sonet.h
57449 --- linux-2.6.32.42/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57450 +++ linux-2.6.32.42/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57451 @@ -61,7 +61,7 @@ struct sonet_stats {
57452 #include <asm/atomic.h>
57453
57454 struct k_sonet_stats {
57455 -#define __HANDLE_ITEM(i) atomic_t i
57456 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57457 __SONET_ITEMS
57458 #undef __HANDLE_ITEM
57459 };
57460 diff -urNp linux-2.6.32.42/include/linux/sunrpc/clnt.h linux-2.6.32.42/include/linux/sunrpc/clnt.h
57461 --- linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57462 +++ linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57463 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57464 {
57465 switch (sap->sa_family) {
57466 case AF_INET:
57467 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57468 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57469 case AF_INET6:
57470 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57471 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57472 }
57473 return 0;
57474 }
57475 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57476 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57477 const struct sockaddr *src)
57478 {
57479 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57480 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57481 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57482
57483 dsin->sin_family = ssin->sin_family;
57484 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57485 if (sa->sa_family != AF_INET6)
57486 return 0;
57487
57488 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57489 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57490 }
57491
57492 #endif /* __KERNEL__ */
57493 diff -urNp linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h
57494 --- linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57495 +++ linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57496 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57497 extern unsigned int svcrdma_max_requests;
57498 extern unsigned int svcrdma_max_req_size;
57499
57500 -extern atomic_t rdma_stat_recv;
57501 -extern atomic_t rdma_stat_read;
57502 -extern atomic_t rdma_stat_write;
57503 -extern atomic_t rdma_stat_sq_starve;
57504 -extern atomic_t rdma_stat_rq_starve;
57505 -extern atomic_t rdma_stat_rq_poll;
57506 -extern atomic_t rdma_stat_rq_prod;
57507 -extern atomic_t rdma_stat_sq_poll;
57508 -extern atomic_t rdma_stat_sq_prod;
57509 +extern atomic_unchecked_t rdma_stat_recv;
57510 +extern atomic_unchecked_t rdma_stat_read;
57511 +extern atomic_unchecked_t rdma_stat_write;
57512 +extern atomic_unchecked_t rdma_stat_sq_starve;
57513 +extern atomic_unchecked_t rdma_stat_rq_starve;
57514 +extern atomic_unchecked_t rdma_stat_rq_poll;
57515 +extern atomic_unchecked_t rdma_stat_rq_prod;
57516 +extern atomic_unchecked_t rdma_stat_sq_poll;
57517 +extern atomic_unchecked_t rdma_stat_sq_prod;
57518
57519 #define RPCRDMA_VERSION 1
57520
57521 diff -urNp linux-2.6.32.42/include/linux/suspend.h linux-2.6.32.42/include/linux/suspend.h
57522 --- linux-2.6.32.42/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57523 +++ linux-2.6.32.42/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57524 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57525 * which require special recovery actions in that situation.
57526 */
57527 struct platform_suspend_ops {
57528 - int (*valid)(suspend_state_t state);
57529 - int (*begin)(suspend_state_t state);
57530 - int (*prepare)(void);
57531 - int (*prepare_late)(void);
57532 - int (*enter)(suspend_state_t state);
57533 - void (*wake)(void);
57534 - void (*finish)(void);
57535 - void (*end)(void);
57536 - void (*recover)(void);
57537 + int (* const valid)(suspend_state_t state);
57538 + int (* const begin)(suspend_state_t state);
57539 + int (* const prepare)(void);
57540 + int (* const prepare_late)(void);
57541 + int (* const enter)(suspend_state_t state);
57542 + void (* const wake)(void);
57543 + void (* const finish)(void);
57544 + void (* const end)(void);
57545 + void (* const recover)(void);
57546 };
57547
57548 #ifdef CONFIG_SUSPEND
57549 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
57550 * suspend_set_ops - set platform dependent suspend operations
57551 * @ops: The new suspend operations to set.
57552 */
57553 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
57554 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57555 extern int suspend_valid_only_mem(suspend_state_t state);
57556
57557 /**
57558 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57559 #else /* !CONFIG_SUSPEND */
57560 #define suspend_valid_only_mem NULL
57561
57562 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57563 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57564 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57565 #endif /* !CONFIG_SUSPEND */
57566
57567 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57568 * platforms which require special recovery actions in that situation.
57569 */
57570 struct platform_hibernation_ops {
57571 - int (*begin)(void);
57572 - void (*end)(void);
57573 - int (*pre_snapshot)(void);
57574 - void (*finish)(void);
57575 - int (*prepare)(void);
57576 - int (*enter)(void);
57577 - void (*leave)(void);
57578 - int (*pre_restore)(void);
57579 - void (*restore_cleanup)(void);
57580 - void (*recover)(void);
57581 + int (* const begin)(void);
57582 + void (* const end)(void);
57583 + int (* const pre_snapshot)(void);
57584 + void (* const finish)(void);
57585 + int (* const prepare)(void);
57586 + int (* const enter)(void);
57587 + void (* const leave)(void);
57588 + int (* const pre_restore)(void);
57589 + void (* const restore_cleanup)(void);
57590 + void (* const recover)(void);
57591 };
57592
57593 #ifdef CONFIG_HIBERNATION
57594 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57595 extern void swsusp_unset_page_free(struct page *);
57596 extern unsigned long get_safe_page(gfp_t gfp_mask);
57597
57598 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57599 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57600 extern int hibernate(void);
57601 extern bool system_entering_hibernation(void);
57602 #else /* CONFIG_HIBERNATION */
57603 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57604 static inline void swsusp_set_page_free(struct page *p) {}
57605 static inline void swsusp_unset_page_free(struct page *p) {}
57606
57607 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57608 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57609 static inline int hibernate(void) { return -ENOSYS; }
57610 static inline bool system_entering_hibernation(void) { return false; }
57611 #endif /* CONFIG_HIBERNATION */
57612 diff -urNp linux-2.6.32.42/include/linux/sysctl.h linux-2.6.32.42/include/linux/sysctl.h
57613 --- linux-2.6.32.42/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57614 +++ linux-2.6.32.42/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57615 @@ -164,7 +164,11 @@ enum
57616 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57617 };
57618
57619 -
57620 +#ifdef CONFIG_PAX_SOFTMODE
57621 +enum {
57622 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57623 +};
57624 +#endif
57625
57626 /* CTL_VM names: */
57627 enum
57628 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57629
57630 extern int proc_dostring(struct ctl_table *, int,
57631 void __user *, size_t *, loff_t *);
57632 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57633 + void __user *, size_t *, loff_t *);
57634 extern int proc_dointvec(struct ctl_table *, int,
57635 void __user *, size_t *, loff_t *);
57636 extern int proc_dointvec_minmax(struct ctl_table *, int,
57637 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
57638
57639 extern ctl_handler sysctl_data;
57640 extern ctl_handler sysctl_string;
57641 +extern ctl_handler sysctl_string_modpriv;
57642 extern ctl_handler sysctl_intvec;
57643 extern ctl_handler sysctl_jiffies;
57644 extern ctl_handler sysctl_ms_jiffies;
57645 diff -urNp linux-2.6.32.42/include/linux/sysfs.h linux-2.6.32.42/include/linux/sysfs.h
57646 --- linux-2.6.32.42/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
57647 +++ linux-2.6.32.42/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
57648 @@ -75,8 +75,8 @@ struct bin_attribute {
57649 };
57650
57651 struct sysfs_ops {
57652 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
57653 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
57654 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
57655 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
57656 };
57657
57658 struct sysfs_dirent;
57659 diff -urNp linux-2.6.32.42/include/linux/thread_info.h linux-2.6.32.42/include/linux/thread_info.h
57660 --- linux-2.6.32.42/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
57661 +++ linux-2.6.32.42/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
57662 @@ -23,7 +23,7 @@ struct restart_block {
57663 };
57664 /* For futex_wait and futex_wait_requeue_pi */
57665 struct {
57666 - u32 *uaddr;
57667 + u32 __user *uaddr;
57668 u32 val;
57669 u32 flags;
57670 u32 bitset;
57671 diff -urNp linux-2.6.32.42/include/linux/tty.h linux-2.6.32.42/include/linux/tty.h
57672 --- linux-2.6.32.42/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
57673 +++ linux-2.6.32.42/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
57674 @@ -13,6 +13,7 @@
57675 #include <linux/tty_driver.h>
57676 #include <linux/tty_ldisc.h>
57677 #include <linux/mutex.h>
57678 +#include <linux/poll.h>
57679
57680 #include <asm/system.h>
57681
57682 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
57683 extern dev_t tty_devnum(struct tty_struct *tty);
57684 extern void proc_clear_tty(struct task_struct *p);
57685 extern struct tty_struct *get_current_tty(void);
57686 -extern void tty_default_fops(struct file_operations *fops);
57687 extern struct tty_struct *alloc_tty_struct(void);
57688 extern void free_tty_struct(struct tty_struct *tty);
57689 extern void initialize_tty_struct(struct tty_struct *tty,
57690 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
57691 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
57692 extern void tty_ldisc_enable(struct tty_struct *tty);
57693
57694 +/* tty_io.c */
57695 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
57696 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
57697 +extern unsigned int tty_poll(struct file *, poll_table *);
57698 +#ifdef CONFIG_COMPAT
57699 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
57700 + unsigned long arg);
57701 +#else
57702 +#define tty_compat_ioctl NULL
57703 +#endif
57704 +extern int tty_release(struct inode *, struct file *);
57705 +extern int tty_fasync(int fd, struct file *filp, int on);
57706
57707 /* n_tty.c */
57708 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
57709 diff -urNp linux-2.6.32.42/include/linux/tty_ldisc.h linux-2.6.32.42/include/linux/tty_ldisc.h
57710 --- linux-2.6.32.42/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
57711 +++ linux-2.6.32.42/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
57712 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
57713
57714 struct module *owner;
57715
57716 - int refcount;
57717 + atomic_t refcount;
57718 };
57719
57720 struct tty_ldisc {
57721 diff -urNp linux-2.6.32.42/include/linux/types.h linux-2.6.32.42/include/linux/types.h
57722 --- linux-2.6.32.42/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
57723 +++ linux-2.6.32.42/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
57724 @@ -191,10 +191,26 @@ typedef struct {
57725 volatile int counter;
57726 } atomic_t;
57727
57728 +#ifdef CONFIG_PAX_REFCOUNT
57729 +typedef struct {
57730 + volatile int counter;
57731 +} atomic_unchecked_t;
57732 +#else
57733 +typedef atomic_t atomic_unchecked_t;
57734 +#endif
57735 +
57736 #ifdef CONFIG_64BIT
57737 typedef struct {
57738 volatile long counter;
57739 } atomic64_t;
57740 +
57741 +#ifdef CONFIG_PAX_REFCOUNT
57742 +typedef struct {
57743 + volatile long counter;
57744 +} atomic64_unchecked_t;
57745 +#else
57746 +typedef atomic64_t atomic64_unchecked_t;
57747 +#endif
57748 #endif
57749
57750 struct ustat {
57751 diff -urNp linux-2.6.32.42/include/linux/uaccess.h linux-2.6.32.42/include/linux/uaccess.h
57752 --- linux-2.6.32.42/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
57753 +++ linux-2.6.32.42/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
57754 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57755 long ret; \
57756 mm_segment_t old_fs = get_fs(); \
57757 \
57758 - set_fs(KERNEL_DS); \
57759 pagefault_disable(); \
57760 + set_fs(KERNEL_DS); \
57761 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57762 - pagefault_enable(); \
57763 set_fs(old_fs); \
57764 + pagefault_enable(); \
57765 ret; \
57766 })
57767
57768 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
57769 * Safely read from address @src to the buffer at @dst. If a kernel fault
57770 * happens, handle that and return -EFAULT.
57771 */
57772 -extern long probe_kernel_read(void *dst, void *src, size_t size);
57773 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
57774
57775 /*
57776 * probe_kernel_write(): safely attempt to write to a location
57777 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
57778 * Safely write to address @dst from the buffer at @src. If a kernel fault
57779 * happens, handle that and return -EFAULT.
57780 */
57781 -extern long probe_kernel_write(void *dst, void *src, size_t size);
57782 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
57783
57784 #endif /* __LINUX_UACCESS_H__ */
57785 diff -urNp linux-2.6.32.42/include/linux/unaligned/access_ok.h linux-2.6.32.42/include/linux/unaligned/access_ok.h
57786 --- linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
57787 +++ linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
57788 @@ -6,32 +6,32 @@
57789
57790 static inline u16 get_unaligned_le16(const void *p)
57791 {
57792 - return le16_to_cpup((__le16 *)p);
57793 + return le16_to_cpup((const __le16 *)p);
57794 }
57795
57796 static inline u32 get_unaligned_le32(const void *p)
57797 {
57798 - return le32_to_cpup((__le32 *)p);
57799 + return le32_to_cpup((const __le32 *)p);
57800 }
57801
57802 static inline u64 get_unaligned_le64(const void *p)
57803 {
57804 - return le64_to_cpup((__le64 *)p);
57805 + return le64_to_cpup((const __le64 *)p);
57806 }
57807
57808 static inline u16 get_unaligned_be16(const void *p)
57809 {
57810 - return be16_to_cpup((__be16 *)p);
57811 + return be16_to_cpup((const __be16 *)p);
57812 }
57813
57814 static inline u32 get_unaligned_be32(const void *p)
57815 {
57816 - return be32_to_cpup((__be32 *)p);
57817 + return be32_to_cpup((const __be32 *)p);
57818 }
57819
57820 static inline u64 get_unaligned_be64(const void *p)
57821 {
57822 - return be64_to_cpup((__be64 *)p);
57823 + return be64_to_cpup((const __be64 *)p);
57824 }
57825
57826 static inline void put_unaligned_le16(u16 val, void *p)
57827 diff -urNp linux-2.6.32.42/include/linux/vmalloc.h linux-2.6.32.42/include/linux/vmalloc.h
57828 --- linux-2.6.32.42/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
57829 +++ linux-2.6.32.42/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
57830 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57831 #define VM_MAP 0x00000004 /* vmap()ed pages */
57832 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57833 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57834 +
57835 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57836 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57837 +#endif
57838 +
57839 /* bits [20..32] reserved for arch specific ioremap internals */
57840
57841 /*
57842 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
57843
57844 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
57845
57846 +#define vmalloc(x) \
57847 +({ \
57848 + void *___retval; \
57849 + intoverflow_t ___x = (intoverflow_t)x; \
57850 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57851 + ___retval = NULL; \
57852 + else \
57853 + ___retval = vmalloc((unsigned long)___x); \
57854 + ___retval; \
57855 +})
57856 +
57857 +#define __vmalloc(x, y, z) \
57858 +({ \
57859 + void *___retval; \
57860 + intoverflow_t ___x = (intoverflow_t)x; \
57861 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57862 + ___retval = NULL; \
57863 + else \
57864 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57865 + ___retval; \
57866 +})
57867 +
57868 +#define vmalloc_user(x) \
57869 +({ \
57870 + void *___retval; \
57871 + intoverflow_t ___x = (intoverflow_t)x; \
57872 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57873 + ___retval = NULL; \
57874 + else \
57875 + ___retval = vmalloc_user((unsigned long)___x); \
57876 + ___retval; \
57877 +})
57878 +
57879 +#define vmalloc_exec(x) \
57880 +({ \
57881 + void *___retval; \
57882 + intoverflow_t ___x = (intoverflow_t)x; \
57883 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57884 + ___retval = NULL; \
57885 + else \
57886 + ___retval = vmalloc_exec((unsigned long)___x); \
57887 + ___retval; \
57888 +})
57889 +
57890 +#define vmalloc_node(x, y) \
57891 +({ \
57892 + void *___retval; \
57893 + intoverflow_t ___x = (intoverflow_t)x; \
57894 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57895 + ___retval = NULL; \
57896 + else \
57897 + ___retval = vmalloc_node((unsigned long)___x, (y));\
57898 + ___retval; \
57899 +})
57900 +
57901 +#define vmalloc_32(x) \
57902 +({ \
57903 + void *___retval; \
57904 + intoverflow_t ___x = (intoverflow_t)x; \
57905 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57906 + ___retval = NULL; \
57907 + else \
57908 + ___retval = vmalloc_32((unsigned long)___x); \
57909 + ___retval; \
57910 +})
57911 +
57912 +#define vmalloc_32_user(x) \
57913 +({ \
57914 + void *___retval; \
57915 + intoverflow_t ___x = (intoverflow_t)x; \
57916 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57917 + ___retval = NULL; \
57918 + else \
57919 + ___retval = vmalloc_32_user((unsigned long)___x);\
57920 + ___retval; \
57921 +})
57922 +
57923 #endif /* _LINUX_VMALLOC_H */
57924 diff -urNp linux-2.6.32.42/include/linux/vmstat.h linux-2.6.32.42/include/linux/vmstat.h
57925 --- linux-2.6.32.42/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
57926 +++ linux-2.6.32.42/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
57927 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
57928 /*
57929 * Zone based page accounting with per cpu differentials.
57930 */
57931 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57932 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57933
57934 static inline void zone_page_state_add(long x, struct zone *zone,
57935 enum zone_stat_item item)
57936 {
57937 - atomic_long_add(x, &zone->vm_stat[item]);
57938 - atomic_long_add(x, &vm_stat[item]);
57939 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57940 + atomic_long_add_unchecked(x, &vm_stat[item]);
57941 }
57942
57943 static inline unsigned long global_page_state(enum zone_stat_item item)
57944 {
57945 - long x = atomic_long_read(&vm_stat[item]);
57946 + long x = atomic_long_read_unchecked(&vm_stat[item]);
57947 #ifdef CONFIG_SMP
57948 if (x < 0)
57949 x = 0;
57950 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
57951 static inline unsigned long zone_page_state(struct zone *zone,
57952 enum zone_stat_item item)
57953 {
57954 - long x = atomic_long_read(&zone->vm_stat[item]);
57955 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57956 #ifdef CONFIG_SMP
57957 if (x < 0)
57958 x = 0;
57959 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
57960 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57961 enum zone_stat_item item)
57962 {
57963 - long x = atomic_long_read(&zone->vm_stat[item]);
57964 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57965
57966 #ifdef CONFIG_SMP
57967 int cpu;
57968 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
57969
57970 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57971 {
57972 - atomic_long_inc(&zone->vm_stat[item]);
57973 - atomic_long_inc(&vm_stat[item]);
57974 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
57975 + atomic_long_inc_unchecked(&vm_stat[item]);
57976 }
57977
57978 static inline void __inc_zone_page_state(struct page *page,
57979 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
57980
57981 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57982 {
57983 - atomic_long_dec(&zone->vm_stat[item]);
57984 - atomic_long_dec(&vm_stat[item]);
57985 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
57986 + atomic_long_dec_unchecked(&vm_stat[item]);
57987 }
57988
57989 static inline void __dec_zone_page_state(struct page *page,
57990 diff -urNp linux-2.6.32.42/include/media/v4l2-device.h linux-2.6.32.42/include/media/v4l2-device.h
57991 --- linux-2.6.32.42/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
57992 +++ linux-2.6.32.42/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
57993 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
57994 this function returns 0. If the name ends with a digit (e.g. cx18),
57995 then the name will be set to cx18-0 since cx180 looks really odd. */
57996 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
57997 - atomic_t *instance);
57998 + atomic_unchecked_t *instance);
57999
58000 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
58001 Since the parent disappears this ensures that v4l2_dev doesn't have an
58002 diff -urNp linux-2.6.32.42/include/net/flow.h linux-2.6.32.42/include/net/flow.h
58003 --- linux-2.6.32.42/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
58004 +++ linux-2.6.32.42/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
58005 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
58006 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
58007 u8 dir, flow_resolve_t resolver);
58008 extern void flow_cache_flush(void);
58009 -extern atomic_t flow_cache_genid;
58010 +extern atomic_unchecked_t flow_cache_genid;
58011
58012 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
58013 {
58014 diff -urNp linux-2.6.32.42/include/net/inetpeer.h linux-2.6.32.42/include/net/inetpeer.h
58015 --- linux-2.6.32.42/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
58016 +++ linux-2.6.32.42/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
58017 @@ -24,7 +24,7 @@ struct inet_peer
58018 __u32 dtime; /* the time of last use of not
58019 * referenced entries */
58020 atomic_t refcnt;
58021 - atomic_t rid; /* Frag reception counter */
58022 + atomic_unchecked_t rid; /* Frag reception counter */
58023 __u32 tcp_ts;
58024 unsigned long tcp_ts_stamp;
58025 };
58026 diff -urNp linux-2.6.32.42/include/net/ip_vs.h linux-2.6.32.42/include/net/ip_vs.h
58027 --- linux-2.6.32.42/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
58028 +++ linux-2.6.32.42/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
58029 @@ -365,7 +365,7 @@ struct ip_vs_conn {
58030 struct ip_vs_conn *control; /* Master control connection */
58031 atomic_t n_control; /* Number of controlled ones */
58032 struct ip_vs_dest *dest; /* real server */
58033 - atomic_t in_pkts; /* incoming packet counter */
58034 + atomic_unchecked_t in_pkts; /* incoming packet counter */
58035
58036 /* packet transmitter for different forwarding methods. If it
58037 mangles the packet, it must return NF_DROP or better NF_STOLEN,
58038 @@ -466,7 +466,7 @@ struct ip_vs_dest {
58039 union nf_inet_addr addr; /* IP address of the server */
58040 __be16 port; /* port number of the server */
58041 volatile unsigned flags; /* dest status flags */
58042 - atomic_t conn_flags; /* flags to copy to conn */
58043 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
58044 atomic_t weight; /* server weight */
58045
58046 atomic_t refcnt; /* reference counter */
58047 diff -urNp linux-2.6.32.42/include/net/irda/ircomm_tty.h linux-2.6.32.42/include/net/irda/ircomm_tty.h
58048 --- linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
58049 +++ linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
58050 @@ -35,6 +35,7 @@
58051 #include <linux/termios.h>
58052 #include <linux/timer.h>
58053 #include <linux/tty.h> /* struct tty_struct */
58054 +#include <asm/local.h>
58055
58056 #include <net/irda/irias_object.h>
58057 #include <net/irda/ircomm_core.h>
58058 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58059 unsigned short close_delay;
58060 unsigned short closing_wait; /* time to wait before closing */
58061
58062 - int open_count;
58063 - int blocked_open; /* # of blocked opens */
58064 + local_t open_count;
58065 + local_t blocked_open; /* # of blocked opens */
58066
58067 /* Protect concurent access to :
58068 * o self->open_count
58069 diff -urNp linux-2.6.32.42/include/net/iucv/af_iucv.h linux-2.6.32.42/include/net/iucv/af_iucv.h
58070 --- linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
58071 +++ linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
58072 @@ -87,7 +87,7 @@ struct iucv_sock {
58073 struct iucv_sock_list {
58074 struct hlist_head head;
58075 rwlock_t lock;
58076 - atomic_t autobind_name;
58077 + atomic_unchecked_t autobind_name;
58078 };
58079
58080 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
58081 diff -urNp linux-2.6.32.42/include/net/neighbour.h linux-2.6.32.42/include/net/neighbour.h
58082 --- linux-2.6.32.42/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
58083 +++ linux-2.6.32.42/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
58084 @@ -125,12 +125,12 @@ struct neighbour
58085 struct neigh_ops
58086 {
58087 int family;
58088 - void (*solicit)(struct neighbour *, struct sk_buff*);
58089 - void (*error_report)(struct neighbour *, struct sk_buff*);
58090 - int (*output)(struct sk_buff*);
58091 - int (*connected_output)(struct sk_buff*);
58092 - int (*hh_output)(struct sk_buff*);
58093 - int (*queue_xmit)(struct sk_buff*);
58094 + void (* const solicit)(struct neighbour *, struct sk_buff*);
58095 + void (* const error_report)(struct neighbour *, struct sk_buff*);
58096 + int (* const output)(struct sk_buff*);
58097 + int (* const connected_output)(struct sk_buff*);
58098 + int (* const hh_output)(struct sk_buff*);
58099 + int (* const queue_xmit)(struct sk_buff*);
58100 };
58101
58102 struct pneigh_entry
58103 diff -urNp linux-2.6.32.42/include/net/netlink.h linux-2.6.32.42/include/net/netlink.h
58104 --- linux-2.6.32.42/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
58105 +++ linux-2.6.32.42/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
58106 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
58107 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
58108 {
58109 if (mark)
58110 - skb_trim(skb, (unsigned char *) mark - skb->data);
58111 + skb_trim(skb, (const unsigned char *) mark - skb->data);
58112 }
58113
58114 /**
58115 diff -urNp linux-2.6.32.42/include/net/netns/ipv4.h linux-2.6.32.42/include/net/netns/ipv4.h
58116 --- linux-2.6.32.42/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
58117 +++ linux-2.6.32.42/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
58118 @@ -54,7 +54,7 @@ struct netns_ipv4 {
58119 int current_rt_cache_rebuild_count;
58120
58121 struct timer_list rt_secret_timer;
58122 - atomic_t rt_genid;
58123 + atomic_unchecked_t rt_genid;
58124
58125 #ifdef CONFIG_IP_MROUTE
58126 struct sock *mroute_sk;
58127 diff -urNp linux-2.6.32.42/include/net/sctp/sctp.h linux-2.6.32.42/include/net/sctp/sctp.h
58128 --- linux-2.6.32.42/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
58129 +++ linux-2.6.32.42/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
58130 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
58131
58132 #else /* SCTP_DEBUG */
58133
58134 -#define SCTP_DEBUG_PRINTK(whatever...)
58135 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
58136 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
58137 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
58138 #define SCTP_ENABLE_DEBUG
58139 #define SCTP_DISABLE_DEBUG
58140 #define SCTP_ASSERT(expr, str, func)
58141 diff -urNp linux-2.6.32.42/include/net/sock.h linux-2.6.32.42/include/net/sock.h
58142 --- linux-2.6.32.42/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
58143 +++ linux-2.6.32.42/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
58144 @@ -272,7 +272,7 @@ struct sock {
58145 rwlock_t sk_callback_lock;
58146 int sk_err,
58147 sk_err_soft;
58148 - atomic_t sk_drops;
58149 + atomic_unchecked_t sk_drops;
58150 unsigned short sk_ack_backlog;
58151 unsigned short sk_max_ack_backlog;
58152 __u32 sk_priority;
58153 diff -urNp linux-2.6.32.42/include/net/tcp.h linux-2.6.32.42/include/net/tcp.h
58154 --- linux-2.6.32.42/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
58155 +++ linux-2.6.32.42/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
58156 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
58157 struct tcp_seq_afinfo {
58158 char *name;
58159 sa_family_t family;
58160 + /* cannot be const */
58161 struct file_operations seq_fops;
58162 struct seq_operations seq_ops;
58163 };
58164 diff -urNp linux-2.6.32.42/include/net/udp.h linux-2.6.32.42/include/net/udp.h
58165 --- linux-2.6.32.42/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
58166 +++ linux-2.6.32.42/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
58167 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
58168 char *name;
58169 sa_family_t family;
58170 struct udp_table *udp_table;
58171 + /* cannot be const */
58172 struct file_operations seq_fops;
58173 struct seq_operations seq_ops;
58174 };
58175 diff -urNp linux-2.6.32.42/include/scsi/scsi_device.h linux-2.6.32.42/include/scsi/scsi_device.h
58176 --- linux-2.6.32.42/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
58177 +++ linux-2.6.32.42/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
58178 @@ -156,9 +156,9 @@ struct scsi_device {
58179 unsigned int max_device_blocked; /* what device_blocked counts down from */
58180 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58181
58182 - atomic_t iorequest_cnt;
58183 - atomic_t iodone_cnt;
58184 - atomic_t ioerr_cnt;
58185 + atomic_unchecked_t iorequest_cnt;
58186 + atomic_unchecked_t iodone_cnt;
58187 + atomic_unchecked_t ioerr_cnt;
58188
58189 struct device sdev_gendev,
58190 sdev_dev;
58191 diff -urNp linux-2.6.32.42/include/sound/ac97_codec.h linux-2.6.32.42/include/sound/ac97_codec.h
58192 --- linux-2.6.32.42/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
58193 +++ linux-2.6.32.42/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
58194 @@ -419,15 +419,15 @@
58195 struct snd_ac97;
58196
58197 struct snd_ac97_build_ops {
58198 - int (*build_3d) (struct snd_ac97 *ac97);
58199 - int (*build_specific) (struct snd_ac97 *ac97);
58200 - int (*build_spdif) (struct snd_ac97 *ac97);
58201 - int (*build_post_spdif) (struct snd_ac97 *ac97);
58202 + int (* const build_3d) (struct snd_ac97 *ac97);
58203 + int (* const build_specific) (struct snd_ac97 *ac97);
58204 + int (* const build_spdif) (struct snd_ac97 *ac97);
58205 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
58206 #ifdef CONFIG_PM
58207 - void (*suspend) (struct snd_ac97 *ac97);
58208 - void (*resume) (struct snd_ac97 *ac97);
58209 + void (* const suspend) (struct snd_ac97 *ac97);
58210 + void (* const resume) (struct snd_ac97 *ac97);
58211 #endif
58212 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58213 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58214 };
58215
58216 struct snd_ac97_bus_ops {
58217 @@ -477,7 +477,7 @@ struct snd_ac97_template {
58218
58219 struct snd_ac97 {
58220 /* -- lowlevel (hardware) driver specific -- */
58221 - struct snd_ac97_build_ops * build_ops;
58222 + const struct snd_ac97_build_ops * build_ops;
58223 void *private_data;
58224 void (*private_free) (struct snd_ac97 *ac97);
58225 /* --- */
58226 diff -urNp linux-2.6.32.42/include/sound/ymfpci.h linux-2.6.32.42/include/sound/ymfpci.h
58227 --- linux-2.6.32.42/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
58228 +++ linux-2.6.32.42/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
58229 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58230 spinlock_t reg_lock;
58231 spinlock_t voice_lock;
58232 wait_queue_head_t interrupt_sleep;
58233 - atomic_t interrupt_sleep_count;
58234 + atomic_unchecked_t interrupt_sleep_count;
58235 struct snd_info_entry *proc_entry;
58236 const struct firmware *dsp_microcode;
58237 const struct firmware *controller_microcode;
58238 diff -urNp linux-2.6.32.42/include/trace/events/irq.h linux-2.6.32.42/include/trace/events/irq.h
58239 --- linux-2.6.32.42/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
58240 +++ linux-2.6.32.42/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
58241 @@ -34,7 +34,7 @@
58242 */
58243 TRACE_EVENT(irq_handler_entry,
58244
58245 - TP_PROTO(int irq, struct irqaction *action),
58246 + TP_PROTO(int irq, const struct irqaction *action),
58247
58248 TP_ARGS(irq, action),
58249
58250 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
58251 */
58252 TRACE_EVENT(irq_handler_exit,
58253
58254 - TP_PROTO(int irq, struct irqaction *action, int ret),
58255 + TP_PROTO(int irq, const struct irqaction *action, int ret),
58256
58257 TP_ARGS(irq, action, ret),
58258
58259 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58260 */
58261 TRACE_EVENT(softirq_entry,
58262
58263 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58264 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58265
58266 TP_ARGS(h, vec),
58267
58268 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58269 */
58270 TRACE_EVENT(softirq_exit,
58271
58272 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58273 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58274
58275 TP_ARGS(h, vec),
58276
58277 diff -urNp linux-2.6.32.42/include/video/uvesafb.h linux-2.6.32.42/include/video/uvesafb.h
58278 --- linux-2.6.32.42/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58279 +++ linux-2.6.32.42/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58280 @@ -177,6 +177,7 @@ struct uvesafb_par {
58281 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58282 u8 pmi_setpal; /* PMI for palette changes */
58283 u16 *pmi_base; /* protected mode interface location */
58284 + u8 *pmi_code; /* protected mode code location */
58285 void *pmi_start;
58286 void *pmi_pal;
58287 u8 *vbe_state_orig; /*
58288 diff -urNp linux-2.6.32.42/init/do_mounts.c linux-2.6.32.42/init/do_mounts.c
58289 --- linux-2.6.32.42/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58290 +++ linux-2.6.32.42/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58291 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58292
58293 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58294 {
58295 - int err = sys_mount(name, "/root", fs, flags, data);
58296 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58297 if (err)
58298 return err;
58299
58300 - sys_chdir("/root");
58301 + sys_chdir((__force const char __user *)"/root");
58302 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58303 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58304 current->fs->pwd.mnt->mnt_sb->s_type->name,
58305 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58306 va_start(args, fmt);
58307 vsprintf(buf, fmt, args);
58308 va_end(args);
58309 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58310 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58311 if (fd >= 0) {
58312 sys_ioctl(fd, FDEJECT, 0);
58313 sys_close(fd);
58314 }
58315 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58316 - fd = sys_open("/dev/console", O_RDWR, 0);
58317 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58318 if (fd >= 0) {
58319 sys_ioctl(fd, TCGETS, (long)&termios);
58320 termios.c_lflag &= ~ICANON;
58321 sys_ioctl(fd, TCSETSF, (long)&termios);
58322 - sys_read(fd, &c, 1);
58323 + sys_read(fd, (char __user *)&c, 1);
58324 termios.c_lflag |= ICANON;
58325 sys_ioctl(fd, TCSETSF, (long)&termios);
58326 sys_close(fd);
58327 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58328 mount_root();
58329 out:
58330 devtmpfs_mount("dev");
58331 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58332 - sys_chroot(".");
58333 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58334 + sys_chroot((__force char __user *)".");
58335 }
58336 diff -urNp linux-2.6.32.42/init/do_mounts.h linux-2.6.32.42/init/do_mounts.h
58337 --- linux-2.6.32.42/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58338 +++ linux-2.6.32.42/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58339 @@ -15,15 +15,15 @@ extern int root_mountflags;
58340
58341 static inline int create_dev(char *name, dev_t dev)
58342 {
58343 - sys_unlink(name);
58344 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58345 + sys_unlink((__force char __user *)name);
58346 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58347 }
58348
58349 #if BITS_PER_LONG == 32
58350 static inline u32 bstat(char *name)
58351 {
58352 struct stat64 stat;
58353 - if (sys_stat64(name, &stat) != 0)
58354 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58355 return 0;
58356 if (!S_ISBLK(stat.st_mode))
58357 return 0;
58358 diff -urNp linux-2.6.32.42/init/do_mounts_initrd.c linux-2.6.32.42/init/do_mounts_initrd.c
58359 --- linux-2.6.32.42/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58360 +++ linux-2.6.32.42/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58361 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58362 sys_close(old_fd);sys_close(root_fd);
58363 sys_close(0);sys_close(1);sys_close(2);
58364 sys_setsid();
58365 - (void) sys_open("/dev/console",O_RDWR,0);
58366 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58367 (void) sys_dup(0);
58368 (void) sys_dup(0);
58369 return kernel_execve(shell, argv, envp_init);
58370 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58371 create_dev("/dev/root.old", Root_RAM0);
58372 /* mount initrd on rootfs' /root */
58373 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58374 - sys_mkdir("/old", 0700);
58375 - root_fd = sys_open("/", 0, 0);
58376 - old_fd = sys_open("/old", 0, 0);
58377 + sys_mkdir((__force const char __user *)"/old", 0700);
58378 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58379 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58380 /* move initrd over / and chdir/chroot in initrd root */
58381 - sys_chdir("/root");
58382 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58383 - sys_chroot(".");
58384 + sys_chdir((__force const char __user *)"/root");
58385 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58386 + sys_chroot((__force const char __user *)".");
58387
58388 /*
58389 * In case that a resume from disk is carried out by linuxrc or one of
58390 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58391
58392 /* move initrd to rootfs' /old */
58393 sys_fchdir(old_fd);
58394 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58395 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58396 /* switch root and cwd back to / of rootfs */
58397 sys_fchdir(root_fd);
58398 - sys_chroot(".");
58399 + sys_chroot((__force const char __user *)".");
58400 sys_close(old_fd);
58401 sys_close(root_fd);
58402
58403 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58404 - sys_chdir("/old");
58405 + sys_chdir((__force const char __user *)"/old");
58406 return;
58407 }
58408
58409 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58410 mount_root();
58411
58412 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58413 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58414 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58415 if (!error)
58416 printk("okay\n");
58417 else {
58418 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58419 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58420 if (error == -ENOENT)
58421 printk("/initrd does not exist. Ignored.\n");
58422 else
58423 printk("failed\n");
58424 printk(KERN_NOTICE "Unmounting old root\n");
58425 - sys_umount("/old", MNT_DETACH);
58426 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58427 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58428 if (fd < 0) {
58429 error = fd;
58430 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58431 * mounted in the normal path.
58432 */
58433 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58434 - sys_unlink("/initrd.image");
58435 + sys_unlink((__force const char __user *)"/initrd.image");
58436 handle_initrd();
58437 return 1;
58438 }
58439 }
58440 - sys_unlink("/initrd.image");
58441 + sys_unlink((__force const char __user *)"/initrd.image");
58442 return 0;
58443 }
58444 diff -urNp linux-2.6.32.42/init/do_mounts_md.c linux-2.6.32.42/init/do_mounts_md.c
58445 --- linux-2.6.32.42/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58446 +++ linux-2.6.32.42/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58447 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58448 partitioned ? "_d" : "", minor,
58449 md_setup_args[ent].device_names);
58450
58451 - fd = sys_open(name, 0, 0);
58452 + fd = sys_open((__force char __user *)name, 0, 0);
58453 if (fd < 0) {
58454 printk(KERN_ERR "md: open failed - cannot start "
58455 "array %s\n", name);
58456 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58457 * array without it
58458 */
58459 sys_close(fd);
58460 - fd = sys_open(name, 0, 0);
58461 + fd = sys_open((__force char __user *)name, 0, 0);
58462 sys_ioctl(fd, BLKRRPART, 0);
58463 }
58464 sys_close(fd);
58465 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58466
58467 wait_for_device_probe();
58468
58469 - fd = sys_open("/dev/md0", 0, 0);
58470 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58471 if (fd >= 0) {
58472 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58473 sys_close(fd);
58474 diff -urNp linux-2.6.32.42/init/initramfs.c linux-2.6.32.42/init/initramfs.c
58475 --- linux-2.6.32.42/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58476 +++ linux-2.6.32.42/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58477 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58478 }
58479 }
58480
58481 -static long __init do_utime(char __user *filename, time_t mtime)
58482 +static long __init do_utime(__force char __user *filename, time_t mtime)
58483 {
58484 struct timespec t[2];
58485
58486 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58487 struct dir_entry *de, *tmp;
58488 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58489 list_del(&de->list);
58490 - do_utime(de->name, de->mtime);
58491 + do_utime((__force char __user *)de->name, de->mtime);
58492 kfree(de->name);
58493 kfree(de);
58494 }
58495 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58496 if (nlink >= 2) {
58497 char *old = find_link(major, minor, ino, mode, collected);
58498 if (old)
58499 - return (sys_link(old, collected) < 0) ? -1 : 1;
58500 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58501 }
58502 return 0;
58503 }
58504 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58505 {
58506 struct stat st;
58507
58508 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58509 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58510 if (S_ISDIR(st.st_mode))
58511 - sys_rmdir(path);
58512 + sys_rmdir((__force char __user *)path);
58513 else
58514 - sys_unlink(path);
58515 + sys_unlink((__force char __user *)path);
58516 }
58517 }
58518
58519 @@ -305,7 +305,7 @@ static int __init do_name(void)
58520 int openflags = O_WRONLY|O_CREAT;
58521 if (ml != 1)
58522 openflags |= O_TRUNC;
58523 - wfd = sys_open(collected, openflags, mode);
58524 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58525
58526 if (wfd >= 0) {
58527 sys_fchown(wfd, uid, gid);
58528 @@ -317,17 +317,17 @@ static int __init do_name(void)
58529 }
58530 }
58531 } else if (S_ISDIR(mode)) {
58532 - sys_mkdir(collected, mode);
58533 - sys_chown(collected, uid, gid);
58534 - sys_chmod(collected, mode);
58535 + sys_mkdir((__force char __user *)collected, mode);
58536 + sys_chown((__force char __user *)collected, uid, gid);
58537 + sys_chmod((__force char __user *)collected, mode);
58538 dir_add(collected, mtime);
58539 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58540 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58541 if (maybe_link() == 0) {
58542 - sys_mknod(collected, mode, rdev);
58543 - sys_chown(collected, uid, gid);
58544 - sys_chmod(collected, mode);
58545 - do_utime(collected, mtime);
58546 + sys_mknod((__force char __user *)collected, mode, rdev);
58547 + sys_chown((__force char __user *)collected, uid, gid);
58548 + sys_chmod((__force char __user *)collected, mode);
58549 + do_utime((__force char __user *)collected, mtime);
58550 }
58551 }
58552 return 0;
58553 @@ -336,15 +336,15 @@ static int __init do_name(void)
58554 static int __init do_copy(void)
58555 {
58556 if (count >= body_len) {
58557 - sys_write(wfd, victim, body_len);
58558 + sys_write(wfd, (__force char __user *)victim, body_len);
58559 sys_close(wfd);
58560 - do_utime(vcollected, mtime);
58561 + do_utime((__force char __user *)vcollected, mtime);
58562 kfree(vcollected);
58563 eat(body_len);
58564 state = SkipIt;
58565 return 0;
58566 } else {
58567 - sys_write(wfd, victim, count);
58568 + sys_write(wfd, (__force char __user *)victim, count);
58569 body_len -= count;
58570 eat(count);
58571 return 1;
58572 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58573 {
58574 collected[N_ALIGN(name_len) + body_len] = '\0';
58575 clean_path(collected, 0);
58576 - sys_symlink(collected + N_ALIGN(name_len), collected);
58577 - sys_lchown(collected, uid, gid);
58578 - do_utime(collected, mtime);
58579 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58580 + sys_lchown((__force char __user *)collected, uid, gid);
58581 + do_utime((__force char __user *)collected, mtime);
58582 state = SkipIt;
58583 next_state = Reset;
58584 return 0;
58585 diff -urNp linux-2.6.32.42/init/Kconfig linux-2.6.32.42/init/Kconfig
58586 --- linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58587 +++ linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58588 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58589
58590 config COMPAT_BRK
58591 bool "Disable heap randomization"
58592 - default y
58593 + default n
58594 help
58595 Randomizing heap placement makes heap exploits harder, but it
58596 also breaks ancient binaries (including anything libc5 based).
58597 diff -urNp linux-2.6.32.42/init/main.c linux-2.6.32.42/init/main.c
58598 --- linux-2.6.32.42/init/main.c 2011-05-10 22:12:01.000000000 -0400
58599 +++ linux-2.6.32.42/init/main.c 2011-05-22 23:02:06.000000000 -0400
58600 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58601 #ifdef CONFIG_TC
58602 extern void tc_init(void);
58603 #endif
58604 +extern void grsecurity_init(void);
58605
58606 enum system_states system_state __read_mostly;
58607 EXPORT_SYMBOL(system_state);
58608 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58609
58610 __setup("reset_devices", set_reset_devices);
58611
58612 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58613 +extern char pax_enter_kernel_user[];
58614 +extern char pax_exit_kernel_user[];
58615 +extern pgdval_t clone_pgd_mask;
58616 +#endif
58617 +
58618 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58619 +static int __init setup_pax_nouderef(char *str)
58620 +{
58621 +#ifdef CONFIG_X86_32
58622 + unsigned int cpu;
58623 + struct desc_struct *gdt;
58624 +
58625 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58626 + gdt = get_cpu_gdt_table(cpu);
58627 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58628 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58629 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58630 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58631 + }
58632 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58633 +#else
58634 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58635 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58636 + clone_pgd_mask = ~(pgdval_t)0UL;
58637 +#endif
58638 +
58639 + return 0;
58640 +}
58641 +early_param("pax_nouderef", setup_pax_nouderef);
58642 +#endif
58643 +
58644 +#ifdef CONFIG_PAX_SOFTMODE
58645 +unsigned int pax_softmode;
58646 +
58647 +static int __init setup_pax_softmode(char *str)
58648 +{
58649 + get_option(&str, &pax_softmode);
58650 + return 1;
58651 +}
58652 +__setup("pax_softmode=", setup_pax_softmode);
58653 +#endif
58654 +
58655 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58656 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58657 static const char *panic_later, *panic_param;
58658 @@ -705,52 +749,53 @@ int initcall_debug;
58659 core_param(initcall_debug, initcall_debug, bool, 0644);
58660
58661 static char msgbuf[64];
58662 -static struct boot_trace_call call;
58663 -static struct boot_trace_ret ret;
58664 +static struct boot_trace_call trace_call;
58665 +static struct boot_trace_ret trace_ret;
58666
58667 int do_one_initcall(initcall_t fn)
58668 {
58669 int count = preempt_count();
58670 ktime_t calltime, delta, rettime;
58671 + const char *msg1 = "", *msg2 = "";
58672
58673 if (initcall_debug) {
58674 - call.caller = task_pid_nr(current);
58675 - printk("calling %pF @ %i\n", fn, call.caller);
58676 + trace_call.caller = task_pid_nr(current);
58677 + printk("calling %pF @ %i\n", fn, trace_call.caller);
58678 calltime = ktime_get();
58679 - trace_boot_call(&call, fn);
58680 + trace_boot_call(&trace_call, fn);
58681 enable_boot_trace();
58682 }
58683
58684 - ret.result = fn();
58685 + trace_ret.result = fn();
58686
58687 if (initcall_debug) {
58688 disable_boot_trace();
58689 rettime = ktime_get();
58690 delta = ktime_sub(rettime, calltime);
58691 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58692 - trace_boot_ret(&ret, fn);
58693 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58694 + trace_boot_ret(&trace_ret, fn);
58695 printk("initcall %pF returned %d after %Ld usecs\n", fn,
58696 - ret.result, ret.duration);
58697 + trace_ret.result, trace_ret.duration);
58698 }
58699
58700 msgbuf[0] = 0;
58701
58702 - if (ret.result && ret.result != -ENODEV && initcall_debug)
58703 - sprintf(msgbuf, "error code %d ", ret.result);
58704 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
58705 + sprintf(msgbuf, "error code %d ", trace_ret.result);
58706
58707 if (preempt_count() != count) {
58708 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58709 + msg1 = " preemption imbalance";
58710 preempt_count() = count;
58711 }
58712 if (irqs_disabled()) {
58713 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58714 + msg2 = " disabled interrupts";
58715 local_irq_enable();
58716 }
58717 - if (msgbuf[0]) {
58718 - printk("initcall %pF returned with %s\n", fn, msgbuf);
58719 + if (msgbuf[0] || *msg1 || *msg2) {
58720 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58721 }
58722
58723 - return ret.result;
58724 + return trace_ret.result;
58725 }
58726
58727
58728 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
58729 if (!ramdisk_execute_command)
58730 ramdisk_execute_command = "/init";
58731
58732 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58733 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58734 ramdisk_execute_command = NULL;
58735 prepare_namespace();
58736 }
58737
58738 + grsecurity_init();
58739 +
58740 /*
58741 * Ok, we have completed the initial bootup, and
58742 * we're essentially up and running. Get rid of the
58743 diff -urNp linux-2.6.32.42/init/noinitramfs.c linux-2.6.32.42/init/noinitramfs.c
58744 --- linux-2.6.32.42/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
58745 +++ linux-2.6.32.42/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
58746 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
58747 {
58748 int err;
58749
58750 - err = sys_mkdir("/dev", 0755);
58751 + err = sys_mkdir((const char __user *)"/dev", 0755);
58752 if (err < 0)
58753 goto out;
58754
58755 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
58756 if (err < 0)
58757 goto out;
58758
58759 - err = sys_mkdir("/root", 0700);
58760 + err = sys_mkdir((const char __user *)"/root", 0700);
58761 if (err < 0)
58762 goto out;
58763
58764 diff -urNp linux-2.6.32.42/ipc/mqueue.c linux-2.6.32.42/ipc/mqueue.c
58765 --- linux-2.6.32.42/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
58766 +++ linux-2.6.32.42/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
58767 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
58768 mq_bytes = (mq_msg_tblsz +
58769 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58770
58771 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58772 spin_lock(&mq_lock);
58773 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58774 u->mq_bytes + mq_bytes >
58775 diff -urNp linux-2.6.32.42/ipc/sem.c linux-2.6.32.42/ipc/sem.c
58776 --- linux-2.6.32.42/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
58777 +++ linux-2.6.32.42/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
58778 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
58779 ushort* sem_io = fast_sem_io;
58780 int nsems;
58781
58782 + pax_track_stack();
58783 +
58784 sma = sem_lock_check(ns, semid);
58785 if (IS_ERR(sma))
58786 return PTR_ERR(sma);
58787 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58788 unsigned long jiffies_left = 0;
58789 struct ipc_namespace *ns;
58790
58791 + pax_track_stack();
58792 +
58793 ns = current->nsproxy->ipc_ns;
58794
58795 if (nsops < 1 || semid < 0)
58796 diff -urNp linux-2.6.32.42/ipc/shm.c linux-2.6.32.42/ipc/shm.c
58797 --- linux-2.6.32.42/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
58798 +++ linux-2.6.32.42/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
58799 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
58800 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58801 #endif
58802
58803 +#ifdef CONFIG_GRKERNSEC
58804 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58805 + const time_t shm_createtime, const uid_t cuid,
58806 + const int shmid);
58807 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58808 + const time_t shm_createtime);
58809 +#endif
58810 +
58811 void shm_init_ns(struct ipc_namespace *ns)
58812 {
58813 ns->shm_ctlmax = SHMMAX;
58814 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
58815 shp->shm_lprid = 0;
58816 shp->shm_atim = shp->shm_dtim = 0;
58817 shp->shm_ctim = get_seconds();
58818 +#ifdef CONFIG_GRKERNSEC
58819 + {
58820 + struct timespec timeval;
58821 + do_posix_clock_monotonic_gettime(&timeval);
58822 +
58823 + shp->shm_createtime = timeval.tv_sec;
58824 + }
58825 +#endif
58826 shp->shm_segsz = size;
58827 shp->shm_nattch = 0;
58828 shp->shm_file = file;
58829 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
58830 if (err)
58831 goto out_unlock;
58832
58833 +#ifdef CONFIG_GRKERNSEC
58834 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58835 + shp->shm_perm.cuid, shmid) ||
58836 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58837 + err = -EACCES;
58838 + goto out_unlock;
58839 + }
58840 +#endif
58841 +
58842 path.dentry = dget(shp->shm_file->f_path.dentry);
58843 path.mnt = shp->shm_file->f_path.mnt;
58844 shp->shm_nattch++;
58845 +#ifdef CONFIG_GRKERNSEC
58846 + shp->shm_lapid = current->pid;
58847 +#endif
58848 size = i_size_read(path.dentry->d_inode);
58849 shm_unlock(shp);
58850
58851 diff -urNp linux-2.6.32.42/kernel/acct.c linux-2.6.32.42/kernel/acct.c
58852 --- linux-2.6.32.42/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
58853 +++ linux-2.6.32.42/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
58854 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
58855 */
58856 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58857 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58858 - file->f_op->write(file, (char *)&ac,
58859 + file->f_op->write(file, (__force char __user *)&ac,
58860 sizeof(acct_t), &file->f_pos);
58861 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58862 set_fs(fs);
58863 diff -urNp linux-2.6.32.42/kernel/audit.c linux-2.6.32.42/kernel/audit.c
58864 --- linux-2.6.32.42/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
58865 +++ linux-2.6.32.42/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
58866 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
58867 3) suppressed due to audit_rate_limit
58868 4) suppressed due to audit_backlog_limit
58869 */
58870 -static atomic_t audit_lost = ATOMIC_INIT(0);
58871 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58872
58873 /* The netlink socket. */
58874 static struct sock *audit_sock;
58875 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
58876 unsigned long now;
58877 int print;
58878
58879 - atomic_inc(&audit_lost);
58880 + atomic_inc_unchecked(&audit_lost);
58881
58882 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58883
58884 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
58885 printk(KERN_WARNING
58886 "audit: audit_lost=%d audit_rate_limit=%d "
58887 "audit_backlog_limit=%d\n",
58888 - atomic_read(&audit_lost),
58889 + atomic_read_unchecked(&audit_lost),
58890 audit_rate_limit,
58891 audit_backlog_limit);
58892 audit_panic(message);
58893 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
58894 status_set.pid = audit_pid;
58895 status_set.rate_limit = audit_rate_limit;
58896 status_set.backlog_limit = audit_backlog_limit;
58897 - status_set.lost = atomic_read(&audit_lost);
58898 + status_set.lost = atomic_read_unchecked(&audit_lost);
58899 status_set.backlog = skb_queue_len(&audit_skb_queue);
58900 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58901 &status_set, sizeof(status_set));
58902 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
58903 spin_unlock_irq(&tsk->sighand->siglock);
58904 }
58905 read_unlock(&tasklist_lock);
58906 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
58907 - &s, sizeof(s));
58908 +
58909 + if (!err)
58910 + audit_send_reply(NETLINK_CB(skb).pid, seq,
58911 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
58912 break;
58913 }
58914 case AUDIT_TTY_SET: {
58915 diff -urNp linux-2.6.32.42/kernel/auditsc.c linux-2.6.32.42/kernel/auditsc.c
58916 --- linux-2.6.32.42/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
58917 +++ linux-2.6.32.42/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
58918 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
58919 }
58920
58921 /* global counter which is incremented every time something logs in */
58922 -static atomic_t session_id = ATOMIC_INIT(0);
58923 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58924
58925 /**
58926 * audit_set_loginuid - set a task's audit_context loginuid
58927 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
58928 */
58929 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58930 {
58931 - unsigned int sessionid = atomic_inc_return(&session_id);
58932 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58933 struct audit_context *context = task->audit_context;
58934
58935 if (context && context->in_syscall) {
58936 diff -urNp linux-2.6.32.42/kernel/capability.c linux-2.6.32.42/kernel/capability.c
58937 --- linux-2.6.32.42/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
58938 +++ linux-2.6.32.42/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
58939 @@ -305,10 +305,26 @@ int capable(int cap)
58940 BUG();
58941 }
58942
58943 - if (security_capable(cap) == 0) {
58944 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
58945 current->flags |= PF_SUPERPRIV;
58946 return 1;
58947 }
58948 return 0;
58949 }
58950 +
58951 +int capable_nolog(int cap)
58952 +{
58953 + if (unlikely(!cap_valid(cap))) {
58954 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58955 + BUG();
58956 + }
58957 +
58958 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
58959 + current->flags |= PF_SUPERPRIV;
58960 + return 1;
58961 + }
58962 + return 0;
58963 +}
58964 +
58965 EXPORT_SYMBOL(capable);
58966 +EXPORT_SYMBOL(capable_nolog);
58967 diff -urNp linux-2.6.32.42/kernel/cgroup.c linux-2.6.32.42/kernel/cgroup.c
58968 --- linux-2.6.32.42/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
58969 +++ linux-2.6.32.42/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
58970 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
58971 struct hlist_head *hhead;
58972 struct cg_cgroup_link *link;
58973
58974 + pax_track_stack();
58975 +
58976 /* First see if we already have a cgroup group that matches
58977 * the desired set */
58978 read_lock(&css_set_lock);
58979 diff -urNp linux-2.6.32.42/kernel/configs.c linux-2.6.32.42/kernel/configs.c
58980 --- linux-2.6.32.42/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
58981 +++ linux-2.6.32.42/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
58982 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
58983 struct proc_dir_entry *entry;
58984
58985 /* create the current config file */
58986 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58987 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58988 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58989 + &ikconfig_file_ops);
58990 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58991 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58992 + &ikconfig_file_ops);
58993 +#endif
58994 +#else
58995 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58996 &ikconfig_file_ops);
58997 +#endif
58998 +
58999 if (!entry)
59000 return -ENOMEM;
59001
59002 diff -urNp linux-2.6.32.42/kernel/cpu.c linux-2.6.32.42/kernel/cpu.c
59003 --- linux-2.6.32.42/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
59004 +++ linux-2.6.32.42/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
59005 @@ -19,7 +19,7 @@
59006 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
59007 static DEFINE_MUTEX(cpu_add_remove_lock);
59008
59009 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
59010 +static RAW_NOTIFIER_HEAD(cpu_chain);
59011
59012 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
59013 * Should always be manipulated under cpu_add_remove_lock
59014 diff -urNp linux-2.6.32.42/kernel/cred.c linux-2.6.32.42/kernel/cred.c
59015 --- linux-2.6.32.42/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
59016 +++ linux-2.6.32.42/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
59017 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
59018 */
59019 void __put_cred(struct cred *cred)
59020 {
59021 + pax_track_stack();
59022 +
59023 kdebug("__put_cred(%p{%d,%d})", cred,
59024 atomic_read(&cred->usage),
59025 read_cred_subscribers(cred));
59026 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
59027 {
59028 struct cred *cred;
59029
59030 + pax_track_stack();
59031 +
59032 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
59033 atomic_read(&tsk->cred->usage),
59034 read_cred_subscribers(tsk->cred));
59035 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
59036 {
59037 const struct cred *cred;
59038
59039 + pax_track_stack();
59040 +
59041 rcu_read_lock();
59042
59043 do {
59044 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
59045 {
59046 struct cred *new;
59047
59048 + pax_track_stack();
59049 +
59050 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
59051 if (!new)
59052 return NULL;
59053 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
59054 const struct cred *old;
59055 struct cred *new;
59056
59057 + pax_track_stack();
59058 +
59059 validate_process_creds();
59060
59061 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59062 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
59063 struct thread_group_cred *tgcred = NULL;
59064 struct cred *new;
59065
59066 + pax_track_stack();
59067 +
59068 #ifdef CONFIG_KEYS
59069 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59070 if (!tgcred)
59071 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
59072 struct cred *new;
59073 int ret;
59074
59075 + pax_track_stack();
59076 +
59077 mutex_init(&p->cred_guard_mutex);
59078
59079 if (
59080 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
59081 struct task_struct *task = current;
59082 const struct cred *old = task->real_cred;
59083
59084 + pax_track_stack();
59085 +
59086 kdebug("commit_creds(%p{%d,%d})", new,
59087 atomic_read(&new->usage),
59088 read_cred_subscribers(new));
59089 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
59090
59091 get_cred(new); /* we will require a ref for the subj creds too */
59092
59093 + gr_set_role_label(task, new->uid, new->gid);
59094 +
59095 /* dumpability changes */
59096 if (old->euid != new->euid ||
59097 old->egid != new->egid ||
59098 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
59099 */
59100 void abort_creds(struct cred *new)
59101 {
59102 + pax_track_stack();
59103 +
59104 kdebug("abort_creds(%p{%d,%d})", new,
59105 atomic_read(&new->usage),
59106 read_cred_subscribers(new));
59107 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
59108 {
59109 const struct cred *old = current->cred;
59110
59111 + pax_track_stack();
59112 +
59113 kdebug("override_creds(%p{%d,%d})", new,
59114 atomic_read(&new->usage),
59115 read_cred_subscribers(new));
59116 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
59117 {
59118 const struct cred *override = current->cred;
59119
59120 + pax_track_stack();
59121 +
59122 kdebug("revert_creds(%p{%d,%d})", old,
59123 atomic_read(&old->usage),
59124 read_cred_subscribers(old));
59125 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
59126 const struct cred *old;
59127 struct cred *new;
59128
59129 + pax_track_stack();
59130 +
59131 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59132 if (!new)
59133 return NULL;
59134 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59135 */
59136 int set_security_override(struct cred *new, u32 secid)
59137 {
59138 + pax_track_stack();
59139 +
59140 return security_kernel_act_as(new, secid);
59141 }
59142 EXPORT_SYMBOL(set_security_override);
59143 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
59144 u32 secid;
59145 int ret;
59146
59147 + pax_track_stack();
59148 +
59149 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59150 if (ret < 0)
59151 return ret;
59152 diff -urNp linux-2.6.32.42/kernel/exit.c linux-2.6.32.42/kernel/exit.c
59153 --- linux-2.6.32.42/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
59154 +++ linux-2.6.32.42/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
59155 @@ -55,6 +55,10 @@
59156 #include <asm/pgtable.h>
59157 #include <asm/mmu_context.h>
59158
59159 +#ifdef CONFIG_GRKERNSEC
59160 +extern rwlock_t grsec_exec_file_lock;
59161 +#endif
59162 +
59163 static void exit_mm(struct task_struct * tsk);
59164
59165 static void __unhash_process(struct task_struct *p)
59166 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
59167 struct task_struct *leader;
59168 int zap_leader;
59169 repeat:
59170 + gr_del_task_from_ip_table(p);
59171 +
59172 tracehook_prepare_release_task(p);
59173 /* don't need to get the RCU readlock here - the process is dead and
59174 * can't be modifying its own credentials */
59175 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
59176 {
59177 write_lock_irq(&tasklist_lock);
59178
59179 +#ifdef CONFIG_GRKERNSEC
59180 + write_lock(&grsec_exec_file_lock);
59181 + if (current->exec_file) {
59182 + fput(current->exec_file);
59183 + current->exec_file = NULL;
59184 + }
59185 + write_unlock(&grsec_exec_file_lock);
59186 +#endif
59187 +
59188 ptrace_unlink(current);
59189 /* Reparent to init */
59190 current->real_parent = current->parent = kthreadd_task;
59191 list_move_tail(&current->sibling, &current->real_parent->children);
59192
59193 + gr_set_kernel_label(current);
59194 +
59195 /* Set the exit signal to SIGCHLD so we signal init on exit */
59196 current->exit_signal = SIGCHLD;
59197
59198 @@ -397,7 +414,7 @@ int allow_signal(int sig)
59199 * know it'll be handled, so that they don't get converted to
59200 * SIGKILL or just silently dropped.
59201 */
59202 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59203 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59204 recalc_sigpending();
59205 spin_unlock_irq(&current->sighand->siglock);
59206 return 0;
59207 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
59208 vsnprintf(current->comm, sizeof(current->comm), name, args);
59209 va_end(args);
59210
59211 +#ifdef CONFIG_GRKERNSEC
59212 + write_lock(&grsec_exec_file_lock);
59213 + if (current->exec_file) {
59214 + fput(current->exec_file);
59215 + current->exec_file = NULL;
59216 + }
59217 + write_unlock(&grsec_exec_file_lock);
59218 +#endif
59219 +
59220 + gr_set_kernel_label(current);
59221 +
59222 /*
59223 * If we were started as result of loading a module, close all of the
59224 * user space pages. We don't need them, and if we didn't close them
59225 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
59226 struct task_struct *tsk = current;
59227 int group_dead;
59228
59229 - profile_task_exit(tsk);
59230 -
59231 - WARN_ON(atomic_read(&tsk->fs_excl));
59232 -
59233 + /*
59234 + * Check this first since set_fs() below depends on
59235 + * current_thread_info(), which we better not access when we're in
59236 + * interrupt context. Other than that, we want to do the set_fs()
59237 + * as early as possible.
59238 + */
59239 if (unlikely(in_interrupt()))
59240 panic("Aiee, killing interrupt handler!");
59241 - if (unlikely(!tsk->pid))
59242 - panic("Attempted to kill the idle task!");
59243
59244 /*
59245 - * If do_exit is called because this processes oopsed, it's possible
59246 + * If do_exit is called because this processes Oops'ed, it's possible
59247 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
59248 * continuing. Amongst other possible reasons, this is to prevent
59249 * mm_release()->clear_child_tid() from writing to a user-controlled
59250 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
59251 */
59252 set_fs(USER_DS);
59253
59254 + profile_task_exit(tsk);
59255 +
59256 + WARN_ON(atomic_read(&tsk->fs_excl));
59257 +
59258 + if (unlikely(!tsk->pid))
59259 + panic("Attempted to kill the idle task!");
59260 +
59261 tracehook_report_exit(&code);
59262
59263 validate_creds_for_do_exit(tsk);
59264 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59265 tsk->exit_code = code;
59266 taskstats_exit(tsk, group_dead);
59267
59268 + gr_acl_handle_psacct(tsk, code);
59269 + gr_acl_handle_exit();
59270 +
59271 exit_mm(tsk);
59272
59273 if (group_dead)
59274 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59275
59276 if (unlikely(wo->wo_flags & WNOWAIT)) {
59277 int exit_code = p->exit_code;
59278 - int why, status;
59279 + int why;
59280
59281 get_task_struct(p);
59282 read_unlock(&tasklist_lock);
59283 diff -urNp linux-2.6.32.42/kernel/fork.c linux-2.6.32.42/kernel/fork.c
59284 --- linux-2.6.32.42/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59285 +++ linux-2.6.32.42/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59286 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59287 *stackend = STACK_END_MAGIC; /* for overflow detection */
59288
59289 #ifdef CONFIG_CC_STACKPROTECTOR
59290 - tsk->stack_canary = get_random_int();
59291 + tsk->stack_canary = pax_get_random_long();
59292 #endif
59293
59294 /* One for us, one for whoever does the "release_task()" (usually parent) */
59295 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59296 mm->locked_vm = 0;
59297 mm->mmap = NULL;
59298 mm->mmap_cache = NULL;
59299 - mm->free_area_cache = oldmm->mmap_base;
59300 - mm->cached_hole_size = ~0UL;
59301 + mm->free_area_cache = oldmm->free_area_cache;
59302 + mm->cached_hole_size = oldmm->cached_hole_size;
59303 mm->map_count = 0;
59304 cpumask_clear(mm_cpumask(mm));
59305 mm->mm_rb = RB_ROOT;
59306 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59307 tmp->vm_flags &= ~VM_LOCKED;
59308 tmp->vm_mm = mm;
59309 tmp->vm_next = tmp->vm_prev = NULL;
59310 + tmp->vm_mirror = NULL;
59311 anon_vma_link(tmp);
59312 file = tmp->vm_file;
59313 if (file) {
59314 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59315 if (retval)
59316 goto out;
59317 }
59318 +
59319 +#ifdef CONFIG_PAX_SEGMEXEC
59320 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59321 + struct vm_area_struct *mpnt_m;
59322 +
59323 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59324 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59325 +
59326 + if (!mpnt->vm_mirror)
59327 + continue;
59328 +
59329 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59330 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59331 + mpnt->vm_mirror = mpnt_m;
59332 + } else {
59333 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59334 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59335 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59336 + mpnt->vm_mirror->vm_mirror = mpnt;
59337 + }
59338 + }
59339 + BUG_ON(mpnt_m);
59340 + }
59341 +#endif
59342 +
59343 /* a new mm has just been created */
59344 arch_dup_mmap(oldmm, mm);
59345 retval = 0;
59346 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59347 write_unlock(&fs->lock);
59348 return -EAGAIN;
59349 }
59350 - fs->users++;
59351 + atomic_inc(&fs->users);
59352 write_unlock(&fs->lock);
59353 return 0;
59354 }
59355 tsk->fs = copy_fs_struct(fs);
59356 if (!tsk->fs)
59357 return -ENOMEM;
59358 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59359 return 0;
59360 }
59361
59362 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59363 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59364 #endif
59365 retval = -EAGAIN;
59366 +
59367 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59368 +
59369 if (atomic_read(&p->real_cred->user->processes) >=
59370 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59371 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59372 - p->real_cred->user != INIT_USER)
59373 + if (p->real_cred->user != INIT_USER &&
59374 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59375 goto bad_fork_free;
59376 }
59377
59378 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59379 goto bad_fork_free_pid;
59380 }
59381
59382 + gr_copy_label(p);
59383 +
59384 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59385 /*
59386 * Clear TID on mm_release()?
59387 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59388 bad_fork_free:
59389 free_task(p);
59390 fork_out:
59391 + gr_log_forkfail(retval);
59392 +
59393 return ERR_PTR(retval);
59394 }
59395
59396 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59397 if (clone_flags & CLONE_PARENT_SETTID)
59398 put_user(nr, parent_tidptr);
59399
59400 + gr_handle_brute_check();
59401 +
59402 if (clone_flags & CLONE_VFORK) {
59403 p->vfork_done = &vfork;
59404 init_completion(&vfork);
59405 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59406 return 0;
59407
59408 /* don't need lock here; in the worst case we'll do useless copy */
59409 - if (fs->users == 1)
59410 + if (atomic_read(&fs->users) == 1)
59411 return 0;
59412
59413 *new_fsp = copy_fs_struct(fs);
59414 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59415 fs = current->fs;
59416 write_lock(&fs->lock);
59417 current->fs = new_fs;
59418 - if (--fs->users)
59419 + gr_set_chroot_entries(current, &current->fs->root);
59420 + if (atomic_dec_return(&fs->users))
59421 new_fs = NULL;
59422 else
59423 new_fs = fs;
59424 diff -urNp linux-2.6.32.42/kernel/futex.c linux-2.6.32.42/kernel/futex.c
59425 --- linux-2.6.32.42/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59426 +++ linux-2.6.32.42/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59427 @@ -54,6 +54,7 @@
59428 #include <linux/mount.h>
59429 #include <linux/pagemap.h>
59430 #include <linux/syscalls.h>
59431 +#include <linux/ptrace.h>
59432 #include <linux/signal.h>
59433 #include <linux/module.h>
59434 #include <linux/magic.h>
59435 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59436 struct page *page;
59437 int err;
59438
59439 +#ifdef CONFIG_PAX_SEGMEXEC
59440 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59441 + return -EFAULT;
59442 +#endif
59443 +
59444 /*
59445 * The futex address must be "naturally" aligned.
59446 */
59447 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59448 struct futex_q q;
59449 int ret;
59450
59451 + pax_track_stack();
59452 +
59453 if (!bitset)
59454 return -EINVAL;
59455
59456 @@ -1841,7 +1849,7 @@ retry:
59457
59458 restart = &current_thread_info()->restart_block;
59459 restart->fn = futex_wait_restart;
59460 - restart->futex.uaddr = (u32 *)uaddr;
59461 + restart->futex.uaddr = uaddr;
59462 restart->futex.val = val;
59463 restart->futex.time = abs_time->tv64;
59464 restart->futex.bitset = bitset;
59465 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59466 struct futex_q q;
59467 int res, ret;
59468
59469 + pax_track_stack();
59470 +
59471 if (!bitset)
59472 return -EINVAL;
59473
59474 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59475 {
59476 struct robust_list_head __user *head;
59477 unsigned long ret;
59478 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59479 const struct cred *cred = current_cred(), *pcred;
59480 +#endif
59481
59482 if (!futex_cmpxchg_enabled)
59483 return -ENOSYS;
59484 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59485 if (!p)
59486 goto err_unlock;
59487 ret = -EPERM;
59488 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59489 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59490 + goto err_unlock;
59491 +#else
59492 pcred = __task_cred(p);
59493 if (cred->euid != pcred->euid &&
59494 cred->euid != pcred->uid &&
59495 !capable(CAP_SYS_PTRACE))
59496 goto err_unlock;
59497 +#endif
59498 head = p->robust_list;
59499 rcu_read_unlock();
59500 }
59501 @@ -2459,7 +2476,7 @@ retry:
59502 */
59503 static inline int fetch_robust_entry(struct robust_list __user **entry,
59504 struct robust_list __user * __user *head,
59505 - int *pi)
59506 + unsigned int *pi)
59507 {
59508 unsigned long uentry;
59509
59510 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59511 {
59512 u32 curval;
59513 int i;
59514 + mm_segment_t oldfs;
59515
59516 /*
59517 * This will fail and we want it. Some arch implementations do
59518 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59519 * implementation, the non functional ones will return
59520 * -ENOSYS.
59521 */
59522 + oldfs = get_fs();
59523 + set_fs(USER_DS);
59524 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59525 + set_fs(oldfs);
59526 if (curval == -EFAULT)
59527 futex_cmpxchg_enabled = 1;
59528
59529 diff -urNp linux-2.6.32.42/kernel/futex_compat.c linux-2.6.32.42/kernel/futex_compat.c
59530 --- linux-2.6.32.42/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59531 +++ linux-2.6.32.42/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59532 @@ -10,6 +10,7 @@
59533 #include <linux/compat.h>
59534 #include <linux/nsproxy.h>
59535 #include <linux/futex.h>
59536 +#include <linux/ptrace.h>
59537
59538 #include <asm/uaccess.h>
59539
59540 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59541 {
59542 struct compat_robust_list_head __user *head;
59543 unsigned long ret;
59544 - const struct cred *cred = current_cred(), *pcred;
59545 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59546 + const struct cred *cred = current_cred();
59547 + const struct cred *pcred;
59548 +#endif
59549
59550 if (!futex_cmpxchg_enabled)
59551 return -ENOSYS;
59552 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59553 if (!p)
59554 goto err_unlock;
59555 ret = -EPERM;
59556 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59557 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59558 + goto err_unlock;
59559 +#else
59560 pcred = __task_cred(p);
59561 if (cred->euid != pcred->euid &&
59562 cred->euid != pcred->uid &&
59563 !capable(CAP_SYS_PTRACE))
59564 goto err_unlock;
59565 +#endif
59566 head = p->compat_robust_list;
59567 read_unlock(&tasklist_lock);
59568 }
59569 diff -urNp linux-2.6.32.42/kernel/gcov/base.c linux-2.6.32.42/kernel/gcov/base.c
59570 --- linux-2.6.32.42/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59571 +++ linux-2.6.32.42/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59572 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59573 }
59574
59575 #ifdef CONFIG_MODULES
59576 -static inline int within(void *addr, void *start, unsigned long size)
59577 -{
59578 - return ((addr >= start) && (addr < start + size));
59579 -}
59580 -
59581 /* Update list and generate events when modules are unloaded. */
59582 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59583 void *data)
59584 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59585 prev = NULL;
59586 /* Remove entries located in module from linked list. */
59587 for (info = gcov_info_head; info; info = info->next) {
59588 - if (within(info, mod->module_core, mod->core_size)) {
59589 + if (within_module_core_rw((unsigned long)info, mod)) {
59590 if (prev)
59591 prev->next = info->next;
59592 else
59593 diff -urNp linux-2.6.32.42/kernel/hrtimer.c linux-2.6.32.42/kernel/hrtimer.c
59594 --- linux-2.6.32.42/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59595 +++ linux-2.6.32.42/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59596 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59597 local_irq_restore(flags);
59598 }
59599
59600 -static void run_hrtimer_softirq(struct softirq_action *h)
59601 +static void run_hrtimer_softirq(void)
59602 {
59603 hrtimer_peek_ahead_timers();
59604 }
59605 diff -urNp linux-2.6.32.42/kernel/kallsyms.c linux-2.6.32.42/kernel/kallsyms.c
59606 --- linux-2.6.32.42/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59607 +++ linux-2.6.32.42/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59608 @@ -11,6 +11,9 @@
59609 * Changed the compression method from stem compression to "table lookup"
59610 * compression (see scripts/kallsyms.c for a more complete description)
59611 */
59612 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59613 +#define __INCLUDED_BY_HIDESYM 1
59614 +#endif
59615 #include <linux/kallsyms.h>
59616 #include <linux/module.h>
59617 #include <linux/init.h>
59618 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59619
59620 static inline int is_kernel_inittext(unsigned long addr)
59621 {
59622 + if (system_state != SYSTEM_BOOTING)
59623 + return 0;
59624 +
59625 if (addr >= (unsigned long)_sinittext
59626 && addr <= (unsigned long)_einittext)
59627 return 1;
59628 return 0;
59629 }
59630
59631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59632 +#ifdef CONFIG_MODULES
59633 +static inline int is_module_text(unsigned long addr)
59634 +{
59635 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59636 + return 1;
59637 +
59638 + addr = ktla_ktva(addr);
59639 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59640 +}
59641 +#else
59642 +static inline int is_module_text(unsigned long addr)
59643 +{
59644 + return 0;
59645 +}
59646 +#endif
59647 +#endif
59648 +
59649 static inline int is_kernel_text(unsigned long addr)
59650 {
59651 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59652 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
59653
59654 static inline int is_kernel(unsigned long addr)
59655 {
59656 +
59657 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59658 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
59659 + return 1;
59660 +
59661 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59662 +#else
59663 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59664 +#endif
59665 +
59666 return 1;
59667 return in_gate_area_no_task(addr);
59668 }
59669
59670 static int is_ksym_addr(unsigned long addr)
59671 {
59672 +
59673 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59674 + if (is_module_text(addr))
59675 + return 0;
59676 +#endif
59677 +
59678 if (all_var)
59679 return is_kernel(addr);
59680
59681 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
59682
59683 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59684 {
59685 - iter->name[0] = '\0';
59686 iter->nameoff = get_symbol_offset(new_pos);
59687 iter->pos = new_pos;
59688 }
59689 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
59690 {
59691 struct kallsym_iter *iter = m->private;
59692
59693 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59694 + if (current_uid())
59695 + return 0;
59696 +#endif
59697 +
59698 /* Some debugging symbols have no name. Ignore them. */
59699 if (!iter->name[0])
59700 return 0;
59701 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
59702 struct kallsym_iter *iter;
59703 int ret;
59704
59705 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59706 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59707 if (!iter)
59708 return -ENOMEM;
59709 reset_iter(iter, 0);
59710 diff -urNp linux-2.6.32.42/kernel/kgdb.c linux-2.6.32.42/kernel/kgdb.c
59711 --- linux-2.6.32.42/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
59712 +++ linux-2.6.32.42/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
59713 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
59714 /* Guard for recursive entry */
59715 static int exception_level;
59716
59717 -static struct kgdb_io *kgdb_io_ops;
59718 +static const struct kgdb_io *kgdb_io_ops;
59719 static DEFINE_SPINLOCK(kgdb_registration_lock);
59720
59721 /* kgdb console driver is loaded */
59722 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
59723 */
59724 static atomic_t passive_cpu_wait[NR_CPUS];
59725 static atomic_t cpu_in_kgdb[NR_CPUS];
59726 -atomic_t kgdb_setting_breakpoint;
59727 +atomic_unchecked_t kgdb_setting_breakpoint;
59728
59729 struct task_struct *kgdb_usethread;
59730 struct task_struct *kgdb_contthread;
59731 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
59732 sizeof(unsigned long)];
59733
59734 /* to keep track of the CPU which is doing the single stepping*/
59735 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59736 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59737
59738 /*
59739 * If you are debugging a problem where roundup (the collection of
59740 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
59741 return 0;
59742 if (kgdb_connected)
59743 return 1;
59744 - if (atomic_read(&kgdb_setting_breakpoint))
59745 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
59746 return 1;
59747 if (print_wait)
59748 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
59749 @@ -1426,8 +1426,8 @@ acquirelock:
59750 * instance of the exception handler wanted to come into the
59751 * debugger on a different CPU via a single step
59752 */
59753 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59754 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
59755 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59756 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
59757
59758 atomic_set(&kgdb_active, -1);
59759 touch_softlockup_watchdog();
59760 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
59761 *
59762 * Register it with the KGDB core.
59763 */
59764 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
59765 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
59766 {
59767 int err;
59768
59769 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
59770 *
59771 * Unregister it with the KGDB core.
59772 */
59773 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
59774 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
59775 {
59776 BUG_ON(kgdb_connected);
59777
59778 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
59779 */
59780 void kgdb_breakpoint(void)
59781 {
59782 - atomic_set(&kgdb_setting_breakpoint, 1);
59783 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
59784 wmb(); /* Sync point before breakpoint */
59785 arch_kgdb_breakpoint();
59786 wmb(); /* Sync point after breakpoint */
59787 - atomic_set(&kgdb_setting_breakpoint, 0);
59788 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
59789 }
59790 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
59791
59792 diff -urNp linux-2.6.32.42/kernel/kmod.c linux-2.6.32.42/kernel/kmod.c
59793 --- linux-2.6.32.42/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
59794 +++ linux-2.6.32.42/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
59795 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59796 * If module auto-loading support is disabled then this function
59797 * becomes a no-operation.
59798 */
59799 -int __request_module(bool wait, const char *fmt, ...)
59800 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59801 {
59802 - va_list args;
59803 char module_name[MODULE_NAME_LEN];
59804 unsigned int max_modprobes;
59805 int ret;
59806 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59807 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59808 static char *envp[] = { "HOME=/",
59809 "TERM=linux",
59810 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59811 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
59812 if (ret)
59813 return ret;
59814
59815 - va_start(args, fmt);
59816 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59817 - va_end(args);
59818 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59819 if (ret >= MODULE_NAME_LEN)
59820 return -ENAMETOOLONG;
59821
59822 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59823 + if (!current_uid()) {
59824 + /* hack to workaround consolekit/udisks stupidity */
59825 + read_lock(&tasklist_lock);
59826 + if (!strcmp(current->comm, "mount") &&
59827 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59828 + read_unlock(&tasklist_lock);
59829 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59830 + return -EPERM;
59831 + }
59832 + read_unlock(&tasklist_lock);
59833 + }
59834 +#endif
59835 +
59836 /* If modprobe needs a service that is in a module, we get a recursive
59837 * loop. Limit the number of running kmod threads to max_threads/2 or
59838 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59839 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
59840 atomic_dec(&kmod_concurrent);
59841 return ret;
59842 }
59843 +
59844 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
59845 +{
59846 + va_list args;
59847 + int ret;
59848 +
59849 + va_start(args, fmt);
59850 + ret = ____request_module(wait, module_param, fmt, args);
59851 + va_end(args);
59852 +
59853 + return ret;
59854 +}
59855 +
59856 +int __request_module(bool wait, const char *fmt, ...)
59857 +{
59858 + va_list args;
59859 + int ret;
59860 +
59861 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59862 + if (current_uid()) {
59863 + char module_param[MODULE_NAME_LEN];
59864 +
59865 + memset(module_param, 0, sizeof(module_param));
59866 +
59867 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
59868 +
59869 + va_start(args, fmt);
59870 + ret = ____request_module(wait, module_param, fmt, args);
59871 + va_end(args);
59872 +
59873 + return ret;
59874 + }
59875 +#endif
59876 +
59877 + va_start(args, fmt);
59878 + ret = ____request_module(wait, NULL, fmt, args);
59879 + va_end(args);
59880 +
59881 + return ret;
59882 +}
59883 +
59884 +
59885 EXPORT_SYMBOL(__request_module);
59886 #endif /* CONFIG_MODULES */
59887
59888 diff -urNp linux-2.6.32.42/kernel/kprobes.c linux-2.6.32.42/kernel/kprobes.c
59889 --- linux-2.6.32.42/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
59890 +++ linux-2.6.32.42/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
59891 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
59892 * kernel image and loaded module images reside. This is required
59893 * so x86_64 can correctly handle the %rip-relative fixups.
59894 */
59895 - kip->insns = module_alloc(PAGE_SIZE);
59896 + kip->insns = module_alloc_exec(PAGE_SIZE);
59897 if (!kip->insns) {
59898 kfree(kip);
59899 return NULL;
59900 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
59901 */
59902 if (!list_is_singular(&kprobe_insn_pages)) {
59903 list_del(&kip->list);
59904 - module_free(NULL, kip->insns);
59905 + module_free_exec(NULL, kip->insns);
59906 kfree(kip);
59907 }
59908 return 1;
59909 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
59910 {
59911 int i, err = 0;
59912 unsigned long offset = 0, size = 0;
59913 - char *modname, namebuf[128];
59914 + char *modname, namebuf[KSYM_NAME_LEN];
59915 const char *symbol_name;
59916 void *addr;
59917 struct kprobe_blackpoint *kb;
59918 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
59919 const char *sym = NULL;
59920 unsigned int i = *(loff_t *) v;
59921 unsigned long offset = 0;
59922 - char *modname, namebuf[128];
59923 + char *modname, namebuf[KSYM_NAME_LEN];
59924
59925 head = &kprobe_table[i];
59926 preempt_disable();
59927 diff -urNp linux-2.6.32.42/kernel/lockdep.c linux-2.6.32.42/kernel/lockdep.c
59928 --- linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
59929 +++ linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
59930 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
59931 /*
59932 * Various lockdep statistics:
59933 */
59934 -atomic_t chain_lookup_hits;
59935 -atomic_t chain_lookup_misses;
59936 -atomic_t hardirqs_on_events;
59937 -atomic_t hardirqs_off_events;
59938 -atomic_t redundant_hardirqs_on;
59939 -atomic_t redundant_hardirqs_off;
59940 -atomic_t softirqs_on_events;
59941 -atomic_t softirqs_off_events;
59942 -atomic_t redundant_softirqs_on;
59943 -atomic_t redundant_softirqs_off;
59944 -atomic_t nr_unused_locks;
59945 -atomic_t nr_cyclic_checks;
59946 -atomic_t nr_find_usage_forwards_checks;
59947 -atomic_t nr_find_usage_backwards_checks;
59948 +atomic_unchecked_t chain_lookup_hits;
59949 +atomic_unchecked_t chain_lookup_misses;
59950 +atomic_unchecked_t hardirqs_on_events;
59951 +atomic_unchecked_t hardirqs_off_events;
59952 +atomic_unchecked_t redundant_hardirqs_on;
59953 +atomic_unchecked_t redundant_hardirqs_off;
59954 +atomic_unchecked_t softirqs_on_events;
59955 +atomic_unchecked_t softirqs_off_events;
59956 +atomic_unchecked_t redundant_softirqs_on;
59957 +atomic_unchecked_t redundant_softirqs_off;
59958 +atomic_unchecked_t nr_unused_locks;
59959 +atomic_unchecked_t nr_cyclic_checks;
59960 +atomic_unchecked_t nr_find_usage_forwards_checks;
59961 +atomic_unchecked_t nr_find_usage_backwards_checks;
59962 #endif
59963
59964 /*
59965 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
59966 int i;
59967 #endif
59968
59969 +#ifdef CONFIG_PAX_KERNEXEC
59970 + start = ktla_ktva(start);
59971 +#endif
59972 +
59973 /*
59974 * static variable?
59975 */
59976 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
59977 */
59978 for_each_possible_cpu(i) {
59979 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
59980 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
59981 - + per_cpu_offset(i);
59982 + end = start + PERCPU_ENOUGH_ROOM;
59983
59984 if ((addr >= start) && (addr < end))
59985 return 1;
59986 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
59987 if (!static_obj(lock->key)) {
59988 debug_locks_off();
59989 printk("INFO: trying to register non-static key.\n");
59990 + printk("lock:%pS key:%pS.\n", lock, lock->key);
59991 printk("the code is fine but needs lockdep annotation.\n");
59992 printk("turning off the locking correctness validator.\n");
59993 dump_stack();
59994 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
59995 if (!class)
59996 return 0;
59997 }
59998 - debug_atomic_inc((atomic_t *)&class->ops);
59999 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
60000 if (very_verbose(class)) {
60001 printk("\nacquire class [%p] %s", class->key, class->name);
60002 if (class->name_version > 1)
60003 diff -urNp linux-2.6.32.42/kernel/lockdep_internals.h linux-2.6.32.42/kernel/lockdep_internals.h
60004 --- linux-2.6.32.42/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
60005 +++ linux-2.6.32.42/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
60006 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
60007 /*
60008 * Various lockdep statistics:
60009 */
60010 -extern atomic_t chain_lookup_hits;
60011 -extern atomic_t chain_lookup_misses;
60012 -extern atomic_t hardirqs_on_events;
60013 -extern atomic_t hardirqs_off_events;
60014 -extern atomic_t redundant_hardirqs_on;
60015 -extern atomic_t redundant_hardirqs_off;
60016 -extern atomic_t softirqs_on_events;
60017 -extern atomic_t softirqs_off_events;
60018 -extern atomic_t redundant_softirqs_on;
60019 -extern atomic_t redundant_softirqs_off;
60020 -extern atomic_t nr_unused_locks;
60021 -extern atomic_t nr_cyclic_checks;
60022 -extern atomic_t nr_cyclic_check_recursions;
60023 -extern atomic_t nr_find_usage_forwards_checks;
60024 -extern atomic_t nr_find_usage_forwards_recursions;
60025 -extern atomic_t nr_find_usage_backwards_checks;
60026 -extern atomic_t nr_find_usage_backwards_recursions;
60027 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
60028 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
60029 -# define debug_atomic_read(ptr) atomic_read(ptr)
60030 +extern atomic_unchecked_t chain_lookup_hits;
60031 +extern atomic_unchecked_t chain_lookup_misses;
60032 +extern atomic_unchecked_t hardirqs_on_events;
60033 +extern atomic_unchecked_t hardirqs_off_events;
60034 +extern atomic_unchecked_t redundant_hardirqs_on;
60035 +extern atomic_unchecked_t redundant_hardirqs_off;
60036 +extern atomic_unchecked_t softirqs_on_events;
60037 +extern atomic_unchecked_t softirqs_off_events;
60038 +extern atomic_unchecked_t redundant_softirqs_on;
60039 +extern atomic_unchecked_t redundant_softirqs_off;
60040 +extern atomic_unchecked_t nr_unused_locks;
60041 +extern atomic_unchecked_t nr_cyclic_checks;
60042 +extern atomic_unchecked_t nr_cyclic_check_recursions;
60043 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
60044 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
60045 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
60046 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
60047 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
60048 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
60049 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
60050 #else
60051 # define debug_atomic_inc(ptr) do { } while (0)
60052 # define debug_atomic_dec(ptr) do { } while (0)
60053 diff -urNp linux-2.6.32.42/kernel/lockdep_proc.c linux-2.6.32.42/kernel/lockdep_proc.c
60054 --- linux-2.6.32.42/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
60055 +++ linux-2.6.32.42/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
60056 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60057
60058 static void print_name(struct seq_file *m, struct lock_class *class)
60059 {
60060 - char str[128];
60061 + char str[KSYM_NAME_LEN];
60062 const char *name = class->name;
60063
60064 if (!name) {
60065 diff -urNp linux-2.6.32.42/kernel/module.c linux-2.6.32.42/kernel/module.c
60066 --- linux-2.6.32.42/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
60067 +++ linux-2.6.32.42/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
60068 @@ -55,6 +55,7 @@
60069 #include <linux/async.h>
60070 #include <linux/percpu.h>
60071 #include <linux/kmemleak.h>
60072 +#include <linux/grsecurity.h>
60073
60074 #define CREATE_TRACE_POINTS
60075 #include <trace/events/module.h>
60076 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
60077 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
60078
60079 /* Bounds of module allocation, for speeding __module_address */
60080 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60081 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60082 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60083
60084 int register_module_notifier(struct notifier_block * nb)
60085 {
60086 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
60087 return true;
60088
60089 list_for_each_entry_rcu(mod, &modules, list) {
60090 - struct symsearch arr[] = {
60091 + struct symsearch modarr[] = {
60092 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60093 NOT_GPL_ONLY, false },
60094 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60095 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
60096 #endif
60097 };
60098
60099 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60100 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60101 return true;
60102 }
60103 return false;
60104 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
60105 void *ptr;
60106 int cpu;
60107
60108 - if (align > PAGE_SIZE) {
60109 + if (align-1 >= PAGE_SIZE) {
60110 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60111 name, align, PAGE_SIZE);
60112 align = PAGE_SIZE;
60113 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
60114 * /sys/module/foo/sections stuff
60115 * J. Corbet <corbet@lwn.net>
60116 */
60117 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
60118 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60119
60120 static inline bool sect_empty(const Elf_Shdr *sect)
60121 {
60122 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
60123 destroy_params(mod->kp, mod->num_kp);
60124
60125 /* This may be NULL, but that's OK */
60126 - module_free(mod, mod->module_init);
60127 + module_free(mod, mod->module_init_rw);
60128 + module_free_exec(mod, mod->module_init_rx);
60129 kfree(mod->args);
60130 if (mod->percpu)
60131 percpu_modfree(mod->percpu);
60132 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
60133 percpu_modfree(mod->refptr);
60134 #endif
60135 /* Free lock-classes: */
60136 - lockdep_free_key_range(mod->module_core, mod->core_size);
60137 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60138 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60139
60140 /* Finally, free the core (containing the module structure) */
60141 - module_free(mod, mod->module_core);
60142 + module_free_exec(mod, mod->module_core_rx);
60143 + module_free(mod, mod->module_core_rw);
60144
60145 #ifdef CONFIG_MPU
60146 update_protections(current->mm);
60147 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
60148 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60149 int ret = 0;
60150 const struct kernel_symbol *ksym;
60151 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60152 + int is_fs_load = 0;
60153 + int register_filesystem_found = 0;
60154 + char *p;
60155 +
60156 + p = strstr(mod->args, "grsec_modharden_fs");
60157 +
60158 + if (p) {
60159 + char *endptr = p + strlen("grsec_modharden_fs");
60160 + /* copy \0 as well */
60161 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60162 + is_fs_load = 1;
60163 + }
60164 +#endif
60165 +
60166
60167 for (i = 1; i < n; i++) {
60168 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60169 + const char *name = strtab + sym[i].st_name;
60170 +
60171 + /* it's a real shame this will never get ripped and copied
60172 + upstream! ;(
60173 + */
60174 + if (is_fs_load && !strcmp(name, "register_filesystem"))
60175 + register_filesystem_found = 1;
60176 +#endif
60177 switch (sym[i].st_shndx) {
60178 case SHN_COMMON:
60179 /* We compiled with -fno-common. These are not
60180 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
60181 strtab + sym[i].st_name, mod);
60182 /* Ok if resolved. */
60183 if (ksym) {
60184 + pax_open_kernel();
60185 sym[i].st_value = ksym->value;
60186 + pax_close_kernel();
60187 break;
60188 }
60189
60190 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
60191 secbase = (unsigned long)mod->percpu;
60192 else
60193 secbase = sechdrs[sym[i].st_shndx].sh_addr;
60194 + pax_open_kernel();
60195 sym[i].st_value += secbase;
60196 + pax_close_kernel();
60197 break;
60198 }
60199 }
60200
60201 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60202 + if (is_fs_load && !register_filesystem_found) {
60203 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60204 + ret = -EPERM;
60205 + }
60206 +#endif
60207 +
60208 return ret;
60209 }
60210
60211 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
60212 || s->sh_entsize != ~0UL
60213 || strstarts(secstrings + s->sh_name, ".init"))
60214 continue;
60215 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60216 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60217 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60218 + else
60219 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60220 DEBUGP("\t%s\n", secstrings + s->sh_name);
60221 }
60222 - if (m == 0)
60223 - mod->core_text_size = mod->core_size;
60224 }
60225
60226 DEBUGP("Init section allocation order:\n");
60227 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
60228 || s->sh_entsize != ~0UL
60229 || !strstarts(secstrings + s->sh_name, ".init"))
60230 continue;
60231 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60232 - | INIT_OFFSET_MASK);
60233 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60234 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60235 + else
60236 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60237 + s->sh_entsize |= INIT_OFFSET_MASK;
60238 DEBUGP("\t%s\n", secstrings + s->sh_name);
60239 }
60240 - if (m == 0)
60241 - mod->init_text_size = mod->init_size;
60242 }
60243 }
60244
60245 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
60246
60247 /* As per nm */
60248 static char elf_type(const Elf_Sym *sym,
60249 - Elf_Shdr *sechdrs,
60250 - const char *secstrings,
60251 - struct module *mod)
60252 + const Elf_Shdr *sechdrs,
60253 + const char *secstrings)
60254 {
60255 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
60256 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
60257 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
60258
60259 /* Put symbol section at end of init part of module. */
60260 symsect->sh_flags |= SHF_ALLOC;
60261 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60262 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60263 symindex) | INIT_OFFSET_MASK;
60264 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60265
60266 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60267 }
60268
60269 /* Append room for core symbols at end of core part. */
60270 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60271 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60272 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60273 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60274
60275 /* Put string table section at end of init part of module. */
60276 strsect->sh_flags |= SHF_ALLOC;
60277 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60278 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60279 strindex) | INIT_OFFSET_MASK;
60280 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60281
60282 /* Append room for core symbols' strings at end of core part. */
60283 - *pstroffs = mod->core_size;
60284 + *pstroffs = mod->core_size_rx;
60285 __set_bit(0, strmap);
60286 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60287 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60288
60289 return symoffs;
60290 }
60291 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60292 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60293 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60294
60295 + pax_open_kernel();
60296 +
60297 /* Set types up while we still have access to sections. */
60298 for (i = 0; i < mod->num_symtab; i++)
60299 mod->symtab[i].st_info
60300 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60301 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
60302
60303 - mod->core_symtab = dst = mod->module_core + symoffs;
60304 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
60305 src = mod->symtab;
60306 *dst = *src;
60307 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60308 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60309 }
60310 mod->core_num_syms = ndst;
60311
60312 - mod->core_strtab = s = mod->module_core + stroffs;
60313 + mod->core_strtab = s = mod->module_core_rx + stroffs;
60314 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60315 if (test_bit(i, strmap))
60316 *++s = mod->strtab[i];
60317 +
60318 + pax_close_kernel();
60319 }
60320 #else
60321 static inline unsigned long layout_symtab(struct module *mod,
60322 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60323 #endif
60324 }
60325
60326 -static void *module_alloc_update_bounds(unsigned long size)
60327 +static void *module_alloc_update_bounds_rw(unsigned long size)
60328 {
60329 void *ret = module_alloc(size);
60330
60331 if (ret) {
60332 /* Update module bounds. */
60333 - if ((unsigned long)ret < module_addr_min)
60334 - module_addr_min = (unsigned long)ret;
60335 - if ((unsigned long)ret + size > module_addr_max)
60336 - module_addr_max = (unsigned long)ret + size;
60337 + if ((unsigned long)ret < module_addr_min_rw)
60338 + module_addr_min_rw = (unsigned long)ret;
60339 + if ((unsigned long)ret + size > module_addr_max_rw)
60340 + module_addr_max_rw = (unsigned long)ret + size;
60341 + }
60342 + return ret;
60343 +}
60344 +
60345 +static void *module_alloc_update_bounds_rx(unsigned long size)
60346 +{
60347 + void *ret = module_alloc_exec(size);
60348 +
60349 + if (ret) {
60350 + /* Update module bounds. */
60351 + if ((unsigned long)ret < module_addr_min_rx)
60352 + module_addr_min_rx = (unsigned long)ret;
60353 + if ((unsigned long)ret + size > module_addr_max_rx)
60354 + module_addr_max_rx = (unsigned long)ret + size;
60355 }
60356 return ret;
60357 }
60358 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60359 unsigned int i;
60360
60361 /* only scan the sections containing data */
60362 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60363 - (unsigned long)mod->module_core,
60364 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60365 + (unsigned long)mod->module_core_rw,
60366 sizeof(struct module), GFP_KERNEL);
60367
60368 for (i = 1; i < hdr->e_shnum; i++) {
60369 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60370 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60371 continue;
60372
60373 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60374 - (unsigned long)mod->module_core,
60375 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60376 + (unsigned long)mod->module_core_rw,
60377 sechdrs[i].sh_size, GFP_KERNEL);
60378 }
60379 }
60380 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60381 secstrings, &stroffs, strmap);
60382
60383 /* Do the allocs. */
60384 - ptr = module_alloc_update_bounds(mod->core_size);
60385 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60386 /*
60387 * The pointer to this block is stored in the module structure
60388 * which is inside the block. Just mark it as not being a
60389 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60390 err = -ENOMEM;
60391 goto free_percpu;
60392 }
60393 - memset(ptr, 0, mod->core_size);
60394 - mod->module_core = ptr;
60395 + memset(ptr, 0, mod->core_size_rw);
60396 + mod->module_core_rw = ptr;
60397
60398 - ptr = module_alloc_update_bounds(mod->init_size);
60399 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60400 /*
60401 * The pointer to this block is stored in the module structure
60402 * which is inside the block. This block doesn't need to be
60403 * scanned as it contains data and code that will be freed
60404 * after the module is initialized.
60405 */
60406 - kmemleak_ignore(ptr);
60407 - if (!ptr && mod->init_size) {
60408 + kmemleak_not_leak(ptr);
60409 + if (!ptr && mod->init_size_rw) {
60410 + err = -ENOMEM;
60411 + goto free_core_rw;
60412 + }
60413 + memset(ptr, 0, mod->init_size_rw);
60414 + mod->module_init_rw = ptr;
60415 +
60416 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60417 + kmemleak_not_leak(ptr);
60418 + if (!ptr) {
60419 err = -ENOMEM;
60420 - goto free_core;
60421 + goto free_init_rw;
60422 }
60423 - memset(ptr, 0, mod->init_size);
60424 - mod->module_init = ptr;
60425 +
60426 + pax_open_kernel();
60427 + memset(ptr, 0, mod->core_size_rx);
60428 + pax_close_kernel();
60429 + mod->module_core_rx = ptr;
60430 +
60431 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60432 + kmemleak_not_leak(ptr);
60433 + if (!ptr && mod->init_size_rx) {
60434 + err = -ENOMEM;
60435 + goto free_core_rx;
60436 + }
60437 +
60438 + pax_open_kernel();
60439 + memset(ptr, 0, mod->init_size_rx);
60440 + pax_close_kernel();
60441 + mod->module_init_rx = ptr;
60442
60443 /* Transfer each section which specifies SHF_ALLOC */
60444 DEBUGP("final section addresses:\n");
60445 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60446 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60447 continue;
60448
60449 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60450 - dest = mod->module_init
60451 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60452 - else
60453 - dest = mod->module_core + sechdrs[i].sh_entsize;
60454 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60455 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60456 + dest = mod->module_init_rw
60457 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60458 + else
60459 + dest = mod->module_init_rx
60460 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60461 + } else {
60462 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60463 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60464 + else
60465 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60466 + }
60467 +
60468 + if (sechdrs[i].sh_type != SHT_NOBITS) {
60469
60470 - if (sechdrs[i].sh_type != SHT_NOBITS)
60471 - memcpy(dest, (void *)sechdrs[i].sh_addr,
60472 - sechdrs[i].sh_size);
60473 +#ifdef CONFIG_PAX_KERNEXEC
60474 +#ifdef CONFIG_X86_64
60475 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60476 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60477 +#endif
60478 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60479 + pax_open_kernel();
60480 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60481 + pax_close_kernel();
60482 + } else
60483 +#endif
60484 +
60485 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60486 + }
60487 /* Update sh_addr to point to copy in image. */
60488 - sechdrs[i].sh_addr = (unsigned long)dest;
60489 +
60490 +#ifdef CONFIG_PAX_KERNEXEC
60491 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60492 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60493 + else
60494 +#endif
60495 +
60496 + sechdrs[i].sh_addr = (unsigned long)dest;
60497 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60498 }
60499 /* Module has been moved. */
60500 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60501 mod->name);
60502 if (!mod->refptr) {
60503 err = -ENOMEM;
60504 - goto free_init;
60505 + goto free_init_rx;
60506 }
60507 #endif
60508 /* Now we've moved module, initialize linked lists, etc. */
60509 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60510 /* Set up MODINFO_ATTR fields */
60511 setup_modinfo(mod, sechdrs, infoindex);
60512
60513 + mod->args = args;
60514 +
60515 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60516 + {
60517 + char *p, *p2;
60518 +
60519 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60520 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60521 + err = -EPERM;
60522 + goto cleanup;
60523 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60524 + p += strlen("grsec_modharden_normal");
60525 + p2 = strstr(p, "_");
60526 + if (p2) {
60527 + *p2 = '\0';
60528 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60529 + *p2 = '_';
60530 + }
60531 + err = -EPERM;
60532 + goto cleanup;
60533 + }
60534 + }
60535 +#endif
60536 +
60537 +
60538 /* Fix up syms, so that st_value is a pointer to location. */
60539 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60540 mod);
60541 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60542
60543 /* Now do relocations. */
60544 for (i = 1; i < hdr->e_shnum; i++) {
60545 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
60546 unsigned int info = sechdrs[i].sh_info;
60547 + strtab = (char *)sechdrs[strindex].sh_addr;
60548
60549 /* Not a valid relocation section? */
60550 if (info >= hdr->e_shnum)
60551 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60552 * Do it before processing of module parameters, so the module
60553 * can provide parameter accessor functions of its own.
60554 */
60555 - if (mod->module_init)
60556 - flush_icache_range((unsigned long)mod->module_init,
60557 - (unsigned long)mod->module_init
60558 - + mod->init_size);
60559 - flush_icache_range((unsigned long)mod->module_core,
60560 - (unsigned long)mod->module_core + mod->core_size);
60561 + if (mod->module_init_rx)
60562 + flush_icache_range((unsigned long)mod->module_init_rx,
60563 + (unsigned long)mod->module_init_rx
60564 + + mod->init_size_rx);
60565 + flush_icache_range((unsigned long)mod->module_core_rx,
60566 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60567
60568 set_fs(old_fs);
60569
60570 - mod->args = args;
60571 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60572 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60573 mod->name);
60574 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60575 free_unload:
60576 module_unload_free(mod);
60577 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60578 + free_init_rx:
60579 percpu_modfree(mod->refptr);
60580 - free_init:
60581 #endif
60582 - module_free(mod, mod->module_init);
60583 - free_core:
60584 - module_free(mod, mod->module_core);
60585 + module_free_exec(mod, mod->module_init_rx);
60586 + free_core_rx:
60587 + module_free_exec(mod, mod->module_core_rx);
60588 + free_init_rw:
60589 + module_free(mod, mod->module_init_rw);
60590 + free_core_rw:
60591 + module_free(mod, mod->module_core_rw);
60592 /* mod will be freed with core. Don't access it beyond this line! */
60593 free_percpu:
60594 if (percpu)
60595 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60596 mod->symtab = mod->core_symtab;
60597 mod->strtab = mod->core_strtab;
60598 #endif
60599 - module_free(mod, mod->module_init);
60600 - mod->module_init = NULL;
60601 - mod->init_size = 0;
60602 - mod->init_text_size = 0;
60603 + module_free(mod, mod->module_init_rw);
60604 + module_free_exec(mod, mod->module_init_rx);
60605 + mod->module_init_rw = NULL;
60606 + mod->module_init_rx = NULL;
60607 + mod->init_size_rw = 0;
60608 + mod->init_size_rx = 0;
60609 mutex_unlock(&module_mutex);
60610
60611 return 0;
60612 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60613 unsigned long nextval;
60614
60615 /* At worse, next value is at end of module */
60616 - if (within_module_init(addr, mod))
60617 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60618 + if (within_module_init_rx(addr, mod))
60619 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60620 + else if (within_module_init_rw(addr, mod))
60621 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60622 + else if (within_module_core_rx(addr, mod))
60623 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60624 + else if (within_module_core_rw(addr, mod))
60625 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60626 else
60627 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60628 + return NULL;
60629
60630 /* Scan for closest preceeding symbol, and next symbol. (ELF
60631 starts real symbols at 1). */
60632 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
60633 char buf[8];
60634
60635 seq_printf(m, "%s %u",
60636 - mod->name, mod->init_size + mod->core_size);
60637 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60638 print_unload_info(m, mod);
60639
60640 /* Informative for users. */
60641 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
60642 mod->state == MODULE_STATE_COMING ? "Loading":
60643 "Live");
60644 /* Used by oprofile and other similar tools. */
60645 - seq_printf(m, " 0x%p", mod->module_core);
60646 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60647
60648 /* Taints info */
60649 if (mod->taints)
60650 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
60651
60652 static int __init proc_modules_init(void)
60653 {
60654 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60655 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60656 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60657 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60658 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60659 +#else
60660 proc_create("modules", 0, NULL, &proc_modules_operations);
60661 +#endif
60662 +#else
60663 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60664 +#endif
60665 return 0;
60666 }
60667 module_init(proc_modules_init);
60668 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
60669 {
60670 struct module *mod;
60671
60672 - if (addr < module_addr_min || addr > module_addr_max)
60673 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60674 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
60675 return NULL;
60676
60677 list_for_each_entry_rcu(mod, &modules, list)
60678 - if (within_module_core(addr, mod)
60679 - || within_module_init(addr, mod))
60680 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
60681 return mod;
60682 return NULL;
60683 }
60684 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
60685 */
60686 struct module *__module_text_address(unsigned long addr)
60687 {
60688 - struct module *mod = __module_address(addr);
60689 + struct module *mod;
60690 +
60691 +#ifdef CONFIG_X86_32
60692 + addr = ktla_ktva(addr);
60693 +#endif
60694 +
60695 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60696 + return NULL;
60697 +
60698 + mod = __module_address(addr);
60699 +
60700 if (mod) {
60701 /* Make sure it's within the text section. */
60702 - if (!within(addr, mod->module_init, mod->init_text_size)
60703 - && !within(addr, mod->module_core, mod->core_text_size))
60704 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60705 mod = NULL;
60706 }
60707 return mod;
60708 diff -urNp linux-2.6.32.42/kernel/mutex.c linux-2.6.32.42/kernel/mutex.c
60709 --- linux-2.6.32.42/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
60710 +++ linux-2.6.32.42/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
60711 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
60712 */
60713
60714 for (;;) {
60715 - struct thread_info *owner;
60716 + struct task_struct *owner;
60717
60718 /*
60719 * If we own the BKL, then don't spin. The owner of
60720 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
60721 spin_lock_mutex(&lock->wait_lock, flags);
60722
60723 debug_mutex_lock_common(lock, &waiter);
60724 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60725 + debug_mutex_add_waiter(lock, &waiter, task);
60726
60727 /* add waiting tasks to the end of the waitqueue (FIFO): */
60728 list_add_tail(&waiter.list, &lock->wait_list);
60729 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
60730 * TASK_UNINTERRUPTIBLE case.)
60731 */
60732 if (unlikely(signal_pending_state(state, task))) {
60733 - mutex_remove_waiter(lock, &waiter,
60734 - task_thread_info(task));
60735 + mutex_remove_waiter(lock, &waiter, task);
60736 mutex_release(&lock->dep_map, 1, ip);
60737 spin_unlock_mutex(&lock->wait_lock, flags);
60738
60739 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
60740 done:
60741 lock_acquired(&lock->dep_map, ip);
60742 /* got the lock - rejoice! */
60743 - mutex_remove_waiter(lock, &waiter, current_thread_info());
60744 + mutex_remove_waiter(lock, &waiter, task);
60745 mutex_set_owner(lock);
60746
60747 /* set it to 0 if there are no waiters left: */
60748 diff -urNp linux-2.6.32.42/kernel/mutex-debug.c linux-2.6.32.42/kernel/mutex-debug.c
60749 --- linux-2.6.32.42/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
60750 +++ linux-2.6.32.42/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
60751 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60752 }
60753
60754 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60755 - struct thread_info *ti)
60756 + struct task_struct *task)
60757 {
60758 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60759
60760 /* Mark the current thread as blocked on the lock: */
60761 - ti->task->blocked_on = waiter;
60762 + task->blocked_on = waiter;
60763 }
60764
60765 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60766 - struct thread_info *ti)
60767 + struct task_struct *task)
60768 {
60769 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60770 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60771 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60772 - ti->task->blocked_on = NULL;
60773 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
60774 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60775 + task->blocked_on = NULL;
60776
60777 list_del_init(&waiter->list);
60778 waiter->task = NULL;
60779 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
60780 return;
60781
60782 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
60783 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
60784 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
60785 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
60786 mutex_clear_owner(lock);
60787 }
60788 diff -urNp linux-2.6.32.42/kernel/mutex-debug.h linux-2.6.32.42/kernel/mutex-debug.h
60789 --- linux-2.6.32.42/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
60790 +++ linux-2.6.32.42/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
60791 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
60792 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60793 extern void debug_mutex_add_waiter(struct mutex *lock,
60794 struct mutex_waiter *waiter,
60795 - struct thread_info *ti);
60796 + struct task_struct *task);
60797 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60798 - struct thread_info *ti);
60799 + struct task_struct *task);
60800 extern void debug_mutex_unlock(struct mutex *lock);
60801 extern void debug_mutex_init(struct mutex *lock, const char *name,
60802 struct lock_class_key *key);
60803
60804 static inline void mutex_set_owner(struct mutex *lock)
60805 {
60806 - lock->owner = current_thread_info();
60807 + lock->owner = current;
60808 }
60809
60810 static inline void mutex_clear_owner(struct mutex *lock)
60811 diff -urNp linux-2.6.32.42/kernel/mutex.h linux-2.6.32.42/kernel/mutex.h
60812 --- linux-2.6.32.42/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
60813 +++ linux-2.6.32.42/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
60814 @@ -19,7 +19,7 @@
60815 #ifdef CONFIG_SMP
60816 static inline void mutex_set_owner(struct mutex *lock)
60817 {
60818 - lock->owner = current_thread_info();
60819 + lock->owner = current;
60820 }
60821
60822 static inline void mutex_clear_owner(struct mutex *lock)
60823 diff -urNp linux-2.6.32.42/kernel/panic.c linux-2.6.32.42/kernel/panic.c
60824 --- linux-2.6.32.42/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
60825 +++ linux-2.6.32.42/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
60826 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
60827 const char *board;
60828
60829 printk(KERN_WARNING "------------[ cut here ]------------\n");
60830 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60831 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60832 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60833 if (board)
60834 printk(KERN_WARNING "Hardware name: %s\n", board);
60835 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60836 */
60837 void __stack_chk_fail(void)
60838 {
60839 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
60840 + dump_stack();
60841 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60842 __builtin_return_address(0));
60843 }
60844 EXPORT_SYMBOL(__stack_chk_fail);
60845 diff -urNp linux-2.6.32.42/kernel/params.c linux-2.6.32.42/kernel/params.c
60846 --- linux-2.6.32.42/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
60847 +++ linux-2.6.32.42/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
60848 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
60849 return ret;
60850 }
60851
60852 -static struct sysfs_ops module_sysfs_ops = {
60853 +static const struct sysfs_ops module_sysfs_ops = {
60854 .show = module_attr_show,
60855 .store = module_attr_store,
60856 };
60857 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
60858 return 0;
60859 }
60860
60861 -static struct kset_uevent_ops module_uevent_ops = {
60862 +static const struct kset_uevent_ops module_uevent_ops = {
60863 .filter = uevent_filter,
60864 };
60865
60866 diff -urNp linux-2.6.32.42/kernel/perf_event.c linux-2.6.32.42/kernel/perf_event.c
60867 --- linux-2.6.32.42/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
60868 +++ linux-2.6.32.42/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
60869 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
60870 */
60871 int sysctl_perf_event_sample_rate __read_mostly = 100000;
60872
60873 -static atomic64_t perf_event_id;
60874 +static atomic64_unchecked_t perf_event_id;
60875
60876 /*
60877 * Lock for (sysadmin-configurable) event reservations:
60878 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
60879 * In order to keep per-task stats reliable we need to flip the event
60880 * values when we flip the contexts.
60881 */
60882 - value = atomic64_read(&next_event->count);
60883 - value = atomic64_xchg(&event->count, value);
60884 - atomic64_set(&next_event->count, value);
60885 + value = atomic64_read_unchecked(&next_event->count);
60886 + value = atomic64_xchg_unchecked(&event->count, value);
60887 + atomic64_set_unchecked(&next_event->count, value);
60888
60889 swap(event->total_time_enabled, next_event->total_time_enabled);
60890 swap(event->total_time_running, next_event->total_time_running);
60891 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
60892 update_event_times(event);
60893 }
60894
60895 - return atomic64_read(&event->count);
60896 + return atomic64_read_unchecked(&event->count);
60897 }
60898
60899 /*
60900 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
60901 values[n++] = 1 + leader->nr_siblings;
60902 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60903 values[n++] = leader->total_time_enabled +
60904 - atomic64_read(&leader->child_total_time_enabled);
60905 + atomic64_read_unchecked(&leader->child_total_time_enabled);
60906 }
60907 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60908 values[n++] = leader->total_time_running +
60909 - atomic64_read(&leader->child_total_time_running);
60910 + atomic64_read_unchecked(&leader->child_total_time_running);
60911 }
60912
60913 size = n * sizeof(u64);
60914 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
60915 values[n++] = perf_event_read_value(event);
60916 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60917 values[n++] = event->total_time_enabled +
60918 - atomic64_read(&event->child_total_time_enabled);
60919 + atomic64_read_unchecked(&event->child_total_time_enabled);
60920 }
60921 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60922 values[n++] = event->total_time_running +
60923 - atomic64_read(&event->child_total_time_running);
60924 + atomic64_read_unchecked(&event->child_total_time_running);
60925 }
60926 if (read_format & PERF_FORMAT_ID)
60927 values[n++] = primary_event_id(event);
60928 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
60929 static void perf_event_reset(struct perf_event *event)
60930 {
60931 (void)perf_event_read(event);
60932 - atomic64_set(&event->count, 0);
60933 + atomic64_set_unchecked(&event->count, 0);
60934 perf_event_update_userpage(event);
60935 }
60936
60937 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
60938 ++userpg->lock;
60939 barrier();
60940 userpg->index = perf_event_index(event);
60941 - userpg->offset = atomic64_read(&event->count);
60942 + userpg->offset = atomic64_read_unchecked(&event->count);
60943 if (event->state == PERF_EVENT_STATE_ACTIVE)
60944 - userpg->offset -= atomic64_read(&event->hw.prev_count);
60945 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
60946
60947 userpg->time_enabled = event->total_time_enabled +
60948 - atomic64_read(&event->child_total_time_enabled);
60949 + atomic64_read_unchecked(&event->child_total_time_enabled);
60950
60951 userpg->time_running = event->total_time_running +
60952 - atomic64_read(&event->child_total_time_running);
60953 + atomic64_read_unchecked(&event->child_total_time_running);
60954
60955 barrier();
60956 ++userpg->lock;
60957 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
60958 u64 values[4];
60959 int n = 0;
60960
60961 - values[n++] = atomic64_read(&event->count);
60962 + values[n++] = atomic64_read_unchecked(&event->count);
60963 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60964 values[n++] = event->total_time_enabled +
60965 - atomic64_read(&event->child_total_time_enabled);
60966 + atomic64_read_unchecked(&event->child_total_time_enabled);
60967 }
60968 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60969 values[n++] = event->total_time_running +
60970 - atomic64_read(&event->child_total_time_running);
60971 + atomic64_read_unchecked(&event->child_total_time_running);
60972 }
60973 if (read_format & PERF_FORMAT_ID)
60974 values[n++] = primary_event_id(event);
60975 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
60976 if (leader != event)
60977 leader->pmu->read(leader);
60978
60979 - values[n++] = atomic64_read(&leader->count);
60980 + values[n++] = atomic64_read_unchecked(&leader->count);
60981 if (read_format & PERF_FORMAT_ID)
60982 values[n++] = primary_event_id(leader);
60983
60984 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
60985 if (sub != event)
60986 sub->pmu->read(sub);
60987
60988 - values[n++] = atomic64_read(&sub->count);
60989 + values[n++] = atomic64_read_unchecked(&sub->count);
60990 if (read_format & PERF_FORMAT_ID)
60991 values[n++] = primary_event_id(sub);
60992
60993 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
60994 {
60995 struct hw_perf_event *hwc = &event->hw;
60996
60997 - atomic64_add(nr, &event->count);
60998 + atomic64_add_unchecked(nr, &event->count);
60999
61000 if (!hwc->sample_period)
61001 return;
61002 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
61003 u64 now;
61004
61005 now = cpu_clock(cpu);
61006 - prev = atomic64_read(&event->hw.prev_count);
61007 - atomic64_set(&event->hw.prev_count, now);
61008 - atomic64_add(now - prev, &event->count);
61009 + prev = atomic64_read_unchecked(&event->hw.prev_count);
61010 + atomic64_set_unchecked(&event->hw.prev_count, now);
61011 + atomic64_add_unchecked(now - prev, &event->count);
61012 }
61013
61014 static int cpu_clock_perf_event_enable(struct perf_event *event)
61015 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
61016 struct hw_perf_event *hwc = &event->hw;
61017 int cpu = raw_smp_processor_id();
61018
61019 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
61020 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
61021 perf_swevent_start_hrtimer(event);
61022
61023 return 0;
61024 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
61025 u64 prev;
61026 s64 delta;
61027
61028 - prev = atomic64_xchg(&event->hw.prev_count, now);
61029 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
61030 delta = now - prev;
61031 - atomic64_add(delta, &event->count);
61032 + atomic64_add_unchecked(delta, &event->count);
61033 }
61034
61035 static int task_clock_perf_event_enable(struct perf_event *event)
61036 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
61037
61038 now = event->ctx->time;
61039
61040 - atomic64_set(&hwc->prev_count, now);
61041 + atomic64_set_unchecked(&hwc->prev_count, now);
61042
61043 perf_swevent_start_hrtimer(event);
61044
61045 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
61046 event->parent = parent_event;
61047
61048 event->ns = get_pid_ns(current->nsproxy->pid_ns);
61049 - event->id = atomic64_inc_return(&perf_event_id);
61050 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
61051
61052 event->state = PERF_EVENT_STATE_INACTIVE;
61053
61054 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
61055 if (child_event->attr.inherit_stat)
61056 perf_event_read_event(child_event, child);
61057
61058 - child_val = atomic64_read(&child_event->count);
61059 + child_val = atomic64_read_unchecked(&child_event->count);
61060
61061 /*
61062 * Add back the child's count to the parent's count:
61063 */
61064 - atomic64_add(child_val, &parent_event->count);
61065 - atomic64_add(child_event->total_time_enabled,
61066 + atomic64_add_unchecked(child_val, &parent_event->count);
61067 + atomic64_add_unchecked(child_event->total_time_enabled,
61068 &parent_event->child_total_time_enabled);
61069 - atomic64_add(child_event->total_time_running,
61070 + atomic64_add_unchecked(child_event->total_time_running,
61071 &parent_event->child_total_time_running);
61072
61073 /*
61074 diff -urNp linux-2.6.32.42/kernel/pid.c linux-2.6.32.42/kernel/pid.c
61075 --- linux-2.6.32.42/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
61076 +++ linux-2.6.32.42/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
61077 @@ -33,6 +33,7 @@
61078 #include <linux/rculist.h>
61079 #include <linux/bootmem.h>
61080 #include <linux/hash.h>
61081 +#include <linux/security.h>
61082 #include <linux/pid_namespace.h>
61083 #include <linux/init_task.h>
61084 #include <linux/syscalls.h>
61085 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61086
61087 int pid_max = PID_MAX_DEFAULT;
61088
61089 -#define RESERVED_PIDS 300
61090 +#define RESERVED_PIDS 500
61091
61092 int pid_max_min = RESERVED_PIDS + 1;
61093 int pid_max_max = PID_MAX_LIMIT;
61094 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
61095 */
61096 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61097 {
61098 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61099 + struct task_struct *task;
61100 +
61101 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61102 +
61103 + if (gr_pid_is_chrooted(task))
61104 + return NULL;
61105 +
61106 + return task;
61107 }
61108
61109 struct task_struct *find_task_by_vpid(pid_t vnr)
61110 diff -urNp linux-2.6.32.42/kernel/posix-cpu-timers.c linux-2.6.32.42/kernel/posix-cpu-timers.c
61111 --- linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
61112 +++ linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
61113 @@ -6,6 +6,7 @@
61114 #include <linux/posix-timers.h>
61115 #include <linux/errno.h>
61116 #include <linux/math64.h>
61117 +#include <linux/security.h>
61118 #include <asm/uaccess.h>
61119 #include <linux/kernel_stat.h>
61120 #include <trace/events/timer.h>
61121 diff -urNp linux-2.6.32.42/kernel/posix-timers.c linux-2.6.32.42/kernel/posix-timers.c
61122 --- linux-2.6.32.42/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
61123 +++ linux-2.6.32.42/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
61124 @@ -42,6 +42,7 @@
61125 #include <linux/compiler.h>
61126 #include <linux/idr.h>
61127 #include <linux/posix-timers.h>
61128 +#include <linux/grsecurity.h>
61129 #include <linux/syscalls.h>
61130 #include <linux/wait.h>
61131 #include <linux/workqueue.h>
61132 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
61133 .nsleep = no_nsleep,
61134 };
61135
61136 + pax_track_stack();
61137 +
61138 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
61139 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
61140 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61141 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61142 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61143 return -EFAULT;
61144
61145 + /* only the CLOCK_REALTIME clock can be set, all other clocks
61146 + have their clock_set fptr set to a nosettime dummy function
61147 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
61148 + call common_clock_set, which calls do_sys_settimeofday, which
61149 + we hook
61150 + */
61151 +
61152 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
61153 }
61154
61155 diff -urNp linux-2.6.32.42/kernel/power/hibernate.c linux-2.6.32.42/kernel/power/hibernate.c
61156 --- linux-2.6.32.42/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
61157 +++ linux-2.6.32.42/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
61158 @@ -48,14 +48,14 @@ enum {
61159
61160 static int hibernation_mode = HIBERNATION_SHUTDOWN;
61161
61162 -static struct platform_hibernation_ops *hibernation_ops;
61163 +static const struct platform_hibernation_ops *hibernation_ops;
61164
61165 /**
61166 * hibernation_set_ops - set the global hibernate operations
61167 * @ops: the hibernation operations to use in subsequent hibernation transitions
61168 */
61169
61170 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
61171 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
61172 {
61173 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
61174 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
61175 diff -urNp linux-2.6.32.42/kernel/power/poweroff.c linux-2.6.32.42/kernel/power/poweroff.c
61176 --- linux-2.6.32.42/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
61177 +++ linux-2.6.32.42/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
61178 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61179 .enable_mask = SYSRQ_ENABLE_BOOT,
61180 };
61181
61182 -static int pm_sysrq_init(void)
61183 +static int __init pm_sysrq_init(void)
61184 {
61185 register_sysrq_key('o', &sysrq_poweroff_op);
61186 return 0;
61187 diff -urNp linux-2.6.32.42/kernel/power/process.c linux-2.6.32.42/kernel/power/process.c
61188 --- linux-2.6.32.42/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
61189 +++ linux-2.6.32.42/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
61190 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
61191 struct timeval start, end;
61192 u64 elapsed_csecs64;
61193 unsigned int elapsed_csecs;
61194 + bool timedout = false;
61195
61196 do_gettimeofday(&start);
61197
61198 end_time = jiffies + TIMEOUT;
61199 do {
61200 todo = 0;
61201 + if (time_after(jiffies, end_time))
61202 + timedout = true;
61203 read_lock(&tasklist_lock);
61204 do_each_thread(g, p) {
61205 if (frozen(p) || !freezeable(p))
61206 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
61207 * It is "frozen enough". If the task does wake
61208 * up, it will immediately call try_to_freeze.
61209 */
61210 - if (!task_is_stopped_or_traced(p) &&
61211 - !freezer_should_skip(p))
61212 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61213 todo++;
61214 + if (timedout) {
61215 + printk(KERN_ERR "Task refusing to freeze:\n");
61216 + sched_show_task(p);
61217 + }
61218 + }
61219 } while_each_thread(g, p);
61220 read_unlock(&tasklist_lock);
61221 yield(); /* Yield is okay here */
61222 - if (time_after(jiffies, end_time))
61223 - break;
61224 - } while (todo);
61225 + } while (todo && !timedout);
61226
61227 do_gettimeofday(&end);
61228 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
61229 diff -urNp linux-2.6.32.42/kernel/power/suspend.c linux-2.6.32.42/kernel/power/suspend.c
61230 --- linux-2.6.32.42/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
61231 +++ linux-2.6.32.42/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
61232 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
61233 [PM_SUSPEND_MEM] = "mem",
61234 };
61235
61236 -static struct platform_suspend_ops *suspend_ops;
61237 +static const struct platform_suspend_ops *suspend_ops;
61238
61239 /**
61240 * suspend_set_ops - Set the global suspend method table.
61241 * @ops: Pointer to ops structure.
61242 */
61243 -void suspend_set_ops(struct platform_suspend_ops *ops)
61244 +void suspend_set_ops(const struct platform_suspend_ops *ops)
61245 {
61246 mutex_lock(&pm_mutex);
61247 suspend_ops = ops;
61248 diff -urNp linux-2.6.32.42/kernel/printk.c linux-2.6.32.42/kernel/printk.c
61249 --- linux-2.6.32.42/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
61250 +++ linux-2.6.32.42/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
61251 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
61252 char c;
61253 int error = 0;
61254
61255 +#ifdef CONFIG_GRKERNSEC_DMESG
61256 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
61257 + return -EPERM;
61258 +#endif
61259 +
61260 error = security_syslog(type);
61261 if (error)
61262 return error;
61263 diff -urNp linux-2.6.32.42/kernel/profile.c linux-2.6.32.42/kernel/profile.c
61264 --- linux-2.6.32.42/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61265 +++ linux-2.6.32.42/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61266 @@ -39,7 +39,7 @@ struct profile_hit {
61267 /* Oprofile timer tick hook */
61268 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61269
61270 -static atomic_t *prof_buffer;
61271 +static atomic_unchecked_t *prof_buffer;
61272 static unsigned long prof_len, prof_shift;
61273
61274 int prof_on __read_mostly;
61275 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61276 hits[i].pc = 0;
61277 continue;
61278 }
61279 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61280 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61281 hits[i].hits = hits[i].pc = 0;
61282 }
61283 }
61284 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61285 * Add the current hit(s) and flush the write-queue out
61286 * to the global buffer:
61287 */
61288 - atomic_add(nr_hits, &prof_buffer[pc]);
61289 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61290 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61291 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61292 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61293 hits[i].pc = hits[i].hits = 0;
61294 }
61295 out:
61296 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61297 if (prof_on != type || !prof_buffer)
61298 return;
61299 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61300 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61301 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61302 }
61303 #endif /* !CONFIG_SMP */
61304 EXPORT_SYMBOL_GPL(profile_hits);
61305 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61306 return -EFAULT;
61307 buf++; p++; count--; read++;
61308 }
61309 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61310 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61311 if (copy_to_user(buf, (void *)pnt, count))
61312 return -EFAULT;
61313 read += count;
61314 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61315 }
61316 #endif
61317 profile_discard_flip_buffers();
61318 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61319 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61320 return count;
61321 }
61322
61323 diff -urNp linux-2.6.32.42/kernel/ptrace.c linux-2.6.32.42/kernel/ptrace.c
61324 --- linux-2.6.32.42/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61325 +++ linux-2.6.32.42/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61326 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61327 return ret;
61328 }
61329
61330 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61331 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61332 + unsigned int log)
61333 {
61334 const struct cred *cred = current_cred(), *tcred;
61335
61336 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61337 cred->gid != tcred->egid ||
61338 cred->gid != tcred->sgid ||
61339 cred->gid != tcred->gid) &&
61340 - !capable(CAP_SYS_PTRACE)) {
61341 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61342 + (log && !capable(CAP_SYS_PTRACE)))
61343 + ) {
61344 rcu_read_unlock();
61345 return -EPERM;
61346 }
61347 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61348 smp_rmb();
61349 if (task->mm)
61350 dumpable = get_dumpable(task->mm);
61351 - if (!dumpable && !capable(CAP_SYS_PTRACE))
61352 + if (!dumpable &&
61353 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61354 + (log && !capable(CAP_SYS_PTRACE))))
61355 return -EPERM;
61356
61357 return security_ptrace_access_check(task, mode);
61358 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61359 {
61360 int err;
61361 task_lock(task);
61362 - err = __ptrace_may_access(task, mode);
61363 + err = __ptrace_may_access(task, mode, 0);
61364 + task_unlock(task);
61365 + return !err;
61366 +}
61367 +
61368 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61369 +{
61370 + int err;
61371 + task_lock(task);
61372 + err = __ptrace_may_access(task, mode, 1);
61373 task_unlock(task);
61374 return !err;
61375 }
61376 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61377 goto out;
61378
61379 task_lock(task);
61380 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61381 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61382 task_unlock(task);
61383 if (retval)
61384 goto unlock_creds;
61385 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61386 goto unlock_tasklist;
61387
61388 task->ptrace = PT_PTRACED;
61389 - if (capable(CAP_SYS_PTRACE))
61390 + if (capable_nolog(CAP_SYS_PTRACE))
61391 task->ptrace |= PT_PTRACE_CAP;
61392
61393 __ptrace_link(task, current);
61394 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61395 {
61396 int copied = 0;
61397
61398 + pax_track_stack();
61399 +
61400 while (len > 0) {
61401 char buf[128];
61402 int this_len, retval;
61403 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61404 {
61405 int copied = 0;
61406
61407 + pax_track_stack();
61408 +
61409 while (len > 0) {
61410 char buf[128];
61411 int this_len, retval;
61412 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61413 int ret = -EIO;
61414 siginfo_t siginfo;
61415
61416 + pax_track_stack();
61417 +
61418 switch (request) {
61419 case PTRACE_PEEKTEXT:
61420 case PTRACE_PEEKDATA:
61421 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61422 ret = ptrace_setoptions(child, data);
61423 break;
61424 case PTRACE_GETEVENTMSG:
61425 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61426 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61427 break;
61428
61429 case PTRACE_GETSIGINFO:
61430 ret = ptrace_getsiginfo(child, &siginfo);
61431 if (!ret)
61432 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
61433 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61434 &siginfo);
61435 break;
61436
61437 case PTRACE_SETSIGINFO:
61438 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61439 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61440 sizeof siginfo))
61441 ret = -EFAULT;
61442 else
61443 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61444 goto out;
61445 }
61446
61447 + if (gr_handle_ptrace(child, request)) {
61448 + ret = -EPERM;
61449 + goto out_put_task_struct;
61450 + }
61451 +
61452 if (request == PTRACE_ATTACH) {
61453 ret = ptrace_attach(child);
61454 /*
61455 * Some architectures need to do book-keeping after
61456 * a ptrace attach.
61457 */
61458 - if (!ret)
61459 + if (!ret) {
61460 arch_ptrace_attach(child);
61461 + gr_audit_ptrace(child);
61462 + }
61463 goto out_put_task_struct;
61464 }
61465
61466 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61467 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61468 if (copied != sizeof(tmp))
61469 return -EIO;
61470 - return put_user(tmp, (unsigned long __user *)data);
61471 + return put_user(tmp, (__force unsigned long __user *)data);
61472 }
61473
61474 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61475 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61476 siginfo_t siginfo;
61477 int ret;
61478
61479 + pax_track_stack();
61480 +
61481 switch (request) {
61482 case PTRACE_PEEKTEXT:
61483 case PTRACE_PEEKDATA:
61484 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61485 goto out;
61486 }
61487
61488 + if (gr_handle_ptrace(child, request)) {
61489 + ret = -EPERM;
61490 + goto out_put_task_struct;
61491 + }
61492 +
61493 if (request == PTRACE_ATTACH) {
61494 ret = ptrace_attach(child);
61495 /*
61496 * Some architectures need to do book-keeping after
61497 * a ptrace attach.
61498 */
61499 - if (!ret)
61500 + if (!ret) {
61501 arch_ptrace_attach(child);
61502 + gr_audit_ptrace(child);
61503 + }
61504 goto out_put_task_struct;
61505 }
61506
61507 diff -urNp linux-2.6.32.42/kernel/rcutorture.c linux-2.6.32.42/kernel/rcutorture.c
61508 --- linux-2.6.32.42/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61509 +++ linux-2.6.32.42/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61510 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61511 { 0 };
61512 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61513 { 0 };
61514 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61515 -static atomic_t n_rcu_torture_alloc;
61516 -static atomic_t n_rcu_torture_alloc_fail;
61517 -static atomic_t n_rcu_torture_free;
61518 -static atomic_t n_rcu_torture_mberror;
61519 -static atomic_t n_rcu_torture_error;
61520 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61521 +static atomic_unchecked_t n_rcu_torture_alloc;
61522 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61523 +static atomic_unchecked_t n_rcu_torture_free;
61524 +static atomic_unchecked_t n_rcu_torture_mberror;
61525 +static atomic_unchecked_t n_rcu_torture_error;
61526 static long n_rcu_torture_timers;
61527 static struct list_head rcu_torture_removed;
61528 static cpumask_var_t shuffle_tmp_mask;
61529 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61530
61531 spin_lock_bh(&rcu_torture_lock);
61532 if (list_empty(&rcu_torture_freelist)) {
61533 - atomic_inc(&n_rcu_torture_alloc_fail);
61534 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61535 spin_unlock_bh(&rcu_torture_lock);
61536 return NULL;
61537 }
61538 - atomic_inc(&n_rcu_torture_alloc);
61539 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61540 p = rcu_torture_freelist.next;
61541 list_del_init(p);
61542 spin_unlock_bh(&rcu_torture_lock);
61543 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61544 static void
61545 rcu_torture_free(struct rcu_torture *p)
61546 {
61547 - atomic_inc(&n_rcu_torture_free);
61548 + atomic_inc_unchecked(&n_rcu_torture_free);
61549 spin_lock_bh(&rcu_torture_lock);
61550 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61551 spin_unlock_bh(&rcu_torture_lock);
61552 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61553 i = rp->rtort_pipe_count;
61554 if (i > RCU_TORTURE_PIPE_LEN)
61555 i = RCU_TORTURE_PIPE_LEN;
61556 - atomic_inc(&rcu_torture_wcount[i]);
61557 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61558 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61559 rp->rtort_mbtest = 0;
61560 rcu_torture_free(rp);
61561 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61562 i = rp->rtort_pipe_count;
61563 if (i > RCU_TORTURE_PIPE_LEN)
61564 i = RCU_TORTURE_PIPE_LEN;
61565 - atomic_inc(&rcu_torture_wcount[i]);
61566 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61567 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61568 rp->rtort_mbtest = 0;
61569 list_del(&rp->rtort_free);
61570 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61571 i = old_rp->rtort_pipe_count;
61572 if (i > RCU_TORTURE_PIPE_LEN)
61573 i = RCU_TORTURE_PIPE_LEN;
61574 - atomic_inc(&rcu_torture_wcount[i]);
61575 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61576 old_rp->rtort_pipe_count++;
61577 cur_ops->deferred_free(old_rp);
61578 }
61579 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61580 return;
61581 }
61582 if (p->rtort_mbtest == 0)
61583 - atomic_inc(&n_rcu_torture_mberror);
61584 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61585 spin_lock(&rand_lock);
61586 cur_ops->read_delay(&rand);
61587 n_rcu_torture_timers++;
61588 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61589 continue;
61590 }
61591 if (p->rtort_mbtest == 0)
61592 - atomic_inc(&n_rcu_torture_mberror);
61593 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61594 cur_ops->read_delay(&rand);
61595 preempt_disable();
61596 pipe_count = p->rtort_pipe_count;
61597 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61598 rcu_torture_current,
61599 rcu_torture_current_version,
61600 list_empty(&rcu_torture_freelist),
61601 - atomic_read(&n_rcu_torture_alloc),
61602 - atomic_read(&n_rcu_torture_alloc_fail),
61603 - atomic_read(&n_rcu_torture_free),
61604 - atomic_read(&n_rcu_torture_mberror),
61605 + atomic_read_unchecked(&n_rcu_torture_alloc),
61606 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61607 + atomic_read_unchecked(&n_rcu_torture_free),
61608 + atomic_read_unchecked(&n_rcu_torture_mberror),
61609 n_rcu_torture_timers);
61610 - if (atomic_read(&n_rcu_torture_mberror) != 0)
61611 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61612 cnt += sprintf(&page[cnt], " !!!");
61613 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61614 if (i > 1) {
61615 cnt += sprintf(&page[cnt], "!!! ");
61616 - atomic_inc(&n_rcu_torture_error);
61617 + atomic_inc_unchecked(&n_rcu_torture_error);
61618 WARN_ON_ONCE(1);
61619 }
61620 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61621 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61622 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61623 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61624 cnt += sprintf(&page[cnt], " %d",
61625 - atomic_read(&rcu_torture_wcount[i]));
61626 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61627 }
61628 cnt += sprintf(&page[cnt], "\n");
61629 if (cur_ops->stats)
61630 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61631
61632 if (cur_ops->cleanup)
61633 cur_ops->cleanup();
61634 - if (atomic_read(&n_rcu_torture_error))
61635 + if (atomic_read_unchecked(&n_rcu_torture_error))
61636 rcu_torture_print_module_parms("End of test: FAILURE");
61637 else
61638 rcu_torture_print_module_parms("End of test: SUCCESS");
61639 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
61640
61641 rcu_torture_current = NULL;
61642 rcu_torture_current_version = 0;
61643 - atomic_set(&n_rcu_torture_alloc, 0);
61644 - atomic_set(&n_rcu_torture_alloc_fail, 0);
61645 - atomic_set(&n_rcu_torture_free, 0);
61646 - atomic_set(&n_rcu_torture_mberror, 0);
61647 - atomic_set(&n_rcu_torture_error, 0);
61648 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61649 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61650 + atomic_set_unchecked(&n_rcu_torture_free, 0);
61651 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61652 + atomic_set_unchecked(&n_rcu_torture_error, 0);
61653 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61654 - atomic_set(&rcu_torture_wcount[i], 0);
61655 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61656 for_each_possible_cpu(cpu) {
61657 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61658 per_cpu(rcu_torture_count, cpu)[i] = 0;
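
Most of the kernel/rcutorture.c changes are mechanical atomic_t → atomic_unchecked_t conversions: these counters are pure statistics, so they opt out of the REFCOUNT-style overflow checking that the patch applies to ordinary atomics. Below is a minimal userspace sketch of the checked/unchecked split, assuming a GCC or Clang toolchain for __builtin_add_overflow(); the type and helper names are illustrative and are not the kernel definitions.

    #include <stdio.h>
    #include <limits.h>

    /* Sketch of the atomic_t / atomic_unchecked_t split: checked counters
     * refuse to wrap on overflow, unchecked ones are plain statistics that
     * may wrap.  Userspace illustration only. */

    typedef struct { int counter; } atomic_checked_t;
    typedef struct { int counter; } atomic_unchecked_t;

    static void atomic_inc_checked(atomic_checked_t *v)
    {
            int next;
            if (__builtin_add_overflow(v->counter, 1, &next)) {
                    fprintf(stderr, "refcount overflow caught, value saturated\n");
                    return;                 /* the real code also reports the event */
            }
            v->counter = next;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            /* wraps silently; fine for statistics like n_rcu_torture_alloc */
            v->counter = (int)((unsigned int)v->counter + 1u);
    }

    int main(void)
    {
            atomic_checked_t   ref  = { INT_MAX };
            atomic_unchecked_t stat = { INT_MAX };

            atomic_inc_checked(&ref);
            atomic_inc_unchecked(&stat);

            printf("checked=%d unchecked=%d\n", ref.counter, stat.counter);
            return 0;
    }

Reference counts stay on the checked type so an overflow saturates and gets reported instead of wrapping toward a use-after-free; counters that merely feed debug output, like the torture-test tallies here, are converted to the _unchecked variants.
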
61659 diff -urNp linux-2.6.32.42/kernel/rcutree.c linux-2.6.32.42/kernel/rcutree.c
61660 --- linux-2.6.32.42/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
61661 +++ linux-2.6.32.42/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
61662 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
61663 /*
61664 * Do softirq processing for the current CPU.
61665 */
61666 -static void rcu_process_callbacks(struct softirq_action *unused)
61667 +static void rcu_process_callbacks(void)
61668 {
61669 /*
61670 * Memory references from any prior RCU read-side critical sections
61671 diff -urNp linux-2.6.32.42/kernel/rcutree_plugin.h linux-2.6.32.42/kernel/rcutree_plugin.h
61672 --- linux-2.6.32.42/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
61673 +++ linux-2.6.32.42/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
61674 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
61675 */
61676 void __rcu_read_lock(void)
61677 {
61678 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
61679 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
61680 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
61681 }
61682 EXPORT_SYMBOL_GPL(__rcu_read_lock);
61683 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
61684 struct task_struct *t = current;
61685
61686 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
61687 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
61688 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
61689 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
61690 rcu_read_unlock_special(t);
61691 }
61692 diff -urNp linux-2.6.32.42/kernel/relay.c linux-2.6.32.42/kernel/relay.c
61693 --- linux-2.6.32.42/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
61694 +++ linux-2.6.32.42/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
61695 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
61696 unsigned int flags,
61697 int *nonpad_ret)
61698 {
61699 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
61700 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
61701 struct rchan_buf *rbuf = in->private_data;
61702 unsigned int subbuf_size = rbuf->chan->subbuf_size;
61703 uint64_t pos = (uint64_t) *ppos;
61704 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
61705 .ops = &relay_pipe_buf_ops,
61706 .spd_release = relay_page_release,
61707 };
61708 + ssize_t ret;
61709 +
61710 + pax_track_stack();
61711
61712 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61713 return 0;
61714 diff -urNp linux-2.6.32.42/kernel/resource.c linux-2.6.32.42/kernel/resource.c
61715 --- linux-2.6.32.42/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
61716 +++ linux-2.6.32.42/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
61717 @@ -132,8 +132,18 @@ static const struct file_operations proc
61718
61719 static int __init ioresources_init(void)
61720 {
61721 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61722 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61723 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61724 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61725 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61726 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61727 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61728 +#endif
61729 +#else
61730 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61731 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61732 +#endif
61733 return 0;
61734 }
61735 __initcall(ioresources_init);
61736 diff -urNp linux-2.6.32.42/kernel/rtmutex.c linux-2.6.32.42/kernel/rtmutex.c
61737 --- linux-2.6.32.42/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
61738 +++ linux-2.6.32.42/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
61739 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
61740 */
61741 spin_lock_irqsave(&pendowner->pi_lock, flags);
61742
61743 - WARN_ON(!pendowner->pi_blocked_on);
61744 + BUG_ON(!pendowner->pi_blocked_on);
61745 WARN_ON(pendowner->pi_blocked_on != waiter);
61746 WARN_ON(pendowner->pi_blocked_on->lock != lock);
61747
61748 diff -urNp linux-2.6.32.42/kernel/rtmutex-tester.c linux-2.6.32.42/kernel/rtmutex-tester.c
61749 --- linux-2.6.32.42/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
61750 +++ linux-2.6.32.42/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
61751 @@ -21,7 +21,7 @@
61752 #define MAX_RT_TEST_MUTEXES 8
61753
61754 static spinlock_t rttest_lock;
61755 -static atomic_t rttest_event;
61756 +static atomic_unchecked_t rttest_event;
61757
61758 struct test_thread_data {
61759 int opcode;
61760 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
61761
61762 case RTTEST_LOCKCONT:
61763 td->mutexes[td->opdata] = 1;
61764 - td->event = atomic_add_return(1, &rttest_event);
61765 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61766 return 0;
61767
61768 case RTTEST_RESET:
61769 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
61770 return 0;
61771
61772 case RTTEST_RESETEVENT:
61773 - atomic_set(&rttest_event, 0);
61774 + atomic_set_unchecked(&rttest_event, 0);
61775 return 0;
61776
61777 default:
61778 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
61779 return ret;
61780
61781 td->mutexes[id] = 1;
61782 - td->event = atomic_add_return(1, &rttest_event);
61783 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61784 rt_mutex_lock(&mutexes[id]);
61785 - td->event = atomic_add_return(1, &rttest_event);
61786 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61787 td->mutexes[id] = 4;
61788 return 0;
61789
61790 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
61791 return ret;
61792
61793 td->mutexes[id] = 1;
61794 - td->event = atomic_add_return(1, &rttest_event);
61795 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61796 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61797 - td->event = atomic_add_return(1, &rttest_event);
61798 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61799 td->mutexes[id] = ret ? 0 : 4;
61800 return ret ? -EINTR : 0;
61801
61802 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
61803 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61804 return ret;
61805
61806 - td->event = atomic_add_return(1, &rttest_event);
61807 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61808 rt_mutex_unlock(&mutexes[id]);
61809 - td->event = atomic_add_return(1, &rttest_event);
61810 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61811 td->mutexes[id] = 0;
61812 return 0;
61813
61814 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
61815 break;
61816
61817 td->mutexes[dat] = 2;
61818 - td->event = atomic_add_return(1, &rttest_event);
61819 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61820 break;
61821
61822 case RTTEST_LOCKBKL:
61823 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
61824 return;
61825
61826 td->mutexes[dat] = 3;
61827 - td->event = atomic_add_return(1, &rttest_event);
61828 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61829 break;
61830
61831 case RTTEST_LOCKNOWAIT:
61832 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
61833 return;
61834
61835 td->mutexes[dat] = 1;
61836 - td->event = atomic_add_return(1, &rttest_event);
61837 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61838 return;
61839
61840 case RTTEST_LOCKBKL:
61841 diff -urNp linux-2.6.32.42/kernel/sched.c linux-2.6.32.42/kernel/sched.c
61842 --- linux-2.6.32.42/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
61843 +++ linux-2.6.32.42/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
61844 @@ -5043,7 +5043,7 @@ out:
61845 * In CONFIG_NO_HZ case, the idle load balance owner will do the
61846 * rebalancing for all the cpus for whom scheduler ticks are stopped.
61847 */
61848 -static void run_rebalance_domains(struct softirq_action *h)
61849 +static void run_rebalance_domains(void)
61850 {
61851 int this_cpu = smp_processor_id();
61852 struct rq *this_rq = cpu_rq(this_cpu);
61853 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
61854 struct rq *rq;
61855 int cpu;
61856
61857 + pax_track_stack();
61858 +
61859 need_resched:
61860 preempt_disable();
61861 cpu = smp_processor_id();
61862 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
61863 * Look out! "owner" is an entirely speculative pointer
61864 * access and not reliable.
61865 */
61866 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
61867 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
61868 {
61869 unsigned int cpu;
61870 struct rq *rq;
61871 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
61872 * DEBUG_PAGEALLOC could have unmapped it if
61873 * the mutex owner just released it and exited.
61874 */
61875 - if (probe_kernel_address(&owner->cpu, cpu))
61876 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
61877 return 0;
61878 #else
61879 - cpu = owner->cpu;
61880 + cpu = task_thread_info(owner)->cpu;
61881 #endif
61882
61883 /*
61884 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
61885 /*
61886 * Is that owner really running on that cpu?
61887 */
61888 - if (task_thread_info(rq->curr) != owner || need_resched())
61889 + if (rq->curr != owner || need_resched())
61890 return 0;
61891
61892 cpu_relax();
61893 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
61894 /* convert nice value [19,-20] to rlimit style value [1,40] */
61895 int nice_rlim = 20 - nice;
61896
61897 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61898 +
61899 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
61900 capable(CAP_SYS_NICE));
61901 }
61902 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61903 if (nice > 19)
61904 nice = 19;
61905
61906 - if (increment < 0 && !can_nice(current, nice))
61907 + if (increment < 0 && (!can_nice(current, nice) ||
61908 + gr_handle_chroot_nice()))
61909 return -EPERM;
61910
61911 retval = security_task_setnice(current, nice);
61912 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
61913 long power;
61914 int weight;
61915
61916 - WARN_ON(!sd || !sd->groups);
61917 + BUG_ON(!sd || !sd->groups);
61918
61919 if (cpu != group_first_cpu(sd->groups))
61920 return;
61921 diff -urNp linux-2.6.32.42/kernel/signal.c linux-2.6.32.42/kernel/signal.c
61922 --- linux-2.6.32.42/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
61923 +++ linux-2.6.32.42/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
61924 @@ -41,12 +41,12 @@
61925
61926 static struct kmem_cache *sigqueue_cachep;
61927
61928 -static void __user *sig_handler(struct task_struct *t, int sig)
61929 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61930 {
61931 return t->sighand->action[sig - 1].sa.sa_handler;
61932 }
61933
61934 -static int sig_handler_ignored(void __user *handler, int sig)
61935 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61936 {
61937 /* Is it explicitly or implicitly ignored? */
61938 return handler == SIG_IGN ||
61939 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
61940 static int sig_task_ignored(struct task_struct *t, int sig,
61941 int from_ancestor_ns)
61942 {
61943 - void __user *handler;
61944 + __sighandler_t handler;
61945
61946 handler = sig_handler(t, sig);
61947
61948 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
61949 */
61950 user = get_uid(__task_cred(t)->user);
61951 atomic_inc(&user->sigpending);
61952 +
61953 + if (!override_rlimit)
61954 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61955 if (override_rlimit ||
61956 atomic_read(&user->sigpending) <=
61957 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
61958 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
61959
61960 int unhandled_signal(struct task_struct *tsk, int sig)
61961 {
61962 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61963 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61964 if (is_global_init(tsk))
61965 return 1;
61966 if (handler != SIG_IGN && handler != SIG_DFL)
61967 @@ -627,6 +630,9 @@ static int check_kill_permission(int sig
61968 }
61969 }
61970
61971 + if (gr_handle_signal(t, sig))
61972 + return -EPERM;
61973 +
61974 return security_task_kill(t, info, sig, 0);
61975 }
61976
61977 @@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
61978 return send_signal(sig, info, p, 1);
61979 }
61980
61981 -static int
61982 +int
61983 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61984 {
61985 return send_signal(sig, info, t, 0);
61986 @@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
61987 unsigned long int flags;
61988 int ret, blocked, ignored;
61989 struct k_sigaction *action;
61990 + int is_unhandled = 0;
61991
61992 spin_lock_irqsave(&t->sighand->siglock, flags);
61993 action = &t->sighand->action[sig-1];
61994 @@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
61995 }
61996 if (action->sa.sa_handler == SIG_DFL)
61997 t->signal->flags &= ~SIGNAL_UNKILLABLE;
61998 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
61999 + is_unhandled = 1;
62000 ret = specific_send_sig_info(sig, info, t);
62001 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62002
62003 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
62004 + normal operation */
62005 + if (is_unhandled) {
62006 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62007 + gr_handle_crash(t, sig);
62008 + }
62009 +
62010 return ret;
62011 }
62012
62013 @@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
62014 {
62015 int ret = check_kill_permission(sig, info, p);
62016
62017 - if (!ret && sig)
62018 + if (!ret && sig) {
62019 ret = do_send_sig_info(sig, info, p, true);
62020 + if (!ret)
62021 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62022 + }
62023
62024 return ret;
62025 }
62026 @@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
62027 {
62028 siginfo_t info;
62029
62030 + pax_track_stack();
62031 +
62032 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62033
62034 memset(&info, 0, sizeof info);
62035 diff -urNp linux-2.6.32.42/kernel/smp.c linux-2.6.32.42/kernel/smp.c
62036 --- linux-2.6.32.42/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
62037 +++ linux-2.6.32.42/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
62038 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
62039 }
62040 EXPORT_SYMBOL(smp_call_function);
62041
62042 -void ipi_call_lock(void)
62043 +void ipi_call_lock(void) __acquires(call_function.lock)
62044 {
62045 spin_lock(&call_function.lock);
62046 }
62047
62048 -void ipi_call_unlock(void)
62049 +void ipi_call_unlock(void) __releases(call_function.lock)
62050 {
62051 spin_unlock(&call_function.lock);
62052 }
62053
62054 -void ipi_call_lock_irq(void)
62055 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
62056 {
62057 spin_lock_irq(&call_function.lock);
62058 }
62059
62060 -void ipi_call_unlock_irq(void)
62061 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
62062 {
62063 spin_unlock_irq(&call_function.lock);
62064 }
62065 diff -urNp linux-2.6.32.42/kernel/softirq.c linux-2.6.32.42/kernel/softirq.c
62066 --- linux-2.6.32.42/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
62067 +++ linux-2.6.32.42/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
62068 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62069
62070 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62071
62072 -char *softirq_to_name[NR_SOFTIRQS] = {
62073 +const char * const softirq_to_name[NR_SOFTIRQS] = {
62074 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62075 "TASKLET", "SCHED", "HRTIMER", "RCU"
62076 };
62077 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
62078
62079 asmlinkage void __do_softirq(void)
62080 {
62081 - struct softirq_action *h;
62082 + const struct softirq_action *h;
62083 __u32 pending;
62084 int max_restart = MAX_SOFTIRQ_RESTART;
62085 int cpu;
62086 @@ -233,7 +233,7 @@ restart:
62087 kstat_incr_softirqs_this_cpu(h - softirq_vec);
62088
62089 trace_softirq_entry(h, softirq_vec);
62090 - h->action(h);
62091 + h->action();
62092 trace_softirq_exit(h, softirq_vec);
62093 if (unlikely(prev_count != preempt_count())) {
62094 printk(KERN_ERR "huh, entered softirq %td %s %p"
62095 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
62096 local_irq_restore(flags);
62097 }
62098
62099 -void open_softirq(int nr, void (*action)(struct softirq_action *))
62100 +void open_softirq(int nr, void (*action)(void))
62101 {
62102 softirq_vec[nr].action = action;
62103 }
62104 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
62105
62106 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62107
62108 -static void tasklet_action(struct softirq_action *a)
62109 +static void tasklet_action(void)
62110 {
62111 struct tasklet_struct *list;
62112
62113 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
62114 }
62115 }
62116
62117 -static void tasklet_hi_action(struct softirq_action *a)
62118 +static void tasklet_hi_action(void)
62119 {
62120 struct tasklet_struct *list;
62121
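
The kernel/softirq.c, sched.c, timer.c and rcutree.c hunks all follow from one signature change: softirq handlers drop their unused struct softirq_action * argument, so the vector can hold plain void (*)(void) pointers and tables such as softirq_to_name can become const. The small userspace model below shows that registration-and-dispatch shape; NR_SOFTIRQS and the handler bodies are invented here purely for illustration.

    #include <stdio.h>

    /* Toy model of the softirq dispatch after the signature change:
     * handlers take no argument and the vector stores void (*)(void). */

    #define NR_SOFTIRQS 2

    struct softirq_action {
            void (*action)(void);
    };

    static struct softirq_action softirq_vec[NR_SOFTIRQS];

    static void open_softirq(int nr, void (*action)(void))
    {
            softirq_vec[nr].action = action;
    }

    static void run_timer_softirq(void)  { puts("TIMER softirq"); }
    static void tasklet_action(void)     { puts("TASKLET softirq"); }

    static void __do_softirq(void)
    {
            /* the kernel walks only the pending bits; here we call everything */
            for (int i = 0; i < NR_SOFTIRQS; i++)
                    if (softirq_vec[i].action)
                            softirq_vec[i].action();
    }

    int main(void)
    {
            open_softirq(0, run_timer_softirq);
            open_softirq(1, tasklet_action);
            __do_softirq();
            return 0;
    }

Since no handler ever used the pointer it was passed, removing it costs nothing and shrinks the attack surface of function-pointer-driven dispatch, which is why every in-tree handler touched by this patch is converted in the same way.
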
62122 diff -urNp linux-2.6.32.42/kernel/sys.c linux-2.6.32.42/kernel/sys.c
62123 --- linux-2.6.32.42/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
62124 +++ linux-2.6.32.42/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
62125 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
62126 error = -EACCES;
62127 goto out;
62128 }
62129 +
62130 + if (gr_handle_chroot_setpriority(p, niceval)) {
62131 + error = -EACCES;
62132 + goto out;
62133 + }
62134 +
62135 no_nice = security_task_setnice(p, niceval);
62136 if (no_nice) {
62137 error = no_nice;
62138 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
62139 !(user = find_user(who)))
62140 goto out_unlock; /* No processes for this user */
62141
62142 - do_each_thread(g, p)
62143 + do_each_thread(g, p) {
62144 if (__task_cred(p)->uid == who)
62145 error = set_one_prio(p, niceval, error);
62146 - while_each_thread(g, p);
62147 + } while_each_thread(g, p);
62148 if (who != cred->uid)
62149 free_uid(user); /* For find_user() */
62150 break;
62151 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
62152 !(user = find_user(who)))
62153 goto out_unlock; /* No processes for this user */
62154
62155 - do_each_thread(g, p)
62156 + do_each_thread(g, p) {
62157 if (__task_cred(p)->uid == who) {
62158 niceval = 20 - task_nice(p);
62159 if (niceval > retval)
62160 retval = niceval;
62161 }
62162 - while_each_thread(g, p);
62163 + } while_each_thread(g, p);
62164 if (who != cred->uid)
62165 free_uid(user); /* for find_user() */
62166 break;
62167 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62168 goto error;
62169 }
62170
62171 + if (gr_check_group_change(new->gid, new->egid, -1))
62172 + goto error;
62173 +
62174 if (rgid != (gid_t) -1 ||
62175 (egid != (gid_t) -1 && egid != old->gid))
62176 new->sgid = new->egid;
62177 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62178 goto error;
62179
62180 retval = -EPERM;
62181 +
62182 + if (gr_check_group_change(gid, gid, gid))
62183 + goto error;
62184 +
62185 if (capable(CAP_SETGID))
62186 new->gid = new->egid = new->sgid = new->fsgid = gid;
62187 else if (gid == old->gid || gid == old->sgid)
62188 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62189 goto error;
62190 }
62191
62192 + if (gr_check_user_change(new->uid, new->euid, -1))
62193 + goto error;
62194 +
62195 if (new->uid != old->uid) {
62196 retval = set_user(new);
62197 if (retval < 0)
62198 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62199 goto error;
62200
62201 retval = -EPERM;
62202 +
62203 + if (gr_check_crash_uid(uid))
62204 + goto error;
62205 + if (gr_check_user_change(uid, uid, uid))
62206 + goto error;
62207 +
62208 if (capable(CAP_SETUID)) {
62209 new->suid = new->uid = uid;
62210 if (uid != old->uid) {
62211 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62212 goto error;
62213 }
62214
62215 + if (gr_check_user_change(ruid, euid, -1))
62216 + goto error;
62217 +
62218 if (ruid != (uid_t) -1) {
62219 new->uid = ruid;
62220 if (ruid != old->uid) {
62221 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62222 goto error;
62223 }
62224
62225 + if (gr_check_group_change(rgid, egid, -1))
62226 + goto error;
62227 +
62228 if (rgid != (gid_t) -1)
62229 new->gid = rgid;
62230 if (egid != (gid_t) -1)
62231 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62232 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
62233 goto error;
62234
62235 + if (gr_check_user_change(-1, -1, uid))
62236 + goto error;
62237 +
62238 if (uid == old->uid || uid == old->euid ||
62239 uid == old->suid || uid == old->fsuid ||
62240 capable(CAP_SETUID)) {
62241 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62242 if (gid == old->gid || gid == old->egid ||
62243 gid == old->sgid || gid == old->fsgid ||
62244 capable(CAP_SETGID)) {
62245 + if (gr_check_group_change(-1, -1, gid))
62246 + goto error;
62247 +
62248 if (gid != old_fsgid) {
62249 new->fsgid = gid;
62250 goto change_okay;
62251 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62252 error = get_dumpable(me->mm);
62253 break;
62254 case PR_SET_DUMPABLE:
62255 - if (arg2 < 0 || arg2 > 1) {
62256 + if (arg2 > 1) {
62257 error = -EINVAL;
62258 break;
62259 }
62260 diff -urNp linux-2.6.32.42/kernel/sysctl.c linux-2.6.32.42/kernel/sysctl.c
62261 --- linux-2.6.32.42/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62262 +++ linux-2.6.32.42/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62263 @@ -63,6 +63,13 @@
62264 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62265
62266 #if defined(CONFIG_SYSCTL)
62267 +#include <linux/grsecurity.h>
62268 +#include <linux/grinternal.h>
62269 +
62270 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62271 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62272 + const int op);
62273 +extern int gr_handle_chroot_sysctl(const int op);
62274
62275 /* External variables not in a header file. */
62276 extern int C_A_D;
62277 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62278 static int proc_taint(struct ctl_table *table, int write,
62279 void __user *buffer, size_t *lenp, loff_t *ppos);
62280 #endif
62281 +extern ctl_table grsecurity_table[];
62282
62283 static struct ctl_table root_table[];
62284 static struct ctl_table_root sysctl_table_root;
62285 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62286 int sysctl_legacy_va_layout;
62287 #endif
62288
62289 +#ifdef CONFIG_PAX_SOFTMODE
62290 +static ctl_table pax_table[] = {
62291 + {
62292 + .ctl_name = CTL_UNNUMBERED,
62293 + .procname = "softmode",
62294 + .data = &pax_softmode,
62295 + .maxlen = sizeof(unsigned int),
62296 + .mode = 0600,
62297 + .proc_handler = &proc_dointvec,
62298 + },
62299 +
62300 + { .ctl_name = 0 }
62301 +};
62302 +#endif
62303 +
62304 extern int prove_locking;
62305 extern int lock_stat;
62306
62307 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62308 #endif
62309
62310 static struct ctl_table kern_table[] = {
62311 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62312 + {
62313 + .ctl_name = CTL_UNNUMBERED,
62314 + .procname = "grsecurity",
62315 + .mode = 0500,
62316 + .child = grsecurity_table,
62317 + },
62318 +#endif
62319 +
62320 +#ifdef CONFIG_PAX_SOFTMODE
62321 + {
62322 + .ctl_name = CTL_UNNUMBERED,
62323 + .procname = "pax",
62324 + .mode = 0500,
62325 + .child = pax_table,
62326 + },
62327 +#endif
62328 +
62329 {
62330 .ctl_name = CTL_UNNUMBERED,
62331 .procname = "sched_child_runs_first",
62332 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62333 .data = &modprobe_path,
62334 .maxlen = KMOD_PATH_LEN,
62335 .mode = 0644,
62336 - .proc_handler = &proc_dostring,
62337 - .strategy = &sysctl_string,
62338 + .proc_handler = &proc_dostring_modpriv,
62339 + .strategy = &sysctl_string_modpriv,
62340 },
62341 {
62342 .ctl_name = CTL_UNNUMBERED,
62343 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62344 .mode = 0644,
62345 .proc_handler = &proc_dointvec
62346 },
62347 + {
62348 + .procname = "heap_stack_gap",
62349 + .data = &sysctl_heap_stack_gap,
62350 + .maxlen = sizeof(sysctl_heap_stack_gap),
62351 + .mode = 0644,
62352 + .proc_handler = proc_doulongvec_minmax,
62353 + },
62354 #else
62355 {
62356 .ctl_name = CTL_UNNUMBERED,
62357 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62358 return 0;
62359 }
62360
62361 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62362 +
62363 static int parse_table(int __user *name, int nlen,
62364 void __user *oldval, size_t __user *oldlenp,
62365 void __user *newval, size_t newlen,
62366 @@ -1821,7 +1871,7 @@ repeat:
62367 if (n == table->ctl_name) {
62368 int error;
62369 if (table->child) {
62370 - if (sysctl_perm(root, table, MAY_EXEC))
62371 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62372 return -EPERM;
62373 name++;
62374 nlen--;
62375 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62376 int error;
62377 int mode;
62378
62379 + if (table->parent != NULL && table->parent->procname != NULL &&
62380 + table->procname != NULL &&
62381 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62382 + return -EACCES;
62383 + if (gr_handle_chroot_sysctl(op))
62384 + return -EACCES;
62385 + error = gr_handle_sysctl(table, op);
62386 + if (error)
62387 + return error;
62388 +
62389 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62390 + if (error)
62391 + return error;
62392 +
62393 + if (root->permissions)
62394 + mode = root->permissions(root, current->nsproxy, table);
62395 + else
62396 + mode = table->mode;
62397 +
62398 + return test_perm(mode, op);
62399 +}
62400 +
62401 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62402 +{
62403 + int error;
62404 + int mode;
62405 +
62406 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62407 if (error)
62408 return error;
62409 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62410 buffer, lenp, ppos);
62411 }
62412
62413 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62414 + void __user *buffer, size_t *lenp, loff_t *ppos)
62415 +{
62416 + if (write && !capable(CAP_SYS_MODULE))
62417 + return -EPERM;
62418 +
62419 + return _proc_do_string(table->data, table->maxlen, write,
62420 + buffer, lenp, ppos);
62421 +}
62422 +
62423
62424 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62425 int *valp,
62426 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62427 vleft = table->maxlen / sizeof(unsigned long);
62428 left = *lenp;
62429
62430 - for (; left && vleft--; i++, min++, max++, first=0) {
62431 + for (; left && vleft--; i++, first=0) {
62432 if (write) {
62433 while (left) {
62434 char c;
62435 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62436 return -ENOSYS;
62437 }
62438
62439 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62440 + void __user *buffer, size_t *lenp, loff_t *ppos)
62441 +{
62442 + return -ENOSYS;
62443 +}
62444 +
62445 int proc_dointvec(struct ctl_table *table, int write,
62446 void __user *buffer, size_t *lenp, loff_t *ppos)
62447 {
62448 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62449 return 1;
62450 }
62451
62452 +int sysctl_string_modpriv(struct ctl_table *table,
62453 + void __user *oldval, size_t __user *oldlenp,
62454 + void __user *newval, size_t newlen)
62455 +{
62456 + if (newval && newlen && !capable(CAP_SYS_MODULE))
62457 + return -EPERM;
62458 +
62459 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
62460 +}
62461 +
62462 /*
62463 * This function makes sure that all of the integers in the vector
62464 * are between the minimum and maximum values given in the arrays
62465 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62466 return -ENOSYS;
62467 }
62468
62469 +int sysctl_string_modpriv(struct ctl_table *table,
62470 + void __user *oldval, size_t __user *oldlenp,
62471 + void __user *newval, size_t newlen)
62472 +{
62473 + return -ENOSYS;
62474 +}
62475 +
62476 int sysctl_intvec(struct ctl_table *table,
62477 void __user *oldval, size_t __user *oldlenp,
62478 void __user *newval, size_t newlen)
62479 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62480 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62481 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62482 EXPORT_SYMBOL(proc_dostring);
62483 +EXPORT_SYMBOL(proc_dostring_modpriv);
62484 EXPORT_SYMBOL(proc_doulongvec_minmax);
62485 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62486 EXPORT_SYMBOL(register_sysctl_table);
62487 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62488 EXPORT_SYMBOL(sysctl_jiffies);
62489 EXPORT_SYMBOL(sysctl_ms_jiffies);
62490 EXPORT_SYMBOL(sysctl_string);
62491 +EXPORT_SYMBOL(sysctl_string_modpriv);
62492 EXPORT_SYMBOL(sysctl_data);
62493 EXPORT_SYMBOL(unregister_sysctl_table);
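
The new proc_dostring_modpriv()/sysctl_string_modpriv() handlers introduced above wrap the stock string handlers so that writes to the modprobe_path entry additionally require CAP_SYS_MODULE. A hedged userspace sketch of that wrapper pattern follows; capable() is a stub and the inner handler stands in for the kernel's _proc_do_string(), neither is the real implementation.

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    /* Sketch of the "modpriv" pattern: identical to the plain string handler
     * except that any write first requires CAP_SYS_MODULE. */

    #define CAP_SYS_MODULE 16

    static bool capable(int cap) { (void)cap; return false; }   /* pretend unprivileged */

    static char modprobe_path[256] = "/sbin/modprobe";

    static int proc_dostring(char *data, size_t maxlen, bool write, const char *buf)
    {
            if (write)
                    snprintf(data, maxlen, "%s", buf);
            else
                    printf("%s\n", data);
            return 0;
    }

    static int proc_dostring_modpriv(char *data, size_t maxlen, bool write, const char *buf)
    {
            if (write && !capable(CAP_SYS_MODULE))
                    return -1;                          /* -EPERM in the kernel */
            return proc_dostring(data, maxlen, write, buf);
    }

    int main(void)
    {
            proc_dostring_modpriv(modprobe_path, sizeof(modprobe_path), false, NULL);
            if (proc_dostring_modpriv(modprobe_path, sizeof(modprobe_path), true, "/tmp/other") < 0)
                    puts("write rejected without CAP_SYS_MODULE");
            return 0;
    }

Gating the write on CAP_SYS_MODULE rather than plain root means a process that has dropped module-loading privileges can no longer redirect modprobe_path to an attacker-controlled helper, while reads remain unrestricted.
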
62494 diff -urNp linux-2.6.32.42/kernel/sysctl_check.c linux-2.6.32.42/kernel/sysctl_check.c
62495 --- linux-2.6.32.42/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62496 +++ linux-2.6.32.42/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62497 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62498 } else {
62499 if ((table->strategy == sysctl_data) ||
62500 (table->strategy == sysctl_string) ||
62501 + (table->strategy == sysctl_string_modpriv) ||
62502 (table->strategy == sysctl_intvec) ||
62503 (table->strategy == sysctl_jiffies) ||
62504 (table->strategy == sysctl_ms_jiffies) ||
62505 (table->proc_handler == proc_dostring) ||
62506 + (table->proc_handler == proc_dostring_modpriv) ||
62507 (table->proc_handler == proc_dointvec) ||
62508 (table->proc_handler == proc_dointvec_minmax) ||
62509 (table->proc_handler == proc_dointvec_jiffies) ||
62510 diff -urNp linux-2.6.32.42/kernel/taskstats.c linux-2.6.32.42/kernel/taskstats.c
62511 --- linux-2.6.32.42/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62512 +++ linux-2.6.32.42/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62513 @@ -26,9 +26,12 @@
62514 #include <linux/cgroup.h>
62515 #include <linux/fs.h>
62516 #include <linux/file.h>
62517 +#include <linux/grsecurity.h>
62518 #include <net/genetlink.h>
62519 #include <asm/atomic.h>
62520
62521 +extern int gr_is_taskstats_denied(int pid);
62522 +
62523 /*
62524 * Maximum length of a cpumask that can be specified in
62525 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62526 @@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62527 size_t size;
62528 cpumask_var_t mask;
62529
62530 + if (gr_is_taskstats_denied(current->pid))
62531 + return -EACCES;
62532 +
62533 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62534 return -ENOMEM;
62535
62536 diff -urNp linux-2.6.32.42/kernel/time/tick-broadcast.c linux-2.6.32.42/kernel/time/tick-broadcast.c
62537 --- linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62538 +++ linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62539 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62540 * then clear the broadcast bit.
62541 */
62542 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62543 - int cpu = smp_processor_id();
62544 + cpu = smp_processor_id();
62545
62546 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62547 tick_broadcast_clear_oneshot(cpu);
62548 diff -urNp linux-2.6.32.42/kernel/time/timekeeping.c linux-2.6.32.42/kernel/time/timekeeping.c
62549 --- linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
62550 +++ linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
62551 @@ -14,6 +14,7 @@
62552 #include <linux/init.h>
62553 #include <linux/mm.h>
62554 #include <linux/sched.h>
62555 +#include <linux/grsecurity.h>
62556 #include <linux/sysdev.h>
62557 #include <linux/clocksource.h>
62558 #include <linux/jiffies.h>
62559 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
62560 */
62561 struct timespec ts = xtime;
62562 timespec_add_ns(&ts, nsec);
62563 - ACCESS_ONCE(xtime_cache) = ts;
62564 + ACCESS_ONCE_RW(xtime_cache) = ts;
62565 }
62566
62567 /* must hold xtime_lock */
62568 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
62569 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62570 return -EINVAL;
62571
62572 + gr_log_timechange();
62573 +
62574 write_seqlock_irqsave(&xtime_lock, flags);
62575
62576 timekeeping_forward_now();
62577 diff -urNp linux-2.6.32.42/kernel/time/timer_list.c linux-2.6.32.42/kernel/time/timer_list.c
62578 --- linux-2.6.32.42/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62579 +++ linux-2.6.32.42/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62580 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62581
62582 static void print_name_offset(struct seq_file *m, void *sym)
62583 {
62584 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62585 + SEQ_printf(m, "<%p>", NULL);
62586 +#else
62587 char symname[KSYM_NAME_LEN];
62588
62589 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62590 SEQ_printf(m, "<%p>", sym);
62591 else
62592 SEQ_printf(m, "%s", symname);
62593 +#endif
62594 }
62595
62596 static void
62597 @@ -112,7 +116,11 @@ next_one:
62598 static void
62599 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62600 {
62601 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62602 + SEQ_printf(m, " .base: %p\n", NULL);
62603 +#else
62604 SEQ_printf(m, " .base: %p\n", base);
62605 +#endif
62606 SEQ_printf(m, " .index: %d\n",
62607 base->index);
62608 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62609 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62610 {
62611 struct proc_dir_entry *pe;
62612
62613 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62614 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62615 +#else
62616 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62617 +#endif
62618 if (!pe)
62619 return -ENOMEM;
62620 return 0;
62621 diff -urNp linux-2.6.32.42/kernel/time/timer_stats.c linux-2.6.32.42/kernel/time/timer_stats.c
62622 --- linux-2.6.32.42/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62623 +++ linux-2.6.32.42/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62624 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62625 static unsigned long nr_entries;
62626 static struct entry entries[MAX_ENTRIES];
62627
62628 -static atomic_t overflow_count;
62629 +static atomic_unchecked_t overflow_count;
62630
62631 /*
62632 * The entries are in a hash-table, for fast lookup:
62633 @@ -140,7 +140,7 @@ static void reset_entries(void)
62634 nr_entries = 0;
62635 memset(entries, 0, sizeof(entries));
62636 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62637 - atomic_set(&overflow_count, 0);
62638 + atomic_set_unchecked(&overflow_count, 0);
62639 }
62640
62641 static struct entry *alloc_entry(void)
62642 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62643 if (likely(entry))
62644 entry->count++;
62645 else
62646 - atomic_inc(&overflow_count);
62647 + atomic_inc_unchecked(&overflow_count);
62648
62649 out_unlock:
62650 spin_unlock_irqrestore(lock, flags);
62651 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62652
62653 static void print_name_offset(struct seq_file *m, unsigned long addr)
62654 {
62655 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62656 + seq_printf(m, "<%p>", NULL);
62657 +#else
62658 char symname[KSYM_NAME_LEN];
62659
62660 if (lookup_symbol_name(addr, symname) < 0)
62661 seq_printf(m, "<%p>", (void *)addr);
62662 else
62663 seq_printf(m, "%s", symname);
62664 +#endif
62665 }
62666
62667 static int tstats_show(struct seq_file *m, void *v)
62668 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62669
62670 seq_puts(m, "Timer Stats Version: v0.2\n");
62671 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62672 - if (atomic_read(&overflow_count))
62673 + if (atomic_read_unchecked(&overflow_count))
62674 seq_printf(m, "Overflow: %d entries\n",
62675 - atomic_read(&overflow_count));
62676 + atomic_read_unchecked(&overflow_count));
62677
62678 for (i = 0; i < nr_entries; i++) {
62679 entry = entries + i;
62680 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
62681 {
62682 struct proc_dir_entry *pe;
62683
62684 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62685 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62686 +#else
62687 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62688 +#endif
62689 if (!pe)
62690 return -ENOMEM;
62691 return 0;
62692 diff -urNp linux-2.6.32.42/kernel/time.c linux-2.6.32.42/kernel/time.c
62693 --- linux-2.6.32.42/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
62694 +++ linux-2.6.32.42/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
62695 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
62696 return error;
62697
62698 if (tz) {
62699 + /* we log in do_settimeofday called below, so don't log twice
62700 + */
62701 + if (!tv)
62702 + gr_log_timechange();
62703 +
62704 /* SMP safe, global irq locking makes it work. */
62705 sys_tz = *tz;
62706 update_vsyscall_tz();
62707 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
62708 * Avoid unnecessary multiplications/divisions in the
62709 * two most common HZ cases:
62710 */
62711 -unsigned int inline jiffies_to_msecs(const unsigned long j)
62712 +inline unsigned int jiffies_to_msecs(const unsigned long j)
62713 {
62714 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
62715 return (MSEC_PER_SEC / HZ) * j;
62716 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
62717 }
62718 EXPORT_SYMBOL(jiffies_to_msecs);
62719
62720 -unsigned int inline jiffies_to_usecs(const unsigned long j)
62721 +inline unsigned int jiffies_to_usecs(const unsigned long j)
62722 {
62723 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
62724 return (USEC_PER_SEC / HZ) * j;
62725 diff -urNp linux-2.6.32.42/kernel/timer.c linux-2.6.32.42/kernel/timer.c
62726 --- linux-2.6.32.42/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
62727 +++ linux-2.6.32.42/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
62728 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
62729 /*
62730 * This function runs timers and the timer-tq in bottom half context.
62731 */
62732 -static void run_timer_softirq(struct softirq_action *h)
62733 +static void run_timer_softirq(void)
62734 {
62735 struct tvec_base *base = __get_cpu_var(tvec_bases);
62736
62737 diff -urNp linux-2.6.32.42/kernel/trace/blktrace.c linux-2.6.32.42/kernel/trace/blktrace.c
62738 --- linux-2.6.32.42/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
62739 +++ linux-2.6.32.42/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
62740 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
62741 struct blk_trace *bt = filp->private_data;
62742 char buf[16];
62743
62744 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62745 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62746
62747 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62748 }
62749 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
62750 return 1;
62751
62752 bt = buf->chan->private_data;
62753 - atomic_inc(&bt->dropped);
62754 + atomic_inc_unchecked(&bt->dropped);
62755 return 0;
62756 }
62757
62758 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
62759
62760 bt->dir = dir;
62761 bt->dev = dev;
62762 - atomic_set(&bt->dropped, 0);
62763 + atomic_set_unchecked(&bt->dropped, 0);
62764
62765 ret = -EIO;
62766 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62767 diff -urNp linux-2.6.32.42/kernel/trace/ftrace.c linux-2.6.32.42/kernel/trace/ftrace.c
62768 --- linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
62769 +++ linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
62770 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
62771
62772 ip = rec->ip;
62773
62774 + ret = ftrace_arch_code_modify_prepare();
62775 + FTRACE_WARN_ON(ret);
62776 + if (ret)
62777 + return 0;
62778 +
62779 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62780 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62781 if (ret) {
62782 ftrace_bug(ret, ip);
62783 rec->flags |= FTRACE_FL_FAILED;
62784 - return 0;
62785 }
62786 - return 1;
62787 + return ret ? 0 : 1;
62788 }
62789
62790 /*
62791 diff -urNp linux-2.6.32.42/kernel/trace/ring_buffer.c linux-2.6.32.42/kernel/trace/ring_buffer.c
62792 --- linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
62793 +++ linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
62794 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
62795 * the reader page). But if the next page is a header page,
62796 * its flags will be non zero.
62797 */
62798 -static int inline
62799 +static inline int
62800 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
62801 struct buffer_page *page, struct list_head *list)
62802 {
62803 diff -urNp linux-2.6.32.42/kernel/trace/trace.c linux-2.6.32.42/kernel/trace/trace.c
62804 --- linux-2.6.32.42/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
62805 +++ linux-2.6.32.42/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
62806 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
62807 size_t rem;
62808 unsigned int i;
62809
62810 + pax_track_stack();
62811 +
62812 /* copy the tracer to avoid using a global lock all around */
62813 mutex_lock(&trace_types_lock);
62814 if (unlikely(old_tracer != current_trace && current_trace)) {
62815 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
62816 int entries, size, i;
62817 size_t ret;
62818
62819 + pax_track_stack();
62820 +
62821 if (*ppos & (PAGE_SIZE - 1)) {
62822 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
62823 return -EINVAL;
62824 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
62825 };
62826 #endif
62827
62828 -static struct dentry *d_tracer;
62829 -
62830 struct dentry *tracing_init_dentry(void)
62831 {
62832 + static struct dentry *d_tracer;
62833 static int once;
62834
62835 if (d_tracer)
62836 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
62837 return d_tracer;
62838 }
62839
62840 -static struct dentry *d_percpu;
62841 -
62842 struct dentry *tracing_dentry_percpu(void)
62843 {
62844 + static struct dentry *d_percpu;
62845 static int once;
62846 struct dentry *d_tracer;
62847
62848 diff -urNp linux-2.6.32.42/kernel/trace/trace_events.c linux-2.6.32.42/kernel/trace/trace_events.c
62849 --- linux-2.6.32.42/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
62850 +++ linux-2.6.32.42/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
62851 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
62852 * Modules must own their file_operations to keep up with
62853 * reference counting.
62854 */
62855 +
62856 +/* cannot be const */
62857 struct ftrace_module_file_ops {
62858 struct list_head list;
62859 struct module *mod;
62860 diff -urNp linux-2.6.32.42/kernel/trace/trace_mmiotrace.c linux-2.6.32.42/kernel/trace/trace_mmiotrace.c
62861 --- linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
62862 +++ linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
62863 @@ -23,7 +23,7 @@ struct header_iter {
62864 static struct trace_array *mmio_trace_array;
62865 static bool overrun_detected;
62866 static unsigned long prev_overruns;
62867 -static atomic_t dropped_count;
62868 +static atomic_unchecked_t dropped_count;
62869
62870 static void mmio_reset_data(struct trace_array *tr)
62871 {
62872 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
62873
62874 static unsigned long count_overruns(struct trace_iterator *iter)
62875 {
62876 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
62877 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62878 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62879
62880 if (over > prev_overruns)
62881 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
62882 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62883 sizeof(*entry), 0, pc);
62884 if (!event) {
62885 - atomic_inc(&dropped_count);
62886 + atomic_inc_unchecked(&dropped_count);
62887 return;
62888 }
62889 entry = ring_buffer_event_data(event);
62890 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
62891 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62892 sizeof(*entry), 0, pc);
62893 if (!event) {
62894 - atomic_inc(&dropped_count);
62895 + atomic_inc_unchecked(&dropped_count);
62896 return;
62897 }
62898 entry = ring_buffer_event_data(event);
62899 diff -urNp linux-2.6.32.42/kernel/trace/trace_output.c linux-2.6.32.42/kernel/trace/trace_output.c
62900 --- linux-2.6.32.42/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
62901 +++ linux-2.6.32.42/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
62902 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
62903 return 0;
62904 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62905 if (!IS_ERR(p)) {
62906 - p = mangle_path(s->buffer + s->len, p, "\n");
62907 + p = mangle_path(s->buffer + s->len, p, "\n\\");
62908 if (p) {
62909 s->len = p - s->buffer;
62910 return 1;
62911 diff -urNp linux-2.6.32.42/kernel/trace/trace_stack.c linux-2.6.32.42/kernel/trace/trace_stack.c
62912 --- linux-2.6.32.42/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
62913 +++ linux-2.6.32.42/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
62914 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62915 return;
62916
62917 /* we do not handle interrupt stacks yet */
62918 - if (!object_is_on_stack(&this_size))
62919 + if (!object_starts_on_stack(&this_size))
62920 return;
62921
62922 local_irq_save(flags);
62923 diff -urNp linux-2.6.32.42/kernel/trace/trace_workqueue.c linux-2.6.32.42/kernel/trace/trace_workqueue.c
62924 --- linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
62925 +++ linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
62926 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
62927 int cpu;
62928 pid_t pid;
62929 /* Can be inserted from interrupt or user context, need to be atomic */
62930 - atomic_t inserted;
62931 + atomic_unchecked_t inserted;
62932 /*
62933 * Don't need to be atomic, works are serialized in a single workqueue thread
62934 * on a single CPU.
62935 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
62936 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62937 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62938 if (node->pid == wq_thread->pid) {
62939 - atomic_inc(&node->inserted);
62940 + atomic_inc_unchecked(&node->inserted);
62941 goto found;
62942 }
62943 }
62944 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
62945 tsk = get_pid_task(pid, PIDTYPE_PID);
62946 if (tsk) {
62947 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62948 - atomic_read(&cws->inserted), cws->executed,
62949 + atomic_read_unchecked(&cws->inserted), cws->executed,
62950 tsk->comm);
62951 put_task_struct(tsk);
62952 }
62953 diff -urNp linux-2.6.32.42/kernel/user.c linux-2.6.32.42/kernel/user.c
62954 --- linux-2.6.32.42/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
62955 +++ linux-2.6.32.42/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
62956 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
62957 spin_lock_irq(&uidhash_lock);
62958 up = uid_hash_find(uid, hashent);
62959 if (up) {
62960 + put_user_ns(ns);
62961 key_put(new->uid_keyring);
62962 key_put(new->session_keyring);
62963 kmem_cache_free(uid_cachep, new);
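
The single added line in kernel/user.c releases the user-namespace reference on the path where a concurrent alloc_uid() already inserted the same uid: the new, now-discarded entry took a namespace reference earlier, so this early-return path has to drop it as well or the namespace leaks. A generic sketch of the rule that every bail-out path must balance the references it holds (get_ref()/put_ref() are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct ns { int refs; };                     /* stand-in for a refcounted namespace */

static void get_ref(struct ns *ns) { ns->refs++; }
static void put_ref(struct ns *ns) { ns->refs--; }

/* Try to register a new entry that pins ns; if somebody beat us to it,
 * the reference taken for the new entry must be dropped on that path too.
 */
static bool insert_or_reuse(struct ns *ns, bool already_present)
{
	get_ref(ns);                         /* reference held by the new entry */

	if (already_present) {
		put_ref(ns);                 /* the fix: balance it before bailing out */
		return true;                 /* caller reuses the existing entry */
	}
	return false;                        /* new entry keeps its reference */
}

int main(void)
{
	struct ns ns = { .refs = 1 };

	insert_or_reuse(&ns, true);
	printf("refs=%d (balanced)\n", ns.refs);
	return 0;
}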
62964 diff -urNp linux-2.6.32.42/lib/bug.c linux-2.6.32.42/lib/bug.c
62965 --- linux-2.6.32.42/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
62966 +++ linux-2.6.32.42/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
62967 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
62968 return BUG_TRAP_TYPE_NONE;
62969
62970 bug = find_bug(bugaddr);
62971 + if (!bug)
62972 + return BUG_TRAP_TYPE_NONE;
62973
62974 printk(KERN_EMERG "------------[ cut here ]------------\n");
62975
62976 diff -urNp linux-2.6.32.42/lib/debugobjects.c linux-2.6.32.42/lib/debugobjects.c
62977 --- linux-2.6.32.42/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
62978 +++ linux-2.6.32.42/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
62979 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
62980 if (limit > 4)
62981 return;
62982
62983 - is_on_stack = object_is_on_stack(addr);
62984 + is_on_stack = object_starts_on_stack(addr);
62985 if (is_on_stack == onstack)
62986 return;
62987
62988 diff -urNp linux-2.6.32.42/lib/dma-debug.c linux-2.6.32.42/lib/dma-debug.c
62989 --- linux-2.6.32.42/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
62990 +++ linux-2.6.32.42/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
62991 @@ -861,7 +861,7 @@ out:
62992
62993 static void check_for_stack(struct device *dev, void *addr)
62994 {
62995 - if (object_is_on_stack(addr))
62996 + if (object_starts_on_stack(addr))
62997 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62998 "stack [addr=%p]\n", addr);
62999 }
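
Three hunks in a row (trace_stack.c, debugobjects.c, dma-debug.c) switch callers from object_is_on_stack() to object_starts_on_stack(). The helper only ever examines the address it is handed, i.e. where the object starts, and the PaX rename documents that at every call site. A rough user-space analogue of the underlying check — does this address fall inside the calling thread's stack? — using glibc's pthread stack bounds (a sketch, not the kernel helper):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Does the object's *start* address lie inside the calling thread's stack? */
static bool object_starts_on_stack(const void *obj)
{
	pthread_attr_t attr;
	void *stack_base;
	size_t stack_size;
	bool on_stack;

	if (pthread_getattr_np(pthread_self(), &attr) != 0)
		return false;
	pthread_attr_getstack(&attr, &stack_base, &stack_size);
	pthread_attr_destroy(&attr);

	on_stack = (const char *)obj >= (const char *)stack_base &&
		   (const char *)obj <  (const char *)stack_base + stack_size;
	return on_stack;
}

static int file_scope_obj;   /* static storage: not on the stack */

int main(void)
{
	int local;

	printf("local on stack: %d, static on stack: %d\n",
	       object_starts_on_stack(&local),
	       object_starts_on_stack(&file_scope_obj));
	return 0;
}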
63000 diff -urNp linux-2.6.32.42/lib/idr.c linux-2.6.32.42/lib/idr.c
63001 --- linux-2.6.32.42/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
63002 +++ linux-2.6.32.42/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
63003 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
63004 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
63005
63006 /* if already at the top layer, we need to grow */
63007 - if (id >= 1 << (idp->layers * IDR_BITS)) {
63008 + if (id >= (1 << (idp->layers * IDR_BITS))) {
63009 *starting_id = id;
63010 return IDR_NEED_TO_GROW;
63011 }
63012 diff -urNp linux-2.6.32.42/lib/inflate.c linux-2.6.32.42/lib/inflate.c
63013 --- linux-2.6.32.42/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
63014 +++ linux-2.6.32.42/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
63015 @@ -266,7 +266,7 @@ static void free(void *where)
63016 malloc_ptr = free_mem_ptr;
63017 }
63018 #else
63019 -#define malloc(a) kmalloc(a, GFP_KERNEL)
63020 +#define malloc(a) kmalloc((a), GFP_KERNEL)
63021 #define free(a) kfree(a)
63022 #endif
63023
63024 diff -urNp linux-2.6.32.42/lib/Kconfig.debug linux-2.6.32.42/lib/Kconfig.debug
63025 --- linux-2.6.32.42/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
63026 +++ linux-2.6.32.42/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
63027 @@ -905,7 +905,7 @@ config LATENCYTOP
63028 select STACKTRACE
63029 select SCHEDSTATS
63030 select SCHED_DEBUG
63031 - depends on HAVE_LATENCYTOP_SUPPORT
63032 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
63033 help
63034 Enable this option if you want to use the LatencyTOP tool
63035 to find out which userspace is blocking on what kernel operations.
63036 diff -urNp linux-2.6.32.42/lib/kobject.c linux-2.6.32.42/lib/kobject.c
63037 --- linux-2.6.32.42/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
63038 +++ linux-2.6.32.42/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
63039 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
63040 return ret;
63041 }
63042
63043 -struct sysfs_ops kobj_sysfs_ops = {
63044 +const struct sysfs_ops kobj_sysfs_ops = {
63045 .show = kobj_attr_show,
63046 .store = kobj_attr_store,
63047 };
63048 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
63049 * If the kset was not able to be created, NULL will be returned.
63050 */
63051 static struct kset *kset_create(const char *name,
63052 - struct kset_uevent_ops *uevent_ops,
63053 + const struct kset_uevent_ops *uevent_ops,
63054 struct kobject *parent_kobj)
63055 {
63056 struct kset *kset;
63057 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
63058 * If the kset was not able to be created, NULL will be returned.
63059 */
63060 struct kset *kset_create_and_add(const char *name,
63061 - struct kset_uevent_ops *uevent_ops,
63062 + const struct kset_uevent_ops *uevent_ops,
63063 struct kobject *parent_kobj)
63064 {
63065 struct kset *kset;
63066 diff -urNp linux-2.6.32.42/lib/kobject_uevent.c linux-2.6.32.42/lib/kobject_uevent.c
63067 --- linux-2.6.32.42/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
63068 +++ linux-2.6.32.42/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
63069 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
63070 const char *subsystem;
63071 struct kobject *top_kobj;
63072 struct kset *kset;
63073 - struct kset_uevent_ops *uevent_ops;
63074 + const struct kset_uevent_ops *uevent_ops;
63075 u64 seq;
63076 int i = 0;
63077 int retval = 0;
63078 diff -urNp linux-2.6.32.42/lib/kref.c linux-2.6.32.42/lib/kref.c
63079 --- linux-2.6.32.42/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
63080 +++ linux-2.6.32.42/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
63081 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
63082 */
63083 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63084 {
63085 - WARN_ON(release == NULL);
63086 + BUG_ON(release == NULL);
63087 WARN_ON(release == (void (*)(struct kref *))kfree);
63088
63089 if (atomic_dec_and_test(&kref->refcount)) {
63090 diff -urNp linux-2.6.32.42/lib/parser.c linux-2.6.32.42/lib/parser.c
63091 --- linux-2.6.32.42/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
63092 +++ linux-2.6.32.42/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
63093 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
63094 char *buf;
63095 int ret;
63096
63097 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
63098 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
63099 if (!buf)
63100 return -ENOMEM;
63101 memcpy(buf, s->from, s->to - s->from);
63102 diff -urNp linux-2.6.32.42/lib/radix-tree.c linux-2.6.32.42/lib/radix-tree.c
63103 --- linux-2.6.32.42/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
63104 +++ linux-2.6.32.42/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
63105 @@ -81,7 +81,7 @@ struct radix_tree_preload {
63106 int nr;
63107 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63108 };
63109 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63110 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63111
63112 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
63113 {
63114 diff -urNp linux-2.6.32.42/lib/random32.c linux-2.6.32.42/lib/random32.c
63115 --- linux-2.6.32.42/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
63116 +++ linux-2.6.32.42/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
63117 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
63118 */
63119 static inline u32 __seed(u32 x, u32 m)
63120 {
63121 - return (x < m) ? x + m : x;
63122 + return (x <= m) ? x + m + 1 : x;
63123 }
63124
63125 /**
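
The small change to __seed() in lib/random32.c tightens the seeding invariant of the Tausworthe generator: its three state words must stay strictly above small minimums (the published constraints for taus88 are s1 > 1, s2 > 7, s3 > 15), but the old helper returned x unchanged whenever x == m, so a seed exactly equal to the threshold slipped through and produced a degenerate state word. The patched helper always returns a value strictly greater than m. A quick check of both variants at the boundary (illustration only):

#include <stdio.h>

/* Original helper: can return a value equal to m (e.g. x == m). */
static unsigned int seed_old(unsigned int x, unsigned int m)
{
	return (x < m) ? x + m : x;
}

/* Patched helper: always returns a value strictly greater than m. */
static unsigned int seed_new(unsigned int x, unsigned int m)
{
	return (x <= m) ? x + m + 1 : x;
}

int main(void)
{
	const unsigned int m = 1;          /* exclusive minimum for one state word */
	unsigned int x;

	for (x = 0; x <= 3; x++)
		printf("x=%u  old=%u (%s)  new=%u (%s)\n", x,
		       seed_old(x, m), seed_old(x, m) > m ? "ok" : "BAD",
		       seed_new(x, m), seed_new(x, m) > m ? "ok" : "BAD");
	return 0;
}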
63126 diff -urNp linux-2.6.32.42/lib/vsprintf.c linux-2.6.32.42/lib/vsprintf.c
63127 --- linux-2.6.32.42/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
63128 +++ linux-2.6.32.42/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
63129 @@ -16,6 +16,9 @@
63130 * - scnprintf and vscnprintf
63131 */
63132
63133 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63134 +#define __INCLUDED_BY_HIDESYM 1
63135 +#endif
63136 #include <stdarg.h>
63137 #include <linux/module.h>
63138 #include <linux/types.h>
63139 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
63140 return buf;
63141 }
63142
63143 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
63144 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
63145 {
63146 int len, i;
63147
63148 if ((unsigned long)s < PAGE_SIZE)
63149 - s = "<NULL>";
63150 + s = "(null)";
63151
63152 len = strnlen(s, spec.precision);
63153
63154 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
63155 unsigned long value = (unsigned long) ptr;
63156 #ifdef CONFIG_KALLSYMS
63157 char sym[KSYM_SYMBOL_LEN];
63158 - if (ext != 'f' && ext != 's')
63159 + if (ext != 'f' && ext != 's' && ext != 'a')
63160 sprint_symbol(sym, value);
63161 else
63162 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63163 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
63164 * - 'f' For simple symbolic function names without offset
63165 * - 'S' For symbolic direct pointers with offset
63166 * - 's' For symbolic direct pointers without offset
63167 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63168 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63169 * - 'R' For a struct resource pointer, it prints the range of
63170 * addresses (not the name nor the flags)
63171 * - 'M' For a 6-byte MAC address, it prints the address in the
63172 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
63173 struct printf_spec spec)
63174 {
63175 if (!ptr)
63176 - return string(buf, end, "(null)", spec);
63177 + return string(buf, end, "(nil)", spec);
63178
63179 switch (*fmt) {
63180 case 'F':
63181 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
63182 case 's':
63183 /* Fallthrough */
63184 case 'S':
63185 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63186 + break;
63187 +#else
63188 + return symbol_string(buf, end, ptr, spec, *fmt);
63189 +#endif
63190 + case 'a':
63191 + /* Fallthrough */
63192 + case 'A':
63193 return symbol_string(buf, end, ptr, spec, *fmt);
63194 case 'R':
63195 return resource_string(buf, end, ptr, spec);
63196 @@ -1445,7 +1458,7 @@ do { \
63197 size_t len;
63198 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
63199 || (unsigned long)save_str < PAGE_SIZE)
63200 - save_str = "<NULL>";
63201 + save_str = "(null)";
63202 len = strlen(save_str);
63203 if (str + len + 1 < end)
63204 memcpy(str, save_str, len + 1);
63205 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
63206 typeof(type) value; \
63207 if (sizeof(type) == 8) { \
63208 args = PTR_ALIGN(args, sizeof(u32)); \
63209 - *(u32 *)&value = *(u32 *)args; \
63210 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63211 + *(u32 *)&value = *(const u32 *)args; \
63212 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63213 } else { \
63214 args = PTR_ALIGN(args, sizeof(type)); \
63215 - value = *(typeof(type) *)args; \
63216 + value = *(const typeof(type) *)args; \
63217 } \
63218 args += sizeof(type); \
63219 value; \
63220 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
63221 const char *str_arg = args;
63222 size_t len = strlen(str_arg);
63223 args += len + 1;
63224 - str = string(str, end, (char *)str_arg, spec);
63225 + str = string(str, end, str_arg, spec);
63226 break;
63227 }
63228
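
The lib/vsprintf.c hunk adds 'a'/'A' as new %p extensions: they behave like 's'/'S' (symbolic pointers resolved through kallsyms) but are explicitly approved for use with GRKERNSEC_HIDESYM, whereas the ordinary 's'/'S' cases break out of the switch under HIDESYM instead of resolving a symbol name. That is why the kmemleak backtrace in the mm/kmemleak.c hunk further down is switched from %pS to %pA: it should stay readable even when general symbol printing is being suppressed. A compressed user-space sketch of the dispatch (the one-entry symbol table and lookup() are invented for the illustration, and the default path is simplified; the kernel resolves names via kallsyms):

#include <stdio.h>

#define HIDESYM 1   /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

struct sym { const void *addr; const char *name; };

static const int some_object = 42;

static const char *lookup(const void *addr, const struct sym *tab, int n)
{
	for (int i = 0; i < n; i++)
		if (tab[i].addr == addr)
			return tab[i].name;
	return "?";
}

static void print_pointer(char ext, const void *ptr,
			  const struct sym *tab, int n)
{
	switch (ext) {
	case 's':
	case 'S':
#if HIDESYM
		break;              /* hidden: no symbol lookup, use the default below */
#else
		printf("%s\n", lookup(ptr, tab, n));
		return;
#endif
	case 'a':
	case 'A':                   /* "approved": stays symbolic either way */
		printf("%s\n", lookup(ptr, tab, n));
		return;
	}
	printf("%p\n", ptr);        /* simplified default: plain pointer value */
}

int main(void)
{
	const struct sym tab[] = { { &some_object, "some_object" } };

	print_pointer('S', &some_object, tab, 1);   /* no name under HIDESYM */
	print_pointer('A', &some_object, tab, 1);   /* symbol name           */
	return 0;
}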
63229 diff -urNp linux-2.6.32.42/localversion-grsec linux-2.6.32.42/localversion-grsec
63230 --- linux-2.6.32.42/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63231 +++ linux-2.6.32.42/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
63232 @@ -0,0 +1 @@
63233 +-grsec
63234 diff -urNp linux-2.6.32.42/Makefile linux-2.6.32.42/Makefile
63235 --- linux-2.6.32.42/Makefile 2011-06-25 12:55:34.000000000 -0400
63236 +++ linux-2.6.32.42/Makefile 2011-06-25 12:56:37.000000000 -0400
63237 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63238
63239 HOSTCC = gcc
63240 HOSTCXX = g++
63241 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63242 -HOSTCXXFLAGS = -O2
63243 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63244 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63245 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63246
63247 # Decide whether to build built-in, modular, or both.
63248 # Normally, just do built-in.
63249 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
63250 KBUILD_CPPFLAGS := -D__KERNEL__
63251
63252 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63253 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
63254 -fno-strict-aliasing -fno-common \
63255 -Werror-implicit-function-declaration \
63256 -Wno-format-security \
63257 -fno-delete-null-pointer-checks
63258 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63259 KBUILD_AFLAGS := -D__ASSEMBLY__
63260
63261 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63262 @@ -403,7 +406,7 @@ endif
63263 # of make so .config is not included in this case either (for *config).
63264
63265 no-dot-config-targets := clean mrproper distclean \
63266 - cscope TAGS tags help %docs check% \
63267 + cscope gtags TAGS tags help %docs check% \
63268 include/linux/version.h headers_% \
63269 kernelrelease kernelversion
63270
63271 @@ -644,7 +647,7 @@ export mod_strip_cmd
63272
63273
63274 ifeq ($(KBUILD_EXTMOD),)
63275 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63276 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63277
63278 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63279 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63280 @@ -949,7 +952,19 @@ include/config/kernel.release: include/c
63281 # version.h and scripts_basic is processed / created.
63282
63283 # Listed in dependency order
63284 -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
63285 +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 pax-plugin
63286 +
63287 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63288 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63289 +endif
63290 +pax-plugin:
63291 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63292 + $(Q)$(MAKE) $(build)=tools/gcc
63293 +else
63294 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63295 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63296 +endif
63297 +endif
63298
63299 # prepare3 is used to check if we are building in a separate output directory,
63300 # and if so do:
63301 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
63302 endif
63303
63304 # prepare2 creates a makefile if using a separate output directory
63305 -prepare2: prepare3 outputmakefile
63306 +prepare2: prepare3 outputmakefile pax-plugin
63307
63308 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63309 include/asm include/config/auto.conf
63310 @@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63311 include/linux/autoconf.h include/linux/version.h \
63312 include/linux/utsrelease.h \
63313 include/linux/bounds.h include/asm*/asm-offsets.h \
63314 - Module.symvers Module.markers tags TAGS cscope*
63315 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63316
63317 # clean - Delete most, but leave enough to build external modules
63318 #
63319 @@ -1289,6 +1304,7 @@ help:
63320 @echo ' modules_prepare - Set up for building external modules'
63321 @echo ' tags/TAGS - Generate tags file for editors'
63322 @echo ' cscope - Generate cscope index'
63323 + @echo ' gtags - Generate GNU GLOBAL index'
63324 @echo ' kernelrelease - Output the release version string'
63325 @echo ' kernelversion - Output the version stored in Makefile'
63326 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63327 @@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63328 quiet_cmd_tags = GEN $@
63329 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63330
63331 -tags TAGS cscope: FORCE
63332 +tags TAGS cscope gtags: FORCE
63333 $(call cmd,tags)
63334
63335 # Scripts to check various things for consistency
63336 diff -urNp linux-2.6.32.42/mm/backing-dev.c linux-2.6.32.42/mm/backing-dev.c
63337 --- linux-2.6.32.42/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63338 +++ linux-2.6.32.42/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63339 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63340 * Add the default flusher task that gets created for any bdi
63341 * that has dirty data pending writeout
63342 */
63343 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63344 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63345 {
63346 if (!bdi_cap_writeback_dirty(bdi))
63347 return;
63348 diff -urNp linux-2.6.32.42/mm/filemap.c linux-2.6.32.42/mm/filemap.c
63349 --- linux-2.6.32.42/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63350 +++ linux-2.6.32.42/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63351 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63352 struct address_space *mapping = file->f_mapping;
63353
63354 if (!mapping->a_ops->readpage)
63355 - return -ENOEXEC;
63356 + return -ENODEV;
63357 file_accessed(file);
63358 vma->vm_ops = &generic_file_vm_ops;
63359 vma->vm_flags |= VM_CAN_NONLINEAR;
63360 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63361 *pos = i_size_read(inode);
63362
63363 if (limit != RLIM_INFINITY) {
63364 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63365 if (*pos >= limit) {
63366 send_sig(SIGXFSZ, current, 0);
63367 return -EFBIG;
63368 diff -urNp linux-2.6.32.42/mm/fremap.c linux-2.6.32.42/mm/fremap.c
63369 --- linux-2.6.32.42/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63370 +++ linux-2.6.32.42/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63371 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63372 retry:
63373 vma = find_vma(mm, start);
63374
63375 +#ifdef CONFIG_PAX_SEGMEXEC
63376 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63377 + goto out;
63378 +#endif
63379 +
63380 /*
63381 * Make sure the vma is shared, that it supports prefaulting,
63382 * and that the remapped range is valid and fully within
63383 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63384 /*
63385 * drop PG_Mlocked flag for over-mapped range
63386 */
63387 - unsigned int saved_flags = vma->vm_flags;
63388 + unsigned long saved_flags = vma->vm_flags;
63389 munlock_vma_pages_range(vma, start, start + size);
63390 vma->vm_flags = saved_flags;
63391 }
63392 diff -urNp linux-2.6.32.42/mm/highmem.c linux-2.6.32.42/mm/highmem.c
63393 --- linux-2.6.32.42/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
63394 +++ linux-2.6.32.42/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
63395 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
63396 * So no dangers, even with speculative execution.
63397 */
63398 page = pte_page(pkmap_page_table[i]);
63399 + pax_open_kernel();
63400 pte_clear(&init_mm, (unsigned long)page_address(page),
63401 &pkmap_page_table[i]);
63402 -
63403 + pax_close_kernel();
63404 set_page_address(page, NULL);
63405 need_flush = 1;
63406 }
63407 @@ -177,9 +178,11 @@ start:
63408 }
63409 }
63410 vaddr = PKMAP_ADDR(last_pkmap_nr);
63411 +
63412 + pax_open_kernel();
63413 set_pte_at(&init_mm, vaddr,
63414 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63415 -
63416 + pax_close_kernel();
63417 pkmap_count[last_pkmap_nr] = 1;
63418 set_page_address(page, (void *)vaddr);
63419
63420 diff -urNp linux-2.6.32.42/mm/hugetlb.c linux-2.6.32.42/mm/hugetlb.c
63421 --- linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:55:35.000000000 -0400
63422 +++ linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:56:37.000000000 -0400
63423 @@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63424 return 1;
63425 }
63426
63427 +#ifdef CONFIG_PAX_SEGMEXEC
63428 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63429 +{
63430 + struct mm_struct *mm = vma->vm_mm;
63431 + struct vm_area_struct *vma_m;
63432 + unsigned long address_m;
63433 + pte_t *ptep_m;
63434 +
63435 + vma_m = pax_find_mirror_vma(vma);
63436 + if (!vma_m)
63437 + return;
63438 +
63439 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63440 + address_m = address + SEGMEXEC_TASK_SIZE;
63441 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63442 + get_page(page_m);
63443 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63444 +}
63445 +#endif
63446 +
63447 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
63448 unsigned long address, pte_t *ptep, pte_t pte,
63449 struct page *pagecache_page)
63450 @@ -1996,6 +2016,11 @@ retry_avoidcopy:
63451 huge_ptep_clear_flush(vma, address, ptep);
63452 set_huge_pte_at(mm, address, ptep,
63453 make_huge_pte(vma, new_page, 1));
63454 +
63455 +#ifdef CONFIG_PAX_SEGMEXEC
63456 + pax_mirror_huge_pte(vma, address, new_page);
63457 +#endif
63458 +
63459 /* Make the old page be freed below */
63460 new_page = old_page;
63461 }
63462 @@ -2127,6 +2152,10 @@ retry:
63463 && (vma->vm_flags & VM_SHARED)));
63464 set_huge_pte_at(mm, address, ptep, new_pte);
63465
63466 +#ifdef CONFIG_PAX_SEGMEXEC
63467 + pax_mirror_huge_pte(vma, address, page);
63468 +#endif
63469 +
63470 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63471 /* Optimization, do the COW without a second fault */
63472 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63473 @@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
63474 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63475 struct hstate *h = hstate_vma(vma);
63476
63477 +#ifdef CONFIG_PAX_SEGMEXEC
63478 + struct vm_area_struct *vma_m;
63479 +
63480 + vma_m = pax_find_mirror_vma(vma);
63481 + if (vma_m) {
63482 + unsigned long address_m;
63483 +
63484 + if (vma->vm_start > vma_m->vm_start) {
63485 + address_m = address;
63486 + address -= SEGMEXEC_TASK_SIZE;
63487 + vma = vma_m;
63488 + h = hstate_vma(vma);
63489 + } else
63490 + address_m = address + SEGMEXEC_TASK_SIZE;
63491 +
63492 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63493 + return VM_FAULT_OOM;
63494 + address_m &= HPAGE_MASK;
63495 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63496 + }
63497 +#endif
63498 +
63499 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63500 if (!ptep)
63501 return VM_FAULT_OOM;
63502 diff -urNp linux-2.6.32.42/mm/Kconfig linux-2.6.32.42/mm/Kconfig
63503 --- linux-2.6.32.42/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63504 +++ linux-2.6.32.42/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63505 @@ -228,7 +228,7 @@ config KSM
63506 config DEFAULT_MMAP_MIN_ADDR
63507 int "Low address space to protect from user allocation"
63508 depends on MMU
63509 - default 4096
63510 + default 65536
63511 help
63512 This is the portion of low virtual memory which should be protected
63513 from userspace allocation. Keeping a user from writing to low pages
63514 diff -urNp linux-2.6.32.42/mm/kmemleak.c linux-2.6.32.42/mm/kmemleak.c
63515 --- linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
63516 +++ linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
63517 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63518
63519 for (i = 0; i < object->trace_len; i++) {
63520 void *ptr = (void *)object->trace[i];
63521 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63522 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63523 }
63524 }
63525
63526 diff -urNp linux-2.6.32.42/mm/ksm.c linux-2.6.32.42/mm/ksm.c
63527 --- linux-2.6.32.42/mm/ksm.c 2011-03-27 14:31:47.000000000 -0400
63528 +++ linux-2.6.32.42/mm/ksm.c 2011-06-20 19:38:36.000000000 -0400
63529 @@ -1215,6 +1215,12 @@ static struct rmap_item *scan_get_next_r
63530 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
63531 ksm_scan.mm_slot = slot;
63532 spin_unlock(&ksm_mmlist_lock);
63533 + /*
63534 + * Although we tested list_empty() above, a racing __ksm_exit
63535 + * of the last mm on the list may have removed it since then.
63536 + */
63537 + if (slot == &ksm_mm_head)
63538 + return NULL;
63539 next_mm:
63540 ksm_scan.address = 0;
63541 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
63542 diff -urNp linux-2.6.32.42/mm/maccess.c linux-2.6.32.42/mm/maccess.c
63543 --- linux-2.6.32.42/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63544 +++ linux-2.6.32.42/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63545 @@ -14,7 +14,7 @@
63546 * Safely read from address @src to the buffer at @dst. If a kernel fault
63547 * happens, handle that and return -EFAULT.
63548 */
63549 -long probe_kernel_read(void *dst, void *src, size_t size)
63550 +long probe_kernel_read(void *dst, const void *src, size_t size)
63551 {
63552 long ret;
63553 mm_segment_t old_fs = get_fs();
63554 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63555 * Safely write to address @dst from the buffer at @src. If a kernel fault
63556 * happens, handle that and return -EFAULT.
63557 */
63558 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63559 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63560 {
63561 long ret;
63562 mm_segment_t old_fs = get_fs();
63563 diff -urNp linux-2.6.32.42/mm/madvise.c linux-2.6.32.42/mm/madvise.c
63564 --- linux-2.6.32.42/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63565 +++ linux-2.6.32.42/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63566 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63567 pgoff_t pgoff;
63568 unsigned long new_flags = vma->vm_flags;
63569
63570 +#ifdef CONFIG_PAX_SEGMEXEC
63571 + struct vm_area_struct *vma_m;
63572 +#endif
63573 +
63574 switch (behavior) {
63575 case MADV_NORMAL:
63576 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63577 @@ -103,6 +107,13 @@ success:
63578 /*
63579 * vm_flags is protected by the mmap_sem held in write mode.
63580 */
63581 +
63582 +#ifdef CONFIG_PAX_SEGMEXEC
63583 + vma_m = pax_find_mirror_vma(vma);
63584 + if (vma_m)
63585 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63586 +#endif
63587 +
63588 vma->vm_flags = new_flags;
63589
63590 out:
63591 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63592 struct vm_area_struct ** prev,
63593 unsigned long start, unsigned long end)
63594 {
63595 +
63596 +#ifdef CONFIG_PAX_SEGMEXEC
63597 + struct vm_area_struct *vma_m;
63598 +#endif
63599 +
63600 *prev = vma;
63601 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63602 return -EINVAL;
63603 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63604 zap_page_range(vma, start, end - start, &details);
63605 } else
63606 zap_page_range(vma, start, end - start, NULL);
63607 +
63608 +#ifdef CONFIG_PAX_SEGMEXEC
63609 + vma_m = pax_find_mirror_vma(vma);
63610 + if (vma_m) {
63611 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63612 + struct zap_details details = {
63613 + .nonlinear_vma = vma_m,
63614 + .last_index = ULONG_MAX,
63615 + };
63616 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63617 + } else
63618 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63619 + }
63620 +#endif
63621 +
63622 return 0;
63623 }
63624
63625 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63626 if (end < start)
63627 goto out;
63628
63629 +#ifdef CONFIG_PAX_SEGMEXEC
63630 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63631 + if (end > SEGMEXEC_TASK_SIZE)
63632 + goto out;
63633 + } else
63634 +#endif
63635 +
63636 + if (end > TASK_SIZE)
63637 + goto out;
63638 +
63639 error = 0;
63640 if (end == start)
63641 goto out;
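
Both madvise() here and do_mbind() in the mm/mempolicy.c hunk below gain the same guard: the requested range must end inside the usable address space, which shrinks to SEGMEXEC_TASK_SIZE when the task runs under SEGMEXEC because the upper half is reserved for the executable mirrors. A small sketch of the check with representative 32-bit constants (the values are assumptions of the illustration, not taken from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Representative values for a 32-bit x86 kernel with a 3GB/1GB split;
 * treat them as assumptions of this sketch.
 */
#define TASK_SIZE           0xC0000000UL   /* 3 GB of user space           */
#define SEGMEXEC_TASK_SIZE  0x60000000UL   /* lower half under SEGMEXEC    */

static bool range_ok(unsigned long start, unsigned long len, bool segmexec)
{
	unsigned long limit = segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;
	unsigned long end = start + len;

	if (end < start)        /* wrap-around */
		return false;
	if (end > limit)        /* past the usable part of the address space */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", range_ok(0x5F000000UL, 0x02000000UL, true));  /* 0: crosses the split */
	printf("%d\n", range_ok(0x5F000000UL, 0x00100000UL, true));  /* 1: fits below it     */
	return 0;
}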
63642 diff -urNp linux-2.6.32.42/mm/memory.c linux-2.6.32.42/mm/memory.c
63643 --- linux-2.6.32.42/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
63644 +++ linux-2.6.32.42/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
63645 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
63646 return;
63647
63648 pmd = pmd_offset(pud, start);
63649 +
63650 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63651 pud_clear(pud);
63652 pmd_free_tlb(tlb, pmd, start);
63653 +#endif
63654 +
63655 }
63656
63657 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63658 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
63659 if (end - 1 > ceiling - 1)
63660 return;
63661
63662 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63663 pud = pud_offset(pgd, start);
63664 pgd_clear(pgd);
63665 pud_free_tlb(tlb, pud, start);
63666 +#endif
63667 +
63668 }
63669
63670 /*
63671 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
63672 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63673 i = 0;
63674
63675 - do {
63676 + while (nr_pages) {
63677 struct vm_area_struct *vma;
63678
63679 - vma = find_extend_vma(mm, start);
63680 + vma = find_vma(mm, start);
63681 if (!vma && in_gate_area(tsk, start)) {
63682 unsigned long pg = start & PAGE_MASK;
63683 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
63684 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
63685 continue;
63686 }
63687
63688 - if (!vma ||
63689 + if (!vma || start < vma->vm_start ||
63690 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63691 !(vm_flags & vma->vm_flags))
63692 return i ? : -EFAULT;
63693 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
63694 start += PAGE_SIZE;
63695 nr_pages--;
63696 } while (nr_pages && start < vma->vm_end);
63697 - } while (nr_pages);
63698 + }
63699 return i;
63700 }
63701
63702 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
63703 page_add_file_rmap(page);
63704 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63705
63706 +#ifdef CONFIG_PAX_SEGMEXEC
63707 + pax_mirror_file_pte(vma, addr, page, ptl);
63708 +#endif
63709 +
63710 retval = 0;
63711 pte_unmap_unlock(pte, ptl);
63712 return retval;
63713 @@ -1560,10 +1571,22 @@ out:
63714 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63715 struct page *page)
63716 {
63717 +
63718 +#ifdef CONFIG_PAX_SEGMEXEC
63719 + struct vm_area_struct *vma_m;
63720 +#endif
63721 +
63722 if (addr < vma->vm_start || addr >= vma->vm_end)
63723 return -EFAULT;
63724 if (!page_count(page))
63725 return -EINVAL;
63726 +
63727 +#ifdef CONFIG_PAX_SEGMEXEC
63728 + vma_m = pax_find_mirror_vma(vma);
63729 + if (vma_m)
63730 + vma_m->vm_flags |= VM_INSERTPAGE;
63731 +#endif
63732 +
63733 vma->vm_flags |= VM_INSERTPAGE;
63734 return insert_page(vma, addr, page, vma->vm_page_prot);
63735 }
63736 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
63737 unsigned long pfn)
63738 {
63739 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63740 + BUG_ON(vma->vm_mirror);
63741
63742 if (addr < vma->vm_start || addr >= vma->vm_end)
63743 return -EFAULT;
63744 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
63745 copy_user_highpage(dst, src, va, vma);
63746 }
63747
63748 +#ifdef CONFIG_PAX_SEGMEXEC
63749 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63750 +{
63751 + struct mm_struct *mm = vma->vm_mm;
63752 + spinlock_t *ptl;
63753 + pte_t *pte, entry;
63754 +
63755 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63756 + entry = *pte;
63757 + if (!pte_present(entry)) {
63758 + if (!pte_none(entry)) {
63759 + BUG_ON(pte_file(entry));
63760 + free_swap_and_cache(pte_to_swp_entry(entry));
63761 + pte_clear_not_present_full(mm, address, pte, 0);
63762 + }
63763 + } else {
63764 + struct page *page;
63765 +
63766 + flush_cache_page(vma, address, pte_pfn(entry));
63767 + entry = ptep_clear_flush(vma, address, pte);
63768 + BUG_ON(pte_dirty(entry));
63769 + page = vm_normal_page(vma, address, entry);
63770 + if (page) {
63771 + update_hiwater_rss(mm);
63772 + if (PageAnon(page))
63773 + dec_mm_counter(mm, anon_rss);
63774 + else
63775 + dec_mm_counter(mm, file_rss);
63776 + page_remove_rmap(page);
63777 + page_cache_release(page);
63778 + }
63779 + }
63780 + pte_unmap_unlock(pte, ptl);
63781 +}
63782 +
63783 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63784 + *
63785 + * the ptl of the lower mapped page is held on entry and is not released on exit
63786 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63787 + */
63788 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63789 +{
63790 + struct mm_struct *mm = vma->vm_mm;
63791 + unsigned long address_m;
63792 + spinlock_t *ptl_m;
63793 + struct vm_area_struct *vma_m;
63794 + pmd_t *pmd_m;
63795 + pte_t *pte_m, entry_m;
63796 +
63797 + BUG_ON(!page_m || !PageAnon(page_m));
63798 +
63799 + vma_m = pax_find_mirror_vma(vma);
63800 + if (!vma_m)
63801 + return;
63802 +
63803 + BUG_ON(!PageLocked(page_m));
63804 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63805 + address_m = address + SEGMEXEC_TASK_SIZE;
63806 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63807 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63808 + ptl_m = pte_lockptr(mm, pmd_m);
63809 + if (ptl != ptl_m) {
63810 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63811 + if (!pte_none(*pte_m))
63812 + goto out;
63813 + }
63814 +
63815 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63816 + page_cache_get(page_m);
63817 + page_add_anon_rmap(page_m, vma_m, address_m);
63818 + inc_mm_counter(mm, anon_rss);
63819 + set_pte_at(mm, address_m, pte_m, entry_m);
63820 + update_mmu_cache(vma_m, address_m, entry_m);
63821 +out:
63822 + if (ptl != ptl_m)
63823 + spin_unlock(ptl_m);
63824 + pte_unmap_nested(pte_m);
63825 + unlock_page(page_m);
63826 +}
63827 +
63828 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63829 +{
63830 + struct mm_struct *mm = vma->vm_mm;
63831 + unsigned long address_m;
63832 + spinlock_t *ptl_m;
63833 + struct vm_area_struct *vma_m;
63834 + pmd_t *pmd_m;
63835 + pte_t *pte_m, entry_m;
63836 +
63837 + BUG_ON(!page_m || PageAnon(page_m));
63838 +
63839 + vma_m = pax_find_mirror_vma(vma);
63840 + if (!vma_m)
63841 + return;
63842 +
63843 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63844 + address_m = address + SEGMEXEC_TASK_SIZE;
63845 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63846 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63847 + ptl_m = pte_lockptr(mm, pmd_m);
63848 + if (ptl != ptl_m) {
63849 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63850 + if (!pte_none(*pte_m))
63851 + goto out;
63852 + }
63853 +
63854 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63855 + page_cache_get(page_m);
63856 + page_add_file_rmap(page_m);
63857 + inc_mm_counter(mm, file_rss);
63858 + set_pte_at(mm, address_m, pte_m, entry_m);
63859 + update_mmu_cache(vma_m, address_m, entry_m);
63860 +out:
63861 + if (ptl != ptl_m)
63862 + spin_unlock(ptl_m);
63863 + pte_unmap_nested(pte_m);
63864 +}
63865 +
63866 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63867 +{
63868 + struct mm_struct *mm = vma->vm_mm;
63869 + unsigned long address_m;
63870 + spinlock_t *ptl_m;
63871 + struct vm_area_struct *vma_m;
63872 + pmd_t *pmd_m;
63873 + pte_t *pte_m, entry_m;
63874 +
63875 + vma_m = pax_find_mirror_vma(vma);
63876 + if (!vma_m)
63877 + return;
63878 +
63879 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63880 + address_m = address + SEGMEXEC_TASK_SIZE;
63881 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63882 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63883 + ptl_m = pte_lockptr(mm, pmd_m);
63884 + if (ptl != ptl_m) {
63885 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63886 + if (!pte_none(*pte_m))
63887 + goto out;
63888 + }
63889 +
63890 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63891 + set_pte_at(mm, address_m, pte_m, entry_m);
63892 +out:
63893 + if (ptl != ptl_m)
63894 + spin_unlock(ptl_m);
63895 + pte_unmap_nested(pte_m);
63896 +}
63897 +
63898 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63899 +{
63900 + struct page *page_m;
63901 + pte_t entry;
63902 +
63903 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63904 + goto out;
63905 +
63906 + entry = *pte;
63907 + page_m = vm_normal_page(vma, address, entry);
63908 + if (!page_m)
63909 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63910 + else if (PageAnon(page_m)) {
63911 + if (pax_find_mirror_vma(vma)) {
63912 + pte_unmap_unlock(pte, ptl);
63913 + lock_page(page_m);
63914 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63915 + if (pte_same(entry, *pte))
63916 + pax_mirror_anon_pte(vma, address, page_m, ptl);
63917 + else
63918 + unlock_page(page_m);
63919 + }
63920 + } else
63921 + pax_mirror_file_pte(vma, address, page_m, ptl);
63922 +
63923 +out:
63924 + pte_unmap_unlock(pte, ptl);
63925 +}
63926 +#endif
63927 +
63928 /*
63929 * This routine handles present pages, when users try to write
63930 * to a shared page. It is done by copying the page to a new address
63931 @@ -2156,6 +2360,12 @@ gotten:
63932 */
63933 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63934 if (likely(pte_same(*page_table, orig_pte))) {
63935 +
63936 +#ifdef CONFIG_PAX_SEGMEXEC
63937 + if (pax_find_mirror_vma(vma))
63938 + BUG_ON(!trylock_page(new_page));
63939 +#endif
63940 +
63941 if (old_page) {
63942 if (!PageAnon(old_page)) {
63943 dec_mm_counter(mm, file_rss);
63944 @@ -2207,6 +2417,10 @@ gotten:
63945 page_remove_rmap(old_page);
63946 }
63947
63948 +#ifdef CONFIG_PAX_SEGMEXEC
63949 + pax_mirror_anon_pte(vma, address, new_page, ptl);
63950 +#endif
63951 +
63952 /* Free the old page.. */
63953 new_page = old_page;
63954 ret |= VM_FAULT_WRITE;
63955 @@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
63956 swap_free(entry);
63957 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63958 try_to_free_swap(page);
63959 +
63960 +#ifdef CONFIG_PAX_SEGMEXEC
63961 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63962 +#endif
63963 +
63964 unlock_page(page);
63965
63966 if (flags & FAULT_FLAG_WRITE) {
63967 @@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
63968
63969 /* No need to invalidate - it was non-present before */
63970 update_mmu_cache(vma, address, pte);
63971 +
63972 +#ifdef CONFIG_PAX_SEGMEXEC
63973 + pax_mirror_anon_pte(vma, address, page, ptl);
63974 +#endif
63975 +
63976 unlock:
63977 pte_unmap_unlock(page_table, ptl);
63978 out:
63979 @@ -2630,40 +2854,6 @@ out_release:
63980 }
63981
63982 /*
63983 - * This is like a special single-page "expand_{down|up}wards()",
63984 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63985 - * doesn't hit another vma.
63986 - */
63987 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63988 -{
63989 - address &= PAGE_MASK;
63990 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63991 - struct vm_area_struct *prev = vma->vm_prev;
63992 -
63993 - /*
63994 - * Is there a mapping abutting this one below?
63995 - *
63996 - * That's only ok if it's the same stack mapping
63997 - * that has gotten split..
63998 - */
63999 - if (prev && prev->vm_end == address)
64000 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64001 -
64002 - expand_stack(vma, address - PAGE_SIZE);
64003 - }
64004 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64005 - struct vm_area_struct *next = vma->vm_next;
64006 -
64007 - /* As VM_GROWSDOWN but s/below/above/ */
64008 - if (next && next->vm_start == address + PAGE_SIZE)
64009 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64010 -
64011 - expand_upwards(vma, address + PAGE_SIZE);
64012 - }
64013 - return 0;
64014 -}
64015 -
64016 -/*
64017 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64018 * but allow concurrent faults), and pte mapped but not yet locked.
64019 * We return with mmap_sem still held, but pte unmapped and unlocked.
64020 @@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
64021 unsigned long address, pte_t *page_table, pmd_t *pmd,
64022 unsigned int flags)
64023 {
64024 - struct page *page;
64025 + struct page *page = NULL;
64026 spinlock_t *ptl;
64027 pte_t entry;
64028
64029 - pte_unmap(page_table);
64030 -
64031 - /* Check if we need to add a guard page to the stack */
64032 - if (check_stack_guard_page(vma, address) < 0)
64033 - return VM_FAULT_SIGBUS;
64034 -
64035 - /* Use the zero-page for reads */
64036 if (!(flags & FAULT_FLAG_WRITE)) {
64037 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64038 vma->vm_page_prot));
64039 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64040 + ptl = pte_lockptr(mm, pmd);
64041 + spin_lock(ptl);
64042 if (!pte_none(*page_table))
64043 goto unlock;
64044 goto setpte;
64045 }
64046
64047 /* Allocate our own private page. */
64048 + pte_unmap(page_table);
64049 +
64050 if (unlikely(anon_vma_prepare(vma)))
64051 goto oom;
64052 page = alloc_zeroed_user_highpage_movable(vma, address);
64053 @@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
64054 if (!pte_none(*page_table))
64055 goto release;
64056
64057 +#ifdef CONFIG_PAX_SEGMEXEC
64058 + if (pax_find_mirror_vma(vma))
64059 + BUG_ON(!trylock_page(page));
64060 +#endif
64061 +
64062 inc_mm_counter(mm, anon_rss);
64063 page_add_new_anon_rmap(page, vma, address);
64064 setpte:
64065 @@ -2718,6 +2909,12 @@ setpte:
64066
64067 /* No need to invalidate - it was non-present before */
64068 update_mmu_cache(vma, address, entry);
64069 +
64070 +#ifdef CONFIG_PAX_SEGMEXEC
64071 + if (page)
64072 + pax_mirror_anon_pte(vma, address, page, ptl);
64073 +#endif
64074 +
64075 unlock:
64076 pte_unmap_unlock(page_table, ptl);
64077 return 0;
64078 @@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
64079 */
64080 /* Only go through if we didn't race with anybody else... */
64081 if (likely(pte_same(*page_table, orig_pte))) {
64082 +
64083 +#ifdef CONFIG_PAX_SEGMEXEC
64084 + if (anon && pax_find_mirror_vma(vma))
64085 + BUG_ON(!trylock_page(page));
64086 +#endif
64087 +
64088 flush_icache_page(vma, page);
64089 entry = mk_pte(page, vma->vm_page_prot);
64090 if (flags & FAULT_FLAG_WRITE)
64091 @@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
64092
64093 /* no need to invalidate: a not-present page won't be cached */
64094 update_mmu_cache(vma, address, entry);
64095 +
64096 +#ifdef CONFIG_PAX_SEGMEXEC
64097 + if (anon)
64098 + pax_mirror_anon_pte(vma, address, page, ptl);
64099 + else
64100 + pax_mirror_file_pte(vma, address, page, ptl);
64101 +#endif
64102 +
64103 } else {
64104 if (charged)
64105 mem_cgroup_uncharge_page(page);
64106 @@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
64107 if (flags & FAULT_FLAG_WRITE)
64108 flush_tlb_page(vma, address);
64109 }
64110 +
64111 +#ifdef CONFIG_PAX_SEGMEXEC
64112 + pax_mirror_pte(vma, address, pte, pmd, ptl);
64113 + return 0;
64114 +#endif
64115 +
64116 unlock:
64117 pte_unmap_unlock(pte, ptl);
64118 return 0;
64119 @@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
64120 pmd_t *pmd;
64121 pte_t *pte;
64122
64123 +#ifdef CONFIG_PAX_SEGMEXEC
64124 + struct vm_area_struct *vma_m;
64125 +#endif
64126 +
64127 __set_current_state(TASK_RUNNING);
64128
64129 count_vm_event(PGFAULT);
64130 @@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
64131 if (unlikely(is_vm_hugetlb_page(vma)))
64132 return hugetlb_fault(mm, vma, address, flags);
64133
64134 +#ifdef CONFIG_PAX_SEGMEXEC
64135 + vma_m = pax_find_mirror_vma(vma);
64136 + if (vma_m) {
64137 + unsigned long address_m;
64138 + pgd_t *pgd_m;
64139 + pud_t *pud_m;
64140 + pmd_t *pmd_m;
64141 +
64142 + if (vma->vm_start > vma_m->vm_start) {
64143 + address_m = address;
64144 + address -= SEGMEXEC_TASK_SIZE;
64145 + vma = vma_m;
64146 + } else
64147 + address_m = address + SEGMEXEC_TASK_SIZE;
64148 +
64149 + pgd_m = pgd_offset(mm, address_m);
64150 + pud_m = pud_alloc(mm, pgd_m, address_m);
64151 + if (!pud_m)
64152 + return VM_FAULT_OOM;
64153 + pmd_m = pmd_alloc(mm, pud_m, address_m);
64154 + if (!pmd_m)
64155 + return VM_FAULT_OOM;
64156 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
64157 + return VM_FAULT_OOM;
64158 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64159 + }
64160 +#endif
64161 +
64162 pgd = pgd_offset(mm, address);
64163 pud = pud_alloc(mm, pgd, address);
64164 if (!pud)
64165 @@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
64166 gate_vma.vm_start = FIXADDR_USER_START;
64167 gate_vma.vm_end = FIXADDR_USER_END;
64168 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64169 - gate_vma.vm_page_prot = __P101;
64170 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64171 /*
64172 * Make sure the vDSO gets into every core dump.
64173 * Dumping its contents makes post-mortem fully interpretable later
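
Most of the mm/memory.c additions (pax_mirror_anon_pte(), pax_mirror_file_pte(), the extra work in handle_mm_fault()) exist to keep a second, shifted view of each mapping in sync: under SEGMEXEC the 32-bit user address space is split in half, the lower half is the data view, and the region at address + SEGMEXEC_TASK_SIZE mirrors the same pages for instruction fetches, so whenever a PTE is installed on one side its twin has to be installed or torn down on the other. A toy sketch of just the address bookkeeping (SEGMEXEC_TASK_SIZE below is a representative value assumed for the sketch):

#include <stdio.h>

/* Representative 32-bit value, assumed for this sketch only. */
#define SEGMEXEC_TASK_SIZE 0x60000000UL

/* The mirror of a data-view address lives one half of the address space up
 * and maps the same physical page through the executable segment.
 */
static unsigned long mirror_of(unsigned long address)
{
	return address + SEGMEXEC_TASK_SIZE;
}

/* When a fault arrives through either view, compute both addresses, the way
 * the handle_mm_fault() hunk above normalizes to the lower vma.
 */
static void split_fault_address(unsigned long address, int faulted_in_mirror,
				unsigned long *lower, unsigned long *upper)
{
	if (faulted_in_mirror) {
		*upper = address;
		*lower = address - SEGMEXEC_TASK_SIZE;
	} else {
		*lower = address;
		*upper = address + SEGMEXEC_TASK_SIZE;
	}
}

int main(void)
{
	unsigned long text = 0x08048000UL;   /* classic i386 ELF text address */
	unsigned long lo, hi;

	printf("data view: %#010lx  exec view: %#010lx\n", text, mirror_of(text));

	split_fault_address(mirror_of(text), 1, &lo, &hi);
	printf("fault in mirror -> handle at %#010lx, sync %#010lx\n", lo, hi);
	return 0;
}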
64174 diff -urNp linux-2.6.32.42/mm/memory-failure.c linux-2.6.32.42/mm/memory-failure.c
64175 --- linux-2.6.32.42/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
64176 +++ linux-2.6.32.42/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
64177 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
64178
64179 int sysctl_memory_failure_recovery __read_mostly = 1;
64180
64181 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64182 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64183
64184 /*
64185 * Send all the processes who have the page mapped an ``action optional''
64186 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
64187 return 0;
64188 }
64189
64190 - atomic_long_add(1, &mce_bad_pages);
64191 + atomic_long_add_unchecked(1, &mce_bad_pages);
64192
64193 /*
64194 * We need/can do nothing about count=0 pages.
64195 diff -urNp linux-2.6.32.42/mm/mempolicy.c linux-2.6.32.42/mm/mempolicy.c
64196 --- linux-2.6.32.42/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
64197 +++ linux-2.6.32.42/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
64198 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
64199 struct vm_area_struct *next;
64200 int err;
64201
64202 +#ifdef CONFIG_PAX_SEGMEXEC
64203 + struct vm_area_struct *vma_m;
64204 +#endif
64205 +
64206 err = 0;
64207 for (; vma && vma->vm_start < end; vma = next) {
64208 next = vma->vm_next;
64209 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
64210 err = policy_vma(vma, new);
64211 if (err)
64212 break;
64213 +
64214 +#ifdef CONFIG_PAX_SEGMEXEC
64215 + vma_m = pax_find_mirror_vma(vma);
64216 + if (vma_m) {
64217 + err = policy_vma(vma_m, new);
64218 + if (err)
64219 + break;
64220 + }
64221 +#endif
64222 +
64223 }
64224 return err;
64225 }
64226 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
64227
64228 if (end < start)
64229 return -EINVAL;
64230 +
64231 +#ifdef CONFIG_PAX_SEGMEXEC
64232 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64233 + if (end > SEGMEXEC_TASK_SIZE)
64234 + return -EINVAL;
64235 + } else
64236 +#endif
64237 +
64238 + if (end > TASK_SIZE)
64239 + return -EINVAL;
64240 +
64241 if (end == start)
64242 return 0;
64243
64244 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64245 if (!mm)
64246 return -EINVAL;
64247
64248 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64249 + if (mm != current->mm &&
64250 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64251 + err = -EPERM;
64252 + goto out;
64253 + }
64254 +#endif
64255 +
64256 /*
64257 * Check if this process has the right to modify the specified
64258 * process. The right exists if the process has administrative
64259 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64260 rcu_read_lock();
64261 tcred = __task_cred(task);
64262 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64263 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64264 - !capable(CAP_SYS_NICE)) {
64265 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64266 rcu_read_unlock();
64267 err = -EPERM;
64268 goto out;
64269 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
64270
64271 if (file) {
64272 seq_printf(m, " file=");
64273 - seq_path(m, &file->f_path, "\n\t= ");
64274 + seq_path(m, &file->f_path, "\n\t\\= ");
64275 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64276 seq_printf(m, " heap");
64277 } else if (vma->vm_start <= mm->start_stack &&
64278 diff -urNp linux-2.6.32.42/mm/migrate.c linux-2.6.32.42/mm/migrate.c
64279 --- linux-2.6.32.42/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
64280 +++ linux-2.6.32.42/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
64281 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64282 unsigned long chunk_start;
64283 int err;
64284
64285 + pax_track_stack();
64286 +
64287 task_nodes = cpuset_mems_allowed(task);
64288
64289 err = -ENOMEM;
64290 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64291 if (!mm)
64292 return -EINVAL;
64293
64294 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64295 + if (mm != current->mm &&
64296 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64297 + err = -EPERM;
64298 + goto out;
64299 + }
64300 +#endif
64301 +
64302 /*
64303 * Check if this process has the right to modify the specified
64304 * process. The right exists if the process has administrative
64305 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64306 rcu_read_lock();
64307 tcred = __task_cred(task);
64308 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64309 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64310 - !capable(CAP_SYS_NICE)) {
64311 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64312 rcu_read_unlock();
64313 err = -EPERM;
64314 goto out;
64315 diff -urNp linux-2.6.32.42/mm/mlock.c linux-2.6.32.42/mm/mlock.c
64316 --- linux-2.6.32.42/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64317 +++ linux-2.6.32.42/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64318 @@ -13,6 +13,7 @@
64319 #include <linux/pagemap.h>
64320 #include <linux/mempolicy.h>
64321 #include <linux/syscalls.h>
64322 +#include <linux/security.h>
64323 #include <linux/sched.h>
64324 #include <linux/module.h>
64325 #include <linux/rmap.h>
64326 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64327 }
64328 }
64329
64330 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64331 -{
64332 - return (vma->vm_flags & VM_GROWSDOWN) &&
64333 - (vma->vm_start == addr) &&
64334 - !vma_stack_continue(vma->vm_prev, addr);
64335 -}
64336 -
64337 /**
64338 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64339 * @vma: target vma
64340 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64341 if (vma->vm_flags & VM_WRITE)
64342 gup_flags |= FOLL_WRITE;
64343
64344 - /* We don't try to access the guard page of a stack vma */
64345 - if (stack_guard_page(vma, start)) {
64346 - addr += PAGE_SIZE;
64347 - nr_pages--;
64348 - }
64349 -
64350 while (nr_pages > 0) {
64351 int i;
64352
64353 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64354 {
64355 unsigned long nstart, end, tmp;
64356 struct vm_area_struct * vma, * prev;
64357 - int error;
64358 + int error = -EINVAL;
64359
64360 len = PAGE_ALIGN(len);
64361 end = start + len;
64362 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64363 return -EINVAL;
64364 if (end == start)
64365 return 0;
64366 + if (end > TASK_SIZE)
64367 + return -EINVAL;
64368 +
64369 vma = find_vma_prev(current->mm, start, &prev);
64370 if (!vma || vma->vm_start > start)
64371 return -ENOMEM;
64372 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64373 for (nstart = start ; ; ) {
64374 unsigned int newflags;
64375
64376 +#ifdef CONFIG_PAX_SEGMEXEC
64377 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64378 + break;
64379 +#endif
64380 +
64381 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64382
64383 newflags = vma->vm_flags | VM_LOCKED;
64384 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64385 lock_limit >>= PAGE_SHIFT;
64386
64387 /* check against resource limits */
64388 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64389 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64390 error = do_mlock(start, len, 1);
64391 up_write(&current->mm->mmap_sem);
64392 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64393 static int do_mlockall(int flags)
64394 {
64395 struct vm_area_struct * vma, * prev = NULL;
64396 - unsigned int def_flags = 0;
64397
64398 if (flags & MCL_FUTURE)
64399 - def_flags = VM_LOCKED;
64400 - current->mm->def_flags = def_flags;
64401 + current->mm->def_flags |= VM_LOCKED;
64402 + else
64403 + current->mm->def_flags &= ~VM_LOCKED;
64404 if (flags == MCL_FUTURE)
64405 goto out;
64406
64407 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64408 - unsigned int newflags;
64409 + unsigned long newflags;
64410 +
64411 +#ifdef CONFIG_PAX_SEGMEXEC
64412 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64413 + break;
64414 +#endif
64415
64416 + BUG_ON(vma->vm_end > TASK_SIZE);
64417 newflags = vma->vm_flags | VM_LOCKED;
64418 if (!(flags & MCL_CURRENT))
64419 newflags &= ~VM_LOCKED;
64420 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64421 lock_limit >>= PAGE_SHIFT;
64422
64423 ret = -ENOMEM;
64424 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64425 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64426 capable(CAP_IPC_LOCK))
64427 ret = do_mlockall(flags);
64428 diff -urNp linux-2.6.32.42/mm/mmap.c linux-2.6.32.42/mm/mmap.c
64429 --- linux-2.6.32.42/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64430 +++ linux-2.6.32.42/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64431 @@ -45,6 +45,16 @@
64432 #define arch_rebalance_pgtables(addr, len) (addr)
64433 #endif
64434
64435 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64436 +{
64437 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64438 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64439 + up_read(&mm->mmap_sem);
64440 + BUG();
64441 + }
64442 +#endif
64443 +}
64444 +
64445 static void unmap_region(struct mm_struct *mm,
64446 struct vm_area_struct *vma, struct vm_area_struct *prev,
64447 unsigned long start, unsigned long end);
64448 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
64449 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64450 *
64451 */
64452 -pgprot_t protection_map[16] = {
64453 +pgprot_t protection_map[16] __read_only = {
64454 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64455 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64456 };
64457
64458 pgprot_t vm_get_page_prot(unsigned long vm_flags)
64459 {
64460 - return __pgprot(pgprot_val(protection_map[vm_flags &
64461 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64462 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64463 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64464 +
64465 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64466 + if (!nx_enabled &&
64467 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64468 + (vm_flags & (VM_READ | VM_WRITE)))
64469 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64470 +#endif
64471 +
64472 + return prot;
64473 }
64474 EXPORT_SYMBOL(vm_get_page_prot);
64475
64476 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
64477 int sysctl_overcommit_ratio = 50; /* default is 50% */
64478 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64479 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64480 struct percpu_counter vm_committed_as;
64481
64482 /*
64483 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
64484 struct vm_area_struct *next = vma->vm_next;
64485
64486 might_sleep();
64487 + BUG_ON(vma->vm_mirror);
64488 if (vma->vm_ops && vma->vm_ops->close)
64489 vma->vm_ops->close(vma);
64490 if (vma->vm_file) {
64491 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64492 * not page aligned -Ram Gupta
64493 */
64494 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64495 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64496 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64497 (mm->end_data - mm->start_data) > rlim)
64498 goto out;
64499 @@ -704,6 +726,12 @@ static int
64500 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64501 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64502 {
64503 +
64504 +#ifdef CONFIG_PAX_SEGMEXEC
64505 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64506 + return 0;
64507 +#endif
64508 +
64509 if (is_mergeable_vma(vma, file, vm_flags) &&
64510 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64511 if (vma->vm_pgoff == vm_pgoff)
64512 @@ -723,6 +751,12 @@ static int
64513 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64514 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64515 {
64516 +
64517 +#ifdef CONFIG_PAX_SEGMEXEC
64518 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64519 + return 0;
64520 +#endif
64521 +
64522 if (is_mergeable_vma(vma, file, vm_flags) &&
64523 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64524 pgoff_t vm_pglen;
64525 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64526 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64527 struct vm_area_struct *prev, unsigned long addr,
64528 unsigned long end, unsigned long vm_flags,
64529 - struct anon_vma *anon_vma, struct file *file,
64530 + struct anon_vma *anon_vma, struct file *file,
64531 pgoff_t pgoff, struct mempolicy *policy)
64532 {
64533 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64534 struct vm_area_struct *area, *next;
64535
64536 +#ifdef CONFIG_PAX_SEGMEXEC
64537 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64538 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64539 +
64540 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64541 +#endif
64542 +
64543 /*
64544 * We later require that vma->vm_flags == vm_flags,
64545 * so this tests vma->vm_flags & VM_SPECIAL, too.
64546 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64547 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64548 next = next->vm_next;
64549
64550 +#ifdef CONFIG_PAX_SEGMEXEC
64551 + if (prev)
64552 + prev_m = pax_find_mirror_vma(prev);
64553 + if (area)
64554 + area_m = pax_find_mirror_vma(area);
64555 + if (next)
64556 + next_m = pax_find_mirror_vma(next);
64557 +#endif
64558 +
64559 /*
64560 * Can it merge with the predecessor?
64561 */
64562 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64563 /* cases 1, 6 */
64564 vma_adjust(prev, prev->vm_start,
64565 next->vm_end, prev->vm_pgoff, NULL);
64566 - } else /* cases 2, 5, 7 */
64567 +
64568 +#ifdef CONFIG_PAX_SEGMEXEC
64569 + if (prev_m)
64570 + vma_adjust(prev_m, prev_m->vm_start,
64571 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64572 +#endif
64573 +
64574 + } else { /* cases 2, 5, 7 */
64575 vma_adjust(prev, prev->vm_start,
64576 end, prev->vm_pgoff, NULL);
64577 +
64578 +#ifdef CONFIG_PAX_SEGMEXEC
64579 + if (prev_m)
64580 + vma_adjust(prev_m, prev_m->vm_start,
64581 + end_m, prev_m->vm_pgoff, NULL);
64582 +#endif
64583 +
64584 + }
64585 return prev;
64586 }
64587
64588 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64589 mpol_equal(policy, vma_policy(next)) &&
64590 can_vma_merge_before(next, vm_flags,
64591 anon_vma, file, pgoff+pglen)) {
64592 - if (prev && addr < prev->vm_end) /* case 4 */
64593 + if (prev && addr < prev->vm_end) { /* case 4 */
64594 vma_adjust(prev, prev->vm_start,
64595 addr, prev->vm_pgoff, NULL);
64596 - else /* cases 3, 8 */
64597 +
64598 +#ifdef CONFIG_PAX_SEGMEXEC
64599 + if (prev_m)
64600 + vma_adjust(prev_m, prev_m->vm_start,
64601 + addr_m, prev_m->vm_pgoff, NULL);
64602 +#endif
64603 +
64604 + } else { /* cases 3, 8 */
64605 vma_adjust(area, addr, next->vm_end,
64606 next->vm_pgoff - pglen, NULL);
64607 +
64608 +#ifdef CONFIG_PAX_SEGMEXEC
64609 + if (area_m)
64610 + vma_adjust(area_m, addr_m, next_m->vm_end,
64611 + next_m->vm_pgoff - pglen, NULL);
64612 +#endif
64613 +
64614 + }
64615 return area;
64616 }
64617
64618 @@ -898,14 +978,11 @@ none:
64619 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64620 struct file *file, long pages)
64621 {
64622 - const unsigned long stack_flags
64623 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64624 -
64625 if (file) {
64626 mm->shared_vm += pages;
64627 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64628 mm->exec_vm += pages;
64629 - } else if (flags & stack_flags)
64630 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64631 mm->stack_vm += pages;
64632 if (flags & (VM_RESERVED|VM_IO))
64633 mm->reserved_vm += pages;
64634 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
64635 * (the exception is when the underlying filesystem is noexec
64636 * mounted, in which case we dont add PROT_EXEC.)
64637 */
64638 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64639 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64640 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64641 prot |= PROT_EXEC;
64642
64643 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
64644 /* Obtain the address to map to. we verify (or select) it and ensure
64645 * that it represents a valid section of the address space.
64646 */
64647 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
64648 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64649 if (addr & ~PAGE_MASK)
64650 return addr;
64651
64652 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
64653 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64654 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64655
64656 +#ifdef CONFIG_PAX_MPROTECT
64657 + if (mm->pax_flags & MF_PAX_MPROTECT) {
64658 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64659 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64660 + gr_log_rwxmmap(file);
64661 +
64662 +#ifdef CONFIG_PAX_EMUPLT
64663 + vm_flags &= ~VM_EXEC;
64664 +#else
64665 + return -EPERM;
64666 +#endif
64667 +
64668 + }
64669 +
64670 + if (!(vm_flags & VM_EXEC))
64671 + vm_flags &= ~VM_MAYEXEC;
64672 +#else
64673 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64674 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64675 +#endif
64676 + else
64677 + vm_flags &= ~VM_MAYWRITE;
64678 + }
64679 +#endif
64680 +
64681 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64682 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64683 + vm_flags &= ~VM_PAGEEXEC;
64684 +#endif
64685 +
64686 if (flags & MAP_LOCKED)
64687 if (!can_do_mlock())
64688 return -EPERM;
64689 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
64690 locked += mm->locked_vm;
64691 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64692 lock_limit >>= PAGE_SHIFT;
64693 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64694 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64695 return -EAGAIN;
64696 }
64697 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
64698 if (error)
64699 return error;
64700
64701 + if (!gr_acl_handle_mmap(file, prot))
64702 + return -EACCES;
64703 +
64704 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64705 }
64706 EXPORT_SYMBOL(do_mmap_pgoff);
64707 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
64708 */
64709 int vma_wants_writenotify(struct vm_area_struct *vma)
64710 {
64711 - unsigned int vm_flags = vma->vm_flags;
64712 + unsigned long vm_flags = vma->vm_flags;
64713
64714 /* If it was private or non-writable, the write bit is already clear */
64715 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64716 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64717 return 0;
64718
64719 /* The backer wishes to know when pages are first written to? */
64720 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
64721 unsigned long charged = 0;
64722 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64723
64724 +#ifdef CONFIG_PAX_SEGMEXEC
64725 + struct vm_area_struct *vma_m = NULL;
64726 +#endif
64727 +
64728 + /*
64729 + * mm->mmap_sem is required to protect against another thread
64730 + * changing the mappings in case we sleep.
64731 + */
64732 + verify_mm_writelocked(mm);
64733 +
64734 /* Clear old maps */
64735 error = -ENOMEM;
64736 -munmap_back:
64737 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64738 if (vma && vma->vm_start < addr + len) {
64739 if (do_munmap(mm, addr, len))
64740 return -ENOMEM;
64741 - goto munmap_back;
64742 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64743 + BUG_ON(vma && vma->vm_start < addr + len);
64744 }
64745
64746 /* Check against address space limit. */
64747 @@ -1173,6 +1294,16 @@ munmap_back:
64748 goto unacct_error;
64749 }
64750
64751 +#ifdef CONFIG_PAX_SEGMEXEC
64752 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64753 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64754 + if (!vma_m) {
64755 + error = -ENOMEM;
64756 + goto free_vma;
64757 + }
64758 + }
64759 +#endif
64760 +
64761 vma->vm_mm = mm;
64762 vma->vm_start = addr;
64763 vma->vm_end = addr + len;
64764 @@ -1195,6 +1326,19 @@ munmap_back:
64765 error = file->f_op->mmap(file, vma);
64766 if (error)
64767 goto unmap_and_free_vma;
64768 +
64769 +#ifdef CONFIG_PAX_SEGMEXEC
64770 + if (vma_m && (vm_flags & VM_EXECUTABLE))
64771 + added_exe_file_vma(mm);
64772 +#endif
64773 +
64774 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64775 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64776 + vma->vm_flags |= VM_PAGEEXEC;
64777 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64778 + }
64779 +#endif
64780 +
64781 if (vm_flags & VM_EXECUTABLE)
64782 added_exe_file_vma(mm);
64783
64784 @@ -1218,6 +1362,11 @@ munmap_back:
64785 vma_link(mm, vma, prev, rb_link, rb_parent);
64786 file = vma->vm_file;
64787
64788 +#ifdef CONFIG_PAX_SEGMEXEC
64789 + if (vma_m)
64790 + pax_mirror_vma(vma_m, vma);
64791 +#endif
64792 +
64793 /* Once vma denies write, undo our temporary denial count */
64794 if (correct_wcount)
64795 atomic_inc(&inode->i_writecount);
64796 @@ -1226,6 +1375,7 @@ out:
64797
64798 mm->total_vm += len >> PAGE_SHIFT;
64799 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64800 + track_exec_limit(mm, addr, addr + len, vm_flags);
64801 if (vm_flags & VM_LOCKED) {
64802 /*
64803 * makes pages present; downgrades, drops, reacquires mmap_sem
64804 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
64805 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64806 charged = 0;
64807 free_vma:
64808 +
64809 +#ifdef CONFIG_PAX_SEGMEXEC
64810 + if (vma_m)
64811 + kmem_cache_free(vm_area_cachep, vma_m);
64812 +#endif
64813 +
64814 kmem_cache_free(vm_area_cachep, vma);
64815 unacct_error:
64816 if (charged)
64817 @@ -1255,6 +1411,44 @@ unacct_error:
64818 return error;
64819 }
64820
64821 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64822 +{
64823 + if (!vma) {
64824 +#ifdef CONFIG_STACK_GROWSUP
64825 + if (addr > sysctl_heap_stack_gap)
64826 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64827 + else
64828 + vma = find_vma(current->mm, 0);
64829 + if (vma && (vma->vm_flags & VM_GROWSUP))
64830 + return false;
64831 +#endif
64832 + return true;
64833 + }
64834 +
64835 + if (addr + len > vma->vm_start)
64836 + return false;
64837 +
64838 + if (vma->vm_flags & VM_GROWSDOWN)
64839 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64840 +#ifdef CONFIG_STACK_GROWSUP
64841 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64842 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64843 +#endif
64844 +
64845 + return true;
64846 +}
64847 +
64848 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64849 +{
64850 + if (vma->vm_start < len)
64851 + return -ENOMEM;
64852 + if (!(vma->vm_flags & VM_GROWSDOWN))
64853 + return vma->vm_start - len;
64854 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
64855 + return vma->vm_start - len - sysctl_heap_stack_gap;
64856 + return -ENOMEM;
64857 +}
64858 +
64859 /* Get an address range which is currently unmapped.
64860 * For shmat() with addr=0.
64861 *
64862 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
64863 if (flags & MAP_FIXED)
64864 return addr;
64865
64866 +#ifdef CONFIG_PAX_RANDMMAP
64867 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64868 +#endif
64869 +
64870 if (addr) {
64871 addr = PAGE_ALIGN(addr);
64872 - vma = find_vma(mm, addr);
64873 - if (TASK_SIZE - len >= addr &&
64874 - (!vma || addr + len <= vma->vm_start))
64875 - return addr;
64876 + if (TASK_SIZE - len >= addr) {
64877 + vma = find_vma(mm, addr);
64878 + if (check_heap_stack_gap(vma, addr, len))
64879 + return addr;
64880 + }
64881 }
64882 if (len > mm->cached_hole_size) {
64883 - start_addr = addr = mm->free_area_cache;
64884 + start_addr = addr = mm->free_area_cache;
64885 } else {
64886 - start_addr = addr = TASK_UNMAPPED_BASE;
64887 - mm->cached_hole_size = 0;
64888 + start_addr = addr = mm->mmap_base;
64889 + mm->cached_hole_size = 0;
64890 }
64891
64892 full_search:
64893 @@ -1303,34 +1502,40 @@ full_search:
64894 * Start a new search - just in case we missed
64895 * some holes.
64896 */
64897 - if (start_addr != TASK_UNMAPPED_BASE) {
64898 - addr = TASK_UNMAPPED_BASE;
64899 - start_addr = addr;
64900 + if (start_addr != mm->mmap_base) {
64901 + start_addr = addr = mm->mmap_base;
64902 mm->cached_hole_size = 0;
64903 goto full_search;
64904 }
64905 return -ENOMEM;
64906 }
64907 - if (!vma || addr + len <= vma->vm_start) {
64908 - /*
64909 - * Remember the place where we stopped the search:
64910 - */
64911 - mm->free_area_cache = addr + len;
64912 - return addr;
64913 - }
64914 + if (check_heap_stack_gap(vma, addr, len))
64915 + break;
64916 if (addr + mm->cached_hole_size < vma->vm_start)
64917 mm->cached_hole_size = vma->vm_start - addr;
64918 addr = vma->vm_end;
64919 }
64920 +
64921 + /*
64922 + * Remember the place where we stopped the search:
64923 + */
64924 + mm->free_area_cache = addr + len;
64925 + return addr;
64926 }
64927 #endif
64928
64929 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64930 {
64931 +
64932 +#ifdef CONFIG_PAX_SEGMEXEC
64933 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64934 + return;
64935 +#endif
64936 +
64937 /*
64938 * Is this a new hole at the lowest possible address?
64939 */
64940 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64941 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64942 mm->free_area_cache = addr;
64943 mm->cached_hole_size = ~0UL;
64944 }
64945 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
64946 {
64947 struct vm_area_struct *vma;
64948 struct mm_struct *mm = current->mm;
64949 - unsigned long addr = addr0;
64950 + unsigned long base = mm->mmap_base, addr = addr0;
64951
64952 /* requested length too big for entire address space */
64953 if (len > TASK_SIZE)
64954 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
64955 if (flags & MAP_FIXED)
64956 return addr;
64957
64958 +#ifdef CONFIG_PAX_RANDMMAP
64959 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64960 +#endif
64961 +
64962 /* requesting a specific address */
64963 if (addr) {
64964 addr = PAGE_ALIGN(addr);
64965 - vma = find_vma(mm, addr);
64966 - if (TASK_SIZE - len >= addr &&
64967 - (!vma || addr + len <= vma->vm_start))
64968 - return addr;
64969 + if (TASK_SIZE - len >= addr) {
64970 + vma = find_vma(mm, addr);
64971 + if (check_heap_stack_gap(vma, addr, len))
64972 + return addr;
64973 + }
64974 }
64975
64976 /* check if free_area_cache is useful for us */
64977 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
64978 /* make sure it can fit in the remaining address space */
64979 if (addr > len) {
64980 vma = find_vma(mm, addr-len);
64981 - if (!vma || addr <= vma->vm_start)
64982 + if (check_heap_stack_gap(vma, addr - len, len))
64983 /* remember the address as a hint for next time */
64984 return (mm->free_area_cache = addr-len);
64985 }
64986 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
64987 * return with success:
64988 */
64989 vma = find_vma(mm, addr);
64990 - if (!vma || addr+len <= vma->vm_start)
64991 + if (check_heap_stack_gap(vma, addr, len))
64992 /* remember the address as a hint for next time */
64993 return (mm->free_area_cache = addr);
64994
64995 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
64996 mm->cached_hole_size = vma->vm_start - addr;
64997
64998 /* try just below the current vma->vm_start */
64999 - addr = vma->vm_start-len;
65000 - } while (len < vma->vm_start);
65001 + addr = skip_heap_stack_gap(vma, len);
65002 + } while (!IS_ERR_VALUE(addr));
65003
65004 bottomup:
65005 /*
65006 @@ -1414,13 +1624,21 @@ bottomup:
65007 * can happen with large stack limits and large mmap()
65008 * allocations.
65009 */
65010 + mm->mmap_base = TASK_UNMAPPED_BASE;
65011 +
65012 +#ifdef CONFIG_PAX_RANDMMAP
65013 + if (mm->pax_flags & MF_PAX_RANDMMAP)
65014 + mm->mmap_base += mm->delta_mmap;
65015 +#endif
65016 +
65017 + mm->free_area_cache = mm->mmap_base;
65018 mm->cached_hole_size = ~0UL;
65019 - mm->free_area_cache = TASK_UNMAPPED_BASE;
65020 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65021 /*
65022 * Restore the topdown base:
65023 */
65024 - mm->free_area_cache = mm->mmap_base;
65025 + mm->mmap_base = base;
65026 + mm->free_area_cache = base;
65027 mm->cached_hole_size = ~0UL;
65028
65029 return addr;
65030 @@ -1429,6 +1647,12 @@ bottomup:
65031
65032 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65033 {
65034 +
65035 +#ifdef CONFIG_PAX_SEGMEXEC
65036 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65037 + return;
65038 +#endif
65039 +
65040 /*
65041 * Is this a new hole at the highest possible address?
65042 */
65043 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
65044 mm->free_area_cache = addr;
65045
65046 /* dont allow allocations above current base */
65047 - if (mm->free_area_cache > mm->mmap_base)
65048 + if (mm->free_area_cache > mm->mmap_base) {
65049 mm->free_area_cache = mm->mmap_base;
65050 + mm->cached_hole_size = ~0UL;
65051 + }
65052 }
65053
65054 unsigned long
65055 @@ -1545,6 +1771,27 @@ out:
65056 return prev ? prev->vm_next : vma;
65057 }
65058
65059 +#ifdef CONFIG_PAX_SEGMEXEC
65060 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65061 +{
65062 + struct vm_area_struct *vma_m;
65063 +
65064 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65065 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65066 + BUG_ON(vma->vm_mirror);
65067 + return NULL;
65068 + }
65069 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65070 + vma_m = vma->vm_mirror;
65071 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65072 + BUG_ON(vma->vm_file != vma_m->vm_file);
65073 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65074 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
65075 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65076 + return vma_m;
65077 +}
65078 +#endif
65079 +
65080 /*
65081 * Verify that the stack growth is acceptable and
65082 * update accounting. This is shared with both the
65083 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
65084 return -ENOMEM;
65085
65086 /* Stack limit test */
65087 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
65088 if (size > rlim[RLIMIT_STACK].rlim_cur)
65089 return -ENOMEM;
65090
65091 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
65092 unsigned long limit;
65093 locked = mm->locked_vm + grow;
65094 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
65095 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65096 if (locked > limit && !capable(CAP_IPC_LOCK))
65097 return -ENOMEM;
65098 }
65099 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
65100 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65101 * vma is the last one with address > vma->vm_end. Have to extend vma.
65102 */
65103 +#ifndef CONFIG_IA64
65104 +static
65105 +#endif
65106 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65107 {
65108 int error;
65109 + bool locknext;
65110
65111 if (!(vma->vm_flags & VM_GROWSUP))
65112 return -EFAULT;
65113
65114 + /* Also guard against wrapping around to address 0. */
65115 + if (address < PAGE_ALIGN(address+1))
65116 + address = PAGE_ALIGN(address+1);
65117 + else
65118 + return -ENOMEM;
65119 +
65120 /*
65121 * We must make sure the anon_vma is allocated
65122 * so that the anon_vma locking is not a noop.
65123 */
65124 if (unlikely(anon_vma_prepare(vma)))
65125 return -ENOMEM;
65126 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65127 + if (locknext && anon_vma_prepare(vma->vm_next))
65128 + return -ENOMEM;
65129 anon_vma_lock(vma);
65130 + if (locknext)
65131 + anon_vma_lock(vma->vm_next);
65132
65133 /*
65134 * vma->vm_start/vm_end cannot change under us because the caller
65135 * is required to hold the mmap_sem in read mode. We need the
65136 - * anon_vma lock to serialize against concurrent expand_stacks.
65137 - * Also guard against wrapping around to address 0.
65138 + * anon_vma locks to serialize against concurrent expand_stacks
65139 + * and expand_upwards.
65140 */
65141 - if (address < PAGE_ALIGN(address+4))
65142 - address = PAGE_ALIGN(address+4);
65143 - else {
65144 - anon_vma_unlock(vma);
65145 - return -ENOMEM;
65146 - }
65147 error = 0;
65148
65149 /* Somebody else might have raced and expanded it already */
65150 - if (address > vma->vm_end) {
65151 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65152 + error = -ENOMEM;
65153 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65154 unsigned long size, grow;
65155
65156 size = address - vma->vm_start;
65157 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
65158 if (!error)
65159 vma->vm_end = address;
65160 }
65161 + if (locknext)
65162 + anon_vma_unlock(vma->vm_next);
65163 anon_vma_unlock(vma);
65164 return error;
65165 }
65166 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
65167 unsigned long address)
65168 {
65169 int error;
65170 + bool lockprev = false;
65171 + struct vm_area_struct *prev;
65172
65173 /*
65174 * We must make sure the anon_vma is allocated
65175 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
65176 if (error)
65177 return error;
65178
65179 + prev = vma->vm_prev;
65180 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65181 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65182 +#endif
65183 + if (lockprev && anon_vma_prepare(prev))
65184 + return -ENOMEM;
65185 + if (lockprev)
65186 + anon_vma_lock(prev);
65187 +
65188 anon_vma_lock(vma);
65189
65190 /*
65191 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
65192 */
65193
65194 /* Somebody else might have raced and expanded it already */
65195 - if (address < vma->vm_start) {
65196 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65197 + error = -ENOMEM;
65198 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65199 unsigned long size, grow;
65200
65201 +#ifdef CONFIG_PAX_SEGMEXEC
65202 + struct vm_area_struct *vma_m;
65203 +
65204 + vma_m = pax_find_mirror_vma(vma);
65205 +#endif
65206 +
65207 size = vma->vm_end - address;
65208 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65209
65210 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
65211 if (!error) {
65212 vma->vm_start = address;
65213 vma->vm_pgoff -= grow;
65214 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65215 +
65216 +#ifdef CONFIG_PAX_SEGMEXEC
65217 + if (vma_m) {
65218 + vma_m->vm_start -= grow << PAGE_SHIFT;
65219 + vma_m->vm_pgoff -= grow;
65220 + }
65221 +#endif
65222 +
65223 }
65224 }
65225 anon_vma_unlock(vma);
65226 + if (lockprev)
65227 + anon_vma_unlock(prev);
65228 return error;
65229 }
65230
65231 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
65232 do {
65233 long nrpages = vma_pages(vma);
65234
65235 +#ifdef CONFIG_PAX_SEGMEXEC
65236 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65237 + vma = remove_vma(vma);
65238 + continue;
65239 + }
65240 +#endif
65241 +
65242 mm->total_vm -= nrpages;
65243 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65244 vma = remove_vma(vma);
65245 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65246 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65247 vma->vm_prev = NULL;
65248 do {
65249 +
65250 +#ifdef CONFIG_PAX_SEGMEXEC
65251 + if (vma->vm_mirror) {
65252 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65253 + vma->vm_mirror->vm_mirror = NULL;
65254 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
65255 + vma->vm_mirror = NULL;
65256 + }
65257 +#endif
65258 +
65259 rb_erase(&vma->vm_rb, &mm->mm_rb);
65260 mm->map_count--;
65261 tail_vma = vma;
65262 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
65263 struct mempolicy *pol;
65264 struct vm_area_struct *new;
65265
65266 +#ifdef CONFIG_PAX_SEGMEXEC
65267 + struct vm_area_struct *vma_m, *new_m = NULL;
65268 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65269 +#endif
65270 +
65271 if (is_vm_hugetlb_page(vma) && (addr &
65272 ~(huge_page_mask(hstate_vma(vma)))))
65273 return -EINVAL;
65274
65275 +#ifdef CONFIG_PAX_SEGMEXEC
65276 + vma_m = pax_find_mirror_vma(vma);
65277 +
65278 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65279 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65280 + if (mm->map_count >= sysctl_max_map_count-1)
65281 + return -ENOMEM;
65282 + } else
65283 +#endif
65284 +
65285 if (mm->map_count >= sysctl_max_map_count)
65286 return -ENOMEM;
65287
65288 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65289 if (!new)
65290 return -ENOMEM;
65291
65292 +#ifdef CONFIG_PAX_SEGMEXEC
65293 + if (vma_m) {
65294 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65295 + if (!new_m) {
65296 + kmem_cache_free(vm_area_cachep, new);
65297 + return -ENOMEM;
65298 + }
65299 + }
65300 +#endif
65301 +
65302 /* most fields are the same, copy all, and then fixup */
65303 *new = *vma;
65304
65305 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65306 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65307 }
65308
65309 +#ifdef CONFIG_PAX_SEGMEXEC
65310 + if (vma_m) {
65311 + *new_m = *vma_m;
65312 + new_m->vm_mirror = new;
65313 + new->vm_mirror = new_m;
65314 +
65315 + if (new_below)
65316 + new_m->vm_end = addr_m;
65317 + else {
65318 + new_m->vm_start = addr_m;
65319 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65320 + }
65321 + }
65322 +#endif
65323 +
65324 pol = mpol_dup(vma_policy(vma));
65325 if (IS_ERR(pol)) {
65326 +
65327 +#ifdef CONFIG_PAX_SEGMEXEC
65328 + if (new_m)
65329 + kmem_cache_free(vm_area_cachep, new_m);
65330 +#endif
65331 +
65332 kmem_cache_free(vm_area_cachep, new);
65333 return PTR_ERR(pol);
65334 }
65335 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65336 else
65337 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65338
65339 +#ifdef CONFIG_PAX_SEGMEXEC
65340 + if (vma_m) {
65341 + mpol_get(pol);
65342 + vma_set_policy(new_m, pol);
65343 +
65344 + if (new_m->vm_file) {
65345 + get_file(new_m->vm_file);
65346 + if (vma_m->vm_flags & VM_EXECUTABLE)
65347 + added_exe_file_vma(mm);
65348 + }
65349 +
65350 + if (new_m->vm_ops && new_m->vm_ops->open)
65351 + new_m->vm_ops->open(new_m);
65352 +
65353 + if (new_below)
65354 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65355 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65356 + else
65357 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65358 + }
65359 +#endif
65360 +
65361 return 0;
65362 }
65363
65364 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65365 * work. This now handles partial unmappings.
65366 * Jeremy Fitzhardinge <jeremy@goop.org>
65367 */
65368 +#ifdef CONFIG_PAX_SEGMEXEC
65369 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65370 +{
65371 + int ret = __do_munmap(mm, start, len);
65372 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65373 + return ret;
65374 +
65375 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65376 +}
65377 +
65378 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65379 +#else
65380 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65381 +#endif
65382 {
65383 unsigned long end;
65384 struct vm_area_struct *vma, *prev, *last;
65385
65386 + /*
65387 + * mm->mmap_sem is required to protect against another thread
65388 + * changing the mappings in case we sleep.
65389 + */
65390 + verify_mm_writelocked(mm);
65391 +
65392 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65393 return -EINVAL;
65394
65395 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65396 /* Fix up all other VM information */
65397 remove_vma_list(mm, vma);
65398
65399 + track_exec_limit(mm, start, end, 0UL);
65400 +
65401 return 0;
65402 }
65403
65404 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65405
65406 profile_munmap(addr);
65407
65408 +#ifdef CONFIG_PAX_SEGMEXEC
65409 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65410 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65411 + return -EINVAL;
65412 +#endif
65413 +
65414 down_write(&mm->mmap_sem);
65415 ret = do_munmap(mm, addr, len);
65416 up_write(&mm->mmap_sem);
65417 return ret;
65418 }
65419
65420 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65421 -{
65422 -#ifdef CONFIG_DEBUG_VM
65423 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65424 - WARN_ON(1);
65425 - up_read(&mm->mmap_sem);
65426 - }
65427 -#endif
65428 -}
65429 -
65430 /*
65431 * this is really a simplified "do_mmap". it only handles
65432 * anonymous maps. eventually we may be able to do some
65433 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65434 struct rb_node ** rb_link, * rb_parent;
65435 pgoff_t pgoff = addr >> PAGE_SHIFT;
65436 int error;
65437 + unsigned long charged;
65438
65439 len = PAGE_ALIGN(len);
65440 if (!len)
65441 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65442
65443 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65444
65445 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65446 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65447 + flags &= ~VM_EXEC;
65448 +
65449 +#ifdef CONFIG_PAX_MPROTECT
65450 + if (mm->pax_flags & MF_PAX_MPROTECT)
65451 + flags &= ~VM_MAYEXEC;
65452 +#endif
65453 +
65454 + }
65455 +#endif
65456 +
65457 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65458 if (error & ~PAGE_MASK)
65459 return error;
65460
65461 + charged = len >> PAGE_SHIFT;
65462 +
65463 /*
65464 * mlock MCL_FUTURE?
65465 */
65466 if (mm->def_flags & VM_LOCKED) {
65467 unsigned long locked, lock_limit;
65468 - locked = len >> PAGE_SHIFT;
65469 + locked = charged;
65470 locked += mm->locked_vm;
65471 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65472 lock_limit >>= PAGE_SHIFT;
65473 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
65474 /*
65475 * Clear old maps. this also does some error checking for us
65476 */
65477 - munmap_back:
65478 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65479 if (vma && vma->vm_start < addr + len) {
65480 if (do_munmap(mm, addr, len))
65481 return -ENOMEM;
65482 - goto munmap_back;
65483 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65484 + BUG_ON(vma && vma->vm_start < addr + len);
65485 }
65486
65487 /* Check against address space limits *after* clearing old maps... */
65488 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65489 + if (!may_expand_vm(mm, charged))
65490 return -ENOMEM;
65491
65492 if (mm->map_count > sysctl_max_map_count)
65493 return -ENOMEM;
65494
65495 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65496 + if (security_vm_enough_memory(charged))
65497 return -ENOMEM;
65498
65499 /* Can we just expand an old private anonymous mapping? */
65500 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
65501 */
65502 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65503 if (!vma) {
65504 - vm_unacct_memory(len >> PAGE_SHIFT);
65505 + vm_unacct_memory(charged);
65506 return -ENOMEM;
65507 }
65508
65509 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65510 vma->vm_page_prot = vm_get_page_prot(flags);
65511 vma_link(mm, vma, prev, rb_link, rb_parent);
65512 out:
65513 - mm->total_vm += len >> PAGE_SHIFT;
65514 + mm->total_vm += charged;
65515 if (flags & VM_LOCKED) {
65516 if (!mlock_vma_pages_range(vma, addr, addr + len))
65517 - mm->locked_vm += (len >> PAGE_SHIFT);
65518 + mm->locked_vm += charged;
65519 }
65520 + track_exec_limit(mm, addr, addr + len, flags);
65521 return addr;
65522 }
65523
65524 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65525 * Walk the list again, actually closing and freeing it,
65526 * with preemption enabled, without holding any MM locks.
65527 */
65528 - while (vma)
65529 + while (vma) {
65530 + vma->vm_mirror = NULL;
65531 vma = remove_vma(vma);
65532 + }
65533
65534 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65535 }
65536 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65537 struct vm_area_struct * __vma, * prev;
65538 struct rb_node ** rb_link, * rb_parent;
65539
65540 +#ifdef CONFIG_PAX_SEGMEXEC
65541 + struct vm_area_struct *vma_m = NULL;
65542 +#endif
65543 +
65544 /*
65545 * The vm_pgoff of a purely anonymous vma should be irrelevant
65546 * until its first write fault, when page's anon_vma and index
65547 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65548 if ((vma->vm_flags & VM_ACCOUNT) &&
65549 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65550 return -ENOMEM;
65551 +
65552 +#ifdef CONFIG_PAX_SEGMEXEC
65553 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65554 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65555 + if (!vma_m)
65556 + return -ENOMEM;
65557 + }
65558 +#endif
65559 +
65560 vma_link(mm, vma, prev, rb_link, rb_parent);
65561 +
65562 +#ifdef CONFIG_PAX_SEGMEXEC
65563 + if (vma_m)
65564 + pax_mirror_vma(vma_m, vma);
65565 +#endif
65566 +
65567 return 0;
65568 }
65569
65570 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65571 struct rb_node **rb_link, *rb_parent;
65572 struct mempolicy *pol;
65573
65574 + BUG_ON(vma->vm_mirror);
65575 +
65576 /*
65577 * If anonymous vma has not yet been faulted, update new pgoff
65578 * to match new location, to increase its chance of merging.
65579 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65580 return new_vma;
65581 }
65582
65583 +#ifdef CONFIG_PAX_SEGMEXEC
65584 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65585 +{
65586 + struct vm_area_struct *prev_m;
65587 + struct rb_node **rb_link_m, *rb_parent_m;
65588 + struct mempolicy *pol_m;
65589 +
65590 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65591 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65592 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65593 + *vma_m = *vma;
65594 + pol_m = vma_policy(vma_m);
65595 + mpol_get(pol_m);
65596 + vma_set_policy(vma_m, pol_m);
65597 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65598 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65599 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65600 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65601 + if (vma_m->vm_file)
65602 + get_file(vma_m->vm_file);
65603 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65604 + vma_m->vm_ops->open(vma_m);
65605 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65606 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65607 + vma_m->vm_mirror = vma;
65608 + vma->vm_mirror = vma_m;
65609 +}
65610 +#endif
65611 +
65612 /*
65613 * Return true if the calling process may expand its vm space by the passed
65614 * number of pages
65615 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65616 unsigned long lim;
65617
65618 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65619 -
65620 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65621 if (cur + npages > lim)
65622 return 0;
65623 return 1;
65624 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65625 vma->vm_start = addr;
65626 vma->vm_end = addr + len;
65627
65628 +#ifdef CONFIG_PAX_MPROTECT
65629 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65630 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65631 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65632 + return -EPERM;
65633 + if (!(vm_flags & VM_EXEC))
65634 + vm_flags &= ~VM_MAYEXEC;
65635 +#else
65636 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65637 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65638 +#endif
65639 + else
65640 + vm_flags &= ~VM_MAYWRITE;
65641 + }
65642 +#endif
65643 +
65644 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65645 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65646
65647 diff -urNp linux-2.6.32.42/mm/mprotect.c linux-2.6.32.42/mm/mprotect.c
65648 --- linux-2.6.32.42/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
65649 +++ linux-2.6.32.42/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
65650 @@ -24,10 +24,16 @@
65651 #include <linux/mmu_notifier.h>
65652 #include <linux/migrate.h>
65653 #include <linux/perf_event.h>
65654 +
65655 +#ifdef CONFIG_PAX_MPROTECT
65656 +#include <linux/elf.h>
65657 +#endif
65658 +
65659 #include <asm/uaccess.h>
65660 #include <asm/pgtable.h>
65661 #include <asm/cacheflush.h>
65662 #include <asm/tlbflush.h>
65663 +#include <asm/mmu_context.h>
65664
65665 #ifndef pgprot_modify
65666 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65667 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
65668 flush_tlb_range(vma, start, end);
65669 }
65670
65671 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65672 +/* called while holding the mmap semaphore for writing except stack expansion */
65673 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65674 +{
65675 + unsigned long oldlimit, newlimit = 0UL;
65676 +
65677 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
65678 + return;
65679 +
65680 + spin_lock(&mm->page_table_lock);
65681 + oldlimit = mm->context.user_cs_limit;
65682 + if ((prot & VM_EXEC) && oldlimit < end)
65683 + /* USER_CS limit moved up */
65684 + newlimit = end;
65685 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65686 + /* USER_CS limit moved down */
65687 + newlimit = start;
65688 +
65689 + if (newlimit) {
65690 + mm->context.user_cs_limit = newlimit;
65691 +
65692 +#ifdef CONFIG_SMP
65693 + wmb();
65694 + cpus_clear(mm->context.cpu_user_cs_mask);
65695 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65696 +#endif
65697 +
65698 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65699 + }
65700 + spin_unlock(&mm->page_table_lock);
65701 + if (newlimit == end) {
65702 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
65703 +
65704 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
65705 + if (is_vm_hugetlb_page(vma))
65706 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65707 + else
65708 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65709 + }
65710 +}
65711 +#endif
65712 +
65713 int
65714 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65715 unsigned long start, unsigned long end, unsigned long newflags)
65716 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
65717 int error;
65718 int dirty_accountable = 0;
65719
65720 +#ifdef CONFIG_PAX_SEGMEXEC
65721 + struct vm_area_struct *vma_m = NULL;
65722 + unsigned long start_m, end_m;
65723 +
65724 + start_m = start + SEGMEXEC_TASK_SIZE;
65725 + end_m = end + SEGMEXEC_TASK_SIZE;
65726 +#endif
65727 +
65728 if (newflags == oldflags) {
65729 *pprev = vma;
65730 return 0;
65731 }
65732
65733 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65734 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65735 +
65736 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65737 + return -ENOMEM;
65738 +
65739 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65740 + return -ENOMEM;
65741 + }
65742 +
65743 /*
65744 * If we make a private mapping writable we increase our commit;
65745 * but (without finer accounting) cannot reduce our commit if we
65746 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
65747 }
65748 }
65749
65750 +#ifdef CONFIG_PAX_SEGMEXEC
65751 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65752 + if (start != vma->vm_start) {
65753 + error = split_vma(mm, vma, start, 1);
65754 + if (error)
65755 + goto fail;
65756 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65757 + *pprev = (*pprev)->vm_next;
65758 + }
65759 +
65760 + if (end != vma->vm_end) {
65761 + error = split_vma(mm, vma, end, 0);
65762 + if (error)
65763 + goto fail;
65764 + }
65765 +
65766 + if (pax_find_mirror_vma(vma)) {
65767 + error = __do_munmap(mm, start_m, end_m - start_m);
65768 + if (error)
65769 + goto fail;
65770 + } else {
65771 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65772 + if (!vma_m) {
65773 + error = -ENOMEM;
65774 + goto fail;
65775 + }
65776 + vma->vm_flags = newflags;
65777 + pax_mirror_vma(vma_m, vma);
65778 + }
65779 + }
65780 +#endif
65781 +
65782 /*
65783 * First try to merge with previous and/or next vma.
65784 */
65785 @@ -195,9 +293,21 @@ success:
65786 * vm_flags and vm_page_prot are protected by the mmap_sem
65787 * held in write mode.
65788 */
65789 +
65790 +#ifdef CONFIG_PAX_SEGMEXEC
65791 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65792 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65793 +#endif
65794 +
65795 vma->vm_flags = newflags;
65796 +
65797 +#ifdef CONFIG_PAX_MPROTECT
65798 + if (mm->binfmt && mm->binfmt->handle_mprotect)
65799 + mm->binfmt->handle_mprotect(vma, newflags);
65800 +#endif
65801 +
65802 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65803 - vm_get_page_prot(newflags));
65804 + vm_get_page_prot(vma->vm_flags));
65805
65806 if (vma_wants_writenotify(vma)) {
65807 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65808 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65809 end = start + len;
65810 if (end <= start)
65811 return -ENOMEM;
65812 +
65813 +#ifdef CONFIG_PAX_SEGMEXEC
65814 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65815 + if (end > SEGMEXEC_TASK_SIZE)
65816 + return -EINVAL;
65817 + } else
65818 +#endif
65819 +
65820 + if (end > TASK_SIZE)
65821 + return -EINVAL;
65822 +
65823 if (!arch_validate_prot(prot))
65824 return -EINVAL;
65825
65826 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65827 /*
65828 * Does the application expect PROT_READ to imply PROT_EXEC:
65829 */
65830 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65831 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65832 prot |= PROT_EXEC;
65833
65834 vm_flags = calc_vm_prot_bits(prot);
65835 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65836 if (start > vma->vm_start)
65837 prev = vma;
65838
65839 +#ifdef CONFIG_PAX_MPROTECT
65840 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65841 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
65842 +#endif
65843 +
65844 for (nstart = start ; ; ) {
65845 unsigned long newflags;
65846
65847 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65848
65849 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65850 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65851 + if (prot & (PROT_WRITE | PROT_EXEC))
65852 + gr_log_rwxmprotect(vma->vm_file);
65853 +
65854 + error = -EACCES;
65855 + goto out;
65856 + }
65857 +
65858 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65859 error = -EACCES;
65860 goto out;
65861 }
65862 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65863 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65864 if (error)
65865 goto out;
65866 +
65867 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
65868 +
65869 nstart = tmp;
65870
65871 if (nstart < prev->vm_end)
65872 diff -urNp linux-2.6.32.42/mm/mremap.c linux-2.6.32.42/mm/mremap.c
65873 --- linux-2.6.32.42/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
65874 +++ linux-2.6.32.42/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
65875 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
65876 continue;
65877 pte = ptep_clear_flush(vma, old_addr, old_pte);
65878 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65879 +
65880 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65881 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65882 + pte = pte_exprotect(pte);
65883 +#endif
65884 +
65885 set_pte_at(mm, new_addr, new_pte, pte);
65886 }
65887
65888 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
65889 if (is_vm_hugetlb_page(vma))
65890 goto Einval;
65891
65892 +#ifdef CONFIG_PAX_SEGMEXEC
65893 + if (pax_find_mirror_vma(vma))
65894 + goto Einval;
65895 +#endif
65896 +
65897 /* We can't remap across vm area boundaries */
65898 if (old_len > vma->vm_end - addr)
65899 goto Efault;
65900 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
65901 unsigned long ret = -EINVAL;
65902 unsigned long charged = 0;
65903 unsigned long map_flags;
65904 + unsigned long pax_task_size = TASK_SIZE;
65905
65906 if (new_addr & ~PAGE_MASK)
65907 goto out;
65908
65909 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65910 +#ifdef CONFIG_PAX_SEGMEXEC
65911 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65912 + pax_task_size = SEGMEXEC_TASK_SIZE;
65913 +#endif
65914 +
65915 + pax_task_size -= PAGE_SIZE;
65916 +
65917 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65918 goto out;
65919
65920 /* Check if the location we're moving into overlaps the
65921 * old location at all, and fail if it does.
65922 */
65923 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
65924 - goto out;
65925 -
65926 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
65927 + if (addr + old_len > new_addr && new_addr + new_len > addr)
65928 goto out;
65929
65930 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65931 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
65932 struct vm_area_struct *vma;
65933 unsigned long ret = -EINVAL;
65934 unsigned long charged = 0;
65935 + unsigned long pax_task_size = TASK_SIZE;
65936
65937 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65938 goto out;
65939 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
65940 if (!new_len)
65941 goto out;
65942
65943 +#ifdef CONFIG_PAX_SEGMEXEC
65944 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65945 + pax_task_size = SEGMEXEC_TASK_SIZE;
65946 +#endif
65947 +
65948 + pax_task_size -= PAGE_SIZE;
65949 +
65950 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65951 + old_len > pax_task_size || addr > pax_task_size-old_len)
65952 + goto out;
65953 +
65954 if (flags & MREMAP_FIXED) {
65955 if (flags & MREMAP_MAYMOVE)
65956 ret = mremap_to(addr, old_len, new_addr, new_len);
65957 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
65958 addr + new_len);
65959 }
65960 ret = addr;
65961 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65962 goto out;
65963 }
65964 }
65965 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
65966 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65967 if (ret)
65968 goto out;
65969 +
65970 + map_flags = vma->vm_flags;
65971 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65972 + if (!(ret & ~PAGE_MASK)) {
65973 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65974 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65975 + }
65976 }
65977 out:
65978 if (ret & ~PAGE_MASK)
65979 diff -urNp linux-2.6.32.42/mm/nommu.c linux-2.6.32.42/mm/nommu.c
65980 --- linux-2.6.32.42/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
65981 +++ linux-2.6.32.42/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
65982 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
65983 int sysctl_overcommit_ratio = 50; /* default is 50% */
65984 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65985 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
65986 -int heap_stack_gap = 0;
65987
65988 atomic_long_t mmap_pages_allocated;
65989
65990 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
65991 EXPORT_SYMBOL(find_vma);
65992
65993 /*
65994 - * find a VMA
65995 - * - we don't extend stack VMAs under NOMMU conditions
65996 - */
65997 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
65998 -{
65999 - return find_vma(mm, addr);
66000 -}
66001 -
66002 -/*
66003 * expand a stack to a given address
66004 * - not supported under NOMMU conditions
66005 */
66006 diff -urNp linux-2.6.32.42/mm/page_alloc.c linux-2.6.32.42/mm/page_alloc.c
66007 --- linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
66008 +++ linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:56:37.000000000 -0400
66009 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
66010 int bad = 0;
66011 int wasMlocked = __TestClearPageMlocked(page);
66012
66013 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66014 + unsigned long index = 1UL << order;
66015 +#endif
66016 +
66017 kmemcheck_free_shadow(page, order);
66018
66019 for (i = 0 ; i < (1 << order) ; ++i)
66020 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
66021 debug_check_no_obj_freed(page_address(page),
66022 PAGE_SIZE << order);
66023 }
66024 +
66025 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66026 + for (; index; --index)
66027 + sanitize_highpage(page + index - 1);
66028 +#endif
66029 +
66030 arch_free_page(page, order);
66031 kernel_map_pages(page, 1 << order, 0);
66032
66033 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
66034 arch_alloc_page(page, order);
66035 kernel_map_pages(page, 1 << order, 1);
66036
66037 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
66038 if (gfp_flags & __GFP_ZERO)
66039 prep_zero_page(page, order, gfp_flags);
66040 +#endif
66041
66042 if (order && (gfp_flags & __GFP_COMP))
66043 prep_compound_page(page, order);
66044 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
66045 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
66046 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
66047 }
66048 +
66049 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66050 + sanitize_highpage(page);
66051 +#endif
66052 +
66053 arch_free_page(page, 0);
66054 kernel_map_pages(page, 1, 0);
66055
66056 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
66057 int cpu;
66058 struct zone *zone;
66059
66060 + pax_track_stack();
66061 +
66062 for_each_populated_zone(zone) {
66063 show_node(zone);
66064 printk("%s per-cpu:\n", zone->name);
66065 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
66066 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
66067 }
66068 #else
66069 -static void inline setup_usemap(struct pglist_data *pgdat,
66070 +static inline void setup_usemap(struct pglist_data *pgdat,
66071 struct zone *zone, unsigned long zonesize) {}
66072 #endif /* CONFIG_SPARSEMEM */
66073
66074 diff -urNp linux-2.6.32.42/mm/percpu.c linux-2.6.32.42/mm/percpu.c
66075 --- linux-2.6.32.42/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
66076 +++ linux-2.6.32.42/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
66077 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
66078 static unsigned int pcpu_last_unit_cpu __read_mostly;
66079
66080 /* the address of the first chunk which starts with the kernel static area */
66081 -void *pcpu_base_addr __read_mostly;
66082 +void *pcpu_base_addr __read_only;
66083 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66084
66085 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66086 diff -urNp linux-2.6.32.42/mm/rmap.c linux-2.6.32.42/mm/rmap.c
66087 --- linux-2.6.32.42/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
66088 +++ linux-2.6.32.42/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
66089 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
66090 /* page_table_lock to protect against threads */
66091 spin_lock(&mm->page_table_lock);
66092 if (likely(!vma->anon_vma)) {
66093 +
66094 +#ifdef CONFIG_PAX_SEGMEXEC
66095 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66096 +
66097 + if (vma_m) {
66098 + BUG_ON(vma_m->anon_vma);
66099 + vma_m->anon_vma = anon_vma;
66100 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
66101 + }
66102 +#endif
66103 +
66104 vma->anon_vma = anon_vma;
66105 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
66106 allocated = NULL;
66107 diff -urNp linux-2.6.32.42/mm/shmem.c linux-2.6.32.42/mm/shmem.c
66108 --- linux-2.6.32.42/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
66109 +++ linux-2.6.32.42/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
66110 @@ -31,7 +31,7 @@
66111 #include <linux/swap.h>
66112 #include <linux/ima.h>
66113
66114 -static struct vfsmount *shm_mnt;
66115 +struct vfsmount *shm_mnt;
66116
66117 #ifdef CONFIG_SHMEM
66118 /*
66119 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
66120 goto unlock;
66121 }
66122 entry = shmem_swp_entry(info, index, NULL);
66123 + if (!entry)
66124 + goto unlock;
66125 if (entry->val) {
66126 /*
66127 * The more uptodate page coming down from a stacked
66128 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
66129 struct vm_area_struct pvma;
66130 struct page *page;
66131
66132 + pax_track_stack();
66133 +
66134 spol = mpol_cond_copy(&mpol,
66135 mpol_shared_policy_lookup(&info->policy, idx));
66136
66137 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
66138
66139 info = SHMEM_I(inode);
66140 inode->i_size = len-1;
66141 - if (len <= (char *)inode - (char *)info) {
66142 + if (len <= (char *)inode - (char *)info && len <= 64) {
66143 /* do it inline */
66144 memcpy(info, symname, len);
66145 inode->i_op = &shmem_symlink_inline_operations;
66146 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
66147 int err = -ENOMEM;
66148
66149 /* Round up to L1_CACHE_BYTES to resist false sharing */
66150 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66151 - L1_CACHE_BYTES), GFP_KERNEL);
66152 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66153 if (!sbinfo)
66154 return -ENOMEM;
66155
66156 diff -urNp linux-2.6.32.42/mm/slab.c linux-2.6.32.42/mm/slab.c
66157 --- linux-2.6.32.42/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
66158 +++ linux-2.6.32.42/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
66159 @@ -174,7 +174,7 @@
66160
66161 /* Legal flag mask for kmem_cache_create(). */
66162 #if DEBUG
66163 -# define CREATE_MASK (SLAB_RED_ZONE | \
66164 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66165 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66166 SLAB_CACHE_DMA | \
66167 SLAB_STORE_USER | \
66168 @@ -182,7 +182,7 @@
66169 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66170 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66171 #else
66172 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66173 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66174 SLAB_CACHE_DMA | \
66175 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66176 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66177 @@ -308,7 +308,7 @@ struct kmem_list3 {
66178 * Need this for bootstrapping a per node allocator.
66179 */
66180 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66181 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66182 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66183 #define CACHE_CACHE 0
66184 #define SIZE_AC MAX_NUMNODES
66185 #define SIZE_L3 (2 * MAX_NUMNODES)
66186 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
66187 if ((x)->max_freeable < i) \
66188 (x)->max_freeable = i; \
66189 } while (0)
66190 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66191 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66192 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66193 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66194 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66195 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66196 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66197 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66198 #else
66199 #define STATS_INC_ACTIVE(x) do { } while (0)
66200 #define STATS_DEC_ACTIVE(x) do { } while (0)
66201 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
66202 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66203 */
66204 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66205 - const struct slab *slab, void *obj)
66206 + const struct slab *slab, const void *obj)
66207 {
66208 u32 offset = (obj - slab->s_mem);
66209 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66210 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
66211 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66212 sizes[INDEX_AC].cs_size,
66213 ARCH_KMALLOC_MINALIGN,
66214 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66215 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66216 NULL);
66217
66218 if (INDEX_AC != INDEX_L3) {
66219 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
66220 kmem_cache_create(names[INDEX_L3].name,
66221 sizes[INDEX_L3].cs_size,
66222 ARCH_KMALLOC_MINALIGN,
66223 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66224 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66225 NULL);
66226 }
66227
66228 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
66229 sizes->cs_cachep = kmem_cache_create(names->name,
66230 sizes->cs_size,
66231 ARCH_KMALLOC_MINALIGN,
66232 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66233 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66234 NULL);
66235 }
66236 #ifdef CONFIG_ZONE_DMA
66237 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
66238 }
66239 /* cpu stats */
66240 {
66241 - unsigned long allochit = atomic_read(&cachep->allochit);
66242 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66243 - unsigned long freehit = atomic_read(&cachep->freehit);
66244 - unsigned long freemiss = atomic_read(&cachep->freemiss);
66245 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66246 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66247 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66248 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66249
66250 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66251 allochit, allocmiss, freehit, freemiss);
66252 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
66253
66254 static int __init slab_proc_init(void)
66255 {
66256 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66257 + mode_t gr_mode = S_IRUGO;
66258 +
66259 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66260 + gr_mode = S_IRUSR;
66261 +#endif
66262 +
66263 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66264 #ifdef CONFIG_DEBUG_SLAB_LEAK
66265 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66266 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66267 #endif
66268 return 0;
66269 }
66270 module_init(slab_proc_init);
66271 #endif
66272
66273 +void check_object_size(const void *ptr, unsigned long n, bool to)
66274 +{
66275 +
66276 +#ifdef CONFIG_PAX_USERCOPY
66277 + struct page *page;
66278 + struct kmem_cache *cachep = NULL;
66279 + struct slab *slabp;
66280 + unsigned int objnr;
66281 + unsigned long offset;
66282 +
66283 + if (!n)
66284 + return;
66285 +
66286 + if (ZERO_OR_NULL_PTR(ptr))
66287 + goto report;
66288 +
66289 + if (!virt_addr_valid(ptr))
66290 + return;
66291 +
66292 + page = virt_to_head_page(ptr);
66293 +
66294 + if (!PageSlab(page)) {
66295 + if (object_is_on_stack(ptr, n) == -1)
66296 + goto report;
66297 + return;
66298 + }
66299 +
66300 + cachep = page_get_cache(page);
66301 + if (!(cachep->flags & SLAB_USERCOPY))
66302 + goto report;
66303 +
66304 + slabp = page_get_slab(page);
66305 + objnr = obj_to_index(cachep, slabp, ptr);
66306 + BUG_ON(objnr >= cachep->num);
66307 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66308 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66309 + return;
66310 +
66311 +report:
66312 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66313 +#endif
66314 +
66315 +}
66316 +EXPORT_SYMBOL(check_object_size);
66317 +
66318 /**
66319 * ksize - get the actual amount of memory allocated for a given object
66320 * @objp: Pointer to the object
66321 diff -urNp linux-2.6.32.42/mm/slob.c linux-2.6.32.42/mm/slob.c
66322 --- linux-2.6.32.42/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66323 +++ linux-2.6.32.42/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
66324 @@ -29,7 +29,7 @@
66325 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66326 * alloc_pages() directly, allocating compound pages so the page order
66327 * does not have to be separately tracked, and also stores the exact
66328 - * allocation size in page->private so that it can be used to accurately
66329 + * allocation size in slob_page->size so that it can be used to accurately
66330 * provide ksize(). These objects are detected in kfree() because slob_page()
66331 * is false for them.
66332 *
66333 @@ -58,6 +58,7 @@
66334 */
66335
66336 #include <linux/kernel.h>
66337 +#include <linux/sched.h>
66338 #include <linux/slab.h>
66339 #include <linux/mm.h>
66340 #include <linux/swap.h> /* struct reclaim_state */
66341 @@ -100,7 +101,8 @@ struct slob_page {
66342 unsigned long flags; /* mandatory */
66343 atomic_t _count; /* mandatory */
66344 slobidx_t units; /* free units left in page */
66345 - unsigned long pad[2];
66346 + unsigned long pad[1];
66347 + unsigned long size; /* size when >=PAGE_SIZE */
66348 slob_t *free; /* first free slob_t in page */
66349 struct list_head list; /* linked list of free pages */
66350 };
66351 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66352 */
66353 static inline int is_slob_page(struct slob_page *sp)
66354 {
66355 - return PageSlab((struct page *)sp);
66356 + return PageSlab((struct page *)sp) && !sp->size;
66357 }
66358
66359 static inline void set_slob_page(struct slob_page *sp)
66360 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66361
66362 static inline struct slob_page *slob_page(const void *addr)
66363 {
66364 - return (struct slob_page *)virt_to_page(addr);
66365 + return (struct slob_page *)virt_to_head_page(addr);
66366 }
66367
66368 /*
66369 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66370 /*
66371 * Return the size of a slob block.
66372 */
66373 -static slobidx_t slob_units(slob_t *s)
66374 +static slobidx_t slob_units(const slob_t *s)
66375 {
66376 if (s->units > 0)
66377 return s->units;
66378 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66379 /*
66380 * Return the next free slob block pointer after this one.
66381 */
66382 -static slob_t *slob_next(slob_t *s)
66383 +static slob_t *slob_next(const slob_t *s)
66384 {
66385 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66386 slobidx_t next;
66387 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66388 /*
66389 * Returns true if s is the last free block in its page.
66390 */
66391 -static int slob_last(slob_t *s)
66392 +static int slob_last(const slob_t *s)
66393 {
66394 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66395 }
66396 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
66397 if (!page)
66398 return NULL;
66399
66400 + set_slob_page(page);
66401 return page_address(page);
66402 }
66403
66404 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
66405 if (!b)
66406 return NULL;
66407 sp = slob_page(b);
66408 - set_slob_page(sp);
66409
66410 spin_lock_irqsave(&slob_lock, flags);
66411 sp->units = SLOB_UNITS(PAGE_SIZE);
66412 sp->free = b;
66413 + sp->size = 0;
66414 INIT_LIST_HEAD(&sp->list);
66415 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66416 set_slob_page_free(sp, slob_list);
66417 @@ -475,10 +478,9 @@ out:
66418 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66419 #endif
66420
66421 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66422 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66423 {
66424 - unsigned int *m;
66425 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66426 + slob_t *m;
66427 void *ret;
66428
66429 lockdep_trace_alloc(gfp);
66430 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66431
66432 if (!m)
66433 return NULL;
66434 - *m = size;
66435 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66436 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66437 + m[0].units = size;
66438 + m[1].units = align;
66439 ret = (void *)m + align;
66440
66441 trace_kmalloc_node(_RET_IP_, ret,
66442 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
66443
66444 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
66445 if (ret) {
66446 - struct page *page;
66447 - page = virt_to_page(ret);
66448 - page->private = size;
66449 + struct slob_page *sp;
66450 + sp = slob_page(ret);
66451 + sp->size = size;
66452 }
66453
66454 trace_kmalloc_node(_RET_IP_, ret,
66455 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
66456 kmemleak_alloc(ret, size, 1, gfp);
66457 return ret;
66458 }
66459 +
66460 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66461 +{
66462 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66463 +
66464 + return __kmalloc_node_align(size, gfp, node, align);
66465 +}
66466 EXPORT_SYMBOL(__kmalloc_node);
66467
66468 void kfree(const void *block)
66469 @@ -528,13 +540,81 @@ void kfree(const void *block)
66470 sp = slob_page(block);
66471 if (is_slob_page(sp)) {
66472 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66473 - unsigned int *m = (unsigned int *)(block - align);
66474 - slob_free(m, *m + align);
66475 - } else
66476 + slob_t *m = (slob_t *)(block - align);
66477 + slob_free(m, m[0].units + align);
66478 + } else {
66479 + clear_slob_page(sp);
66480 + free_slob_page(sp);
66481 + sp->size = 0;
66482 put_page(&sp->page);
66483 + }
66484 }
66485 EXPORT_SYMBOL(kfree);
66486
66487 +void check_object_size(const void *ptr, unsigned long n, bool to)
66488 +{
66489 +
66490 +#ifdef CONFIG_PAX_USERCOPY
66491 + struct slob_page *sp;
66492 + const slob_t *free;
66493 + const void *base;
66494 +
66495 + if (!n)
66496 + return;
66497 +
66498 + if (ZERO_OR_NULL_PTR(ptr))
66499 + goto report;
66500 +
66501 + if (!virt_addr_valid(ptr))
66502 + return;
66503 +
66504 + sp = slob_page(ptr);
66505 + if (!PageSlab((struct page*)sp)) {
66506 + if (object_is_on_stack(ptr, n) == -1)
66507 + goto report;
66508 + return;
66509 + }
66510 +
66511 + if (sp->size) {
66512 + base = page_address(&sp->page);
66513 + if (base <= ptr && n <= sp->size - (ptr - base))
66514 + return;
66515 + goto report;
66516 + }
66517 +
66518 + /* some tricky double walking to find the chunk */
66519 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66520 + free = sp->free;
66521 +
66522 + while (!slob_last(free) && (void *)free <= ptr) {
66523 + base = free + slob_units(free);
66524 + free = slob_next(free);
66525 + }
66526 +
66527 + while (base < (void *)free) {
66528 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66529 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66530 + int offset;
66531 +
66532 + if (ptr < base + align)
66533 + goto report;
66534 +
66535 + offset = ptr - base - align;
66536 + if (offset < m) {
66537 + if (n <= m - offset)
66538 + return;
66539 + goto report;
66540 + }
66541 + base += size;
66542 + }
66543 +
66544 +report:
66545 + pax_report_usercopy(ptr, n, to, NULL);
66546 +#endif
66547 +
66548 +}
66549 +EXPORT_SYMBOL(check_object_size);
66550 +
66551 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66552 size_t ksize(const void *block)
66553 {
66554 @@ -547,10 +627,10 @@ size_t ksize(const void *block)
66555 sp = slob_page(block);
66556 if (is_slob_page(sp)) {
66557 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66558 - unsigned int *m = (unsigned int *)(block - align);
66559 - return SLOB_UNITS(*m) * SLOB_UNIT;
66560 + slob_t *m = (slob_t *)(block - align);
66561 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66562 } else
66563 - return sp->page.private;
66564 + return sp->size;
66565 }
66566 EXPORT_SYMBOL(ksize);
66567
66568 @@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66569 {
66570 void *b;
66571
66572 +#ifdef CONFIG_PAX_USERCOPY
66573 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66574 +#else
66575 if (c->size < PAGE_SIZE) {
66576 b = slob_alloc(c->size, flags, c->align, node);
66577 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66578 SLOB_UNITS(c->size) * SLOB_UNIT,
66579 flags, node);
66580 } else {
66581 + struct slob_page *sp;
66582 +
66583 b = slob_new_pages(flags, get_order(c->size), node);
66584 + sp = slob_page(b);
66585 + sp->size = c->size;
66586 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66587 PAGE_SIZE << get_order(c->size),
66588 flags, node);
66589 }
66590 +#endif
66591
66592 if (c->ctor)
66593 c->ctor(b);
66594 @@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66595
66596 static void __kmem_cache_free(void *b, int size)
66597 {
66598 - if (size < PAGE_SIZE)
66599 + struct slob_page *sp = slob_page(b);
66600 +
66601 + if (is_slob_page(sp))
66602 slob_free(b, size);
66603 - else
66604 + else {
66605 + clear_slob_page(sp);
66606 + free_slob_page(sp);
66607 + sp->size = 0;
66608 slob_free_pages(b, get_order(size));
66609 + }
66610 }
66611
66612 static void kmem_rcu_free(struct rcu_head *head)
66613 @@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66614
66615 void kmem_cache_free(struct kmem_cache *c, void *b)
66616 {
66617 + int size = c->size;
66618 +
66619 +#ifdef CONFIG_PAX_USERCOPY
66620 + if (size + c->align < PAGE_SIZE) {
66621 + size += c->align;
66622 + b -= c->align;
66623 + }
66624 +#endif
66625 +
66626 kmemleak_free_recursive(b, c->flags);
66627 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66628 struct slob_rcu *slob_rcu;
66629 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66630 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66631 INIT_RCU_HEAD(&slob_rcu->head);
66632 - slob_rcu->size = c->size;
66633 + slob_rcu->size = size;
66634 call_rcu(&slob_rcu->head, kmem_rcu_free);
66635 } else {
66636 - __kmem_cache_free(b, c->size);
66637 + __kmem_cache_free(b, size);
66638 }
66639
66640 trace_kmem_cache_free(_RET_IP_, b);
66641 diff -urNp linux-2.6.32.42/mm/slub.c linux-2.6.32.42/mm/slub.c
66642 --- linux-2.6.32.42/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
66643 +++ linux-2.6.32.42/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
66644 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
66645 if (!t->addr)
66646 return;
66647
66648 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66649 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66650 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66651 }
66652
66653 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
66654
66655 page = virt_to_head_page(x);
66656
66657 + BUG_ON(!PageSlab(page));
66658 +
66659 slab_free(s, page, x, _RET_IP_);
66660
66661 trace_kmem_cache_free(_RET_IP_, x);
66662 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
66663 * Merge control. If this is set then no merging of slab caches will occur.
66664 * (Could be removed. This was introduced to pacify the merge skeptics.)
66665 */
66666 -static int slub_nomerge;
66667 +static int slub_nomerge = 1;
66668
66669 /*
66670 * Calculate the order of allocation given an slab object size.
66671 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
66672 * list to avoid pounding the page allocator excessively.
66673 */
66674 set_min_partial(s, ilog2(s->size));
66675 - s->refcount = 1;
66676 + atomic_set(&s->refcount, 1);
66677 #ifdef CONFIG_NUMA
66678 s->remote_node_defrag_ratio = 1000;
66679 #endif
66680 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
66681 void kmem_cache_destroy(struct kmem_cache *s)
66682 {
66683 down_write(&slub_lock);
66684 - s->refcount--;
66685 - if (!s->refcount) {
66686 + if (atomic_dec_and_test(&s->refcount)) {
66687 list_del(&s->list);
66688 up_write(&slub_lock);
66689 if (kmem_cache_close(s)) {
66690 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
66691 __setup("slub_nomerge", setup_slub_nomerge);
66692
66693 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
66694 - const char *name, int size, gfp_t gfp_flags)
66695 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
66696 {
66697 - unsigned int flags = 0;
66698 -
66699 if (gfp_flags & SLUB_DMA)
66700 - flags = SLAB_CACHE_DMA;
66701 + flags |= SLAB_CACHE_DMA;
66702
66703 /*
66704 * This function is called with IRQs disabled during early-boot on
66705 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
66706 EXPORT_SYMBOL(__kmalloc_node);
66707 #endif
66708
66709 +void check_object_size(const void *ptr, unsigned long n, bool to)
66710 +{
66711 +
66712 +#ifdef CONFIG_PAX_USERCOPY
66713 + struct page *page;
66714 + struct kmem_cache *s = NULL;
66715 + unsigned long offset;
66716 +
66717 + if (!n)
66718 + return;
66719 +
66720 + if (ZERO_OR_NULL_PTR(ptr))
66721 + goto report;
66722 +
66723 + if (!virt_addr_valid(ptr))
66724 + return;
66725 +
66726 + page = get_object_page(ptr);
66727 +
66728 + if (!page) {
66729 + if (object_is_on_stack(ptr, n) == -1)
66730 + goto report;
66731 + return;
66732 + }
66733 +
66734 + s = page->slab;
66735 + if (!(s->flags & SLAB_USERCOPY))
66736 + goto report;
66737 +
66738 + offset = (ptr - page_address(page)) % s->size;
66739 + if (offset <= s->objsize && n <= s->objsize - offset)
66740 + return;
66741 +
66742 +report:
66743 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66744 +#endif
66745 +
66746 +}
66747 +EXPORT_SYMBOL(check_object_size);
66748 +
66749 size_t ksize(const void *object)
66750 {
66751 struct page *page;
66752 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
66753 * kmem_cache_open for slab_state == DOWN.
66754 */
66755 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
66756 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
66757 - kmalloc_caches[0].refcount = -1;
66758 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
66759 + atomic_set(&kmalloc_caches[0].refcount, -1);
66760 caches++;
66761
66762 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
66763 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
66764 /* Caches that are not of the two-to-the-power-of size */
66765 if (KMALLOC_MIN_SIZE <= 32) {
66766 create_kmalloc_cache(&kmalloc_caches[1],
66767 - "kmalloc-96", 96, GFP_NOWAIT);
66768 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
66769 caches++;
66770 }
66771 if (KMALLOC_MIN_SIZE <= 64) {
66772 create_kmalloc_cache(&kmalloc_caches[2],
66773 - "kmalloc-192", 192, GFP_NOWAIT);
66774 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
66775 caches++;
66776 }
66777
66778 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66779 create_kmalloc_cache(&kmalloc_caches[i],
66780 - "kmalloc", 1 << i, GFP_NOWAIT);
66781 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
66782 caches++;
66783 }
66784
66785 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
66786 /*
66787 * We may have set a slab to be unmergeable during bootstrap.
66788 */
66789 - if (s->refcount < 0)
66790 + if (atomic_read(&s->refcount) < 0)
66791 return 1;
66792
66793 return 0;
66794 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
66795 if (s) {
66796 int cpu;
66797
66798 - s->refcount++;
66799 + atomic_inc(&s->refcount);
66800 /*
66801 * Adjust the object sizes so that we clear
66802 * the complete object on kzalloc.
66803 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
66804
66805 if (sysfs_slab_alias(s, name)) {
66806 down_write(&slub_lock);
66807 - s->refcount--;
66808 + atomic_dec(&s->refcount);
66809 up_write(&slub_lock);
66810 goto err;
66811 }
66812 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
66813
66814 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66815 {
66816 - return sprintf(buf, "%d\n", s->refcount - 1);
66817 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66818 }
66819 SLAB_ATTR_RO(aliases);
66820
66821 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
66822 kfree(s);
66823 }
66824
66825 -static struct sysfs_ops slab_sysfs_ops = {
66826 +static const struct sysfs_ops slab_sysfs_ops = {
66827 .show = slab_attr_show,
66828 .store = slab_attr_store,
66829 };
66830 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
66831 return 0;
66832 }
66833
66834 -static struct kset_uevent_ops slab_uevent_ops = {
66835 +static const struct kset_uevent_ops slab_uevent_ops = {
66836 .filter = uevent_filter,
66837 };
66838
66839 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
66840
66841 static int __init slab_proc_init(void)
66842 {
66843 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66844 + mode_t gr_mode = S_IRUGO;
66845 +
66846 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66847 + gr_mode = S_IRUSR;
66848 +#endif
66849 +
66850 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66851 return 0;
66852 }
66853 module_init(slab_proc_init);
66854 diff -urNp linux-2.6.32.42/mm/util.c linux-2.6.32.42/mm/util.c
66855 --- linux-2.6.32.42/mm/util.c 2011-03-27 14:31:47.000000000 -0400
66856 +++ linux-2.6.32.42/mm/util.c 2011-04-17 15:56:46.000000000 -0400
66857 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
66858 void arch_pick_mmap_layout(struct mm_struct *mm)
66859 {
66860 mm->mmap_base = TASK_UNMAPPED_BASE;
66861 +
66862 +#ifdef CONFIG_PAX_RANDMMAP
66863 + if (mm->pax_flags & MF_PAX_RANDMMAP)
66864 + mm->mmap_base += mm->delta_mmap;
66865 +#endif
66866 +
66867 mm->get_unmapped_area = arch_get_unmapped_area;
66868 mm->unmap_area = arch_unmap_area;
66869 }
66870 diff -urNp linux-2.6.32.42/mm/vmalloc.c linux-2.6.32.42/mm/vmalloc.c
66871 --- linux-2.6.32.42/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
66872 +++ linux-2.6.32.42/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
66873 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
66874
66875 pte = pte_offset_kernel(pmd, addr);
66876 do {
66877 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66878 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66879 +
66880 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66881 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
66882 + BUG_ON(!pte_exec(*pte));
66883 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
66884 + continue;
66885 + }
66886 +#endif
66887 +
66888 + {
66889 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66890 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66891 + }
66892 } while (pte++, addr += PAGE_SIZE, addr != end);
66893 }
66894
66895 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
66896 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
66897 {
66898 pte_t *pte;
66899 + int ret = -ENOMEM;
66900
66901 /*
66902 * nr is a running index into the array which helps higher level
66903 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
66904 pte = pte_alloc_kernel(pmd, addr);
66905 if (!pte)
66906 return -ENOMEM;
66907 +
66908 + pax_open_kernel();
66909 do {
66910 struct page *page = pages[*nr];
66911
66912 - if (WARN_ON(!pte_none(*pte)))
66913 - return -EBUSY;
66914 - if (WARN_ON(!page))
66915 - return -ENOMEM;
66916 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66917 + if (!(pgprot_val(prot) & _PAGE_NX))
66918 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
66919 + else
66920 +#endif
66921 +
66922 + if (WARN_ON(!pte_none(*pte))) {
66923 + ret = -EBUSY;
66924 + goto out;
66925 + }
66926 + if (WARN_ON(!page)) {
66927 + ret = -ENOMEM;
66928 + goto out;
66929 + }
66930 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
66931 (*nr)++;
66932 } while (pte++, addr += PAGE_SIZE, addr != end);
66933 - return 0;
66934 + ret = 0;
66935 +out:
66936 + pax_close_kernel();
66937 + return ret;
66938 }
66939
66940 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
66941 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
66942 * and fall back on vmalloc() if that fails. Others
66943 * just put it in the vmalloc space.
66944 */
66945 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
66946 +#ifdef CONFIG_MODULES
66947 +#ifdef MODULES_VADDR
66948 unsigned long addr = (unsigned long)x;
66949 if (addr >= MODULES_VADDR && addr < MODULES_END)
66950 return 1;
66951 #endif
66952 +
66953 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66954 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
66955 + return 1;
66956 +#endif
66957 +
66958 +#endif
66959 +
66960 return is_vmalloc_addr(x);
66961 }
66962
66963 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
66964
66965 if (!pgd_none(*pgd)) {
66966 pud_t *pud = pud_offset(pgd, addr);
66967 +#ifdef CONFIG_X86
66968 + if (!pud_large(*pud))
66969 +#endif
66970 if (!pud_none(*pud)) {
66971 pmd_t *pmd = pmd_offset(pud, addr);
66972 +#ifdef CONFIG_X86
66973 + if (!pmd_large(*pmd))
66974 +#endif
66975 if (!pmd_none(*pmd)) {
66976 pte_t *ptep, pte;
66977
66978 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
66979 struct rb_node *tmp;
66980
66981 while (*p) {
66982 - struct vmap_area *tmp;
66983 + struct vmap_area *varea;
66984
66985 parent = *p;
66986 - tmp = rb_entry(parent, struct vmap_area, rb_node);
66987 - if (va->va_start < tmp->va_end)
66988 + varea = rb_entry(parent, struct vmap_area, rb_node);
66989 + if (va->va_start < varea->va_end)
66990 p = &(*p)->rb_left;
66991 - else if (va->va_end > tmp->va_start)
66992 + else if (va->va_end > varea->va_start)
66993 p = &(*p)->rb_right;
66994 else
66995 BUG();
66996 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
66997 struct vm_struct *area;
66998
66999 BUG_ON(in_interrupt());
67000 +
67001 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67002 + if (flags & VM_KERNEXEC) {
67003 + if (start != VMALLOC_START || end != VMALLOC_END)
67004 + return NULL;
67005 + start = (unsigned long)MODULES_EXEC_VADDR;
67006 + end = (unsigned long)MODULES_EXEC_END;
67007 + }
67008 +#endif
67009 +
67010 if (flags & VM_IOREMAP) {
67011 int bit = fls(size);
67012
67013 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
67014 if (count > totalram_pages)
67015 return NULL;
67016
67017 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67018 + if (!(pgprot_val(prot) & _PAGE_NX))
67019 + flags |= VM_KERNEXEC;
67020 +#endif
67021 +
67022 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67023 __builtin_return_address(0));
67024 if (!area)
67025 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
67026 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67027 return NULL;
67028
67029 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67030 + if (!(pgprot_val(prot) & _PAGE_NX))
67031 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67032 + node, gfp_mask, caller);
67033 + else
67034 +#endif
67035 +
67036 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
67037 VMALLOC_END, node, gfp_mask, caller);
67038
67039 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
67040 return addr;
67041 }
67042
67043 +#undef __vmalloc
67044 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67045 {
67046 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67047 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
67048 * For tight control over page level allocator and protection flags
67049 * use __vmalloc() instead.
67050 */
67051 +#undef vmalloc
67052 void *vmalloc(unsigned long size)
67053 {
67054 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67055 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
67056 * The resulting memory area is zeroed so it can be mapped to userspace
67057 * without leaking data.
67058 */
67059 +#undef vmalloc_user
67060 void *vmalloc_user(unsigned long size)
67061 {
67062 struct vm_struct *area;
67063 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
67064 * For tight control over page level allocator and protection flags
67065 * use __vmalloc() instead.
67066 */
67067 +#undef vmalloc_node
67068 void *vmalloc_node(unsigned long size, int node)
67069 {
67070 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67071 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
67072 * For tight control over page level allocator and protection flags
67073 * use __vmalloc() instead.
67074 */
67075 -
67076 +#undef vmalloc_exec
67077 void *vmalloc_exec(unsigned long size)
67078 {
67079 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67080 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67081 -1, __builtin_return_address(0));
67082 }
67083
67084 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
67085 * Allocate enough 32bit PA addressable pages to cover @size from the
67086 * page level allocator and map them into contiguous kernel virtual space.
67087 */
67088 +#undef vmalloc_32
67089 void *vmalloc_32(unsigned long size)
67090 {
67091 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67092 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
67093 * The resulting memory area is 32bit addressable and zeroed so it can be
67094 * mapped to userspace without leaking data.
67095 */
67096 +#undef vmalloc_32_user
67097 void *vmalloc_32_user(unsigned long size)
67098 {
67099 struct vm_struct *area;
67100 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
67101 unsigned long uaddr = vma->vm_start;
67102 unsigned long usize = vma->vm_end - vma->vm_start;
67103
67104 + BUG_ON(vma->vm_mirror);
67105 +
67106 if ((PAGE_SIZE-1) & (unsigned long)addr)
67107 return -EINVAL;
67108
67109 diff -urNp linux-2.6.32.42/mm/vmstat.c linux-2.6.32.42/mm/vmstat.c
67110 --- linux-2.6.32.42/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
67111 +++ linux-2.6.32.42/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
67112 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
67113 *
67114 * vm_stat contains the global counters
67115 */
67116 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67117 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67118 EXPORT_SYMBOL(vm_stat);
67119
67120 #ifdef CONFIG_SMP
67121 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
67122 v = p->vm_stat_diff[i];
67123 p->vm_stat_diff[i] = 0;
67124 local_irq_restore(flags);
67125 - atomic_long_add(v, &zone->vm_stat[i]);
67126 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67127 global_diff[i] += v;
67128 #ifdef CONFIG_NUMA
67129 /* 3 seconds idle till flush */
67130 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
67131
67132 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67133 if (global_diff[i])
67134 - atomic_long_add(global_diff[i], &vm_stat[i]);
67135 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67136 }
67137
67138 #endif
67139 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
67140 start_cpu_timer(cpu);
67141 #endif
67142 #ifdef CONFIG_PROC_FS
67143 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67144 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67145 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67146 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67147 + {
67148 + mode_t gr_mode = S_IRUGO;
67149 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67150 + gr_mode = S_IRUSR;
67151 +#endif
67152 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67153 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67154 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67155 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67156 +#else
67157 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67158 +#endif
67159 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67160 + }
67161 #endif
67162 return 0;
67163 }
67164 diff -urNp linux-2.6.32.42/net/8021q/vlan.c linux-2.6.32.42/net/8021q/vlan.c
67165 --- linux-2.6.32.42/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
67166 +++ linux-2.6.32.42/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
67167 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
67168 err = -EPERM;
67169 if (!capable(CAP_NET_ADMIN))
67170 break;
67171 - if ((args.u.name_type >= 0) &&
67172 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67173 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67174 struct vlan_net *vn;
67175
67176 vn = net_generic(net, vlan_net_id);
67177 diff -urNp linux-2.6.32.42/net/atm/atm_misc.c linux-2.6.32.42/net/atm/atm_misc.c
67178 --- linux-2.6.32.42/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
67179 +++ linux-2.6.32.42/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
67180 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
67181 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67182 return 1;
67183 atm_return(vcc,truesize);
67184 - atomic_inc(&vcc->stats->rx_drop);
67185 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67186 return 0;
67187 }
67188
67189 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
67190 }
67191 }
67192 atm_return(vcc,guess);
67193 - atomic_inc(&vcc->stats->rx_drop);
67194 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67195 return NULL;
67196 }
67197
67198 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
67199
67200 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67201 {
67202 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67203 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67204 __SONET_ITEMS
67205 #undef __HANDLE_ITEM
67206 }
67207 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
67208
67209 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67210 {
67211 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
67212 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67213 __SONET_ITEMS
67214 #undef __HANDLE_ITEM
67215 }
67216 diff -urNp linux-2.6.32.42/net/atm/mpoa_caches.c linux-2.6.32.42/net/atm/mpoa_caches.c
67217 --- linux-2.6.32.42/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
67218 +++ linux-2.6.32.42/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
67219 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
67220 struct timeval now;
67221 struct k_message msg;
67222
67223 + pax_track_stack();
67224 +
67225 do_gettimeofday(&now);
67226
67227 write_lock_irq(&client->egress_lock);
67228 diff -urNp linux-2.6.32.42/net/atm/proc.c linux-2.6.32.42/net/atm/proc.c
67229 --- linux-2.6.32.42/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
67230 +++ linux-2.6.32.42/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
67231 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
67232 const struct k_atm_aal_stats *stats)
67233 {
67234 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67235 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
67236 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
67237 - atomic_read(&stats->rx_drop));
67238 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67239 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67240 + atomic_read_unchecked(&stats->rx_drop));
67241 }
67242
67243 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67244 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
67245 {
67246 struct sock *sk = sk_atm(vcc);
67247
67248 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67249 + seq_printf(seq, "%p ", NULL);
67250 +#else
67251 seq_printf(seq, "%p ", vcc);
67252 +#endif
67253 +
67254 if (!vcc->dev)
67255 seq_printf(seq, "Unassigned ");
67256 else
67257 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
67258 {
67259 if (!vcc->dev)
67260 seq_printf(seq, sizeof(void *) == 4 ?
67261 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67262 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
67263 +#else
67264 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
67265 +#endif
67266 else
67267 seq_printf(seq, "%3d %3d %5d ",
67268 vcc->dev->number, vcc->vpi, vcc->vci);
67269 diff -urNp linux-2.6.32.42/net/atm/resources.c linux-2.6.32.42/net/atm/resources.c
67270 --- linux-2.6.32.42/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
67271 +++ linux-2.6.32.42/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
67272 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
67273 static void copy_aal_stats(struct k_atm_aal_stats *from,
67274 struct atm_aal_stats *to)
67275 {
67276 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67277 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67278 __AAL_STAT_ITEMS
67279 #undef __HANDLE_ITEM
67280 }
67281 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67282 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67283 struct atm_aal_stats *to)
67284 {
67285 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67286 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67287 __AAL_STAT_ITEMS
67288 #undef __HANDLE_ITEM
67289 }
67290 diff -urNp linux-2.6.32.42/net/bluetooth/l2cap.c linux-2.6.32.42/net/bluetooth/l2cap.c
67291 --- linux-2.6.32.42/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67292 +++ linux-2.6.32.42/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
67293 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67294 err = -ENOTCONN;
67295 break;
67296 }
67297 -
67298 + memset(&cinfo, 0, sizeof(cinfo));
67299 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
67300 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
67301
67302 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
67303
67304 /* Reject if config buffer is too small. */
67305 len = cmd_len - sizeof(*req);
67306 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67307 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67308 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
67309 l2cap_build_conf_rsp(sk, rsp,
67310 L2CAP_CONF_REJECT, flags), rsp);
67311 diff -urNp linux-2.6.32.42/net/bluetooth/rfcomm/sock.c linux-2.6.32.42/net/bluetooth/rfcomm/sock.c
67312 --- linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
67313 +++ linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
67314 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
67315
67316 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
67317
67318 + memset(&cinfo, 0, sizeof(cinfo));
67319 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
67320 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
67321
67322 diff -urNp linux-2.6.32.42/net/bridge/br_private.h linux-2.6.32.42/net/bridge/br_private.h
67323 --- linux-2.6.32.42/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
67324 +++ linux-2.6.32.42/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
67325 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
67326
67327 #ifdef CONFIG_SYSFS
67328 /* br_sysfs_if.c */
67329 -extern struct sysfs_ops brport_sysfs_ops;
67330 +extern const struct sysfs_ops brport_sysfs_ops;
67331 extern int br_sysfs_addif(struct net_bridge_port *p);
67332
67333 /* br_sysfs_br.c */
67334 diff -urNp linux-2.6.32.42/net/bridge/br_stp_if.c linux-2.6.32.42/net/bridge/br_stp_if.c
67335 --- linux-2.6.32.42/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67336 +++ linux-2.6.32.42/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
67337 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
67338 char *envp[] = { NULL };
67339
67340 if (br->stp_enabled == BR_USER_STP) {
67341 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
67342 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
67343 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
67344 br->dev->name, r);
67345
67346 diff -urNp linux-2.6.32.42/net/bridge/br_sysfs_if.c linux-2.6.32.42/net/bridge/br_sysfs_if.c
67347 --- linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
67348 +++ linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
67349 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
67350 return ret;
67351 }
67352
67353 -struct sysfs_ops brport_sysfs_ops = {
67354 +const struct sysfs_ops brport_sysfs_ops = {
67355 .show = brport_show,
67356 .store = brport_store,
67357 };
67358 diff -urNp linux-2.6.32.42/net/bridge/netfilter/ebtables.c linux-2.6.32.42/net/bridge/netfilter/ebtables.c
67359 --- linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
67360 +++ linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
67361 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
67362 unsigned int entries_size, nentries;
67363 char *entries;
67364
67365 + pax_track_stack();
67366 +
67367 if (cmd == EBT_SO_GET_ENTRIES) {
67368 entries_size = t->private->entries_size;
67369 nentries = t->private->nentries;
67370 diff -urNp linux-2.6.32.42/net/can/bcm.c linux-2.6.32.42/net/can/bcm.c
67371 --- linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
67372 +++ linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
67373 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
67374 struct bcm_sock *bo = bcm_sk(sk);
67375 struct bcm_op *op;
67376
67377 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67378 + seq_printf(m, ">>> socket %p", NULL);
67379 + seq_printf(m, " / sk %p", NULL);
67380 + seq_printf(m, " / bo %p", NULL);
67381 +#else
67382 seq_printf(m, ">>> socket %p", sk->sk_socket);
67383 seq_printf(m, " / sk %p", sk);
67384 seq_printf(m, " / bo %p", bo);
67385 +#endif
67386 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
67387 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
67388 seq_printf(m, " <<<\n");
67389 diff -urNp linux-2.6.32.42/net/core/dev.c linux-2.6.32.42/net/core/dev.c
67390 --- linux-2.6.32.42/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
67391 +++ linux-2.6.32.42/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
67392 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
67393 if (no_module && capable(CAP_NET_ADMIN))
67394 no_module = request_module("netdev-%s", name);
67395 if (no_module && capable(CAP_SYS_MODULE)) {
67396 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67397 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
67398 +#else
67399 if (!request_module("%s", name))
67400 pr_err("Loading kernel module for a network device "
67401 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67402 "instead\n", name);
67403 +#endif
67404 }
67405 }
67406 EXPORT_SYMBOL(dev_load);
67407 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
67408 }
67409 EXPORT_SYMBOL(netif_rx_ni);
67410
67411 -static void net_tx_action(struct softirq_action *h)
67412 +static void net_tx_action(void)
67413 {
67414 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67415
67416 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
67417 EXPORT_SYMBOL(netif_napi_del);
67418
67419
67420 -static void net_rx_action(struct softirq_action *h)
67421 +static void net_rx_action(void)
67422 {
67423 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
67424 unsigned long time_limit = jiffies + 2;
67425 diff -urNp linux-2.6.32.42/net/core/flow.c linux-2.6.32.42/net/core/flow.c
67426 --- linux-2.6.32.42/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
67427 +++ linux-2.6.32.42/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
67428 @@ -35,11 +35,11 @@ struct flow_cache_entry {
67429 atomic_t *object_ref;
67430 };
67431
67432 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67433 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67434
67435 static u32 flow_hash_shift;
67436 #define flow_hash_size (1 << flow_hash_shift)
67437 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
67438 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
67439
67440 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
67441
67442 @@ -52,7 +52,7 @@ struct flow_percpu_info {
67443 u32 hash_rnd;
67444 int count;
67445 };
67446 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
67447 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
67448
67449 #define flow_hash_rnd_recalc(cpu) \
67450 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
67451 @@ -69,7 +69,7 @@ struct flow_flush_info {
67452 atomic_t cpuleft;
67453 struct completion completion;
67454 };
67455 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
67456 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
67457
67458 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
67459
67460 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
67461 if (fle->family == family &&
67462 fle->dir == dir &&
67463 flow_key_compare(key, &fle->key) == 0) {
67464 - if (fle->genid == atomic_read(&flow_cache_genid)) {
67465 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
67466 void *ret = fle->object;
67467
67468 if (ret)
67469 @@ -228,7 +228,7 @@ nocache:
67470 err = resolver(net, key, family, dir, &obj, &obj_ref);
67471
67472 if (fle && !err) {
67473 - fle->genid = atomic_read(&flow_cache_genid);
67474 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67475
67476 if (fle->object)
67477 atomic_dec(fle->object_ref);
67478 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
67479
67480 fle = flow_table(cpu)[i];
67481 for (; fle; fle = fle->next) {
67482 - unsigned genid = atomic_read(&flow_cache_genid);
67483 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
67484
67485 if (!fle->object || fle->genid == genid)
67486 continue;
67487 diff -urNp linux-2.6.32.42/net/core/skbuff.c linux-2.6.32.42/net/core/skbuff.c
67488 --- linux-2.6.32.42/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
67489 +++ linux-2.6.32.42/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
67490 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
67491 struct sk_buff *frag_iter;
67492 struct sock *sk = skb->sk;
67493
67494 + pax_track_stack();
67495 +
67496 /*
67497 * __skb_splice_bits() only fails if the output has no room left,
67498 * so no point in going over the frag_list for the error case.
67499 diff -urNp linux-2.6.32.42/net/core/sock.c linux-2.6.32.42/net/core/sock.c
67500 --- linux-2.6.32.42/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
67501 +++ linux-2.6.32.42/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
67502 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
67503 break;
67504
67505 case SO_PEERCRED:
67506 + {
67507 + struct ucred peercred;
67508 if (len > sizeof(sk->sk_peercred))
67509 len = sizeof(sk->sk_peercred);
67510 - if (copy_to_user(optval, &sk->sk_peercred, len))
67511 + peercred = sk->sk_peercred;
67512 + if (copy_to_user(optval, &peercred, len))
67513 return -EFAULT;
67514 goto lenout;
67515 + }
67516
67517 case SO_PEERNAME:
67518 {
67519 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
67520 */
67521 smp_wmb();
67522 atomic_set(&sk->sk_refcnt, 1);
67523 - atomic_set(&sk->sk_drops, 0);
67524 + atomic_set_unchecked(&sk->sk_drops, 0);
67525 }
67526 EXPORT_SYMBOL(sock_init_data);
67527
67528 diff -urNp linux-2.6.32.42/net/decnet/sysctl_net_decnet.c linux-2.6.32.42/net/decnet/sysctl_net_decnet.c
67529 --- linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
67530 +++ linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
67531 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
67532
67533 if (len > *lenp) len = *lenp;
67534
67535 - if (copy_to_user(buffer, addr, len))
67536 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67537 return -EFAULT;
67538
67539 *lenp = len;
67540 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67541
67542 if (len > *lenp) len = *lenp;
67543
67544 - if (copy_to_user(buffer, devname, len))
67545 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67546 return -EFAULT;
67547
67548 *lenp = len;
67549 diff -urNp linux-2.6.32.42/net/econet/Kconfig linux-2.6.32.42/net/econet/Kconfig
67550 --- linux-2.6.32.42/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67551 +++ linux-2.6.32.42/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67552 @@ -4,7 +4,7 @@
67553
67554 config ECONET
67555 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67556 - depends on EXPERIMENTAL && INET
67557 + depends on EXPERIMENTAL && INET && BROKEN
67558 ---help---
67559 Econet is a fairly old and slow networking protocol mainly used by
67560 Acorn computers to access file and print servers. It uses native
67561 diff -urNp linux-2.6.32.42/net/ieee802154/dgram.c linux-2.6.32.42/net/ieee802154/dgram.c
67562 --- linux-2.6.32.42/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67563 +++ linux-2.6.32.42/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67564 @@ -318,7 +318,7 @@ out:
67565 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67566 {
67567 if (sock_queue_rcv_skb(sk, skb) < 0) {
67568 - atomic_inc(&sk->sk_drops);
67569 + atomic_inc_unchecked(&sk->sk_drops);
67570 kfree_skb(skb);
67571 return NET_RX_DROP;
67572 }
67573 diff -urNp linux-2.6.32.42/net/ieee802154/raw.c linux-2.6.32.42/net/ieee802154/raw.c
67574 --- linux-2.6.32.42/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67575 +++ linux-2.6.32.42/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67576 @@ -206,7 +206,7 @@ out:
67577 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67578 {
67579 if (sock_queue_rcv_skb(sk, skb) < 0) {
67580 - atomic_inc(&sk->sk_drops);
67581 + atomic_inc_unchecked(&sk->sk_drops);
67582 kfree_skb(skb);
67583 return NET_RX_DROP;
67584 }
67585 diff -urNp linux-2.6.32.42/net/ipv4/inet_diag.c linux-2.6.32.42/net/ipv4/inet_diag.c
67586 --- linux-2.6.32.42/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67587 +++ linux-2.6.32.42/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
67588 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67589 r->idiag_retrans = 0;
67590
67591 r->id.idiag_if = sk->sk_bound_dev_if;
67592 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67593 + r->id.idiag_cookie[0] = 0;
67594 + r->id.idiag_cookie[1] = 0;
67595 +#else
67596 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67597 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67598 +#endif
67599
67600 r->id.idiag_sport = inet->sport;
67601 r->id.idiag_dport = inet->dport;
67602 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67603 r->idiag_family = tw->tw_family;
67604 r->idiag_retrans = 0;
67605 r->id.idiag_if = tw->tw_bound_dev_if;
67606 +
67607 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67608 + r->id.idiag_cookie[0] = 0;
67609 + r->id.idiag_cookie[1] = 0;
67610 +#else
67611 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67612 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67613 +#endif
67614 +
67615 r->id.idiag_sport = tw->tw_sport;
67616 r->id.idiag_dport = tw->tw_dport;
67617 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67618 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67619 if (sk == NULL)
67620 goto unlock;
67621
67622 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67623 err = -ESTALE;
67624 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67625 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67626 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67627 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67628 goto out;
67629 +#endif
67630
67631 err = -ENOMEM;
67632 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
67633 @@ -436,7 +450,7 @@ static int valid_cc(const void *bc, int
67634 return 0;
67635 if (cc == len)
67636 return 1;
67637 - if (op->yes < 4)
67638 + if (op->yes < 4 || op->yes & 3)
67639 return 0;
67640 len -= op->yes;
67641 bc += op->yes;
67642 @@ -446,11 +460,11 @@ static int valid_cc(const void *bc, int
67643
67644 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
67645 {
67646 - const unsigned char *bc = bytecode;
67647 + const void *bc = bytecode;
67648 int len = bytecode_len;
67649
67650 while (len > 0) {
67651 - struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
67652 + const struct inet_diag_bc_op *op = bc;
67653
67654 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
67655 switch (op->code) {
67656 @@ -461,22 +475,20 @@ static int inet_diag_bc_audit(const void
67657 case INET_DIAG_BC_S_LE:
67658 case INET_DIAG_BC_D_GE:
67659 case INET_DIAG_BC_D_LE:
67660 - if (op->yes < 4 || op->yes > len + 4)
67661 - return -EINVAL;
67662 case INET_DIAG_BC_JMP:
67663 - if (op->no < 4 || op->no > len + 4)
67664 + if (op->no < 4 || op->no > len + 4 || op->no & 3)
67665 return -EINVAL;
67666 if (op->no < len &&
67667 !valid_cc(bytecode, bytecode_len, len - op->no))
67668 return -EINVAL;
67669 break;
67670 case INET_DIAG_BC_NOP:
67671 - if (op->yes < 4 || op->yes > len + 4)
67672 - return -EINVAL;
67673 break;
67674 default:
67675 return -EINVAL;
67676 }
67677 + if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
67678 + return -EINVAL;
67679 bc += op->yes;
67680 len -= op->yes;
67681 }
67682 @@ -581,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
67683 r->idiag_retrans = req->retrans;
67684
67685 r->id.idiag_if = sk->sk_bound_dev_if;
67686 +
67687 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67688 + r->id.idiag_cookie[0] = 0;
67689 + r->id.idiag_cookie[1] = 0;
67690 +#else
67691 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
67692 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
67693 +#endif
67694
67695 tmo = req->expires - jiffies;
67696 if (tmo < 0)
67697 diff -urNp linux-2.6.32.42/net/ipv4/inet_hashtables.c linux-2.6.32.42/net/ipv4/inet_hashtables.c
67698 --- linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67699 +++ linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
67700 @@ -18,11 +18,14 @@
67701 #include <linux/sched.h>
67702 #include <linux/slab.h>
67703 #include <linux/wait.h>
67704 +#include <linux/security.h>
67705
67706 #include <net/inet_connection_sock.h>
67707 #include <net/inet_hashtables.h>
67708 #include <net/ip.h>
67709
67710 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
67711 +
67712 /*
67713 * Allocate and initialize a new local port bind bucket.
67714 * The bindhash mutex for snum's hash chain must be held here.
67715 @@ -490,6 +493,8 @@ ok:
67716 }
67717 spin_unlock(&head->lock);
67718
67719 + gr_update_task_in_ip_table(current, inet_sk(sk));
67720 +
67721 if (tw) {
67722 inet_twsk_deschedule(tw, death_row);
67723 inet_twsk_put(tw);
67724 diff -urNp linux-2.6.32.42/net/ipv4/inetpeer.c linux-2.6.32.42/net/ipv4/inetpeer.c
67725 --- linux-2.6.32.42/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
67726 +++ linux-2.6.32.42/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
67727 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
67728 struct inet_peer *p, *n;
67729 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
67730
67731 + pax_track_stack();
67732 +
67733 /* Look up for the address quickly. */
67734 read_lock_bh(&peer_pool_lock);
67735 p = lookup(daddr, NULL);
67736 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
67737 return NULL;
67738 n->v4daddr = daddr;
67739 atomic_set(&n->refcnt, 1);
67740 - atomic_set(&n->rid, 0);
67741 + atomic_set_unchecked(&n->rid, 0);
67742 n->ip_id_count = secure_ip_id(daddr);
67743 n->tcp_ts_stamp = 0;
67744
67745 diff -urNp linux-2.6.32.42/net/ipv4/ip_fragment.c linux-2.6.32.42/net/ipv4/ip_fragment.c
67746 --- linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
67747 +++ linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
67748 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
67749 return 0;
67750
67751 start = qp->rid;
67752 - end = atomic_inc_return(&peer->rid);
67753 + end = atomic_inc_return_unchecked(&peer->rid);
67754 qp->rid = end;
67755
67756 rc = qp->q.fragments && (end - start) > max;
67757 diff -urNp linux-2.6.32.42/net/ipv4/ip_sockglue.c linux-2.6.32.42/net/ipv4/ip_sockglue.c
67758 --- linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67759 +++ linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67760 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
67761 int val;
67762 int len;
67763
67764 + pax_track_stack();
67765 +
67766 if (level != SOL_IP)
67767 return -EOPNOTSUPP;
67768
67769 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c
67770 --- linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
67771 +++ linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
67772 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
67773 private = &tmp;
67774 }
67775 #endif
67776 + memset(&info, 0, sizeof(info));
67777 info.valid_hooks = t->valid_hooks;
67778 memcpy(info.hook_entry, private->hook_entry,
67779 sizeof(info.hook_entry));
67780 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c
67781 --- linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
67782 +++ linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
67783 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
67784 private = &tmp;
67785 }
67786 #endif
67787 + memset(&info, 0, sizeof(info));
67788 info.valid_hooks = t->valid_hooks;
67789 memcpy(info.hook_entry, private->hook_entry,
67790 sizeof(info.hook_entry));
67791 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c
67792 --- linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
67793 +++ linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
67794 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
67795
67796 *len = 0;
67797
67798 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
67799 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
67800 if (*octets == NULL) {
67801 if (net_ratelimit())
67802 printk("OOM in bsalg (%d)\n", __LINE__);
67803 diff -urNp linux-2.6.32.42/net/ipv4/raw.c linux-2.6.32.42/net/ipv4/raw.c
67804 --- linux-2.6.32.42/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
67805 +++ linux-2.6.32.42/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
67806 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
67807 /* Charge it to the socket. */
67808
67809 if (sock_queue_rcv_skb(sk, skb) < 0) {
67810 - atomic_inc(&sk->sk_drops);
67811 + atomic_inc_unchecked(&sk->sk_drops);
67812 kfree_skb(skb);
67813 return NET_RX_DROP;
67814 }
67815 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
67816 int raw_rcv(struct sock *sk, struct sk_buff *skb)
67817 {
67818 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
67819 - atomic_inc(&sk->sk_drops);
67820 + atomic_inc_unchecked(&sk->sk_drops);
67821 kfree_skb(skb);
67822 return NET_RX_DROP;
67823 }
67824 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
67825
67826 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
67827 {
67828 + struct icmp_filter filter;
67829 +
67830 + if (optlen < 0)
67831 + return -EINVAL;
67832 if (optlen > sizeof(struct icmp_filter))
67833 optlen = sizeof(struct icmp_filter);
67834 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
67835 + if (copy_from_user(&filter, optval, optlen))
67836 return -EFAULT;
67837 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
67838 +
67839 return 0;
67840 }
67841
67842 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
67843 {
67844 + struct icmp_filter filter;
67845 int len, ret = -EFAULT;
67846
67847 if (get_user(len, optlen))
67848 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
67849 if (len > sizeof(struct icmp_filter))
67850 len = sizeof(struct icmp_filter);
67851 ret = -EFAULT;
67852 + memcpy(&filter, &raw_sk(sk)->filter, len);
67853 if (put_user(len, optlen) ||
67854 - copy_to_user(optval, &raw_sk(sk)->filter, len))
67855 + copy_to_user(optval, &filter, len))
67856 goto out;
67857 ret = 0;
67858 out: return ret;
67859 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
67860 sk_wmem_alloc_get(sp),
67861 sk_rmem_alloc_get(sp),
67862 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67863 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67864 + atomic_read(&sp->sk_refcnt),
67865 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67866 + NULL,
67867 +#else
67868 + sp,
67869 +#endif
67870 + atomic_read_unchecked(&sp->sk_drops));
67871 }
67872
67873 static int raw_seq_show(struct seq_file *seq, void *v)
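The raw_seticmpfilter()/raw_geticmpfilter() hunks above add an explicit rejection of negative optlen and stage both directions of the copy through a stack-local struct icmp_filter instead of operating on raw_sk(sk)->filter directly. The userspace sketch below is illustrative only: memcpy() stands in for copy_from_user(), and struct toy_filter and sock_filter are made-up stand-ins for struct icmp_filter and the socket field; it reproduces just the reject-clamp-stage pattern of the setsockopt path.

/* Illustrative userspace sketch -- NOT part of the patch above. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_filter { uint32_t data; };       /* stand-in for struct icmp_filter */

static struct toy_filter sock_filter;       /* stand-in for raw_sk(sk)->filter */

static int set_filter(const void *optval, int optlen)
{
        struct toy_filter filter;

        if (optlen < 0)
                return -EINVAL;                 /* new: negative lengths rejected   */
        if (optlen > (int)sizeof(filter))
                optlen = sizeof(filter);        /* clamp to the destination size    */
        memcpy(&filter, optval, optlen);        /* staging copy into the local      */
        memcpy(&sock_filter, &filter, optlen);  /* only then update the "socket"    */
        return 0;
}

int main(void)
{
        uint8_t blob[64] = { 0xde, 0xad, 0xbe, 0xef };

        printf("oversized: %d\n", set_filter(blob, sizeof(blob))); /* clamped, returns 0 */
        printf("negative:  %d\n", set_filter(blob, -1));           /* returns -EINVAL    */
        return 0;
}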
67874 diff -urNp linux-2.6.32.42/net/ipv4/route.c linux-2.6.32.42/net/ipv4/route.c
67875 --- linux-2.6.32.42/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
67876 +++ linux-2.6.32.42/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
67877 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
67878
67879 static inline int rt_genid(struct net *net)
67880 {
67881 - return atomic_read(&net->ipv4.rt_genid);
67882 + return atomic_read_unchecked(&net->ipv4.rt_genid);
67883 }
67884
67885 #ifdef CONFIG_PROC_FS
67886 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
67887 unsigned char shuffle;
67888
67889 get_random_bytes(&shuffle, sizeof(shuffle));
67890 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
67891 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
67892 }
67893
67894 /*
67895 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
67896
67897 static __net_init int rt_secret_timer_init(struct net *net)
67898 {
67899 - atomic_set(&net->ipv4.rt_genid,
67900 + atomic_set_unchecked(&net->ipv4.rt_genid,
67901 (int) ((num_physpages ^ (num_physpages>>8)) ^
67902 (jiffies ^ (jiffies >> 7))));
67903
67904 diff -urNp linux-2.6.32.42/net/ipv4/tcp.c linux-2.6.32.42/net/ipv4/tcp.c
67905 --- linux-2.6.32.42/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
67906 +++ linux-2.6.32.42/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
67907 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
67908 int val;
67909 int err = 0;
67910
67911 + pax_track_stack();
67912 +
67913 /* This is a string value all the others are int's */
67914 if (optname == TCP_CONGESTION) {
67915 char name[TCP_CA_NAME_MAX];
67916 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
67917 struct tcp_sock *tp = tcp_sk(sk);
67918 int val, len;
67919
67920 + pax_track_stack();
67921 +
67922 if (get_user(len, optlen))
67923 return -EFAULT;
67924
67925 diff -urNp linux-2.6.32.42/net/ipv4/tcp_ipv4.c linux-2.6.32.42/net/ipv4/tcp_ipv4.c
67926 --- linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
67927 +++ linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
67928 @@ -84,6 +84,9 @@
67929 int sysctl_tcp_tw_reuse __read_mostly;
67930 int sysctl_tcp_low_latency __read_mostly;
67931
67932 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67933 +extern int grsec_enable_blackhole;
67934 +#endif
67935
67936 #ifdef CONFIG_TCP_MD5SIG
67937 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
67938 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
67939 return 0;
67940
67941 reset:
67942 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67943 + if (!grsec_enable_blackhole)
67944 +#endif
67945 tcp_v4_send_reset(rsk, skb);
67946 discard:
67947 kfree_skb(skb);
67948 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
67949 TCP_SKB_CB(skb)->sacked = 0;
67950
67951 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67952 - if (!sk)
67953 + if (!sk) {
67954 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67955 + ret = 1;
67956 +#endif
67957 goto no_tcp_socket;
67958 + }
67959
67960 process:
67961 - if (sk->sk_state == TCP_TIME_WAIT)
67962 + if (sk->sk_state == TCP_TIME_WAIT) {
67963 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67964 + ret = 2;
67965 +#endif
67966 goto do_time_wait;
67967 + }
67968
67969 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
67970 goto discard_and_relse;
67971 @@ -1650,6 +1664,10 @@ no_tcp_socket:
67972 bad_packet:
67973 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67974 } else {
67975 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67976 + if (!grsec_enable_blackhole || (ret == 1 &&
67977 + (skb->dev->flags & IFF_LOOPBACK)))
67978 +#endif
67979 tcp_v4_send_reset(NULL, skb);
67980 }
67981
67982 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
67983 0, /* non standard timer */
67984 0, /* open_requests have no inode */
67985 atomic_read(&sk->sk_refcnt),
67986 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67987 + NULL,
67988 +#else
67989 req,
67990 +#endif
67991 len);
67992 }
67993
67994 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
67995 sock_i_uid(sk),
67996 icsk->icsk_probes_out,
67997 sock_i_ino(sk),
67998 - atomic_read(&sk->sk_refcnt), sk,
67999 + atomic_read(&sk->sk_refcnt),
68000 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68001 + NULL,
68002 +#else
68003 + sk,
68004 +#endif
68005 jiffies_to_clock_t(icsk->icsk_rto),
68006 jiffies_to_clock_t(icsk->icsk_ack.ato),
68007 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68008 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
68009 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
68010 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68011 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68012 - atomic_read(&tw->tw_refcnt), tw, len);
68013 + atomic_read(&tw->tw_refcnt),
68014 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68015 + NULL,
68016 +#else
68017 + tw,
68018 +#endif
68019 + len);
68020 }
68021
68022 #define TMPSZ 150
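The CONFIG_GRKERNSEC_BLACKHOLE hunks above wrap tcp_v4_send_reset() so that, with the blackhole sysctl enabled, an unmatched segment (ret == 1, i.e. no listening socket) only draws a RST when it arrived on a loopback interface; the same guard pattern recurs in tcp_minisocks.c, udp.c and the IPv6 counterparts further down. The toy program below is illustrative only (send_reset() is a made-up predicate, not kernel code) and just tabulates that condition.

/* Illustrative toy model -- NOT part of the patch above. */
#include <stdbool.h>
#include <stdio.h>

static bool send_reset(bool blackhole_on, bool no_socket, bool on_loopback)
{
        /* mirrors: !grsec_enable_blackhole || (ret == 1 && (dev->flags & IFF_LOOPBACK)) */
        return !blackhole_on || (no_socket && on_loopback);
}

int main(void)
{
        printf("blackhole off, remote probe: %d\n", send_reset(false, true, false)); /* 1 */
        printf("blackhole on,  remote probe: %d\n", send_reset(true,  true, false)); /* 0 */
        printf("blackhole on,  local  probe: %d\n", send_reset(true,  true, true));  /* 1 */
        return 0;
}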
68023 diff -urNp linux-2.6.32.42/net/ipv4/tcp_minisocks.c linux-2.6.32.42/net/ipv4/tcp_minisocks.c
68024 --- linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
68025 +++ linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
68026 @@ -26,6 +26,10 @@
68027 #include <net/inet_common.h>
68028 #include <net/xfrm.h>
68029
68030 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68031 +extern int grsec_enable_blackhole;
68032 +#endif
68033 +
68034 #ifdef CONFIG_SYSCTL
68035 #define SYNC_INIT 0 /* let the user enable it */
68036 #else
68037 @@ -672,6 +676,10 @@ listen_overflow:
68038
68039 embryonic_reset:
68040 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68041 +
68042 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68043 + if (!grsec_enable_blackhole)
68044 +#endif
68045 if (!(flg & TCP_FLAG_RST))
68046 req->rsk_ops->send_reset(sk, skb);
68047
68048 diff -urNp linux-2.6.32.42/net/ipv4/tcp_output.c linux-2.6.32.42/net/ipv4/tcp_output.c
68049 --- linux-2.6.32.42/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
68050 +++ linux-2.6.32.42/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
68051 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
68052 __u8 *md5_hash_location;
68053 int mss;
68054
68055 + pax_track_stack();
68056 +
68057 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
68058 if (skb == NULL)
68059 return NULL;
68060 diff -urNp linux-2.6.32.42/net/ipv4/tcp_probe.c linux-2.6.32.42/net/ipv4/tcp_probe.c
68061 --- linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
68062 +++ linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
68063 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
68064 if (cnt + width >= len)
68065 break;
68066
68067 - if (copy_to_user(buf + cnt, tbuf, width))
68068 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68069 return -EFAULT;
68070 cnt += width;
68071 }
68072 diff -urNp linux-2.6.32.42/net/ipv4/tcp_timer.c linux-2.6.32.42/net/ipv4/tcp_timer.c
68073 --- linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
68074 +++ linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
68075 @@ -21,6 +21,10 @@
68076 #include <linux/module.h>
68077 #include <net/tcp.h>
68078
68079 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68080 +extern int grsec_lastack_retries;
68081 +#endif
68082 +
68083 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68084 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68085 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68086 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
68087 }
68088 }
68089
68090 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68091 + if ((sk->sk_state == TCP_LAST_ACK) &&
68092 + (grsec_lastack_retries > 0) &&
68093 + (grsec_lastack_retries < retry_until))
68094 + retry_until = grsec_lastack_retries;
68095 +#endif
68096 +
68097 if (retransmits_timed_out(sk, retry_until)) {
68098 /* Has it gone just too far? */
68099 tcp_write_err(sk);
68100 diff -urNp linux-2.6.32.42/net/ipv4/udp.c linux-2.6.32.42/net/ipv4/udp.c
68101 --- linux-2.6.32.42/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
68102 +++ linux-2.6.32.42/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
68103 @@ -86,6 +86,7 @@
68104 #include <linux/types.h>
68105 #include <linux/fcntl.h>
68106 #include <linux/module.h>
68107 +#include <linux/security.h>
68108 #include <linux/socket.h>
68109 #include <linux/sockios.h>
68110 #include <linux/igmp.h>
68111 @@ -106,6 +107,10 @@
68112 #include <net/xfrm.h>
68113 #include "udp_impl.h"
68114
68115 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68116 +extern int grsec_enable_blackhole;
68117 +#endif
68118 +
68119 struct udp_table udp_table;
68120 EXPORT_SYMBOL(udp_table);
68121
68122 @@ -371,6 +376,9 @@ found:
68123 return s;
68124 }
68125
68126 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68127 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68128 +
68129 /*
68130 * This routine is called by the ICMP module when it gets some
68131 * sort of error condition. If err < 0 then the socket should
68132 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68133 dport = usin->sin_port;
68134 if (dport == 0)
68135 return -EINVAL;
68136 +
68137 + err = gr_search_udp_sendmsg(sk, usin);
68138 + if (err)
68139 + return err;
68140 } else {
68141 if (sk->sk_state != TCP_ESTABLISHED)
68142 return -EDESTADDRREQ;
68143 +
68144 + err = gr_search_udp_sendmsg(sk, NULL);
68145 + if (err)
68146 + return err;
68147 +
68148 daddr = inet->daddr;
68149 dport = inet->dport;
68150 /* Open fast path for connected socket.
68151 @@ -945,6 +962,10 @@ try_again:
68152 if (!skb)
68153 goto out;
68154
68155 + err = gr_search_udp_recvmsg(sk, skb);
68156 + if (err)
68157 + goto out_free;
68158 +
68159 ulen = skb->len - sizeof(struct udphdr);
68160 copied = len;
68161 if (copied > ulen)
68162 @@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
68163 if (rc == -ENOMEM) {
68164 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68165 is_udplite);
68166 - atomic_inc(&sk->sk_drops);
68167 + atomic_inc_unchecked(&sk->sk_drops);
68168 }
68169 goto drop;
68170 }
68171 @@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68172 goto csum_error;
68173
68174 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68175 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68176 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68177 +#endif
68178 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68179
68180 /*
68181 @@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
68182 sk_wmem_alloc_get(sp),
68183 sk_rmem_alloc_get(sp),
68184 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68185 - atomic_read(&sp->sk_refcnt), sp,
68186 - atomic_read(&sp->sk_drops), len);
68187 + atomic_read(&sp->sk_refcnt),
68188 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68189 + NULL,
68190 +#else
68191 + sp,
68192 +#endif
68193 + atomic_read_unchecked(&sp->sk_drops), len);
68194 }
68195
68196 int udp4_seq_show(struct seq_file *seq, void *v)
68197 diff -urNp linux-2.6.32.42/net/ipv6/inet6_connection_sock.c linux-2.6.32.42/net/ipv6/inet6_connection_sock.c
68198 --- linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
68199 +++ linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
68200 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
68201 #ifdef CONFIG_XFRM
68202 {
68203 struct rt6_info *rt = (struct rt6_info *)dst;
68204 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68205 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68206 }
68207 #endif
68208 }
68209 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
68210 #ifdef CONFIG_XFRM
68211 if (dst) {
68212 struct rt6_info *rt = (struct rt6_info *)dst;
68213 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68214 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68215 sk->sk_dst_cache = NULL;
68216 dst_release(dst);
68217 dst = NULL;
68218 diff -urNp linux-2.6.32.42/net/ipv6/inet6_hashtables.c linux-2.6.32.42/net/ipv6/inet6_hashtables.c
68219 --- linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68220 +++ linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
68221 @@ -118,7 +118,7 @@ out:
68222 }
68223 EXPORT_SYMBOL(__inet6_lookup_established);
68224
68225 -static int inline compute_score(struct sock *sk, struct net *net,
68226 +static inline int compute_score(struct sock *sk, struct net *net,
68227 const unsigned short hnum,
68228 const struct in6_addr *daddr,
68229 const int dif)
68230 diff -urNp linux-2.6.32.42/net/ipv6/ipv6_sockglue.c linux-2.6.32.42/net/ipv6/ipv6_sockglue.c
68231 --- linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68232 +++ linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68233 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
68234 int val, valbool;
68235 int retv = -ENOPROTOOPT;
68236
68237 + pax_track_stack();
68238 +
68239 if (optval == NULL)
68240 val=0;
68241 else {
68242 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
68243 int len;
68244 int val;
68245
68246 + pax_track_stack();
68247 +
68248 if (ip6_mroute_opt(optname))
68249 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68250
68251 diff -urNp linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c
68252 --- linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
68253 +++ linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
68254 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
68255 private = &tmp;
68256 }
68257 #endif
68258 + memset(&info, 0, sizeof(info));
68259 info.valid_hooks = t->valid_hooks;
68260 memcpy(info.hook_entry, private->hook_entry,
68261 sizeof(info.hook_entry));
68262 diff -urNp linux-2.6.32.42/net/ipv6/raw.c linux-2.6.32.42/net/ipv6/raw.c
68263 --- linux-2.6.32.42/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
68264 +++ linux-2.6.32.42/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
68265 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
68266 {
68267 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
68268 skb_checksum_complete(skb)) {
68269 - atomic_inc(&sk->sk_drops);
68270 + atomic_inc_unchecked(&sk->sk_drops);
68271 kfree_skb(skb);
68272 return NET_RX_DROP;
68273 }
68274
68275 /* Charge it to the socket. */
68276 if (sock_queue_rcv_skb(sk,skb)<0) {
68277 - atomic_inc(&sk->sk_drops);
68278 + atomic_inc_unchecked(&sk->sk_drops);
68279 kfree_skb(skb);
68280 return NET_RX_DROP;
68281 }
68282 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68283 struct raw6_sock *rp = raw6_sk(sk);
68284
68285 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68286 - atomic_inc(&sk->sk_drops);
68287 + atomic_inc_unchecked(&sk->sk_drops);
68288 kfree_skb(skb);
68289 return NET_RX_DROP;
68290 }
68291 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68292
68293 if (inet->hdrincl) {
68294 if (skb_checksum_complete(skb)) {
68295 - atomic_inc(&sk->sk_drops);
68296 + atomic_inc_unchecked(&sk->sk_drops);
68297 kfree_skb(skb);
68298 return NET_RX_DROP;
68299 }
68300 @@ -518,7 +518,7 @@ csum_copy_err:
68301 as some normal condition.
68302 */
68303 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
68304 - atomic_inc(&sk->sk_drops);
68305 + atomic_inc_unchecked(&sk->sk_drops);
68306 goto out;
68307 }
68308
68309 @@ -600,7 +600,7 @@ out:
68310 return err;
68311 }
68312
68313 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68314 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68315 struct flowi *fl, struct rt6_info *rt,
68316 unsigned int flags)
68317 {
68318 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
68319 u16 proto;
68320 int err;
68321
68322 + pax_track_stack();
68323 +
68324 /* Rough check on arithmetic overflow,
68325 better check is made in ip6_append_data().
68326 */
68327 @@ -916,12 +918,17 @@ do_confirm:
68328 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68329 char __user *optval, int optlen)
68330 {
68331 + struct icmp6_filter filter;
68332 +
68333 switch (optname) {
68334 case ICMPV6_FILTER:
68335 + if (optlen < 0)
68336 + return -EINVAL;
68337 if (optlen > sizeof(struct icmp6_filter))
68338 optlen = sizeof(struct icmp6_filter);
68339 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68340 + if (copy_from_user(&filter, optval, optlen))
68341 return -EFAULT;
68342 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68343 return 0;
68344 default:
68345 return -ENOPROTOOPT;
68346 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
68347 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
68348 char __user *optval, int __user *optlen)
68349 {
68350 + struct icmp6_filter filter;
68351 int len;
68352
68353 switch (optname) {
68354 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
68355 len = sizeof(struct icmp6_filter);
68356 if (put_user(len, optlen))
68357 return -EFAULT;
68358 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68359 + memcpy(&filter, &raw6_sk(sk)->filter, len);
68360 + if (copy_to_user(optval, &filter, len))
68361 return -EFAULT;
68362 return 0;
68363 default:
68364 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
68365 0, 0L, 0,
68366 sock_i_uid(sp), 0,
68367 sock_i_ino(sp),
68368 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68369 + atomic_read(&sp->sk_refcnt),
68370 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68371 + NULL,
68372 +#else
68373 + sp,
68374 +#endif
68375 + atomic_read_unchecked(&sp->sk_drops));
68376 }
68377
68378 static int raw6_seq_show(struct seq_file *seq, void *v)
68379 diff -urNp linux-2.6.32.42/net/ipv6/tcp_ipv6.c linux-2.6.32.42/net/ipv6/tcp_ipv6.c
68380 --- linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
68381 +++ linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
68382 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68383 }
68384 #endif
68385
68386 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68387 +extern int grsec_enable_blackhole;
68388 +#endif
68389 +
68390 static void tcp_v6_hash(struct sock *sk)
68391 {
68392 if (sk->sk_state != TCP_CLOSE) {
68393 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68394 return 0;
68395
68396 reset:
68397 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68398 + if (!grsec_enable_blackhole)
68399 +#endif
68400 tcp_v6_send_reset(sk, skb);
68401 discard:
68402 if (opt_skb)
68403 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68404 TCP_SKB_CB(skb)->sacked = 0;
68405
68406 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68407 - if (!sk)
68408 + if (!sk) {
68409 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68410 + ret = 1;
68411 +#endif
68412 goto no_tcp_socket;
68413 + }
68414
68415 process:
68416 - if (sk->sk_state == TCP_TIME_WAIT)
68417 + if (sk->sk_state == TCP_TIME_WAIT) {
68418 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68419 + ret = 2;
68420 +#endif
68421 goto do_time_wait;
68422 + }
68423
68424 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
68425 goto discard_and_relse;
68426 @@ -1700,6 +1715,10 @@ no_tcp_socket:
68427 bad_packet:
68428 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68429 } else {
68430 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68431 + if (!grsec_enable_blackhole || (ret == 1 &&
68432 + (skb->dev->flags & IFF_LOOPBACK)))
68433 +#endif
68434 tcp_v6_send_reset(NULL, skb);
68435 }
68436
68437 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
68438 uid,
68439 0, /* non standard timer */
68440 0, /* open_requests have no inode */
68441 - 0, req);
68442 + 0,
68443 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68444 + NULL
68445 +#else
68446 + req
68447 +#endif
68448 + );
68449 }
68450
68451 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68452 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
68453 sock_i_uid(sp),
68454 icsk->icsk_probes_out,
68455 sock_i_ino(sp),
68456 - atomic_read(&sp->sk_refcnt), sp,
68457 + atomic_read(&sp->sk_refcnt),
68458 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68459 + NULL,
68460 +#else
68461 + sp,
68462 +#endif
68463 jiffies_to_clock_t(icsk->icsk_rto),
68464 jiffies_to_clock_t(icsk->icsk_ack.ato),
68465 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68466 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
68467 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68468 tw->tw_substate, 0, 0,
68469 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68470 - atomic_read(&tw->tw_refcnt), tw);
68471 + atomic_read(&tw->tw_refcnt),
68472 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68473 + NULL
68474 +#else
68475 + tw
68476 +#endif
68477 + );
68478 }
68479
68480 static int tcp6_seq_show(struct seq_file *seq, void *v)
68481 diff -urNp linux-2.6.32.42/net/ipv6/udp.c linux-2.6.32.42/net/ipv6/udp.c
68482 --- linux-2.6.32.42/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
68483 +++ linux-2.6.32.42/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
68484 @@ -49,6 +49,10 @@
68485 #include <linux/seq_file.h>
68486 #include "udp_impl.h"
68487
68488 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68489 +extern int grsec_enable_blackhole;
68490 +#endif
68491 +
68492 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68493 {
68494 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68495 @@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68496 if (rc == -ENOMEM) {
68497 UDP6_INC_STATS_BH(sock_net(sk),
68498 UDP_MIB_RCVBUFERRORS, is_udplite);
68499 - atomic_inc(&sk->sk_drops);
68500 + atomic_inc_unchecked(&sk->sk_drops);
68501 }
68502 goto drop;
68503 }
68504 @@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68505 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68506 proto == IPPROTO_UDPLITE);
68507
68508 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68509 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68510 +#endif
68511 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
68512
68513 kfree_skb(skb);
68514 @@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
68515 0, 0L, 0,
68516 sock_i_uid(sp), 0,
68517 sock_i_ino(sp),
68518 - atomic_read(&sp->sk_refcnt), sp,
68519 - atomic_read(&sp->sk_drops));
68520 + atomic_read(&sp->sk_refcnt),
68521 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68522 + NULL,
68523 +#else
68524 + sp,
68525 +#endif
68526 + atomic_read_unchecked(&sp->sk_drops));
68527 }
68528
68529 int udp6_seq_show(struct seq_file *seq, void *v)
68530 diff -urNp linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c
68531 --- linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
68532 +++ linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
68533 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
68534 add_wait_queue(&self->open_wait, &wait);
68535
68536 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68537 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68538 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68539
68540 /* As far as I can see, we protect open_count - Jean II */
68541 spin_lock_irqsave(&self->spinlock, flags);
68542 if (!tty_hung_up_p(filp)) {
68543 extra_count = 1;
68544 - self->open_count--;
68545 + local_dec(&self->open_count);
68546 }
68547 spin_unlock_irqrestore(&self->spinlock, flags);
68548 - self->blocked_open++;
68549 + local_inc(&self->blocked_open);
68550
68551 while (1) {
68552 if (tty->termios->c_cflag & CBAUD) {
68553 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
68554 }
68555
68556 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68557 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68558 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68559
68560 schedule();
68561 }
68562 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
68563 if (extra_count) {
68564 /* ++ is not atomic, so this should be protected - Jean II */
68565 spin_lock_irqsave(&self->spinlock, flags);
68566 - self->open_count++;
68567 + local_inc(&self->open_count);
68568 spin_unlock_irqrestore(&self->spinlock, flags);
68569 }
68570 - self->blocked_open--;
68571 + local_dec(&self->blocked_open);
68572
68573 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68574 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68575 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68576
68577 if (!retval)
68578 self->flags |= ASYNC_NORMAL_ACTIVE;
68579 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
68580 }
68581 /* ++ is not atomic, so this should be protected - Jean II */
68582 spin_lock_irqsave(&self->spinlock, flags);
68583 - self->open_count++;
68584 + local_inc(&self->open_count);
68585
68586 tty->driver_data = self;
68587 self->tty = tty;
68588 spin_unlock_irqrestore(&self->spinlock, flags);
68589
68590 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68591 - self->line, self->open_count);
68592 + self->line, local_read(&self->open_count));
68593
68594 /* Not really used by us, but lets do it anyway */
68595 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68596 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68597 return;
68598 }
68599
68600 - if ((tty->count == 1) && (self->open_count != 1)) {
68601 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68602 /*
68603 * Uh, oh. tty->count is 1, which means that the tty
68604 * structure will be freed. state->count should always
68605 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68606 */
68607 IRDA_DEBUG(0, "%s(), bad serial port count; "
68608 "tty->count is 1, state->count is %d\n", __func__ ,
68609 - self->open_count);
68610 - self->open_count = 1;
68611 + local_read(&self->open_count));
68612 + local_set(&self->open_count, 1);
68613 }
68614
68615 - if (--self->open_count < 0) {
68616 + if (local_dec_return(&self->open_count) < 0) {
68617 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68618 - __func__, self->line, self->open_count);
68619 - self->open_count = 0;
68620 + __func__, self->line, local_read(&self->open_count));
68621 + local_set(&self->open_count, 0);
68622 }
68623 - if (self->open_count) {
68624 + if (local_read(&self->open_count)) {
68625 spin_unlock_irqrestore(&self->spinlock, flags);
68626
68627 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68628 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68629 tty->closing = 0;
68630 self->tty = NULL;
68631
68632 - if (self->blocked_open) {
68633 + if (local_read(&self->blocked_open)) {
68634 if (self->close_delay)
68635 schedule_timeout_interruptible(self->close_delay);
68636 wake_up_interruptible(&self->open_wait);
68637 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
68638 spin_lock_irqsave(&self->spinlock, flags);
68639 self->flags &= ~ASYNC_NORMAL_ACTIVE;
68640 self->tty = NULL;
68641 - self->open_count = 0;
68642 + local_set(&self->open_count, 0);
68643 spin_unlock_irqrestore(&self->spinlock, flags);
68644
68645 wake_up_interruptible(&self->open_wait);
68646 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
68647 seq_putc(m, '\n');
68648
68649 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
68650 - seq_printf(m, "Open count: %d\n", self->open_count);
68651 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
68652 seq_printf(m, "Max data size: %d\n", self->max_data_size);
68653 seq_printf(m, "Max header size: %d\n", self->max_header_size);
68654
68655 diff -urNp linux-2.6.32.42/net/iucv/af_iucv.c linux-2.6.32.42/net/iucv/af_iucv.c
68656 --- linux-2.6.32.42/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
68657 +++ linux-2.6.32.42/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
68658 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
68659
68660 write_lock_bh(&iucv_sk_list.lock);
68661
68662 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
68663 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68664 while (__iucv_get_sock_by_name(name)) {
68665 sprintf(name, "%08x",
68666 - atomic_inc_return(&iucv_sk_list.autobind_name));
68667 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68668 }
68669
68670 write_unlock_bh(&iucv_sk_list.lock);
68671 diff -urNp linux-2.6.32.42/net/key/af_key.c linux-2.6.32.42/net/key/af_key.c
68672 --- linux-2.6.32.42/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
68673 +++ linux-2.6.32.42/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
68674 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
68675 struct xfrm_migrate m[XFRM_MAX_DEPTH];
68676 struct xfrm_kmaddress k;
68677
68678 + pax_track_stack();
68679 +
68680 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
68681 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
68682 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
68683 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
68684 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
68685 else
68686 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
68687 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68688 + NULL,
68689 +#else
68690 s,
68691 +#endif
68692 atomic_read(&s->sk_refcnt),
68693 sk_rmem_alloc_get(s),
68694 sk_wmem_alloc_get(s),
68695 diff -urNp linux-2.6.32.42/net/mac80211/cfg.c linux-2.6.32.42/net/mac80211/cfg.c
68696 --- linux-2.6.32.42/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
68697 +++ linux-2.6.32.42/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
68698 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
68699 return err;
68700 }
68701
68702 -struct cfg80211_ops mac80211_config_ops = {
68703 +const struct cfg80211_ops mac80211_config_ops = {
68704 .add_virtual_intf = ieee80211_add_iface,
68705 .del_virtual_intf = ieee80211_del_iface,
68706 .change_virtual_intf = ieee80211_change_iface,
68707 diff -urNp linux-2.6.32.42/net/mac80211/cfg.h linux-2.6.32.42/net/mac80211/cfg.h
68708 --- linux-2.6.32.42/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
68709 +++ linux-2.6.32.42/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
68710 @@ -4,6 +4,6 @@
68711 #ifndef __CFG_H
68712 #define __CFG_H
68713
68714 -extern struct cfg80211_ops mac80211_config_ops;
68715 +extern const struct cfg80211_ops mac80211_config_ops;
68716
68717 #endif /* __CFG_H */
68718 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_key.c linux-2.6.32.42/net/mac80211/debugfs_key.c
68719 --- linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
68720 +++ linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
68721 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
68722 size_t count, loff_t *ppos)
68723 {
68724 struct ieee80211_key *key = file->private_data;
68725 - int i, res, bufsize = 2 * key->conf.keylen + 2;
68726 + int i, bufsize = 2 * key->conf.keylen + 2;
68727 char *buf = kmalloc(bufsize, GFP_KERNEL);
68728 char *p = buf;
68729 + ssize_t res;
68730 +
68731 + if (buf == NULL)
68732 + return -ENOMEM;
68733
68734 for (i = 0; i < key->conf.keylen; i++)
68735 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
68736 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_sta.c linux-2.6.32.42/net/mac80211/debugfs_sta.c
68737 --- linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
68738 +++ linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
68739 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
68740 int i;
68741 struct sta_info *sta = file->private_data;
68742
68743 + pax_track_stack();
68744 +
68745 spin_lock_bh(&sta->lock);
68746 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
68747 sta->ampdu_mlme.dialog_token_allocator + 1);
68748 diff -urNp linux-2.6.32.42/net/mac80211/ieee80211_i.h linux-2.6.32.42/net/mac80211/ieee80211_i.h
68749 --- linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
68750 +++ linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
68751 @@ -25,6 +25,7 @@
68752 #include <linux/etherdevice.h>
68753 #include <net/cfg80211.h>
68754 #include <net/mac80211.h>
68755 +#include <asm/local.h>
68756 #include "key.h"
68757 #include "sta_info.h"
68758
68759 @@ -635,7 +636,7 @@ struct ieee80211_local {
68760 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
68761 spinlock_t queue_stop_reason_lock;
68762
68763 - int open_count;
68764 + local_t open_count;
68765 int monitors, cooked_mntrs;
68766 /* number of interfaces with corresponding FIF_ flags */
68767 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
68768 diff -urNp linux-2.6.32.42/net/mac80211/iface.c linux-2.6.32.42/net/mac80211/iface.c
68769 --- linux-2.6.32.42/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
68770 +++ linux-2.6.32.42/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
68771 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
68772 break;
68773 }
68774
68775 - if (local->open_count == 0) {
68776 + if (local_read(&local->open_count) == 0) {
68777 res = drv_start(local);
68778 if (res)
68779 goto err_del_bss;
68780 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
68781 * Validate the MAC address for this device.
68782 */
68783 if (!is_valid_ether_addr(dev->dev_addr)) {
68784 - if (!local->open_count)
68785 + if (!local_read(&local->open_count))
68786 drv_stop(local);
68787 return -EADDRNOTAVAIL;
68788 }
68789 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
68790
68791 hw_reconf_flags |= __ieee80211_recalc_idle(local);
68792
68793 - local->open_count++;
68794 + local_inc(&local->open_count);
68795 if (hw_reconf_flags) {
68796 ieee80211_hw_config(local, hw_reconf_flags);
68797 /*
68798 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
68799 err_del_interface:
68800 drv_remove_interface(local, &conf);
68801 err_stop:
68802 - if (!local->open_count)
68803 + if (!local_read(&local->open_count))
68804 drv_stop(local);
68805 err_del_bss:
68806 sdata->bss = NULL;
68807 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
68808 WARN_ON(!list_empty(&sdata->u.ap.vlans));
68809 }
68810
68811 - local->open_count--;
68812 + local_dec(&local->open_count);
68813
68814 switch (sdata->vif.type) {
68815 case NL80211_IFTYPE_AP_VLAN:
68816 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
68817
68818 ieee80211_recalc_ps(local, -1);
68819
68820 - if (local->open_count == 0) {
68821 + if (local_read(&local->open_count) == 0) {
68822 ieee80211_clear_tx_pending(local);
68823 ieee80211_stop_device(local);
68824
68825 diff -urNp linux-2.6.32.42/net/mac80211/main.c linux-2.6.32.42/net/mac80211/main.c
68826 --- linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
68827 +++ linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
68828 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
68829 local->hw.conf.power_level = power;
68830 }
68831
68832 - if (changed && local->open_count) {
68833 + if (changed && local_read(&local->open_count)) {
68834 ret = drv_config(local, changed);
68835 /*
68836 * Goal:
68837 diff -urNp linux-2.6.32.42/net/mac80211/mlme.c linux-2.6.32.42/net/mac80211/mlme.c
68838 --- linux-2.6.32.42/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
68839 +++ linux-2.6.32.42/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
68840 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
68841 bool have_higher_than_11mbit = false, newsta = false;
68842 u16 ap_ht_cap_flags;
68843
68844 + pax_track_stack();
68845 +
68846 /*
68847 * AssocResp and ReassocResp have identical structure, so process both
68848 * of them in this function.
68849 diff -urNp linux-2.6.32.42/net/mac80211/pm.c linux-2.6.32.42/net/mac80211/pm.c
68850 --- linux-2.6.32.42/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
68851 +++ linux-2.6.32.42/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
68852 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
68853 }
68854
68855 /* stop hardware - this must stop RX */
68856 - if (local->open_count)
68857 + if (local_read(&local->open_count))
68858 ieee80211_stop_device(local);
68859
68860 local->suspended = true;
68861 diff -urNp linux-2.6.32.42/net/mac80211/rate.c linux-2.6.32.42/net/mac80211/rate.c
68862 --- linux-2.6.32.42/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
68863 +++ linux-2.6.32.42/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
68864 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
68865 struct rate_control_ref *ref, *old;
68866
68867 ASSERT_RTNL();
68868 - if (local->open_count)
68869 + if (local_read(&local->open_count))
68870 return -EBUSY;
68871
68872 ref = rate_control_alloc(name, local);
68873 diff -urNp linux-2.6.32.42/net/mac80211/tx.c linux-2.6.32.42/net/mac80211/tx.c
68874 --- linux-2.6.32.42/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
68875 +++ linux-2.6.32.42/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
68876 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
68877 return cpu_to_le16(dur);
68878 }
68879
68880 -static int inline is_ieee80211_device(struct ieee80211_local *local,
68881 +static inline int is_ieee80211_device(struct ieee80211_local *local,
68882 struct net_device *dev)
68883 {
68884 return local == wdev_priv(dev->ieee80211_ptr);
68885 diff -urNp linux-2.6.32.42/net/mac80211/util.c linux-2.6.32.42/net/mac80211/util.c
68886 --- linux-2.6.32.42/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
68887 +++ linux-2.6.32.42/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
68888 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
68889 local->resuming = true;
68890
68891 /* restart hardware */
68892 - if (local->open_count) {
68893 + if (local_read(&local->open_count)) {
68894 /*
68895 * Upon resume hardware can sometimes be goofy due to
68896 * various platform / driver / bus issues, so restarting
68897 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c
68898 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
68899 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
68900 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
68901 .open = ip_vs_app_open,
68902 .read = seq_read,
68903 .llseek = seq_lseek,
68904 - .release = seq_release,
68905 + .release = seq_release_net,
68906 };
68907 #endif
68908
68909 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c
68910 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
68911 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
68912 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
68913 /* if the connection is not template and is created
68914 * by sync, preserve the activity flag.
68915 */
68916 - cp->flags |= atomic_read(&dest->conn_flags) &
68917 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
68918 (~IP_VS_CONN_F_INACTIVE);
68919 else
68920 - cp->flags |= atomic_read(&dest->conn_flags);
68921 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
68922 cp->dest = dest;
68923
68924 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
68925 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
68926 atomic_set(&cp->refcnt, 1);
68927
68928 atomic_set(&cp->n_control, 0);
68929 - atomic_set(&cp->in_pkts, 0);
68930 + atomic_set_unchecked(&cp->in_pkts, 0);
68931
68932 atomic_inc(&ip_vs_conn_count);
68933 if (flags & IP_VS_CONN_F_NO_CPORT)
68934 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
68935 .open = ip_vs_conn_open,
68936 .read = seq_read,
68937 .llseek = seq_lseek,
68938 - .release = seq_release,
68939 + .release = seq_release_net,
68940 };
68941
68942 static const char *ip_vs_origin_name(unsigned flags)
68943 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
68944 .open = ip_vs_conn_sync_open,
68945 .read = seq_read,
68946 .llseek = seq_lseek,
68947 - .release = seq_release,
68948 + .release = seq_release_net,
68949 };
68950
68951 #endif
68952 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
68953
68954 /* Don't drop the entry if its number of incoming packets is not
68955 located in [0, 8] */
68956 - i = atomic_read(&cp->in_pkts);
68957 + i = atomic_read_unchecked(&cp->in_pkts);
68958 if (i > 8 || i < 0) return 0;
68959
68960 if (!todrop_rate[i]) return 0;
68961 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c
68962 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
68963 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
68964 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
68965 ret = cp->packet_xmit(skb, cp, pp);
68966 /* do not touch skb anymore */
68967
68968 - atomic_inc(&cp->in_pkts);
68969 + atomic_inc_unchecked(&cp->in_pkts);
68970 ip_vs_conn_put(cp);
68971 return ret;
68972 }
68973 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
68974 * Sync connection if it is about to close to
68975 * encorage the standby servers to update the connections timeout
68976 */
68977 - pkts = atomic_add_return(1, &cp->in_pkts);
68978 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
68979 if (af == AF_INET &&
68980 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
68981 (((cp->protocol != IPPROTO_TCP ||
68982 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c
68983 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
68984 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
68985 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
68986 ip_vs_rs_hash(dest);
68987 write_unlock_bh(&__ip_vs_rs_lock);
68988 }
68989 - atomic_set(&dest->conn_flags, conn_flags);
68990 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
68991
68992 /* bind the service */
68993 if (!dest->svc) {
68994 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
68995 " %-7s %-6d %-10d %-10d\n",
68996 &dest->addr.in6,
68997 ntohs(dest->port),
68998 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68999 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69000 atomic_read(&dest->weight),
69001 atomic_read(&dest->activeconns),
69002 atomic_read(&dest->inactconns));
69003 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
69004 "%-7s %-6d %-10d %-10d\n",
69005 ntohl(dest->addr.ip),
69006 ntohs(dest->port),
69007 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69008 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69009 atomic_read(&dest->weight),
69010 atomic_read(&dest->activeconns),
69011 atomic_read(&dest->inactconns));
69012 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
69013 .open = ip_vs_info_open,
69014 .read = seq_read,
69015 .llseek = seq_lseek,
69016 - .release = seq_release_private,
69017 + .release = seq_release_net,
69018 };
69019
69020 #endif
69021 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
69022 .open = ip_vs_stats_seq_open,
69023 .read = seq_read,
69024 .llseek = seq_lseek,
69025 - .release = single_release,
69026 + .release = single_release_net,
69027 };
69028
69029 #endif
69030 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
69031
69032 entry.addr = dest->addr.ip;
69033 entry.port = dest->port;
69034 - entry.conn_flags = atomic_read(&dest->conn_flags);
69035 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69036 entry.weight = atomic_read(&dest->weight);
69037 entry.u_threshold = dest->u_threshold;
69038 entry.l_threshold = dest->l_threshold;
69039 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
69040 unsigned char arg[128];
69041 int ret = 0;
69042
69043 + pax_track_stack();
69044 +
69045 if (!capable(CAP_NET_ADMIN))
69046 return -EPERM;
69047
69048 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
69049 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69050
69051 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69052 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69053 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69054 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69055 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69056 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69057 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c
69058 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
69059 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
69060 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
69061
69062 if (opt)
69063 memcpy(&cp->in_seq, opt, sizeof(*opt));
69064 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69065 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69066 cp->state = state;
69067 cp->old_state = cp->state;
69068 /*
69069 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c
69070 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
69071 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
69072 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69073 else
69074 rc = NF_ACCEPT;
69075 /* do not touch skb anymore */
69076 - atomic_inc(&cp->in_pkts);
69077 + atomic_inc_unchecked(&cp->in_pkts);
69078 goto out;
69079 }
69080
69081 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69082 else
69083 rc = NF_ACCEPT;
69084 /* do not touch skb anymore */
69085 - atomic_inc(&cp->in_pkts);
69086 + atomic_inc_unchecked(&cp->in_pkts);
69087 goto out;
69088 }
69089
69090 diff -urNp linux-2.6.32.42/net/netfilter/Kconfig linux-2.6.32.42/net/netfilter/Kconfig
69091 --- linux-2.6.32.42/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
69092 +++ linux-2.6.32.42/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
69093 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
69094
69095 To compile it as a module, choose M here. If unsure, say N.
69096
69097 +config NETFILTER_XT_MATCH_GRADM
69098 + tristate '"gradm" match support'
69099 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69100 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69101 + ---help---
69102 + The gradm match allows to match on grsecurity RBAC being enabled.
69103 + It is useful when iptables rules are applied early on bootup to
69104 + prevent connections to the machine (except from a trusted host)
69105 + while the RBAC system is disabled.
69106 +
69107 config NETFILTER_XT_MATCH_HASHLIMIT
69108 tristate '"hashlimit" match support'
69109 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
69110 diff -urNp linux-2.6.32.42/net/netfilter/Makefile linux-2.6.32.42/net/netfilter/Makefile
69111 --- linux-2.6.32.42/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
69112 +++ linux-2.6.32.42/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
69113 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
69114 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
69115 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69116 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69117 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69118 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69119 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69120 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69121 diff -urNp linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c
69122 --- linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
69123 +++ linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
69124 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
69125 static int
69126 ctnetlink_parse_tuple(const struct nlattr * const cda[],
69127 struct nf_conntrack_tuple *tuple,
69128 - enum ctattr_tuple type, u_int8_t l3num)
69129 + enum ctattr_type type, u_int8_t l3num)
69130 {
69131 struct nlattr *tb[CTA_TUPLE_MAX+1];
69132 int err;
69133 diff -urNp linux-2.6.32.42/net/netfilter/nfnetlink_log.c linux-2.6.32.42/net/netfilter/nfnetlink_log.c
69134 --- linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
69135 +++ linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
69136 @@ -68,7 +68,7 @@ struct nfulnl_instance {
69137 };
69138
69139 static DEFINE_RWLOCK(instances_lock);
69140 -static atomic_t global_seq;
69141 +static atomic_unchecked_t global_seq;
69142
69143 #define INSTANCE_BUCKETS 16
69144 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69145 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
69146 /* global sequence number */
69147 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69148 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69149 - htonl(atomic_inc_return(&global_seq)));
69150 + htonl(atomic_inc_return_unchecked(&global_seq)));
69151
69152 if (data_len) {
69153 struct nlattr *nla;
69154 diff -urNp linux-2.6.32.42/net/netfilter/xt_gradm.c linux-2.6.32.42/net/netfilter/xt_gradm.c
69155 --- linux-2.6.32.42/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69156 +++ linux-2.6.32.42/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
69157 @@ -0,0 +1,51 @@
69158 +/*
69159 + * gradm match for netfilter
69160 + * Copyright © Zbigniew Krzystolik, 2010
69161 + *
69162 + * This program is free software; you can redistribute it and/or modify
69163 + * it under the terms of the GNU General Public License; either version
69164 + * 2 or 3 as published by the Free Software Foundation.
69165 + */
69166 +#include <linux/module.h>
69167 +#include <linux/moduleparam.h>
69168 +#include <linux/skbuff.h>
69169 +#include <linux/netfilter/x_tables.h>
69170 +#include <linux/grsecurity.h>
69171 +#include <linux/netfilter/xt_gradm.h>
69172 +
69173 +static bool
69174 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
69175 +{
69176 + const struct xt_gradm_mtinfo *info = par->matchinfo;
69177 + bool retval = false;
69178 + if (gr_acl_is_enabled())
69179 + retval = true;
69180 + return retval ^ info->invflags;
69181 +}
69182 +
69183 +static struct xt_match gradm_mt_reg __read_mostly = {
69184 + .name = "gradm",
69185 + .revision = 0,
69186 + .family = NFPROTO_UNSPEC,
69187 + .match = gradm_mt,
69188 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
69189 + .me = THIS_MODULE,
69190 +};
69191 +
69192 +static int __init gradm_mt_init(void)
69193 +{
69194 + return xt_register_match(&gradm_mt_reg);
69195 +}
69196 +
69197 +static void __exit gradm_mt_exit(void)
69198 +{
69199 + xt_unregister_match(&gradm_mt_reg);
69200 +}
69201 +
69202 +module_init(gradm_mt_init);
69203 +module_exit(gradm_mt_exit);
69204 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
69205 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
69206 +MODULE_LICENSE("GPL");
69207 +MODULE_ALIAS("ipt_gradm");
69208 +MODULE_ALIAS("ip6t_gradm");
69209 diff -urNp linux-2.6.32.42/net/netlink/af_netlink.c linux-2.6.32.42/net/netlink/af_netlink.c
69210 --- linux-2.6.32.42/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
69211 +++ linux-2.6.32.42/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
69212 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
69213 sk->sk_error_report(sk);
69214 }
69215 }
69216 - atomic_inc(&sk->sk_drops);
69217 + atomic_inc_unchecked(&sk->sk_drops);
69218 }
69219
69220 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
69221 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
69222 struct netlink_sock *nlk = nlk_sk(s);
69223
69224 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
69225 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69226 + NULL,
69227 +#else
69228 s,
69229 +#endif
69230 s->sk_protocol,
69231 nlk->pid,
69232 nlk->groups ? (u32)nlk->groups[0] : 0,
69233 sk_rmem_alloc_get(s),
69234 sk_wmem_alloc_get(s),
69235 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69236 + NULL,
69237 +#else
69238 nlk->cb,
69239 +#endif
69240 atomic_read(&s->sk_refcnt),
69241 - atomic_read(&s->sk_drops)
69242 + atomic_read_unchecked(&s->sk_drops)
69243 );
69244
69245 }
69246 diff -urNp linux-2.6.32.42/net/netrom/af_netrom.c linux-2.6.32.42/net/netrom/af_netrom.c
69247 --- linux-2.6.32.42/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
69248 +++ linux-2.6.32.42/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
69249 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
69250 struct sock *sk = sock->sk;
69251 struct nr_sock *nr = nr_sk(sk);
69252
69253 + memset(sax, 0, sizeof(*sax));
69254 lock_sock(sk);
69255 if (peer != 0) {
69256 if (sk->sk_state != TCP_ESTABLISHED) {
69257 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
69258 *uaddr_len = sizeof(struct full_sockaddr_ax25);
69259 } else {
69260 sax->fsa_ax25.sax25_family = AF_NETROM;
69261 - sax->fsa_ax25.sax25_ndigis = 0;
69262 sax->fsa_ax25.sax25_call = nr->source_addr;
69263 *uaddr_len = sizeof(struct sockaddr_ax25);
69264 }
69265 diff -urNp linux-2.6.32.42/net/packet/af_packet.c linux-2.6.32.42/net/packet/af_packet.c
69266 --- linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 17:00:52.000000000 -0400
69267 +++ linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 15:56:46.000000000 -0400
69268 @@ -2427,7 +2427,11 @@ static int packet_seq_show(struct seq_fi
69269
69270 seq_printf(seq,
69271 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
69272 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69273 + NULL,
69274 +#else
69275 s,
69276 +#endif
69277 atomic_read(&s->sk_refcnt),
69278 s->sk_type,
69279 ntohs(po->num),
69280 diff -urNp linux-2.6.32.42/net/phonet/af_phonet.c linux-2.6.32.42/net/phonet/af_phonet.c
69281 --- linux-2.6.32.42/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
69282 +++ linux-2.6.32.42/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
69283 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
69284 {
69285 struct phonet_protocol *pp;
69286
69287 - if (protocol >= PHONET_NPROTO)
69288 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69289 return NULL;
69290
69291 spin_lock(&proto_tab_lock);
69292 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
69293 {
69294 int err = 0;
69295
69296 - if (protocol >= PHONET_NPROTO)
69297 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69298 return -EINVAL;
69299
69300 err = proto_register(pp->prot, 1);
69301 diff -urNp linux-2.6.32.42/net/phonet/datagram.c linux-2.6.32.42/net/phonet/datagram.c
69302 --- linux-2.6.32.42/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
69303 +++ linux-2.6.32.42/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
69304 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
69305 if (err < 0) {
69306 kfree_skb(skb);
69307 if (err == -ENOMEM)
69308 - atomic_inc(&sk->sk_drops);
69309 + atomic_inc_unchecked(&sk->sk_drops);
69310 }
69311 return err ? NET_RX_DROP : NET_RX_SUCCESS;
69312 }
69313 diff -urNp linux-2.6.32.42/net/phonet/pep.c linux-2.6.32.42/net/phonet/pep.c
69314 --- linux-2.6.32.42/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
69315 +++ linux-2.6.32.42/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
69316 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
69317
69318 case PNS_PEP_CTRL_REQ:
69319 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
69320 - atomic_inc(&sk->sk_drops);
69321 + atomic_inc_unchecked(&sk->sk_drops);
69322 break;
69323 }
69324 __skb_pull(skb, 4);
69325 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
69326 if (!err)
69327 return 0;
69328 if (err == -ENOMEM)
69329 - atomic_inc(&sk->sk_drops);
69330 + atomic_inc_unchecked(&sk->sk_drops);
69331 break;
69332 }
69333
69334 if (pn->rx_credits == 0) {
69335 - atomic_inc(&sk->sk_drops);
69336 + atomic_inc_unchecked(&sk->sk_drops);
69337 err = -ENOBUFS;
69338 break;
69339 }
69340 diff -urNp linux-2.6.32.42/net/phonet/socket.c linux-2.6.32.42/net/phonet/socket.c
69341 --- linux-2.6.32.42/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
69342 +++ linux-2.6.32.42/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
69343 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
69344 sk->sk_state,
69345 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69346 sock_i_uid(sk), sock_i_ino(sk),
69347 - atomic_read(&sk->sk_refcnt), sk,
69348 - atomic_read(&sk->sk_drops), &len);
69349 + atomic_read(&sk->sk_refcnt),
69350 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69351 + NULL,
69352 +#else
69353 + sk,
69354 +#endif
69355 + atomic_read_unchecked(&sk->sk_drops), &len);
69356 }
69357 seq_printf(seq, "%*s\n", 127 - len, "");
69358 return 0;
69359 diff -urNp linux-2.6.32.42/net/rds/cong.c linux-2.6.32.42/net/rds/cong.c
69360 --- linux-2.6.32.42/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
69361 +++ linux-2.6.32.42/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
69362 @@ -77,7 +77,7 @@
69363 * finds that the saved generation number is smaller than the global generation
69364 * number, it wakes up the process.
69365 */
69366 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
69367 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
69368
69369 /*
69370 * Congestion monitoring
69371 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69372 rdsdebug("waking map %p for %pI4\n",
69373 map, &map->m_addr);
69374 rds_stats_inc(s_cong_update_received);
69375 - atomic_inc(&rds_cong_generation);
69376 + atomic_inc_unchecked(&rds_cong_generation);
69377 if (waitqueue_active(&map->m_waitq))
69378 wake_up(&map->m_waitq);
69379 if (waitqueue_active(&rds_poll_waitq))
69380 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69381
69382 int rds_cong_updated_since(unsigned long *recent)
69383 {
69384 - unsigned long gen = atomic_read(&rds_cong_generation);
69385 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69386
69387 if (likely(*recent == gen))
69388 return 0;
69389 diff -urNp linux-2.6.32.42/net/rds/iw_rdma.c linux-2.6.32.42/net/rds/iw_rdma.c
69390 --- linux-2.6.32.42/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
69391 +++ linux-2.6.32.42/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
69392 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69393 struct rdma_cm_id *pcm_id;
69394 int rc;
69395
69396 + pax_track_stack();
69397 +
69398 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69399 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69400
69401 diff -urNp linux-2.6.32.42/net/rds/Kconfig linux-2.6.32.42/net/rds/Kconfig
69402 --- linux-2.6.32.42/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
69403 +++ linux-2.6.32.42/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
69404 @@ -1,7 +1,7 @@
69405
69406 config RDS
69407 tristate "The RDS Protocol (EXPERIMENTAL)"
69408 - depends on INET && EXPERIMENTAL
69409 + depends on INET && EXPERIMENTAL && BROKEN
69410 ---help---
69411 The RDS (Reliable Datagram Sockets) protocol provides reliable,
69412 sequenced delivery of datagrams over Infiniband, iWARP,
69413 diff -urNp linux-2.6.32.42/net/rxrpc/af_rxrpc.c linux-2.6.32.42/net/rxrpc/af_rxrpc.c
69414 --- linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
69415 +++ linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
69416 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
69417 __be32 rxrpc_epoch;
69418
69419 /* current debugging ID */
69420 -atomic_t rxrpc_debug_id;
69421 +atomic_unchecked_t rxrpc_debug_id;
69422
69423 /* count of skbs currently in use */
69424 atomic_t rxrpc_n_skbs;
69425 diff -urNp linux-2.6.32.42/net/rxrpc/ar-ack.c linux-2.6.32.42/net/rxrpc/ar-ack.c
69426 --- linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
69427 +++ linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
69428 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
69429
69430 _enter("{%d,%d,%d,%d},",
69431 call->acks_hard, call->acks_unacked,
69432 - atomic_read(&call->sequence),
69433 + atomic_read_unchecked(&call->sequence),
69434 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69435
69436 stop = 0;
69437 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
69438
69439 /* each Tx packet has a new serial number */
69440 sp->hdr.serial =
69441 - htonl(atomic_inc_return(&call->conn->serial));
69442 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
69443
69444 hdr = (struct rxrpc_header *) txb->head;
69445 hdr->serial = sp->hdr.serial;
69446 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
69447 */
69448 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69449 {
69450 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69451 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69452 }
69453
69454 /*
69455 @@ -627,7 +627,7 @@ process_further:
69456
69457 latest = ntohl(sp->hdr.serial);
69458 hard = ntohl(ack.firstPacket);
69459 - tx = atomic_read(&call->sequence);
69460 + tx = atomic_read_unchecked(&call->sequence);
69461
69462 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69463 latest,
69464 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
69465 u32 abort_code = RX_PROTOCOL_ERROR;
69466 u8 *acks = NULL;
69467
69468 + pax_track_stack();
69469 +
69470 //printk("\n--------------------\n");
69471 _enter("{%d,%s,%lx} [%lu]",
69472 call->debug_id, rxrpc_call_states[call->state], call->events,
69473 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
69474 goto maybe_reschedule;
69475
69476 send_ACK_with_skew:
69477 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
69478 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
69479 ntohl(ack.serial));
69480 send_ACK:
69481 mtu = call->conn->trans->peer->if_mtu;
69482 @@ -1171,7 +1173,7 @@ send_ACK:
69483 ackinfo.rxMTU = htonl(5692);
69484 ackinfo.jumbo_max = htonl(4);
69485
69486 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69487 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69488 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69489 ntohl(hdr.serial),
69490 ntohs(ack.maxSkew),
69491 @@ -1189,7 +1191,7 @@ send_ACK:
69492 send_message:
69493 _debug("send message");
69494
69495 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69496 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69497 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
69498 send_message_2:
69499
69500 diff -urNp linux-2.6.32.42/net/rxrpc/ar-call.c linux-2.6.32.42/net/rxrpc/ar-call.c
69501 --- linux-2.6.32.42/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
69502 +++ linux-2.6.32.42/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
69503 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
69504 spin_lock_init(&call->lock);
69505 rwlock_init(&call->state_lock);
69506 atomic_set(&call->usage, 1);
69507 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
69508 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69509 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
69510
69511 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
69512 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connection.c linux-2.6.32.42/net/rxrpc/ar-connection.c
69513 --- linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
69514 +++ linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
69515 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
69516 rwlock_init(&conn->lock);
69517 spin_lock_init(&conn->state_lock);
69518 atomic_set(&conn->usage, 1);
69519 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
69520 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69521 conn->avail_calls = RXRPC_MAXCALLS;
69522 conn->size_align = 4;
69523 conn->header_size = sizeof(struct rxrpc_header);
69524 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connevent.c linux-2.6.32.42/net/rxrpc/ar-connevent.c
69525 --- linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
69526 +++ linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
69527 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
69528
69529 len = iov[0].iov_len + iov[1].iov_len;
69530
69531 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69532 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69533 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
69534
69535 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69536 diff -urNp linux-2.6.32.42/net/rxrpc/ar-input.c linux-2.6.32.42/net/rxrpc/ar-input.c
69537 --- linux-2.6.32.42/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
69538 +++ linux-2.6.32.42/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
69539 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
69540 /* track the latest serial number on this connection for ACK packet
69541 * information */
69542 serial = ntohl(sp->hdr.serial);
69543 - hi_serial = atomic_read(&call->conn->hi_serial);
69544 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
69545 while (serial > hi_serial)
69546 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
69547 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
69548 serial);
69549
69550 /* request ACK generation for any ACK or DATA packet that requests
69551 diff -urNp linux-2.6.32.42/net/rxrpc/ar-internal.h linux-2.6.32.42/net/rxrpc/ar-internal.h
69552 --- linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
69553 +++ linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
69554 @@ -272,8 +272,8 @@ struct rxrpc_connection {
69555 int error; /* error code for local abort */
69556 int debug_id; /* debug ID for printks */
69557 unsigned call_counter; /* call ID counter */
69558 - atomic_t serial; /* packet serial number counter */
69559 - atomic_t hi_serial; /* highest serial number received */
69560 + atomic_unchecked_t serial; /* packet serial number counter */
69561 + atomic_unchecked_t hi_serial; /* highest serial number received */
69562 u8 avail_calls; /* number of calls available */
69563 u8 size_align; /* data size alignment (for security) */
69564 u8 header_size; /* rxrpc + security header size */
69565 @@ -346,7 +346,7 @@ struct rxrpc_call {
69566 spinlock_t lock;
69567 rwlock_t state_lock; /* lock for state transition */
69568 atomic_t usage;
69569 - atomic_t sequence; /* Tx data packet sequence counter */
69570 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
69571 u32 abort_code; /* local/remote abort code */
69572 enum { /* current state of call */
69573 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
69574 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
69575 */
69576 extern atomic_t rxrpc_n_skbs;
69577 extern __be32 rxrpc_epoch;
69578 -extern atomic_t rxrpc_debug_id;
69579 +extern atomic_unchecked_t rxrpc_debug_id;
69580 extern struct workqueue_struct *rxrpc_workqueue;
69581
69582 /*
69583 diff -urNp linux-2.6.32.42/net/rxrpc/ar-key.c linux-2.6.32.42/net/rxrpc/ar-key.c
69584 --- linux-2.6.32.42/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
69585 +++ linux-2.6.32.42/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
69586 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
69587 return ret;
69588
69589 plen -= sizeof(*token);
69590 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69591 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69592 if (!token)
69593 return -ENOMEM;
69594
69595 - token->kad = kmalloc(plen, GFP_KERNEL);
69596 + token->kad = kzalloc(plen, GFP_KERNEL);
69597 if (!token->kad) {
69598 kfree(token);
69599 return -ENOMEM;
69600 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
69601 goto error;
69602
69603 ret = -ENOMEM;
69604 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69605 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69606 if (!token)
69607 goto error;
69608 - token->kad = kmalloc(plen, GFP_KERNEL);
69609 + token->kad = kzalloc(plen, GFP_KERNEL);
69610 if (!token->kad)
69611 goto error_free;
69612
69613 diff -urNp linux-2.6.32.42/net/rxrpc/ar-local.c linux-2.6.32.42/net/rxrpc/ar-local.c
69614 --- linux-2.6.32.42/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
69615 +++ linux-2.6.32.42/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
69616 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
69617 spin_lock_init(&local->lock);
69618 rwlock_init(&local->services_lock);
69619 atomic_set(&local->usage, 1);
69620 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
69621 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69622 memcpy(&local->srx, srx, sizeof(*srx));
69623 }
69624
69625 diff -urNp linux-2.6.32.42/net/rxrpc/ar-output.c linux-2.6.32.42/net/rxrpc/ar-output.c
69626 --- linux-2.6.32.42/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
69627 +++ linux-2.6.32.42/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
69628 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
69629 sp->hdr.cid = call->cid;
69630 sp->hdr.callNumber = call->call_id;
69631 sp->hdr.seq =
69632 - htonl(atomic_inc_return(&call->sequence));
69633 + htonl(atomic_inc_return_unchecked(&call->sequence));
69634 sp->hdr.serial =
69635 - htonl(atomic_inc_return(&conn->serial));
69636 + htonl(atomic_inc_return_unchecked(&conn->serial));
69637 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
69638 sp->hdr.userStatus = 0;
69639 sp->hdr.securityIndex = conn->security_ix;
69640 diff -urNp linux-2.6.32.42/net/rxrpc/ar-peer.c linux-2.6.32.42/net/rxrpc/ar-peer.c
69641 --- linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
69642 +++ linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
69643 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
69644 INIT_LIST_HEAD(&peer->error_targets);
69645 spin_lock_init(&peer->lock);
69646 atomic_set(&peer->usage, 1);
69647 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
69648 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69649 memcpy(&peer->srx, srx, sizeof(*srx));
69650
69651 rxrpc_assess_MTU_size(peer);
69652 diff -urNp linux-2.6.32.42/net/rxrpc/ar-proc.c linux-2.6.32.42/net/rxrpc/ar-proc.c
69653 --- linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
69654 +++ linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
69655 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
69656 atomic_read(&conn->usage),
69657 rxrpc_conn_states[conn->state],
69658 key_serial(conn->key),
69659 - atomic_read(&conn->serial),
69660 - atomic_read(&conn->hi_serial));
69661 + atomic_read_unchecked(&conn->serial),
69662 + atomic_read_unchecked(&conn->hi_serial));
69663
69664 return 0;
69665 }
69666 diff -urNp linux-2.6.32.42/net/rxrpc/ar-transport.c linux-2.6.32.42/net/rxrpc/ar-transport.c
69667 --- linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
69668 +++ linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
69669 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
69670 spin_lock_init(&trans->client_lock);
69671 rwlock_init(&trans->conn_lock);
69672 atomic_set(&trans->usage, 1);
69673 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
69674 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69675
69676 if (peer->srx.transport.family == AF_INET) {
69677 switch (peer->srx.transport_type) {
69678 diff -urNp linux-2.6.32.42/net/rxrpc/rxkad.c linux-2.6.32.42/net/rxrpc/rxkad.c
69679 --- linux-2.6.32.42/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
69680 +++ linux-2.6.32.42/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
69681 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
69682 u16 check;
69683 int nsg;
69684
69685 + pax_track_stack();
69686 +
69687 sp = rxrpc_skb(skb);
69688
69689 _enter("");
69690 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
69691 u16 check;
69692 int nsg;
69693
69694 + pax_track_stack();
69695 +
69696 _enter("");
69697
69698 sp = rxrpc_skb(skb);
69699 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
69700
69701 len = iov[0].iov_len + iov[1].iov_len;
69702
69703 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69704 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69705 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
69706
69707 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69708 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
69709
69710 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
69711
69712 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
69713 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69714 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
69715
69716 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
69717 diff -urNp linux-2.6.32.42/net/sctp/proc.c linux-2.6.32.42/net/sctp/proc.c
69718 --- linux-2.6.32.42/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
69719 +++ linux-2.6.32.42/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
69720 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
69721 sctp_for_each_hentry(epb, node, &head->chain) {
69722 ep = sctp_ep(epb);
69723 sk = epb->sk;
69724 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
69725 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
69726 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69727 + NULL, NULL,
69728 +#else
69729 + ep, sk,
69730 +#endif
69731 sctp_sk(sk)->type, sk->sk_state, hash,
69732 epb->bind_addr.port,
69733 sock_i_uid(sk), sock_i_ino(sk));
69734 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
69735 seq_printf(seq,
69736 "%8p %8p %-3d %-3d %-2d %-4d "
69737 "%4d %8d %8d %7d %5lu %-5d %5d ",
69738 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
69739 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69740 + NULL, NULL,
69741 +#else
69742 + assoc, sk,
69743 +#endif
69744 + sctp_sk(sk)->type, sk->sk_state,
69745 assoc->state, hash,
69746 assoc->assoc_id,
69747 assoc->sndbuf_used,
69748 diff -urNp linux-2.6.32.42/net/sctp/socket.c linux-2.6.32.42/net/sctp/socket.c
69749 --- linux-2.6.32.42/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
69750 +++ linux-2.6.32.42/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
69751 @@ -5802,7 +5802,6 @@ pp_found:
69752 */
69753 int reuse = sk->sk_reuse;
69754 struct sock *sk2;
69755 - struct hlist_node *node;
69756
69757 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
69758 if (pp->fastreuse && sk->sk_reuse &&
69759 diff -urNp linux-2.6.32.42/net/socket.c linux-2.6.32.42/net/socket.c
69760 --- linux-2.6.32.42/net/socket.c 2011-03-27 14:31:47.000000000 -0400
69761 +++ linux-2.6.32.42/net/socket.c 2011-05-16 21:46:57.000000000 -0400
69762 @@ -87,6 +87,7 @@
69763 #include <linux/wireless.h>
69764 #include <linux/nsproxy.h>
69765 #include <linux/magic.h>
69766 +#include <linux/in.h>
69767
69768 #include <asm/uaccess.h>
69769 #include <asm/unistd.h>
69770 @@ -97,6 +98,21 @@
69771 #include <net/sock.h>
69772 #include <linux/netfilter.h>
69773
69774 +extern void gr_attach_curr_ip(const struct sock *sk);
69775 +extern int gr_handle_sock_all(const int family, const int type,
69776 + const int protocol);
69777 +extern int gr_handle_sock_server(const struct sockaddr *sck);
69778 +extern int gr_handle_sock_server_other(const struct sock *sck);
69779 +extern int gr_handle_sock_client(const struct sockaddr *sck);
69780 +extern int gr_search_connect(struct socket * sock,
69781 + struct sockaddr_in * addr);
69782 +extern int gr_search_bind(struct socket * sock,
69783 + struct sockaddr_in * addr);
69784 +extern int gr_search_listen(struct socket * sock);
69785 +extern int gr_search_accept(struct socket * sock);
69786 +extern int gr_search_socket(const int domain, const int type,
69787 + const int protocol);
69788 +
69789 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
69790 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
69791 unsigned long nr_segs, loff_t pos);
69792 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
69793 mnt);
69794 }
69795
69796 -static struct vfsmount *sock_mnt __read_mostly;
69797 +struct vfsmount *sock_mnt __read_mostly;
69798
69799 static struct file_system_type sock_fs_type = {
69800 .name = "sockfs",
69801 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
69802 return -EAFNOSUPPORT;
69803 if (type < 0 || type >= SOCK_MAX)
69804 return -EINVAL;
69805 + if (protocol < 0)
69806 + return -EINVAL;
69807
69808 /* Compatibility.
69809
69810 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
69811 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
69812 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
69813
69814 + if(!gr_search_socket(family, type, protocol)) {
69815 + retval = -EACCES;
69816 + goto out;
69817 + }
69818 +
69819 + if (gr_handle_sock_all(family, type, protocol)) {
69820 + retval = -EACCES;
69821 + goto out;
69822 + }
69823 +
69824 retval = sock_create(family, type, protocol, &sock);
69825 if (retval < 0)
69826 goto out;
69827 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69828 if (sock) {
69829 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
69830 if (err >= 0) {
69831 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
69832 + err = -EACCES;
69833 + goto error;
69834 + }
69835 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
69836 + if (err)
69837 + goto error;
69838 +
69839 err = security_socket_bind(sock,
69840 (struct sockaddr *)&address,
69841 addrlen);
69842 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69843 (struct sockaddr *)
69844 &address, addrlen);
69845 }
69846 +error:
69847 fput_light(sock->file, fput_needed);
69848 }
69849 return err;
69850 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
69851 if ((unsigned)backlog > somaxconn)
69852 backlog = somaxconn;
69853
69854 + if (gr_handle_sock_server_other(sock->sk)) {
69855 + err = -EPERM;
69856 + goto error;
69857 + }
69858 +
69859 + err = gr_search_listen(sock);
69860 + if (err)
69861 + goto error;
69862 +
69863 err = security_socket_listen(sock, backlog);
69864 if (!err)
69865 err = sock->ops->listen(sock, backlog);
69866
69867 +error:
69868 fput_light(sock->file, fput_needed);
69869 }
69870 return err;
69871 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69872 newsock->type = sock->type;
69873 newsock->ops = sock->ops;
69874
69875 + if (gr_handle_sock_server_other(sock->sk)) {
69876 + err = -EPERM;
69877 + sock_release(newsock);
69878 + goto out_put;
69879 + }
69880 +
69881 + err = gr_search_accept(sock);
69882 + if (err) {
69883 + sock_release(newsock);
69884 + goto out_put;
69885 + }
69886 +
69887 /*
69888 * We don't need try_module_get here, as the listening socket (sock)
69889 * has the protocol module (sock->ops->owner) held.
69890 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69891 fd_install(newfd, newfile);
69892 err = newfd;
69893
69894 + gr_attach_curr_ip(newsock->sk);
69895 +
69896 out_put:
69897 fput_light(sock->file, fput_needed);
69898 out:
69899 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69900 int, addrlen)
69901 {
69902 struct socket *sock;
69903 + struct sockaddr *sck;
69904 struct sockaddr_storage address;
69905 int err, fput_needed;
69906
69907 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69908 if (err < 0)
69909 goto out_put;
69910
69911 + sck = (struct sockaddr *)&address;
69912 +
69913 + if (gr_handle_sock_client(sck)) {
69914 + err = -EACCES;
69915 + goto out_put;
69916 + }
69917 +
69918 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
69919 + if (err)
69920 + goto out_put;
69921 +
69922 err =
69923 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
69924 if (err)
69925 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
69926 int err, ctl_len, iov_size, total_len;
69927 int fput_needed;
69928
69929 + pax_track_stack();
69930 +
69931 err = -EFAULT;
69932 if (MSG_CMSG_COMPAT & flags) {
69933 if (get_compat_msghdr(&msg_sys, msg_compat))
69934 diff -urNp linux-2.6.32.42/net/sunrpc/sched.c linux-2.6.32.42/net/sunrpc/sched.c
69935 --- linux-2.6.32.42/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
69936 +++ linux-2.6.32.42/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
69937 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
69938 #ifdef RPC_DEBUG
69939 static void rpc_task_set_debuginfo(struct rpc_task *task)
69940 {
69941 - static atomic_t rpc_pid;
69942 + static atomic_unchecked_t rpc_pid;
69943
69944 task->tk_magic = RPC_TASK_MAGIC_ID;
69945 - task->tk_pid = atomic_inc_return(&rpc_pid);
69946 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
69947 }
69948 #else
69949 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
69950 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c
69951 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
69952 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
69953 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
69954 static unsigned int min_max_inline = 4096;
69955 static unsigned int max_max_inline = 65536;
69956
69957 -atomic_t rdma_stat_recv;
69958 -atomic_t rdma_stat_read;
69959 -atomic_t rdma_stat_write;
69960 -atomic_t rdma_stat_sq_starve;
69961 -atomic_t rdma_stat_rq_starve;
69962 -atomic_t rdma_stat_rq_poll;
69963 -atomic_t rdma_stat_rq_prod;
69964 -atomic_t rdma_stat_sq_poll;
69965 -atomic_t rdma_stat_sq_prod;
69966 +atomic_unchecked_t rdma_stat_recv;
69967 +atomic_unchecked_t rdma_stat_read;
69968 +atomic_unchecked_t rdma_stat_write;
69969 +atomic_unchecked_t rdma_stat_sq_starve;
69970 +atomic_unchecked_t rdma_stat_rq_starve;
69971 +atomic_unchecked_t rdma_stat_rq_poll;
69972 +atomic_unchecked_t rdma_stat_rq_prod;
69973 +atomic_unchecked_t rdma_stat_sq_poll;
69974 +atomic_unchecked_t rdma_stat_sq_prod;
69975
69976 /* Temporary NFS request map and context caches */
69977 struct kmem_cache *svc_rdma_map_cachep;
69978 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
69979 len -= *ppos;
69980 if (len > *lenp)
69981 len = *lenp;
69982 - if (len && copy_to_user(buffer, str_buf, len))
69983 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
69984 return -EFAULT;
69985 *lenp = len;
69986 *ppos += len;
69987 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
69988 {
69989 .procname = "rdma_stat_read",
69990 .data = &rdma_stat_read,
69991 - .maxlen = sizeof(atomic_t),
69992 + .maxlen = sizeof(atomic_unchecked_t),
69993 .mode = 0644,
69994 .proc_handler = &read_reset_stat,
69995 },
69996 {
69997 .procname = "rdma_stat_recv",
69998 .data = &rdma_stat_recv,
69999 - .maxlen = sizeof(atomic_t),
70000 + .maxlen = sizeof(atomic_unchecked_t),
70001 .mode = 0644,
70002 .proc_handler = &read_reset_stat,
70003 },
70004 {
70005 .procname = "rdma_stat_write",
70006 .data = &rdma_stat_write,
70007 - .maxlen = sizeof(atomic_t),
70008 + .maxlen = sizeof(atomic_unchecked_t),
70009 .mode = 0644,
70010 .proc_handler = &read_reset_stat,
70011 },
70012 {
70013 .procname = "rdma_stat_sq_starve",
70014 .data = &rdma_stat_sq_starve,
70015 - .maxlen = sizeof(atomic_t),
70016 + .maxlen = sizeof(atomic_unchecked_t),
70017 .mode = 0644,
70018 .proc_handler = &read_reset_stat,
70019 },
70020 {
70021 .procname = "rdma_stat_rq_starve",
70022 .data = &rdma_stat_rq_starve,
70023 - .maxlen = sizeof(atomic_t),
70024 + .maxlen = sizeof(atomic_unchecked_t),
70025 .mode = 0644,
70026 .proc_handler = &read_reset_stat,
70027 },
70028 {
70029 .procname = "rdma_stat_rq_poll",
70030 .data = &rdma_stat_rq_poll,
70031 - .maxlen = sizeof(atomic_t),
70032 + .maxlen = sizeof(atomic_unchecked_t),
70033 .mode = 0644,
70034 .proc_handler = &read_reset_stat,
70035 },
70036 {
70037 .procname = "rdma_stat_rq_prod",
70038 .data = &rdma_stat_rq_prod,
70039 - .maxlen = sizeof(atomic_t),
70040 + .maxlen = sizeof(atomic_unchecked_t),
70041 .mode = 0644,
70042 .proc_handler = &read_reset_stat,
70043 },
70044 {
70045 .procname = "rdma_stat_sq_poll",
70046 .data = &rdma_stat_sq_poll,
70047 - .maxlen = sizeof(atomic_t),
70048 + .maxlen = sizeof(atomic_unchecked_t),
70049 .mode = 0644,
70050 .proc_handler = &read_reset_stat,
70051 },
70052 {
70053 .procname = "rdma_stat_sq_prod",
70054 .data = &rdma_stat_sq_prod,
70055 - .maxlen = sizeof(atomic_t),
70056 + .maxlen = sizeof(atomic_unchecked_t),
70057 .mode = 0644,
70058 .proc_handler = &read_reset_stat,
70059 },
70060 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
70061 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
70062 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
70063 @@ -495,7 +495,7 @@ next_sge:
70064 svc_rdma_put_context(ctxt, 0);
70065 goto out;
70066 }
70067 - atomic_inc(&rdma_stat_read);
70068 + atomic_inc_unchecked(&rdma_stat_read);
70069
70070 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
70071 chl_map->ch[ch_no].count -= read_wr.num_sge;
70072 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70073 dto_q);
70074 list_del_init(&ctxt->dto_q);
70075 } else {
70076 - atomic_inc(&rdma_stat_rq_starve);
70077 + atomic_inc_unchecked(&rdma_stat_rq_starve);
70078 clear_bit(XPT_DATA, &xprt->xpt_flags);
70079 ctxt = NULL;
70080 }
70081 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70082 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
70083 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
70084 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
70085 - atomic_inc(&rdma_stat_recv);
70086 + atomic_inc_unchecked(&rdma_stat_recv);
70087
70088 /* Build up the XDR from the receive buffers. */
70089 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
70090 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c
70091 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
70092 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
70093 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
70094 write_wr.wr.rdma.remote_addr = to;
70095
70096 /* Post It */
70097 - atomic_inc(&rdma_stat_write);
70098 + atomic_inc_unchecked(&rdma_stat_write);
70099 if (svc_rdma_send(xprt, &write_wr))
70100 goto err;
70101 return 0;
70102 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c
70103 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
70104 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
70105 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
70106 return;
70107
70108 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
70109 - atomic_inc(&rdma_stat_rq_poll);
70110 + atomic_inc_unchecked(&rdma_stat_rq_poll);
70111
70112 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
70113 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
70114 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
70115 }
70116
70117 if (ctxt)
70118 - atomic_inc(&rdma_stat_rq_prod);
70119 + atomic_inc_unchecked(&rdma_stat_rq_prod);
70120
70121 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
70122 /*
70123 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
70124 return;
70125
70126 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
70127 - atomic_inc(&rdma_stat_sq_poll);
70128 + atomic_inc_unchecked(&rdma_stat_sq_poll);
70129 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
70130 if (wc.status != IB_WC_SUCCESS)
70131 /* Close the transport */
70132 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
70133 }
70134
70135 if (ctxt)
70136 - atomic_inc(&rdma_stat_sq_prod);
70137 + atomic_inc_unchecked(&rdma_stat_sq_prod);
70138 }
70139
70140 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
70141 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
70142 spin_lock_bh(&xprt->sc_lock);
70143 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
70144 spin_unlock_bh(&xprt->sc_lock);
70145 - atomic_inc(&rdma_stat_sq_starve);
70146 + atomic_inc_unchecked(&rdma_stat_sq_starve);
70147
70148 /* See if we can opportunistically reap SQ WR to make room */
70149 sq_cq_reap(xprt);
70150 diff -urNp linux-2.6.32.42/net/sysctl_net.c linux-2.6.32.42/net/sysctl_net.c
70151 --- linux-2.6.32.42/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
70152 +++ linux-2.6.32.42/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
70153 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
70154 struct ctl_table *table)
70155 {
70156 /* Allow network administrator to have same access as root. */
70157 - if (capable(CAP_NET_ADMIN)) {
70158 + if (capable_nolog(CAP_NET_ADMIN)) {
70159 int mode = (table->mode >> 6) & 7;
70160 return (mode << 6) | (mode << 3) | mode;
70161 }
70162 diff -urNp linux-2.6.32.42/net/unix/af_unix.c linux-2.6.32.42/net/unix/af_unix.c
70163 --- linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
70164 +++ linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
70165 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
70166 err = -ECONNREFUSED;
70167 if (!S_ISSOCK(inode->i_mode))
70168 goto put_fail;
70169 +
70170 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
70171 + err = -EACCES;
70172 + goto put_fail;
70173 + }
70174 +
70175 u = unix_find_socket_byinode(net, inode);
70176 if (!u)
70177 goto put_fail;
70178 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
70179 if (u) {
70180 struct dentry *dentry;
70181 dentry = unix_sk(u)->dentry;
70182 +
70183 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
70184 + err = -EPERM;
70185 + sock_put(u);
70186 + goto fail;
70187 + }
70188 +
70189 if (dentry)
70190 touch_atime(unix_sk(u)->mnt, dentry);
70191 } else
70192 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
70193 err = security_path_mknod(&nd.path, dentry, mode, 0);
70194 if (err)
70195 goto out_mknod_drop_write;
70196 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70197 + err = -EACCES;
70198 + goto out_mknod_drop_write;
70199 + }
70200 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
70201 out_mknod_drop_write:
70202 mnt_drop_write(nd.path.mnt);
70203 if (err)
70204 goto out_mknod_dput;
70205 +
70206 + gr_handle_create(dentry, nd.path.mnt);
70207 +
70208 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
70209 dput(nd.path.dentry);
70210 nd.path.dentry = dentry;
70211 @@ -872,6 +892,10 @@ out_mknod_drop_write:
70212 goto out_unlock;
70213 }
70214
70215 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70216 + sk->sk_peercred.pid = current->pid;
70217 +#endif
70218 +
70219 list = &unix_socket_table[addr->hash];
70220 } else {
70221 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
70222 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
70223 unix_state_lock(s);
70224
70225 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
70226 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70227 + NULL,
70228 +#else
70229 s,
70230 +#endif
70231 atomic_read(&s->sk_refcnt),
70232 0,
70233 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
70234 diff -urNp linux-2.6.32.42/net/wireless/wext.c linux-2.6.32.42/net/wireless/wext.c
70235 --- linux-2.6.32.42/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
70236 +++ linux-2.6.32.42/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
70237 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
70238 */
70239
70240 /* Support for very large requests */
70241 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
70242 - (user_length > descr->max_tokens)) {
70243 + if (user_length > descr->max_tokens) {
70244 /* Allow userspace to GET more than max so
70245 * we can support any size GET requests.
70246 * There is still a limit : -ENOMEM.
70247 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
70248 }
70249 }
70250
70251 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
70252 - /*
70253 - * If this is a GET, but not NOMAX, it means that the extra
70254 - * data is not bounded by userspace, but by max_tokens. Thus
70255 - * set the length to max_tokens. This matches the extra data
70256 - * allocation.
70257 - * The driver should fill it with the number of tokens it
70258 - * provided, and it may check iwp->length rather than having
70259 - * knowledge of max_tokens. If the driver doesn't change the
70260 - * iwp->length, this ioctl just copies back max_token tokens
70261 - * filled with zeroes. Hopefully the driver isn't claiming
70262 - * them to be valid data.
70263 - */
70264 - iwp->length = descr->max_tokens;
70265 - }
70266 -
70267 err = handler(dev, info, (union iwreq_data *) iwp, extra);
70268
70269 iwp->length += essid_compat;
70270 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_policy.c linux-2.6.32.42/net/xfrm/xfrm_policy.c
70271 --- linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
70272 +++ linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
70273 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
70274 hlist_add_head(&policy->bydst, chain);
70275 xfrm_pol_hold(policy);
70276 net->xfrm.policy_count[dir]++;
70277 - atomic_inc(&flow_cache_genid);
70278 + atomic_inc_unchecked(&flow_cache_genid);
70279 if (delpol)
70280 __xfrm_policy_unlink(delpol, dir);
70281 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
70282 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
70283 write_unlock_bh(&xfrm_policy_lock);
70284
70285 if (ret && delete) {
70286 - atomic_inc(&flow_cache_genid);
70287 + atomic_inc_unchecked(&flow_cache_genid);
70288 xfrm_policy_kill(ret);
70289 }
70290 return ret;
70291 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
70292 write_unlock_bh(&xfrm_policy_lock);
70293
70294 if (ret && delete) {
70295 - atomic_inc(&flow_cache_genid);
70296 + atomic_inc_unchecked(&flow_cache_genid);
70297 xfrm_policy_kill(ret);
70298 }
70299 return ret;
70300 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
70301 }
70302
70303 }
70304 - atomic_inc(&flow_cache_genid);
70305 + atomic_inc_unchecked(&flow_cache_genid);
70306 out:
70307 write_unlock_bh(&xfrm_policy_lock);
70308 return err;
70309 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
70310 write_unlock_bh(&xfrm_policy_lock);
70311 if (pol) {
70312 if (dir < XFRM_POLICY_MAX)
70313 - atomic_inc(&flow_cache_genid);
70314 + atomic_inc_unchecked(&flow_cache_genid);
70315 xfrm_policy_kill(pol);
70316 return 0;
70317 }
70318 @@ -1477,7 +1477,7 @@ free_dst:
70319 goto out;
70320 }
70321
70322 -static int inline
70323 +static inline int
70324 xfrm_dst_alloc_copy(void **target, void *src, int size)
70325 {
70326 if (!*target) {
70327 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
70328 return 0;
70329 }
70330
70331 -static int inline
70332 +static inline int
70333 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
70334 {
70335 #ifdef CONFIG_XFRM_SUB_POLICY
70336 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
70337 #endif
70338 }
70339
70340 -static int inline
70341 +static inline int
70342 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
70343 {
70344 #ifdef CONFIG_XFRM_SUB_POLICY
70345 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
70346 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
70347
70348 restart:
70349 - genid = atomic_read(&flow_cache_genid);
70350 + genid = atomic_read_unchecked(&flow_cache_genid);
70351 policy = NULL;
70352 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
70353 pols[pi] = NULL;
70354 @@ -1680,7 +1680,7 @@ restart:
70355 goto error;
70356 }
70357 if (nx == -EAGAIN ||
70358 - genid != atomic_read(&flow_cache_genid)) {
70359 + genid != atomic_read_unchecked(&flow_cache_genid)) {
70360 xfrm_pols_put(pols, npols);
70361 goto restart;
70362 }
70363 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_user.c linux-2.6.32.42/net/xfrm/xfrm_user.c
70364 --- linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
70365 +++ linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
70366 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
70367 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70368 int i;
70369
70370 + pax_track_stack();
70371 +
70372 if (xp->xfrm_nr == 0)
70373 return 0;
70374
70375 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
70376 int err;
70377 int n = 0;
70378
70379 + pax_track_stack();
70380 +
70381 if (attrs[XFRMA_MIGRATE] == NULL)
70382 return -EINVAL;
70383
70384 diff -urNp linux-2.6.32.42/samples/kobject/kset-example.c linux-2.6.32.42/samples/kobject/kset-example.c
70385 --- linux-2.6.32.42/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
70386 +++ linux-2.6.32.42/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
70387 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
70388 }
70389
70390 /* Our custom sysfs_ops that we will associate with our ktype later on */
70391 -static struct sysfs_ops foo_sysfs_ops = {
70392 +static const struct sysfs_ops foo_sysfs_ops = {
70393 .show = foo_attr_show,
70394 .store = foo_attr_store,
70395 };
70396 diff -urNp linux-2.6.32.42/scripts/basic/fixdep.c linux-2.6.32.42/scripts/basic/fixdep.c
70397 --- linux-2.6.32.42/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
70398 +++ linux-2.6.32.42/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
70399 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
70400
70401 static void parse_config_file(char *map, size_t len)
70402 {
70403 - int *end = (int *) (map + len);
70404 + unsigned int *end = (unsigned int *) (map + len);
70405 /* start at +1, so that p can never be < map */
70406 - int *m = (int *) map + 1;
70407 + unsigned int *m = (unsigned int *) map + 1;
70408 char *p, *q;
70409
70410 for (; m < end; m++) {
70411 @@ -371,7 +371,7 @@ static void print_deps(void)
70412 static void traps(void)
70413 {
70414 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70415 - int *p = (int *)test;
70416 + unsigned int *p = (unsigned int *)test;
70417
70418 if (*p != INT_CONF) {
70419 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
70420 diff -urNp linux-2.6.32.42/scripts/Makefile.build linux-2.6.32.42/scripts/Makefile.build
70421 --- linux-2.6.32.42/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
70422 +++ linux-2.6.32.42/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
70423 @@ -59,7 +59,7 @@ endif
70424 endif
70425
70426 # Do not include host rules unless needed
70427 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70428 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70429 include scripts/Makefile.host
70430 endif
70431
70432 diff -urNp linux-2.6.32.42/scripts/Makefile.clean linux-2.6.32.42/scripts/Makefile.clean
70433 --- linux-2.6.32.42/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
70434 +++ linux-2.6.32.42/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
70435 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
70436 __clean-files := $(extra-y) $(always) \
70437 $(targets) $(clean-files) \
70438 $(host-progs) \
70439 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70440 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70441 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70442
70443 # as clean-files is given relative to the current directory, this adds
70444 # a $(obj) prefix, except for absolute paths
70445 diff -urNp linux-2.6.32.42/scripts/Makefile.host linux-2.6.32.42/scripts/Makefile.host
70446 --- linux-2.6.32.42/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
70447 +++ linux-2.6.32.42/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
70448 @@ -31,6 +31,7 @@
70449 # Note: Shared libraries consisting of C++ files are not supported
70450
70451 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70452 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70453
70454 # C code
70455 # Executables compiled from a single .c file
70456 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
70457 # Shared libaries (only .c supported)
70458 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70459 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
70460 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
70461 # Remove .so files from "xxx-objs"
70462 host-cobjs := $(filter-out %.so,$(host-cobjs))
70463
70464 diff -urNp linux-2.6.32.42/scripts/mod/file2alias.c linux-2.6.32.42/scripts/mod/file2alias.c
70465 --- linux-2.6.32.42/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
70466 +++ linux-2.6.32.42/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
70467 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70468 unsigned long size, unsigned long id_size,
70469 void *symval)
70470 {
70471 - int i;
70472 + unsigned int i;
70473
70474 if (size % id_size || size < id_size) {
70475 if (cross_build != 0)
70476 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70477 /* USB is special because the bcdDevice can be matched against a numeric range */
70478 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70479 static void do_usb_entry(struct usb_device_id *id,
70480 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70481 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70482 unsigned char range_lo, unsigned char range_hi,
70483 struct module *mod)
70484 {
70485 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
70486 for (i = 0; i < count; i++) {
70487 const char *id = (char *)devs[i].id;
70488 char acpi_id[sizeof(devs[0].id)];
70489 - int j;
70490 + unsigned int j;
70491
70492 buf_printf(&mod->dev_table_buf,
70493 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70494 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
70495
70496 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70497 const char *id = (char *)card->devs[j].id;
70498 - int i2, j2;
70499 + unsigned int i2, j2;
70500 int dup = 0;
70501
70502 if (!id[0])
70503 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
70504 /* add an individual alias for every device entry */
70505 if (!dup) {
70506 char acpi_id[sizeof(card->devs[0].id)];
70507 - int k;
70508 + unsigned int k;
70509
70510 buf_printf(&mod->dev_table_buf,
70511 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70512 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
70513 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70514 char *alias)
70515 {
70516 - int i, j;
70517 + unsigned int i, j;
70518
70519 sprintf(alias, "dmi*");
70520
70521 diff -urNp linux-2.6.32.42/scripts/mod/modpost.c linux-2.6.32.42/scripts/mod/modpost.c
70522 --- linux-2.6.32.42/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
70523 +++ linux-2.6.32.42/scripts/mod/modpost.c 2011-04-17 15:56:46.000000000 -0400
70524 @@ -835,6 +835,7 @@ enum mismatch {
70525 INIT_TO_EXIT,
70526 EXIT_TO_INIT,
70527 EXPORT_TO_INIT_EXIT,
70528 + DATA_TO_TEXT
70529 };
70530
70531 struct sectioncheck {
70532 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
70533 .fromsec = { "__ksymtab*", NULL },
70534 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70535 .mismatch = EXPORT_TO_INIT_EXIT
70536 +},
70537 +/* Do not reference code from writable data */
70538 +{
70539 + .fromsec = { DATA_SECTIONS, NULL },
70540 + .tosec = { TEXT_SECTIONS, NULL },
70541 + .mismatch = DATA_TO_TEXT
70542 }
70543 };
70544
70545 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
70546 continue;
70547 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70548 continue;
70549 - if (sym->st_value == addr)
70550 - return sym;
70551 /* Find a symbol nearby - addr are maybe negative */
70552 d = sym->st_value - addr;
70553 + if (d == 0)
70554 + return sym;
70555 if (d < 0)
70556 d = addr - sym->st_value;
70557 if (d < distance) {
70558 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
70559 "Fix this by removing the %sannotation of %s "
70560 "or drop the export.\n",
70561 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
70562 + case DATA_TO_TEXT:
70563 +/*
70564 + fprintf(stderr,
70565 + "The variable %s references\n"
70566 + "the %s %s%s%s\n",
70567 + fromsym, to, sec2annotation(tosec), tosym, to_p);
70568 +*/
70569 + break;
70570 case NO_MISMATCH:
70571 /* To get warnings on missing members */
70572 break;
70573 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
70574 va_end(ap);
70575 }
70576
70577 -void buf_write(struct buffer *buf, const char *s, int len)
70578 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
70579 {
70580 if (buf->size - buf->pos < len) {
70581 buf->size += len + SZ;
70582 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
70583 if (fstat(fileno(file), &st) < 0)
70584 goto close_write;
70585
70586 - if (st.st_size != b->pos)
70587 + if (st.st_size != (off_t)b->pos)
70588 goto close_write;
70589
70590 tmp = NOFAIL(malloc(b->pos));
70591 diff -urNp linux-2.6.32.42/scripts/mod/modpost.h linux-2.6.32.42/scripts/mod/modpost.h
70592 --- linux-2.6.32.42/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
70593 +++ linux-2.6.32.42/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
70594 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
70595
70596 struct buffer {
70597 char *p;
70598 - int pos;
70599 - int size;
70600 + unsigned int pos;
70601 + unsigned int size;
70602 };
70603
70604 void __attribute__((format(printf, 2, 3)))
70605 buf_printf(struct buffer *buf, const char *fmt, ...);
70606
70607 void
70608 -buf_write(struct buffer *buf, const char *s, int len);
70609 +buf_write(struct buffer *buf, const char *s, unsigned int len);
70610
70611 struct module {
70612 struct module *next;
70613 diff -urNp linux-2.6.32.42/scripts/mod/sumversion.c linux-2.6.32.42/scripts/mod/sumversion.c
70614 --- linux-2.6.32.42/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
70615 +++ linux-2.6.32.42/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
70616 @@ -455,7 +455,7 @@ static void write_version(const char *fi
70617 goto out;
70618 }
70619
70620 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
70621 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
70622 warn("writing sum in %s failed: %s\n",
70623 filename, strerror(errno));
70624 goto out;
70625 diff -urNp linux-2.6.32.42/scripts/pnmtologo.c linux-2.6.32.42/scripts/pnmtologo.c
70626 --- linux-2.6.32.42/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
70627 +++ linux-2.6.32.42/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
70628 @@ -237,14 +237,14 @@ static void write_header(void)
70629 fprintf(out, " * Linux logo %s\n", logoname);
70630 fputs(" */\n\n", out);
70631 fputs("#include <linux/linux_logo.h>\n\n", out);
70632 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
70633 + fprintf(out, "static unsigned char %s_data[] = {\n",
70634 logoname);
70635 }
70636
70637 static void write_footer(void)
70638 {
70639 fputs("\n};\n\n", out);
70640 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
70641 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
70642 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
70643 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
70644 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
70645 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
70646 fputs("\n};\n\n", out);
70647
70648 /* write logo clut */
70649 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
70650 + fprintf(out, "static unsigned char %s_clut[] = {\n",
70651 logoname);
70652 write_hex_cnt = 0;
70653 for (i = 0; i < logo_clutsize; i++) {
70654 diff -urNp linux-2.6.32.42/scripts/tags.sh linux-2.6.32.42/scripts/tags.sh
70655 --- linux-2.6.32.42/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
70656 +++ linux-2.6.32.42/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
70657 @@ -93,6 +93,11 @@ docscope()
70658 cscope -b -f cscope.out
70659 }
70660
70661 +dogtags()
70662 +{
70663 + all_sources | gtags -f -
70664 +}
70665 +
70666 exuberant()
70667 {
70668 all_sources | xargs $1 -a \
70669 @@ -164,6 +169,10 @@ case "$1" in
70670 docscope
70671 ;;
70672
70673 + "gtags")
70674 + dogtags
70675 + ;;
70676 +
70677 "tags")
70678 rm -f tags
70679 xtags ctags
70680 diff -urNp linux-2.6.32.42/security/capability.c linux-2.6.32.42/security/capability.c
70681 --- linux-2.6.32.42/security/capability.c 2011-03-27 14:31:47.000000000 -0400
70682 +++ linux-2.6.32.42/security/capability.c 2011-04-17 15:56:46.000000000 -0400
70683 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
70684 }
70685 #endif /* CONFIG_AUDIT */
70686
70687 -struct security_operations default_security_ops = {
70688 +struct security_operations default_security_ops __read_only = {
70689 .name = "default",
70690 };
70691
70692 diff -urNp linux-2.6.32.42/security/commoncap.c linux-2.6.32.42/security/commoncap.c
70693 --- linux-2.6.32.42/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
70694 +++ linux-2.6.32.42/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
70695 @@ -27,7 +27,7 @@
70696 #include <linux/sched.h>
70697 #include <linux/prctl.h>
70698 #include <linux/securebits.h>
70699 -
70700 +#include <net/sock.h>
70701 /*
70702 * If a non-root user executes a setuid-root binary in
70703 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
70704 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
70705 }
70706 }
70707
70708 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
70709 +
70710 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
70711 {
70712 - NETLINK_CB(skb).eff_cap = current_cap();
70713 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
70714 return 0;
70715 }
70716
70717 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
70718 {
70719 const struct cred *cred = current_cred();
70720
70721 + if (gr_acl_enable_at_secure())
70722 + return 1;
70723 +
70724 if (cred->uid != 0) {
70725 if (bprm->cap_effective)
70726 return 1;
70727 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_api.c linux-2.6.32.42/security/integrity/ima/ima_api.c
70728 --- linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
70729 +++ linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
70730 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
70731 int result;
70732
70733 /* can overflow, only indicator */
70734 - atomic_long_inc(&ima_htable.violations);
70735 + atomic_long_inc_unchecked(&ima_htable.violations);
70736
70737 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
70738 if (!entry) {
70739 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_fs.c linux-2.6.32.42/security/integrity/ima/ima_fs.c
70740 --- linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
70741 +++ linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
70742 @@ -27,12 +27,12 @@
70743 static int valid_policy = 1;
70744 #define TMPBUFLEN 12
70745 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
70746 - loff_t *ppos, atomic_long_t *val)
70747 + loff_t *ppos, atomic_long_unchecked_t *val)
70748 {
70749 char tmpbuf[TMPBUFLEN];
70750 ssize_t len;
70751
70752 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
70753 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
70754 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
70755 }
70756
70757 diff -urNp linux-2.6.32.42/security/integrity/ima/ima.h linux-2.6.32.42/security/integrity/ima/ima.h
70758 --- linux-2.6.32.42/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
70759 +++ linux-2.6.32.42/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
70760 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
70761 extern spinlock_t ima_queue_lock;
70762
70763 struct ima_h_table {
70764 - atomic_long_t len; /* number of stored measurements in the list */
70765 - atomic_long_t violations;
70766 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
70767 + atomic_long_unchecked_t violations;
70768 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
70769 };
70770 extern struct ima_h_table ima_htable;
70771 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_queue.c linux-2.6.32.42/security/integrity/ima/ima_queue.c
70772 --- linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
70773 +++ linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
70774 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
70775 INIT_LIST_HEAD(&qe->later);
70776 list_add_tail_rcu(&qe->later, &ima_measurements);
70777
70778 - atomic_long_inc(&ima_htable.len);
70779 + atomic_long_inc_unchecked(&ima_htable.len);
70780 key = ima_hash_key(entry->digest);
70781 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
70782 return 0;
70783 diff -urNp linux-2.6.32.42/security/Kconfig linux-2.6.32.42/security/Kconfig
70784 --- linux-2.6.32.42/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
70785 +++ linux-2.6.32.42/security/Kconfig 2011-06-29 20:55:36.000000000 -0400
70786 @@ -4,6 +4,555 @@
70787
70788 menu "Security options"
70789
70790 +source grsecurity/Kconfig
70791 +
70792 +menu "PaX"
70793 +
70794 + config ARCH_TRACK_EXEC_LIMIT
70795 + bool
70796 +
70797 + config PAX_PER_CPU_PGD
70798 + bool
70799 +
70800 + config TASK_SIZE_MAX_SHIFT
70801 + int
70802 + depends on X86_64
70803 + default 47 if !PAX_PER_CPU_PGD
70804 + default 42 if PAX_PER_CPU_PGD
70805 +
70806 + config PAX_ENABLE_PAE
70807 + bool
70808 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
70809 +
70810 +config PAX
70811 + bool "Enable various PaX features"
70812 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
70813 + help
70814 + This allows you to enable various PaX features. PaX adds
70815 + intrusion prevention mechanisms to the kernel that reduce
70816 + the risks posed by exploitable memory corruption bugs.
70817 +
70818 +menu "PaX Control"
70819 + depends on PAX
70820 +
70821 +config PAX_SOFTMODE
70822 + bool 'Support soft mode'
70823 + select PAX_PT_PAX_FLAGS
70824 + help
70825 + Enabling this option will allow you to run PaX in soft mode, that
70826 + is, PaX features will not be enforced by default, only on executables
70827 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
70828 + is the only way to mark executables for soft mode use.
70829 +
70830 + Soft mode can be activated by using the "pax_softmode=1" kernel command
70831 + line option on boot. Furthermore you can control various PaX features
70832 + at runtime via the entries in /proc/sys/kernel/pax.
70833 +
70834 +config PAX_EI_PAX
70835 + bool 'Use legacy ELF header marking'
70836 + help
70837 + Enabling this option will allow you to control PaX features on
70838 + a per executable basis via the 'chpax' utility available at
70839 + http://pax.grsecurity.net/. The control flags will be read from
70840 + an otherwise reserved part of the ELF header. This marking has
70841 + numerous drawbacks (no support for soft-mode, toolchain does not
70842 + know about the non-standard use of the ELF header) therefore it
70843 + has been deprecated in favour of PT_PAX_FLAGS support.
70844 +
70845 + Note that if you enable PT_PAX_FLAGS marking support as well,
70846 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
70847 +
70848 +config PAX_PT_PAX_FLAGS
70849 + bool 'Use ELF program header marking'
70850 + help
70851 + Enabling this option will allow you to control PaX features on
70852 + a per executable basis via the 'paxctl' utility available at
70853 + http://pax.grsecurity.net/. The control flags will be read from
70854 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
70855 + has the benefits of supporting both soft mode and being fully
70856 + integrated into the toolchain (the binutils patch is available
70857 + from http://pax.grsecurity.net).
70858 +
70859 + If your toolchain does not support PT_PAX_FLAGS markings,
70860 + you can create one in most cases with 'paxctl -C'.
70861 +
70862 + Note that if you enable the legacy EI_PAX marking support as well,
70863 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
70864 +
70865 +choice
70866 + prompt 'MAC system integration'
70867 + default PAX_HAVE_ACL_FLAGS
70868 + help
70869 + Mandatory Access Control systems have the option of controlling
70870 + PaX flags on a per executable basis, choose the method supported
70871 + by your particular system.
70872 +
70873 + - "none": if your MAC system does not interact with PaX,
70874 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
70875 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
70876 +
70877 + NOTE: this option is for developers/integrators only.
70878 +
70879 + config PAX_NO_ACL_FLAGS
70880 + bool 'none'
70881 +
70882 + config PAX_HAVE_ACL_FLAGS
70883 + bool 'direct'
70884 +
70885 + config PAX_HOOK_ACL_FLAGS
70886 + bool 'hook'
70887 +endchoice
70888 +
70889 +endmenu
70890 +
70891 +menu "Non-executable pages"
70892 + depends on PAX
70893 +
70894 +config PAX_NOEXEC
70895 + bool "Enforce non-executable pages"
70896 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
70897 + help
70898 + By design some architectures do not allow for protecting memory
70899 +	  pages against execution, or even if they do, Linux does not make
70900 + use of this feature. In practice this means that if a page is
70901 + readable (such as the stack or heap) it is also executable.
70902 +
70903 + There is a well known exploit technique that makes use of this
70904 + fact and a common programming mistake where an attacker can
70905 + introduce code of his choice somewhere in the attacked program's
70906 + memory (typically the stack or the heap) and then execute it.
70907 +
70908 + If the attacked program was running with different (typically
70909 +	  higher) privileges than those of the attacker, then he can elevate
70910 +	  his own privilege level (e.g. get a root shell, write to files to
70911 +	  which he does not have write access, etc).
70912 +
70913 + Enabling this option will let you choose from various features
70914 + that prevent the injection and execution of 'foreign' code in
70915 + a program.
70916 +
70917 + This will also break programs that rely on the old behaviour and
70918 +	  expect that memory dynamically allocated via the malloc() family
70919 + of functions is executable (which it is not). Notable examples
70920 + are the XFree86 4.x server, the java runtime and wine.
70921 +
70922 +config PAX_PAGEEXEC
70923 + bool "Paging based non-executable pages"
70924 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
70925 + select S390_SWITCH_AMODE if S390
70926 + select S390_EXEC_PROTECT if S390
70927 + select ARCH_TRACK_EXEC_LIMIT if X86_32
70928 + help
70929 + This implementation is based on the paging feature of the CPU.
70930 + On i386 without hardware non-executable bit support there is a
70931 + variable but usually low performance impact, however on Intel's
70932 + P4 core based CPUs it is very high so you should not enable this
70933 + for kernels meant to be used on such CPUs.
70934 +
70935 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
70936 + with hardware non-executable bit support there is no performance
70937 + impact, on ppc the impact is negligible.
70938 +
70939 + Note that several architectures require various emulations due to
70940 +	  badly designed userland ABIs; this will cause a performance impact
70941 + but will disappear as soon as userland is fixed. For example, ppc
70942 + userland MUST have been built with secure-plt by a recent toolchain.
70943 +
70944 +config PAX_SEGMEXEC
70945 + bool "Segmentation based non-executable pages"
70946 + depends on PAX_NOEXEC && X86_32
70947 + help
70948 + This implementation is based on the segmentation feature of the
70949 + CPU and has a very small performance impact, however applications
70950 + will be limited to a 1.5 GB address space instead of the normal
70951 + 3 GB.
70952 +
70953 +config PAX_EMUTRAMP
70954 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
70955 + default y if PARISC
70956 + help
70957 + There are some programs and libraries that for one reason or
70958 + another attempt to execute special small code snippets from
70959 + non-executable memory pages. Most notable examples are the
70960 + signal handler return code generated by the kernel itself and
70961 + the GCC trampolines.
70962 +
70963 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
70964 + such programs will no longer work under your kernel.
70965 +
70966 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
70967 + utilities to enable trampoline emulation for the affected programs
70968 + yet still have the protection provided by the non-executable pages.
70969 +
70970 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
70971 + your system will not even boot.
70972 +
70973 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
70974 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
70975 + for the affected files.
70976 +
70977 + NOTE: enabling this feature *may* open up a loophole in the
70978 + protection provided by non-executable pages that an attacker
70979 + could abuse. Therefore the best solution is to not have any
70980 + files on your system that would require this option. This can
70981 + be achieved by not using libc5 (which relies on the kernel
70982 + signal handler return code) and not using or rewriting programs
70983 + that make use of the nested function implementation of GCC.
70984 + Skilled users can just fix GCC itself so that it implements
70985 + nested function calls in a way that does not interfere with PaX.
70986 +
70987 +config PAX_EMUSIGRT
70988 + bool "Automatically emulate sigreturn trampolines"
70989 + depends on PAX_EMUTRAMP && PARISC
70990 + default y
70991 + help
70992 + Enabling this option will have the kernel automatically detect
70993 + and emulate signal return trampolines executing on the stack
70994 + that would otherwise lead to task termination.
70995 +
70996 + This solution is intended as a temporary one for users with
70997 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
70998 + Modula-3 runtime, etc) or executables linked to such, basically
70999 + everything that does not specify its own SA_RESTORER function in
71000 + normal executable memory like glibc 2.1+ does.
71001 +
71002 + On parisc you MUST enable this option, otherwise your system will
71003 + not even boot.
71004 +
71005 + NOTE: this feature cannot be disabled on a per executable basis
71006 + and since it *does* open up a loophole in the protection provided
71007 + by non-executable pages, the best solution is to not have any
71008 + files on your system that would require this option.
71009 +
71010 +config PAX_MPROTECT
71011 + bool "Restrict mprotect()"
71012 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
71013 + help
71014 + Enabling this option will prevent programs from
71015 + - changing the executable status of memory pages that were
71016 + not originally created as executable,
71017 + - making read-only executable pages writable again,
71018 + - creating executable pages from anonymous memory,
71019 + - making read-only-after-relocations (RELRO) data pages writable again.
71020 +
71021 + You should say Y here to complete the protection provided by
71022 + the enforcement of non-executable pages.
71023 +
71024 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71025 + this feature on a per file basis.
71026 +
71027 +config PAX_MPROTECT_COMPAT
71028 + bool "Use legacy/compat protection demoting (read help)"
71029 + depends on PAX_MPROTECT
71030 + default n
71031 + help
71032 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
71033 + by sending the proper error code to the application. For some broken
71034 + userland, this can cause problems with Python or other applications. The
71035 + current implementation however allows for applications like clamav to
71036 + detect if JIT compilation/execution is allowed and to fall back gracefully
71037 + to an interpreter-based mode if it does not. While we encourage everyone
71038 + to use the current implementation as-is and push upstream to fix broken
71039 + userland (note that the RWX logging option can assist with this), in some
71040 + environments this may not be possible. Having to disable MPROTECT
71041 + completely on certain binaries reduces the security benefit of PaX,
71042 + so this option is provided for those environments to revert to the old
71043 + behavior.
71044 +
71045 +config PAX_ELFRELOCS
71046 + bool "Allow ELF text relocations (read help)"
71047 + depends on PAX_MPROTECT
71048 + default n
71049 + help
71050 + Non-executable pages and mprotect() restrictions are effective
71051 + in preventing the introduction of new executable code into an
71052 + attacked task's address space. There remain only two venues
71053 + for this kind of attack: if the attacker can execute already
71054 + existing code in the attacked task then he can either have it
71055 + create and mmap() a file containing his code or have it mmap()
71056 + an already existing ELF library that does not have position
71057 + independent code in it and use mprotect() on it to make it
71058 + writable and copy his code there. While protecting against
71059 + the former approach is beyond PaX, the latter can be prevented
71060 + by having only PIC ELF libraries on one's system (which do not
71061 + need to relocate their code). If you are sure this is your case,
71062 + as is the case with all modern Linux distributions, then leave
71063 + this option disabled. You should say 'n' here.
71064 +
71065 +config PAX_ETEXECRELOCS
71066 + bool "Allow ELF ET_EXEC text relocations"
71067 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
71068 + select PAX_ELFRELOCS
71069 + default y
71070 + help
71071 + On some architectures there are incorrectly created applications
71072 + that require text relocations and would not work without enabling
71073 + this option. If you are an alpha, ia64 or parisc user, you should
71074 + enable this option and disable it once you have made sure that
71075 + none of your applications need it.
71076 +
71077 +config PAX_EMUPLT
71078 + bool "Automatically emulate ELF PLT"
71079 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
71080 + default y
71081 + help
71082 + Enabling this option will have the kernel automatically detect
71083 + and emulate the Procedure Linkage Table entries in ELF files.
71084 + On some architectures such entries are in writable memory, and
71085 +	  become non-executable, leading to task termination. Therefore
71086 + it is mandatory that you enable this option on alpha, parisc,
71087 + sparc and sparc64, otherwise your system would not even boot.
71088 +
71089 + NOTE: this feature *does* open up a loophole in the protection
71090 + provided by the non-executable pages, therefore the proper
71091 + solution is to modify the toolchain to produce a PLT that does
71092 + not need to be writable.
71093 +
71094 +config PAX_DLRESOLVE
71095 + bool 'Emulate old glibc resolver stub'
71096 + depends on PAX_EMUPLT && SPARC
71097 + default n
71098 + help
71099 + This option is needed if userland has an old glibc (before 2.4)
71100 + that puts a 'save' instruction into the runtime generated resolver
71101 + stub that needs special emulation.
71102 +
71103 +config PAX_KERNEXEC
71104 + bool "Enforce non-executable kernel pages"
71105 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
71106 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
71107 + help
71108 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
71109 + that is, enabling this option will make it harder to inject
71110 + and execute 'foreign' code in kernel memory itself.
71111 +
71112 + Note that on x86_64 kernels there is a known regression when
71113 + this feature and KVM/VMX are both enabled in the host kernel.
71114 +
71115 +config PAX_KERNEXEC_MODULE_TEXT
71116 + int "Minimum amount of memory reserved for module code"
71117 + default "4"
71118 + depends on PAX_KERNEXEC && X86_32 && MODULES
71119 + help
71120 + Due to implementation details the kernel must reserve a fixed
71121 + amount of memory for module code at compile time that cannot be
71122 + changed at runtime. Here you can specify the minimum amount
71123 + in MB that will be reserved. Due to the same implementation
71124 + details this size will always be rounded up to the next 2/4 MB
71125 + boundary (depends on PAE) so the actually available memory for
71126 + module code will usually be more than this minimum.
71127 +
71128 + The default 4 MB should be enough for most users but if you have
71129 + an excessive number of modules (e.g., most distribution configs
71130 + compile many drivers as modules) or use huge modules such as
71131 + nvidia's kernel driver, you will need to adjust this amount.
71132 + A good rule of thumb is to look at your currently loaded kernel
71133 + modules and add up their sizes.
71134 +
71135 +endmenu
71136 +
71137 +menu "Address Space Layout Randomization"
71138 + depends on PAX
71139 +
71140 +config PAX_ASLR
71141 + bool "Address Space Layout Randomization"
71142 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
71143 + help
71144 + Many if not most exploit techniques rely on the knowledge of
71145 + certain addresses in the attacked program. The following options
71146 + will allow the kernel to apply a certain amount of randomization
71147 + to specific parts of the program thereby forcing an attacker to
71148 + guess them in most cases. Any failed guess will most likely crash
71149 +	  the attacked program, which allows the kernel to detect such attempts
71150 +	  and react to them. PaX itself provides no reaction mechanisms;
71151 + instead it is strongly encouraged that you make use of Nergal's
71152 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
71153 + (http://www.grsecurity.net/) built-in crash detection features or
71154 + develop one yourself.
71155 +
71156 + By saying Y here you can choose to randomize the following areas:
71157 + - top of the task's kernel stack
71158 + - top of the task's userland stack
71159 + - base address for mmap() requests that do not specify one
71160 + (this includes all libraries)
71161 + - base address of the main executable
71162 +
71163 + It is strongly recommended to say Y here as address space layout
71164 + randomization has negligible impact on performance yet it provides
71165 + a very effective protection.
71166 +
71167 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71168 + this feature on a per file basis.
71169 +
71170 +config PAX_RANDKSTACK
71171 + bool "Randomize kernel stack base"
71172 + depends on PAX_ASLR && X86_TSC && X86
71173 + help
71174 + By saying Y here the kernel will randomize every task's kernel
71175 + stack on every system call. This will not only force an attacker
71176 +	  to guess it but also prevent him from making use of possibly
71177 + leaked information about it.
71178 +
71179 + Since the kernel stack is a rather scarce resource, randomization
71180 +	  may cause unexpected stack overflows; therefore you should very
71181 + carefully test your system. Note that once enabled in the kernel
71182 + configuration, this feature cannot be disabled on a per file basis.
71183 +
71184 +config PAX_RANDUSTACK
71185 + bool "Randomize user stack base"
71186 + depends on PAX_ASLR
71187 + help
71188 + By saying Y here the kernel will randomize every task's userland
71189 + stack. The randomization is done in two steps where the second
71190 +	  one may apply a large amount of shift to the top of the stack and
71191 + cause problems for programs that want to use lots of memory (more
71192 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
71193 + For this reason the second step can be controlled by 'chpax' or
71194 + 'paxctl' on a per file basis.
71195 +
71196 +config PAX_RANDMMAP
71197 + bool "Randomize mmap() base"
71198 + depends on PAX_ASLR
71199 + help
71200 + By saying Y here the kernel will use a randomized base address for
71201 + mmap() requests that do not specify one themselves. As a result
71202 + all dynamically loaded libraries will appear at random addresses
71203 + and therefore be harder to exploit by a technique where an attacker
71204 + attempts to execute library code for his purposes (e.g. spawn a
71205 + shell from an exploited program that is running at an elevated
71206 + privilege level).
71207 +
71208 + Furthermore, if a program is relinked as a dynamic ELF file, its
71209 + base address will be randomized as well, completing the full
71210 + randomization of the address space layout. Attacking such programs
71211 +	  becomes a guessing game. You can find an example of doing this at
71212 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
71213 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
71214 +
71215 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
71216 + feature on a per file basis.
71217 +
71218 +endmenu
71219 +
71220 +menu "Miscellaneous hardening features"
71221 +
71222 +config PAX_MEMORY_SANITIZE
71223 + bool "Sanitize all freed memory"
71224 + help
71225 + By saying Y here the kernel will erase memory pages as soon as they
71226 + are freed. This in turn reduces the lifetime of data stored in the
71227 + pages, making it less likely that sensitive information such as
71228 +	  passwords, cryptographic secrets, etc. stay in memory for too long.
71229 +
71230 +	  This is especially useful for programs whose runtime is short;
71231 +	  long-lived processes and the kernel itself also benefit from this as long as
71232 + they operate on whole memory pages and ensure timely freeing of pages
71233 + that may hold sensitive information.
71234 +
71235 +	  The tradeoff is a performance impact: on a single CPU system kernel
71236 +	  compilation sees a 3% slowdown; other systems and workloads may vary,
71237 + and you are advised to test this feature on your expected workload
71238 + before deploying it.
71239 +
71240 + Note that this feature does not protect data stored in live pages,
71241 + e.g., process memory swapped to disk may stay there for a long time.
71242 +
71243 +config PAX_MEMORY_STACKLEAK
71244 + bool "Sanitize kernel stack"
71245 + depends on X86
71246 + help
71247 + By saying Y here the kernel will erase the kernel stack before it
71248 + returns from a system call. This in turn reduces the information
71249 + that a kernel stack leak bug can reveal.
71250 +
71251 + Note that such a bug can still leak information that was put on
71252 + the stack by the current system call (the one eventually triggering
71253 + the bug) but traces of earlier system calls on the kernel stack
71254 + cannot leak anymore.
71255 +
71256 +	  The tradeoff is a performance impact: on a single CPU system kernel
71257 +	  compilation sees a 1% slowdown; other systems and workloads may vary,
71258 + and you are advised to test this feature on your expected workload
71259 + before deploying it.
71260 +
71261 + Note: full support for this feature requires gcc with plugin support
71262 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
71263 + is not supported). Using older gcc versions means that functions
71264 + with large enough stack frames may leave uninitialized memory behind
71265 + that may be exposed to a later syscall leaking the stack.
71266 +
71267 +config PAX_MEMORY_UDEREF
71268 + bool "Prevent invalid userland pointer dereference"
71269 + depends on X86 && !UML_X86 && !XEN
71270 + select PAX_PER_CPU_PGD if X86_64
71271 + help
71272 + By saying Y here the kernel will be prevented from dereferencing
71273 + userland pointers in contexts where the kernel expects only kernel
71274 + pointers. This is both a useful runtime debugging feature and a
71275 + security measure that prevents exploiting a class of kernel bugs.
71276 +
71277 + The tradeoff is that some virtualization solutions may experience
71278 + a huge slowdown and therefore you should not enable this feature
71279 + for kernels meant to run in such environments. Whether a given VM
71280 + solution is affected or not is best determined by simply trying it
71281 + out, the performance impact will be obvious right on boot as this
71282 + mechanism engages from very early on. A good rule of thumb is that
71283 + VMs running on CPUs without hardware virtualization support (i.e.,
71284 + the majority of IA-32 CPUs) will likely experience the slowdown.
71285 +
71286 +config PAX_REFCOUNT
71287 + bool "Prevent various kernel object reference counter overflows"
71288 + depends on GRKERNSEC && (X86 || SPARC64)
71289 + help
71290 + By saying Y here the kernel will detect and prevent overflowing
71291 + various (but not all) kinds of object reference counters. Such
71292 + overflows can normally occur due to bugs only and are often, if
71293 + not always, exploitable.
71294 +
71295 + The tradeoff is that data structures protected by an overflowed
71296 + refcount will never be freed and therefore will leak memory. Note
71297 + that this leak also happens even without this protection but in
71298 + that case the overflow can eventually trigger the freeing of the
71299 + data structure while it is still being used elsewhere, resulting
71300 + in the exploitable situation that this feature prevents.
71301 +
71302 + Since this has a negligible performance impact, you should enable
71303 + this feature.
71304 +
71305 +config PAX_USERCOPY
71306 + bool "Harden heap object copies between kernel and userland"
71307 + depends on X86 || PPC || SPARC || ARM
71308 + depends on GRKERNSEC && (SLAB || SLUB)
71309 + help
71310 + By saying Y here the kernel will enforce the size of heap objects
71311 + when they are copied in either direction between the kernel and
71312 + userland, even if only a part of the heap object is copied.
71313 +
71314 + Specifically, this checking prevents information leaking from the
71315 + kernel heap during kernel to userland copies (if the kernel heap
71316 + object is otherwise fully initialized) and prevents kernel heap
71317 + overflows during userland to kernel copies.
71318 +
71319 + Note that the current implementation provides the strictest bounds
71320 + checks for the SLUB allocator.
71321 +
71322 + Enabling this option also enables per-slab cache protection against
71323 +	  data in a given cache being copied in or out via userland
71324 + accessors. Though the whitelist of regions will be reduced over
71325 + time, it notably protects important data structures like task structs.
71326 +
71327 +
71328 + If frame pointers are enabled on x86, this option will also
71329 + restrict copies into and out of the kernel stack to local variables
71330 + within a single frame.
71331 +
71332 + Since this has a negligible performance impact, you should enable
71333 + this feature.
71334 +
71335 +endmenu
71336 +
71337 +endmenu
71338 +
71339 config KEYS
71340 bool "Enable access key retention support"
71341 help
71342 @@ -146,7 +695,7 @@ config INTEL_TXT
71343 config LSM_MMAP_MIN_ADDR
71344 int "Low address space for LSM to protect from user allocation"
71345 depends on SECURITY && SECURITY_SELINUX
71346 - default 65536
71347 + default 32768
71348 help
71349 This is the portion of low virtual memory which should be protected
71350 from userspace allocation. Keeping a user from writing to low pages
71351 diff -urNp linux-2.6.32.42/security/keys/keyring.c linux-2.6.32.42/security/keys/keyring.c
71352 --- linux-2.6.32.42/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
71353 +++ linux-2.6.32.42/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
71354 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
71355 ret = -EFAULT;
71356
71357 for (loop = 0; loop < klist->nkeys; loop++) {
71358 + key_serial_t serial;
71359 key = klist->keys[loop];
71360 + serial = key->serial;
71361
71362 tmp = sizeof(key_serial_t);
71363 if (tmp > buflen)
71364 tmp = buflen;
71365
71366 - if (copy_to_user(buffer,
71367 - &key->serial,
71368 - tmp) != 0)
71369 + if (copy_to_user(buffer, &serial, tmp))
71370 goto error;
71371
71372 buflen -= tmp;
71373 diff -urNp linux-2.6.32.42/security/min_addr.c linux-2.6.32.42/security/min_addr.c
71374 --- linux-2.6.32.42/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
71375 +++ linux-2.6.32.42/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
71376 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
71377 */
71378 static void update_mmap_min_addr(void)
71379 {
71380 +#ifndef SPARC
71381 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
71382 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
71383 mmap_min_addr = dac_mmap_min_addr;
71384 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71385 #else
71386 mmap_min_addr = dac_mmap_min_addr;
71387 #endif
71388 +#endif
71389 }
71390
71391 /*
71392 diff -urNp linux-2.6.32.42/security/root_plug.c linux-2.6.32.42/security/root_plug.c
71393 --- linux-2.6.32.42/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
71394 +++ linux-2.6.32.42/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
71395 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
71396 return 0;
71397 }
71398
71399 -static struct security_operations rootplug_security_ops = {
71400 +static struct security_operations rootplug_security_ops __read_only = {
71401 .bprm_check_security = rootplug_bprm_check_security,
71402 };
71403
71404 diff -urNp linux-2.6.32.42/security/security.c linux-2.6.32.42/security/security.c
71405 --- linux-2.6.32.42/security/security.c 2011-03-27 14:31:47.000000000 -0400
71406 +++ linux-2.6.32.42/security/security.c 2011-04-17 15:56:46.000000000 -0400
71407 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
71408 extern struct security_operations default_security_ops;
71409 extern void security_fixup_ops(struct security_operations *ops);
71410
71411 -struct security_operations *security_ops; /* Initialized to NULL */
71412 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
71413
71414 static inline int verify(struct security_operations *ops)
71415 {
71416 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
71417 * If there is already a security module registered with the kernel,
71418 * an error will be returned. Otherwise %0 is returned on success.
71419 */
71420 -int register_security(struct security_operations *ops)
71421 +int __init register_security(struct security_operations *ops)
71422 {
71423 if (verify(ops)) {
71424 printk(KERN_DEBUG "%s could not verify "
71425 diff -urNp linux-2.6.32.42/security/selinux/hooks.c linux-2.6.32.42/security/selinux/hooks.c
71426 --- linux-2.6.32.42/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
71427 +++ linux-2.6.32.42/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
71428 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
71429 * Minimal support for a secondary security module,
71430 * just to allow the use of the capability module.
71431 */
71432 -static struct security_operations *secondary_ops;
71433 +static struct security_operations *secondary_ops __read_only;
71434
71435 /* Lists of inode and superblock security structures initialized
71436 before the policy was loaded. */
71437 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
71438
71439 #endif
71440
71441 -static struct security_operations selinux_ops = {
71442 +static struct security_operations selinux_ops __read_only = {
71443 .name = "selinux",
71444
71445 .ptrace_access_check = selinux_ptrace_access_check,
71446 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
71447 avc_disable();
71448
71449 /* Reset security_ops to the secondary module, dummy or capability. */
71450 + pax_open_kernel();
71451 security_ops = secondary_ops;
71452 + pax_close_kernel();
71453
71454 /* Unregister netfilter hooks. */
71455 selinux_nf_ip_exit();
71456 diff -urNp linux-2.6.32.42/security/selinux/include/xfrm.h linux-2.6.32.42/security/selinux/include/xfrm.h
71457 --- linux-2.6.32.42/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
71458 +++ linux-2.6.32.42/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
71459 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71460
71461 static inline void selinux_xfrm_notify_policyload(void)
71462 {
71463 - atomic_inc(&flow_cache_genid);
71464 + atomic_inc_unchecked(&flow_cache_genid);
71465 }
71466 #else
71467 static inline int selinux_xfrm_enabled(void)
71468 diff -urNp linux-2.6.32.42/security/selinux/ss/services.c linux-2.6.32.42/security/selinux/ss/services.c
71469 --- linux-2.6.32.42/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
71470 +++ linux-2.6.32.42/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
71471 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
71472 int rc = 0;
71473 struct policy_file file = { data, len }, *fp = &file;
71474
71475 + pax_track_stack();
71476 +
71477 if (!ss_initialized) {
71478 avtab_cache_init();
71479 if (policydb_read(&policydb, fp)) {
71480 diff -urNp linux-2.6.32.42/security/smack/smack_lsm.c linux-2.6.32.42/security/smack/smack_lsm.c
71481 --- linux-2.6.32.42/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
71482 +++ linux-2.6.32.42/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
71483 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
71484 return 0;
71485 }
71486
71487 -struct security_operations smack_ops = {
71488 +struct security_operations smack_ops __read_only = {
71489 .name = "smack",
71490
71491 .ptrace_access_check = smack_ptrace_access_check,
71492 diff -urNp linux-2.6.32.42/security/tomoyo/tomoyo.c linux-2.6.32.42/security/tomoyo/tomoyo.c
71493 --- linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
71494 +++ linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
71495 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
71496 * tomoyo_security_ops is a "struct security_operations" which is used for
71497 * registering TOMOYO.
71498 */
71499 -static struct security_operations tomoyo_security_ops = {
71500 +static struct security_operations tomoyo_security_ops __read_only = {
71501 .name = "tomoyo",
71502 .cred_alloc_blank = tomoyo_cred_alloc_blank,
71503 .cred_prepare = tomoyo_cred_prepare,
71504 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.c linux-2.6.32.42/sound/aoa/codecs/onyx.c
71505 --- linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
71506 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
71507 @@ -53,7 +53,7 @@ struct onyx {
71508 spdif_locked:1,
71509 analog_locked:1,
71510 original_mute:2;
71511 - int open_count;
71512 + local_t open_count;
71513 struct codec_info *codec_info;
71514
71515 /* mutex serializes concurrent access to the device
71516 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
71517 struct onyx *onyx = cii->codec_data;
71518
71519 mutex_lock(&onyx->mutex);
71520 - onyx->open_count++;
71521 + local_inc(&onyx->open_count);
71522 mutex_unlock(&onyx->mutex);
71523
71524 return 0;
71525 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
71526 struct onyx *onyx = cii->codec_data;
71527
71528 mutex_lock(&onyx->mutex);
71529 - onyx->open_count--;
71530 - if (!onyx->open_count)
71531 + if (local_dec_and_test(&onyx->open_count))
71532 onyx->spdif_locked = onyx->analog_locked = 0;
71533 mutex_unlock(&onyx->mutex);
71534
71535 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.h linux-2.6.32.42/sound/aoa/codecs/onyx.h
71536 --- linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
71537 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
71538 @@ -11,6 +11,7 @@
71539 #include <linux/i2c.h>
71540 #include <asm/pmac_low_i2c.h>
71541 #include <asm/prom.h>
71542 +#include <asm/local.h>
71543
71544 /* PCM3052 register definitions */
71545
71546 diff -urNp linux-2.6.32.42/sound/drivers/mts64.c linux-2.6.32.42/sound/drivers/mts64.c
71547 --- linux-2.6.32.42/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
71548 +++ linux-2.6.32.42/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
71549 @@ -27,6 +27,7 @@
71550 #include <sound/initval.h>
71551 #include <sound/rawmidi.h>
71552 #include <sound/control.h>
71553 +#include <asm/local.h>
71554
71555 #define CARD_NAME "Miditerminal 4140"
71556 #define DRIVER_NAME "MTS64"
71557 @@ -65,7 +66,7 @@ struct mts64 {
71558 struct pardevice *pardev;
71559 int pardev_claimed;
71560
71561 - int open_count;
71562 + local_t open_count;
71563 int current_midi_output_port;
71564 int current_midi_input_port;
71565 u8 mode[MTS64_NUM_INPUT_PORTS];
71566 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
71567 {
71568 struct mts64 *mts = substream->rmidi->private_data;
71569
71570 - if (mts->open_count == 0) {
71571 + if (local_read(&mts->open_count) == 0) {
71572 /* We don't need a spinlock here, because this is just called
71573 if the device has not been opened before.
71574 So there aren't any IRQs from the device */
71575 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
71576
71577 msleep(50);
71578 }
71579 - ++(mts->open_count);
71580 + local_inc(&mts->open_count);
71581
71582 return 0;
71583 }
71584 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
71585 struct mts64 *mts = substream->rmidi->private_data;
71586 unsigned long flags;
71587
71588 - --(mts->open_count);
71589 - if (mts->open_count == 0) {
71590 + if (local_dec_return(&mts->open_count) == 0) {
71591 /* We need the spinlock_irqsave here because we can still
71592 have IRQs at this point */
71593 spin_lock_irqsave(&mts->lock, flags);
71594 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
71595
71596 msleep(500);
71597
71598 - } else if (mts->open_count < 0)
71599 - mts->open_count = 0;
71600 + } else if (local_read(&mts->open_count) < 0)
71601 + local_set(&mts->open_count, 0);
71602
71603 return 0;
71604 }
71605 diff -urNp linux-2.6.32.42/sound/drivers/portman2x4.c linux-2.6.32.42/sound/drivers/portman2x4.c
71606 --- linux-2.6.32.42/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
71607 +++ linux-2.6.32.42/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
71608 @@ -46,6 +46,7 @@
71609 #include <sound/initval.h>
71610 #include <sound/rawmidi.h>
71611 #include <sound/control.h>
71612 +#include <asm/local.h>
71613
71614 #define CARD_NAME "Portman 2x4"
71615 #define DRIVER_NAME "portman"
71616 @@ -83,7 +84,7 @@ struct portman {
71617 struct pardevice *pardev;
71618 int pardev_claimed;
71619
71620 - int open_count;
71621 + local_t open_count;
71622 int mode[PORTMAN_NUM_INPUT_PORTS];
71623 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
71624 };
71625 diff -urNp linux-2.6.32.42/sound/oss/sb_audio.c linux-2.6.32.42/sound/oss/sb_audio.c
71626 --- linux-2.6.32.42/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
71627 +++ linux-2.6.32.42/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
71628 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
71629 buf16 = (signed short *)(localbuf + localoffs);
71630 while (c)
71631 {
71632 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71633 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71634 if (copy_from_user(lbuf8,
71635 userbuf+useroffs + p,
71636 locallen))
71637 diff -urNp linux-2.6.32.42/sound/oss/swarm_cs4297a.c linux-2.6.32.42/sound/oss/swarm_cs4297a.c
71638 --- linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
71639 +++ linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
71640 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
71641 {
71642 struct cs4297a_state *s;
71643 u32 pwr, id;
71644 - mm_segment_t fs;
71645 int rval;
71646 #ifndef CONFIG_BCM_CS4297A_CSWARM
71647 u64 cfg;
71648 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
71649 if (!rval) {
71650 char *sb1250_duart_present;
71651
71652 +#if 0
71653 + mm_segment_t fs;
71654 fs = get_fs();
71655 set_fs(KERNEL_DS);
71656 -#if 0
71657 val = SOUND_MASK_LINE;
71658 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
71659 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
71660 val = initvol[i].vol;
71661 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
71662 }
71663 + set_fs(fs);
71664 // cs4297a_write_ac97(s, 0x18, 0x0808);
71665 #else
71666 // cs4297a_write_ac97(s, 0x5e, 0x180);
71667 cs4297a_write_ac97(s, 0x02, 0x0808);
71668 cs4297a_write_ac97(s, 0x18, 0x0808);
71669 #endif
71670 - set_fs(fs);
71671
71672 list_add(&s->list, &cs4297a_devs);
71673
71674 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_codec.c linux-2.6.32.42/sound/pci/ac97/ac97_codec.c
71675 --- linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
71676 +++ linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
71677 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
71678 }
71679
71680 /* build_ops to do nothing */
71681 -static struct snd_ac97_build_ops null_build_ops;
71682 +static const struct snd_ac97_build_ops null_build_ops;
71683
71684 #ifdef CONFIG_SND_AC97_POWER_SAVE
71685 static void do_update_power(struct work_struct *work)
71686 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_patch.c linux-2.6.32.42/sound/pci/ac97/ac97_patch.c
71687 --- linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
71688 +++ linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
71689 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
71690 return 0;
71691 }
71692
71693 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71694 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71695 .build_spdif = patch_yamaha_ymf743_build_spdif,
71696 .build_3d = patch_yamaha_ymf7x3_3d,
71697 };
71698 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
71699 return 0;
71700 }
71701
71702 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71703 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71704 .build_3d = patch_yamaha_ymf7x3_3d,
71705 .build_post_spdif = patch_yamaha_ymf753_post_spdif
71706 };
71707 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
71708 return 0;
71709 }
71710
71711 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71712 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71713 .build_specific = patch_wolfson_wm9703_specific,
71714 };
71715
71716 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
71717 return 0;
71718 }
71719
71720 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71721 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71722 .build_specific = patch_wolfson_wm9704_specific,
71723 };
71724
71725 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
71726 return 0;
71727 }
71728
71729 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71730 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71731 .build_specific = patch_wolfson_wm9705_specific,
71732 };
71733
71734 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
71735 return 0;
71736 }
71737
71738 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71739 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71740 .build_specific = patch_wolfson_wm9711_specific,
71741 };
71742
71743 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
71744 }
71745 #endif
71746
71747 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71748 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71749 .build_specific = patch_wolfson_wm9713_specific,
71750 .build_3d = patch_wolfson_wm9713_3d,
71751 #ifdef CONFIG_PM
71752 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
71753 return 0;
71754 }
71755
71756 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71757 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71758 .build_3d = patch_sigmatel_stac9700_3d,
71759 .build_specific = patch_sigmatel_stac97xx_specific
71760 };
71761 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
71762 return patch_sigmatel_stac97xx_specific(ac97);
71763 }
71764
71765 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71766 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71767 .build_3d = patch_sigmatel_stac9708_3d,
71768 .build_specific = patch_sigmatel_stac9708_specific
71769 };
71770 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
71771 return 0;
71772 }
71773
71774 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71775 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71776 .build_3d = patch_sigmatel_stac9700_3d,
71777 .build_specific = patch_sigmatel_stac9758_specific
71778 };
71779 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
71780 return 0;
71781 }
71782
71783 -static struct snd_ac97_build_ops patch_cirrus_ops = {
71784 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
71785 .build_spdif = patch_cirrus_build_spdif
71786 };
71787
71788 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
71789 return 0;
71790 }
71791
71792 -static struct snd_ac97_build_ops patch_conexant_ops = {
71793 +static const struct snd_ac97_build_ops patch_conexant_ops = {
71794 .build_spdif = patch_conexant_build_spdif
71795 };
71796
71797 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
71798 }
71799 }
71800
71801 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
71802 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
71803 #ifdef CONFIG_PM
71804 .resume = ad18xx_resume
71805 #endif
71806 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
71807 return 0;
71808 }
71809
71810 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
71811 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
71812 .build_specific = &patch_ad1885_specific,
71813 #ifdef CONFIG_PM
71814 .resume = ad18xx_resume
71815 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
71816 return 0;
71817 }
71818
71819 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
71820 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
71821 .build_specific = &patch_ad1886_specific,
71822 #ifdef CONFIG_PM
71823 .resume = ad18xx_resume
71824 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
71825 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71826 }
71827
71828 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71829 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71830 .build_post_spdif = patch_ad198x_post_spdif,
71831 .build_specific = patch_ad1981a_specific,
71832 #ifdef CONFIG_PM
71833 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
71834 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71835 }
71836
71837 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71838 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71839 .build_post_spdif = patch_ad198x_post_spdif,
71840 .build_specific = patch_ad1981b_specific,
71841 #ifdef CONFIG_PM
71842 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
71843 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
71844 }
71845
71846 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
71847 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
71848 .build_post_spdif = patch_ad198x_post_spdif,
71849 .build_specific = patch_ad1888_specific,
71850 #ifdef CONFIG_PM
71851 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
71852 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
71853 }
71854
71855 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
71856 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
71857 .build_post_spdif = patch_ad198x_post_spdif,
71858 .build_specific = patch_ad1980_specific,
71859 #ifdef CONFIG_PM
71860 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
71861 ARRAY_SIZE(snd_ac97_ad1985_controls));
71862 }
71863
71864 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
71865 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
71866 .build_post_spdif = patch_ad198x_post_spdif,
71867 .build_specific = patch_ad1985_specific,
71868 #ifdef CONFIG_PM
71869 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
71870 ARRAY_SIZE(snd_ac97_ad1985_controls));
71871 }
71872
71873 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
71874 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
71875 .build_post_spdif = patch_ad198x_post_spdif,
71876 .build_specific = patch_ad1986_specific,
71877 #ifdef CONFIG_PM
71878 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
71879 return 0;
71880 }
71881
71882 -static struct snd_ac97_build_ops patch_alc650_ops = {
71883 +static const struct snd_ac97_build_ops patch_alc650_ops = {
71884 .build_specific = patch_alc650_specific,
71885 .update_jacks = alc650_update_jacks
71886 };
71887 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
71888 return 0;
71889 }
71890
71891 -static struct snd_ac97_build_ops patch_alc655_ops = {
71892 +static const struct snd_ac97_build_ops patch_alc655_ops = {
71893 .build_specific = patch_alc655_specific,
71894 .update_jacks = alc655_update_jacks
71895 };
71896 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
71897 return 0;
71898 }
71899
71900 -static struct snd_ac97_build_ops patch_alc850_ops = {
71901 +static const struct snd_ac97_build_ops patch_alc850_ops = {
71902 .build_specific = patch_alc850_specific,
71903 .update_jacks = alc850_update_jacks
71904 };
71905 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
71906 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
71907 }
71908
71909 -static struct snd_ac97_build_ops patch_cm9738_ops = {
71910 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
71911 .build_specific = patch_cm9738_specific,
71912 .update_jacks = cm9738_update_jacks
71913 };
71914 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
71915 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
71916 }
71917
71918 -static struct snd_ac97_build_ops patch_cm9739_ops = {
71919 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
71920 .build_specific = patch_cm9739_specific,
71921 .build_post_spdif = patch_cm9739_post_spdif,
71922 .update_jacks = cm9739_update_jacks
71923 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
71924 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
71925 }
71926
71927 -static struct snd_ac97_build_ops patch_cm9761_ops = {
71928 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
71929 .build_specific = patch_cm9761_specific,
71930 .build_post_spdif = patch_cm9761_post_spdif,
71931 .update_jacks = cm9761_update_jacks
71932 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
71933 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
71934 }
71935
71936 -static struct snd_ac97_build_ops patch_cm9780_ops = {
71937 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
71938 .build_specific = patch_cm9780_specific,
71939 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
71940 };
71941 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
71942 return 0;
71943 }
71944
71945 -static struct snd_ac97_build_ops patch_vt1616_ops = {
71946 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
71947 .build_specific = patch_vt1616_specific
71948 };
71949
71950 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
71951 return 0;
71952 }
71953
71954 -static struct snd_ac97_build_ops patch_it2646_ops = {
71955 +static const struct snd_ac97_build_ops patch_it2646_ops = {
71956 .build_specific = patch_it2646_specific,
71957 .update_jacks = it2646_update_jacks
71958 };
71959 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
71960 return 0;
71961 }
71962
71963 -static struct snd_ac97_build_ops patch_si3036_ops = {
71964 +static const struct snd_ac97_build_ops patch_si3036_ops = {
71965 .build_specific = patch_si3036_specific,
71966 };
71967
71968 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
71969 return 0;
71970 }
71971
71972 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
71973 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
71974 .build_specific = patch_ucb1400_specific,
71975 };
71976
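The sound/pci/ac97 hunks above do one thing: each driver's snd_ac97_build_ops table gains a const qualifier. These tables are filled in at compile time and only ever read afterwards, so marking them const lets the compiler place the function-pointer table in a read-only section, where a stray write faults instead of silently redirecting a callback. A minimal stand-alone sketch of that pattern follows; the names are invented for illustration and none of it is part of the patch.

/* Illustration of const'ing a read-only ops table (invented names). */
#include <stdio.h>

struct demo_build_ops {
	int (*build_specific)(void);
	int (*build_post_spdif)(void);
};

static int demo_build_specific(void)
{
	puts("build_specific called");
	return 0;
}

/* const: the initialized table can live in .rodata instead of writable .data */
static const struct demo_build_ops demo_ops = {
	.build_specific = demo_build_specific,
	/* .build_post_spdif intentionally left NULL, as several tables above do */
};

int main(void)
{
	if (demo_ops.build_specific)
		demo_ops.build_specific();
	return 0;
}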
71977 diff -urNp linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c
71978 --- linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
71979 +++ linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
71980 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
71981 cp_ready);
71982
71983 /* TODO */
71984 - if (cp_state)
71985 - ;
71986 - if (cp_ready)
71987 - ;
71988 + if (cp_state) {
71989 + }
71990 + if (cp_ready) {
71991 + }
71992 }
71993
71994
71995 diff -urNp linux-2.6.32.42/sound/pci/intel8x0m.c linux-2.6.32.42/sound/pci/intel8x0m.c
71996 --- linux-2.6.32.42/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
71997 +++ linux-2.6.32.42/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
71998 @@ -1264,7 +1264,7 @@ static struct shortname_table {
71999 { 0x5455, "ALi M5455" },
72000 { 0x746d, "AMD AMD8111" },
72001 #endif
72002 - { 0 },
72003 + { 0, },
72004 };
72005
72006 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
72007 diff -urNp linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c
72008 --- linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
72009 +++ linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
72010 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
72011 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
72012 break;
72013 }
72014 - if (atomic_read(&chip->interrupt_sleep_count)) {
72015 - atomic_set(&chip->interrupt_sleep_count, 0);
72016 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72017 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72018 wake_up(&chip->interrupt_sleep);
72019 }
72020 __end:
72021 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
72022 continue;
72023 init_waitqueue_entry(&wait, current);
72024 add_wait_queue(&chip->interrupt_sleep, &wait);
72025 - atomic_inc(&chip->interrupt_sleep_count);
72026 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
72027 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
72028 remove_wait_queue(&chip->interrupt_sleep, &wait);
72029 }
72030 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
72031 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
72032 spin_unlock(&chip->reg_lock);
72033
72034 - if (atomic_read(&chip->interrupt_sleep_count)) {
72035 - atomic_set(&chip->interrupt_sleep_count, 0);
72036 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72037 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72038 wake_up(&chip->interrupt_sleep);
72039 }
72040 }
72041 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
72042 spin_lock_init(&chip->reg_lock);
72043 spin_lock_init(&chip->voice_lock);
72044 init_waitqueue_head(&chip->interrupt_sleep);
72045 - atomic_set(&chip->interrupt_sleep_count, 0);
72046 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72047 chip->card = card;
72048 chip->pci = pci;
72049 chip->irq = -1;
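The ymfpci hunks above switch interrupt_sleep_count from atomic_* to atomic_*_unchecked. Elsewhere in this patch the plain atomic_t operations gain overflow detection (the REFCOUNT hardening); the _unchecked variants are the opt-out for counters whose wraparound is harmless, and this sleep counter is such a case. A rough user-space sketch of that split, with invented type and helper names, might look like the following; it is an assumption about the scheme, not kernel code.

/* Invented user-space illustration of checked vs. unchecked counters. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } demo_atomic_t;            /* overflow-checked */
typedef struct { int counter; } demo_atomic_unchecked_t;  /* plain counter    */

static void demo_atomic_inc(demo_atomic_t *v)
{
	if (v->counter == INT_MAX) {       /* an increment here would wrap */
		fprintf(stderr, "refcount overflow\n");
		abort();
	}
	v->counter++;
}

static void demo_atomic_inc_unchecked(demo_atomic_unchecked_t *v)
{
	v->counter++;                      /* wraparound is tolerated */
}

int main(void)
{
	demo_atomic_t refs = { 0 };
	demo_atomic_unchecked_t sleepers = { 0 };

	demo_atomic_inc(&refs);
	demo_atomic_inc_unchecked(&sleepers);
	printf("refs=%d sleepers=%d\n", refs.counter, sleepers.counter);
	return 0;
}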
72050 diff -urNp linux-2.6.32.42/tools/gcc/Makefile linux-2.6.32.42/tools/gcc/Makefile
72051 --- linux-2.6.32.42/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
72052 +++ linux-2.6.32.42/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
72053 @@ -0,0 +1,11 @@
72054 +#CC := gcc
72055 +#PLUGIN_SOURCE_FILES := pax_plugin.c
72056 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
72057 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
72058 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
72059 +
72060 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
72061 +
72062 +hostlibs-y := pax_plugin.so
72063 +always := $(hostlibs-y)
72064 +pax_plugin-objs := pax_plugin.o
72065 diff -urNp linux-2.6.32.42/tools/gcc/pax_plugin.c linux-2.6.32.42/tools/gcc/pax_plugin.c
72066 --- linux-2.6.32.42/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
72067 +++ linux-2.6.32.42/tools/gcc/pax_plugin.c 2011-06-04 20:52:13.000000000 -0400
72068 @@ -0,0 +1,242 @@
72069 +/*
72070 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
72071 + * Licensed under the GPL v2
72072 + *
72073 + * Note: the choice of the license means that the compilation process is
72074 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
72075 + * but for the kernel it doesn't matter since it doesn't link against
72076 + * any of the gcc libraries
72077 + *
72078 + * gcc plugin to help implement various PaX features
72079 + *
72080 + * - track lowest stack pointer
72081 + *
72082 + * TODO:
72083 + * - initialize all local variables
72084 + *
72085 + * BUGS:
72086 + */
72087 +#include "gcc-plugin.h"
72088 +#include "plugin-version.h"
72089 +#include "config.h"
72090 +#include "system.h"
72091 +#include "coretypes.h"
72092 +#include "tm.h"
72093 +#include "toplev.h"
72094 +#include "basic-block.h"
72095 +#include "gimple.h"
72096 +//#include "expr.h" where are you...
72097 +#include "diagnostic.h"
72098 +#include "rtl.h"
72099 +#include "emit-rtl.h"
72100 +#include "function.h"
72101 +#include "tree.h"
72102 +#include "tree-pass.h"
72103 +#include "intl.h"
72104 +
72105 +int plugin_is_GPL_compatible;
72106 +
72107 +static int track_frame_size = -1;
72108 +static const char track_function[] = "pax_track_stack";
72109 +static bool init_locals;
72110 +
72111 +static struct plugin_info pax_plugin_info = {
72112 + .version = "201106030000",
72113 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
72114 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
72115 +};
72116 +
72117 +static bool gate_pax_track_stack(void);
72118 +static unsigned int execute_pax_tree_instrument(void);
72119 +static unsigned int execute_pax_final(void);
72120 +
72121 +static struct gimple_opt_pass pax_tree_instrument_pass = {
72122 + .pass = {
72123 + .type = GIMPLE_PASS,
72124 + .name = "pax_tree_instrument",
72125 + .gate = gate_pax_track_stack,
72126 + .execute = execute_pax_tree_instrument,
72127 + .sub = NULL,
72128 + .next = NULL,
72129 + .static_pass_number = 0,
72130 + .tv_id = TV_NONE,
72131 + .properties_required = PROP_gimple_leh | PROP_cfg,
72132 + .properties_provided = 0,
72133 + .properties_destroyed = 0,
72134 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
72135 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
72136 + }
72137 +};
72138 +
72139 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
72140 + .pass = {
72141 + .type = RTL_PASS,
72142 + .name = "pax_final",
72143 + .gate = gate_pax_track_stack,
72144 + .execute = execute_pax_final,
72145 + .sub = NULL,
72146 + .next = NULL,
72147 + .static_pass_number = 0,
72148 + .tv_id = TV_NONE,
72149 + .properties_required = 0,
72150 + .properties_provided = 0,
72151 + .properties_destroyed = 0,
72152 + .todo_flags_start = 0,
72153 + .todo_flags_finish = 0
72154 + }
72155 +};
72156 +
72157 +static bool gate_pax_track_stack(void)
72158 +{
72159 + return track_frame_size >= 0;
72160 +}
72161 +
72162 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
72163 +{
72164 + gimple call;
72165 + tree decl, type;
72166 +
72167 + // insert call to void pax_track_stack(void)
72168 + type = build_function_type_list(void_type_node, NULL_TREE);
72169 + decl = build_fn_decl(track_function, type);
72170 + DECL_ASSEMBLER_NAME(decl); // for LTO
72171 + call = gimple_build_call(decl, 0);
72172 + if (before)
72173 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
72174 + else
72175 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
72176 +}
72177 +
72178 +static unsigned int execute_pax_tree_instrument(void)
72179 +{
72180 + basic_block bb;
72181 + gimple_stmt_iterator gsi;
72182 +
72183 + // 1. loop through BBs and GIMPLE statements
72184 + FOR_EACH_BB(bb) {
72185 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
72186 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
72187 + tree decl;
72188 + gimple stmt = gsi_stmt(gsi);
72189 +
72190 + if (!is_gimple_call(stmt))
72191 + continue;
72192 + decl = gimple_call_fndecl(stmt);
72193 + if (!decl)
72194 + continue;
72195 + if (TREE_CODE(decl) != FUNCTION_DECL)
72196 + continue;
72197 + if (!DECL_BUILT_IN(decl))
72198 + continue;
72199 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
72200 + continue;
72201 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
72202 + continue;
72203 +
72204 + // 2. insert track call after each __builtin_alloca call
72205 + pax_add_instrumentation(&gsi, false);
72206 +// print_node(stderr, "pax", decl, 4);
72207 + }
72208 + }
72209 +
72210 + // 3. insert track call at the beginning
72211 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
72212 + gsi = gsi_start_bb(bb);
72213 + pax_add_instrumentation(&gsi, true);
72214 +
72215 + return 0;
72216 +}
72217 +
72218 +static unsigned int execute_pax_final(void)
72219 +{
72220 + rtx insn;
72221 +
72222 + if (cfun->calls_alloca)
72223 + return 0;
72224 +
72225 + // 1. find pax_track_stack calls
72226 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
72227 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
72228 + rtx body;
72229 +
72230 + if (!CALL_P(insn))
72231 + continue;
72232 + body = PATTERN(insn);
72233 + if (GET_CODE(body) != CALL)
72234 + continue;
72235 + body = XEXP(body, 0);
72236 + if (GET_CODE(body) != MEM)
72237 + continue;
72238 + body = XEXP(body, 0);
72239 + if (GET_CODE(body) != SYMBOL_REF)
72240 + continue;
72241 + if (strcmp(XSTR(body, 0), track_function))
72242 + continue;
72243 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72244 + // 2. delete call if function frame is not big enough
72245 + if (get_frame_size() >= track_frame_size)
72246 + continue;
72247 + delete_insn_and_edges(insn);
72248 + }
72249 +
72250 +// print_simple_rtl(stderr, get_insns());
72251 +// print_rtl(stderr, get_insns());
72252 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72253 +
72254 + return 0;
72255 +}
72256 +
72257 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72258 +{
72259 + const char * const plugin_name = plugin_info->base_name;
72260 + const int argc = plugin_info->argc;
72261 + const struct plugin_argument * const argv = plugin_info->argv;
72262 + int i;
72263 + struct register_pass_info pax_tree_instrument_pass_info = {
72264 + .pass = &pax_tree_instrument_pass.pass,
72265 +// .reference_pass_name = "tree_profile",
72266 + .reference_pass_name = "optimized",
72267 + .ref_pass_instance_number = 0,
72268 + .pos_op = PASS_POS_INSERT_AFTER
72269 + };
72270 + struct register_pass_info pax_final_pass_info = {
72271 + .pass = &pax_final_rtl_opt_pass.pass,
72272 + .reference_pass_name = "final",
72273 + .ref_pass_instance_number = 0,
72274 + .pos_op = PASS_POS_INSERT_BEFORE
72275 + };
72276 +
72277 + if (!plugin_default_version_check(version, &gcc_version)) {
72278 + error(G_("incompatible gcc/plugin versions"));
72279 + return 1;
72280 + }
72281 +
72282 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
72283 +
72284 + for (i = 0; i < argc; ++i) {
72285 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
72286 + if (!argv[i].value) {
72287 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72288 + continue;
72289 + }
72290 + track_frame_size = atoi(argv[i].value);
72291 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
72292 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72293 + continue;
72294 + }
72295 + if (!strcmp(argv[i].key, "initialize-locals")) {
72296 + if (argv[i].value) {
72297 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72298 + continue;
72299 + }
72300 + init_locals = true;
72301 + continue;
72302 + }
72303 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72304 + }
72305 +
72306 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
72307 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
72308 +
72309 + return 0;
72310 +}
72311 Binary files linux-2.6.32.42/tools/gcc/pax_plugin.so and linux-2.6.32.42/tools/gcc/pax_plugin.so differ
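Taken together, the two passes above cooperate: the GIMPLE pass (registered after "optimized") plants a pax_track_stack() call at function entry and after every __builtin_alloca, and the RTL pass (registered before "final") deletes those calls again in functions that do not call alloca and whose frame stays under the track-lowest-sp threshold. A compile-only sketch of what that means for a small function is below; the pax_track_stack() stub, the consume() helper and the 512-byte threshold are made up for the example, and the command line simply follows gcc's -fplugin-arg-<plugin>-<key>=<value> convention that the option parsing above expects.

/*
 * Illustration only; build with something like:
 *   gcc -O2 -fplugin=./pax_plugin.so \
 *       -fplugin-arg-pax_plugin-track-lowest-sp=512 demo.c -o demo
 */
#include <string.h>

void pax_track_stack(void)
{
	/* stand-in; the kernel supplies the real lowest-stack tracker */
}

static int consume(const char *buf, unsigned long n)
{
	return buf[0] + (int)n;            /* keep the buffer alive */
}

int demo(unsigned long n)
{
	/* GIMPLE pass: a pax_track_stack() call is inserted here, at the
	 * start of the function body ... */
	char *buf = __builtin_alloca(n);
	memset(buf, 0, n);
	/* ... and another one right after the __builtin_alloca call.
	 * RTL pass: because this function calls alloca, execute_pax_final()
	 * keeps both calls; in an alloca-free function with a frame smaller
	 * than 512 bytes they would be deleted again before "final". */
	return consume(buf, n);
}

int main(void)
{
	return demo(64);
}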
72312 diff -urNp linux-2.6.32.42/usr/gen_init_cpio.c linux-2.6.32.42/usr/gen_init_cpio.c
72313 --- linux-2.6.32.42/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
72314 +++ linux-2.6.32.42/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
72315 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
72316 int retval;
72317 int rc = -1;
72318 int namesize;
72319 - int i;
72320 + unsigned int i;
72321
72322 mode |= S_IFREG;
72323
72324 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
72325 *env_var = *expanded = '\0';
72326 strncat(env_var, start + 2, end - start - 2);
72327 strncat(expanded, new_location, start - new_location);
72328 - strncat(expanded, getenv(env_var), PATH_MAX);
72329 - strncat(expanded, end + 1, PATH_MAX);
72330 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
72331 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
72332 strncpy(new_location, expanded, PATH_MAX);
72333 + new_location[PATH_MAX] = 0;
72334 } else
72335 break;
72336 }
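The gen_init_cpio.c change above matters because strncat's size argument bounds how many bytes may be appended, not the total size of the destination; passing a flat PATH_MAX on every call can still overflow the buffer once it already holds text, and the added new_location[PATH_MAX] = 0 covers strncpy's refusal to NUL-terminate a full buffer. A stand-alone sketch of the corrected append idiom, with invented buffer names:

/* Stand-alone illustration of the bounded-append idiom used above;
 * not the kernel helper itself. */
#include <stdio.h>
#include <string.h>

#define DEMO_PATH_MAX 4096

int main(void)
{
	char expanded[DEMO_PATH_MAX + 1] = "";
	const char *prefix = "/usr/share/";
	const char *value  = "example";

	/* Wrong: the third argument limits the bytes appended, so a flat
	 * DEMO_PATH_MAX ignores whatever the buffer already holds. */
	/* strncat(expanded, value, DEMO_PATH_MAX); */

	/* Right: leave room for the current contents (strncat always adds
	 * a terminating NUL after the appended bytes). */
	strncat(expanded, prefix, DEMO_PATH_MAX - strlen(expanded));
	strncat(expanded, value,  DEMO_PATH_MAX - strlen(expanded));

	printf("%s\n", expanded);
	return 0;
}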
72337 diff -urNp linux-2.6.32.42/virt/kvm/kvm_main.c linux-2.6.32.42/virt/kvm/kvm_main.c
72338 --- linux-2.6.32.42/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
72339 +++ linux-2.6.32.42/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
72340 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
72341 return 0;
72342 }
72343
72344 +/* cannot be const */
72345 static struct file_operations kvm_vcpu_fops = {
72346 .release = kvm_vcpu_release,
72347 .unlocked_ioctl = kvm_vcpu_ioctl,
72348 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
72349 return 0;
72350 }
72351
72352 +/* cannot be const */
72353 static struct file_operations kvm_vm_fops = {
72354 .release = kvm_vm_release,
72355 .unlocked_ioctl = kvm_vm_ioctl,
72356 @@ -2431,6 +2433,7 @@ out:
72357 return r;
72358 }
72359
72360 +/* cannot be const */
72361 static struct file_operations kvm_chardev_ops = {
72362 .unlocked_ioctl = kvm_dev_ioctl,
72363 .compat_ioctl = kvm_dev_ioctl,
72364 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
72365 if (kvm_rebooting)
72366 /* spin while reset goes on */
72367 while (true)
72368 - ;
72369 + cpu_relax();
72370 /* Fault while not rebooting. We want the trace. */
72371 BUG();
72372 }
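The kvm_handle_fault_on_reboot hunk above gives the endless reboot spin an explicit cpu_relax() body; on most architectures that expands to a CPU pause hint plus a compiler barrier, so the wait is cheaper on SMT siblings and is clearly marked as a deliberate busy loop. A small user-space sketch of the same idiom, with an invented flag and helper (the x86 pause is an assumption for the example):

/* User-space spin-wait illustration; not the kernel's cpu_relax(). */
#include <stdatomic.h>

static atomic_int rebooting = 1;

static inline void demo_cpu_relax(void)
{
#if defined(__x86_64__) || defined(__i386__)
	__asm__ __volatile__("pause" ::: "memory");  /* CPU hint + barrier */
#else
	__asm__ __volatile__("" ::: "memory");       /* at least a compiler barrier */
#endif
}

void wait_while_rebooting(void)
{
	while (atomic_load(&rebooting))   /* spin until the flag is cleared */
		demo_cpu_relax();
}

int main(void)
{
	atomic_store(&rebooting, 0);      /* nothing to wait for in the demo */
	wait_while_rebooting();
	return 0;
}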
72373 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
72374 kvm_arch_vcpu_put(vcpu);
72375 }
72376
72377 -int kvm_init(void *opaque, unsigned int vcpu_size,
72378 +int kvm_init(const void *opaque, unsigned int vcpu_size,
72379 struct module *module)
72380 {
72381 int r;
72382 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
72383 /* A kmem cache lets us meet the alignment requirements of fx_save. */
72384 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
72385 __alignof__(struct kvm_vcpu),
72386 - 0, NULL);
72387 + SLAB_USERCOPY, NULL);
72388 if (!kvm_vcpu_cache) {
72389 r = -ENOMEM;
72390 goto out_free_5;