grsecurity-2.2.2-2.6.32.43-201107142110.patch
1 diff -urNp linux-2.6.32.43/arch/alpha/include/asm/elf.h linux-2.6.32.43/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.43/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.43/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
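PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of address bits PaX randomizes for the mmap and stack bases on alpha (14 for 32-bit personalities, 28 and 19 otherwise). As a rough, illustrative sketch only, not code taken from this patch, the mmap delta is typically derived along these lines, assuming a pax_get_random_long() helper that returns a random word:

	/* illustrative sketch: turn PAX_DELTA_MMAP_LEN random bits into a
	 * page-aligned offset added to the mmap search base */
	unsigned long delta_mmap;

	delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1))
			<< PAGE_SHIFT;
	mm->mmap_base += delta_mmap;
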
18 diff -urNp linux-2.6.32.43/arch/alpha/include/asm/pgtable.h linux-2.6.32.43/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.43/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.43/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.43/arch/alpha/kernel/module.c linux-2.6.32.43/arch/alpha/kernel/module.c
40 --- linux-2.6.32.43/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.43/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.43/arch/alpha/kernel/osf_sys.c linux-2.6.32.43/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.43/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.43/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58 - if (namelen > 32)
59 + if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63 @@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67 - if (len > count)
68 + if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72 @@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76 - if (nbytes < sizeof(*hwrpb))
77 + if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81 @@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85 + unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89 @@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93 - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94 + ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95 + (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102 + err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106 @@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110 - if (!vma || addr + len <= vma->vm_start)
111 + if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115 @@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119 +#ifdef CONFIG_PAX_RANDMMAP
120 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121 +#endif
122 +
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126 @@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131 - len, limit);
132 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133 +
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
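The arch_get_unmapped_area() hunks above replace the raw "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), which the patch defines elsewhere, outside this excerpt. A hedged sketch of its intent follows, with the gap size written as a hypothetical tunable since the real name is not visible here:

	/* sketch only: accept a candidate range when it hits no VMA, or ends
	 * below the next VMA; for stack-like (VM_GROWSDOWN) VMAs also demand
	 * an extra guard gap so the heap cannot creep up under the stack */
	static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
						unsigned long addr, unsigned long len)
	{
		unsigned long gap_bytes = heap_stack_gap_pages << PAGE_SHIFT; /* hypothetical tunable */

		if (!vma)
			return true;
		if (addr + len > vma->vm_start)
			return false;
		if (vma->vm_flags & VM_GROWSDOWN)
			return vma->vm_start - (addr + len) >= gap_bytes;
		return true;
	}
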
137 diff -urNp linux-2.6.32.43/arch/alpha/mm/fault.c linux-2.6.32.43/arch/alpha/mm/fault.c
138 --- linux-2.6.32.43/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139 +++ linux-2.6.32.43/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144 +#ifdef CONFIG_PAX_PAGEEXEC
145 +/*
146 + * PaX: decide what to do with offenders (regs->pc = fault address)
147 + *
148 + * returns 1 when task should be killed
149 + * 2 when patched PLT trampoline was detected
150 + * 3 when unpatched PLT trampoline was detected
151 + */
152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
153 +{
154 +
155 +#ifdef CONFIG_PAX_EMUPLT
156 + int err;
157 +
158 + do { /* PaX: patched PLT emulation #1 */
159 + unsigned int ldah, ldq, jmp;
160 +
161 + err = get_user(ldah, (unsigned int *)regs->pc);
162 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164 +
165 + if (err)
166 + break;
167 +
168 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170 + jmp == 0x6BFB0000U)
171 + {
172 + unsigned long r27, addr;
173 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175 +
176 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177 + err = get_user(r27, (unsigned long *)addr);
178 + if (err)
179 + break;
180 +
181 + regs->r27 = r27;
182 + regs->pc = r27;
183 + return 2;
184 + }
185 + } while (0);
186 +
187 + do { /* PaX: patched PLT emulation #2 */
188 + unsigned int ldah, lda, br;
189 +
190 + err = get_user(ldah, (unsigned int *)regs->pc);
191 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
192 + err |= get_user(br, (unsigned int *)(regs->pc+8));
193 +
194 + if (err)
195 + break;
196 +
197 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
199 + (br & 0xFFE00000U) == 0xC3E00000U)
200 + {
201 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204 +
205 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207 + return 2;
208 + }
209 + } while (0);
210 +
211 + do { /* PaX: unpatched PLT emulation */
212 + unsigned int br;
213 +
214 + err = get_user(br, (unsigned int *)regs->pc);
215 +
216 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217 + unsigned int br2, ldq, nop, jmp;
218 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219 +
220 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221 + err = get_user(br2, (unsigned int *)addr);
222 + err |= get_user(ldq, (unsigned int *)(addr+4));
223 + err |= get_user(nop, (unsigned int *)(addr+8));
224 + err |= get_user(jmp, (unsigned int *)(addr+12));
225 + err |= get_user(resolver, (unsigned long *)(addr+16));
226 +
227 + if (err)
228 + break;
229 +
230 + if (br2 == 0xC3600000U &&
231 + ldq == 0xA77B000CU &&
232 + nop == 0x47FF041FU &&
233 + jmp == 0x6B7B0000U)
234 + {
235 + regs->r28 = regs->pc+4;
236 + regs->r27 = addr+16;
237 + regs->pc = resolver;
238 + return 3;
239 + }
240 + }
241 + } while (0);
242 +#endif
243 +
244 + return 1;
245 +}
246 +
247 +void pax_report_insns(void *pc, void *sp)
248 +{
249 + unsigned long i;
250 +
251 + printk(KERN_ERR "PAX: bytes at PC: ");
252 + for (i = 0; i < 5; i++) {
253 + unsigned int c;
254 + if (get_user(c, (unsigned int *)pc+i))
255 + printk(KERN_CONT "???????? ");
256 + else
257 + printk(KERN_CONT "%08x ", c);
258 + }
259 + printk("\n");
260 +}
261 +#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269 - if (!(vma->vm_flags & VM_EXEC))
270 + if (!(vma->vm_flags & VM_EXEC)) {
271 +
272 +#ifdef CONFIG_PAX_PAGEEXEC
273 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274 + goto bad_area;
275 +
276 + up_read(&mm->mmap_sem);
277 + switch (pax_handle_fetch_fault(regs)) {
278 +
279 +#ifdef CONFIG_PAX_EMUPLT
280 + case 2:
281 + case 3:
282 + return;
283 +#endif
284 +
285 + }
286 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287 + do_group_exit(SIGKILL);
288 +#else
289 goto bad_area;
290 +#endif
291 +
292 + }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
296 diff -urNp linux-2.6.32.43/arch/arm/include/asm/elf.h linux-2.6.32.43/arch/arm/include/asm/elf.h
297 --- linux-2.6.32.43/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298 +++ linux-2.6.32.43/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305 +
306 +#ifdef CONFIG_PAX_ASLR
307 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308 +
309 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311 +#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315 diff -urNp linux-2.6.32.43/arch/arm/include/asm/kmap_types.h linux-2.6.32.43/arch/arm/include/asm/kmap_types.h
316 --- linux-2.6.32.43/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317 +++ linux-2.6.32.43/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318 @@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322 + KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
326 diff -urNp linux-2.6.32.43/arch/arm/include/asm/uaccess.h linux-2.6.32.43/arch/arm/include/asm/uaccess.h
327 --- linux-2.6.32.43/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328 +++ linux-2.6.32.43/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
329 @@ -22,6 +22,8 @@
330 #define VERIFY_READ 0
331 #define VERIFY_WRITE 1
332
333 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
334 +
335 /*
336 * The exception table consists of pairs of addresses: the first is the
337 * address of an instruction that is allowed to fault, and the second is
338 @@ -387,8 +389,23 @@ do { \
339
340
341 #ifdef CONFIG_MMU
342 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
343 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
344 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
345 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
346 +
347 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
348 +{
349 + if (!__builtin_constant_p(n))
350 + check_object_size(to, n, false);
351 + return ___copy_from_user(to, from, n);
352 +}
353 +
354 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
355 +{
356 + if (!__builtin_constant_p(n))
357 + check_object_size(from, n, true);
358 + return ___copy_to_user(to, from, n);
359 +}
360 +
361 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
362 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
363 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
364 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
365
366 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
367 {
368 + if ((long)n < 0)
369 + return n;
370 +
371 if (access_ok(VERIFY_READ, from, n))
372 n = __copy_from_user(to, from, n);
373 else /* security hole - plug it */
374 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
375
376 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
377 {
378 + if ((long)n < 0)
379 + return n;
380 +
381 if (access_ok(VERIFY_WRITE, to, n))
382 n = __copy_to_user(to, from, n);
383 return n;
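The copy_from_user()/copy_to_user() changes above layer two checks in front of the actual copy: lengths with the sign bit set are rejected outright, catching sizes that wrapped negative through arithmetic underflow, and non-constant sizes additionally pass through check_object_size() so the kernel-side buffer can be validated before data moves. A hypothetical handler, for illustration only, shows the underflow that the sign check closes off:

	/* hypothetical example, not from the patch: if userspace can drive len
	 * below hdr_len, n wraps to a huge value and an unchecked copy would
	 * run far past buf; the added (long)n < 0 test stops the copy instead */
	static long example_read(void __user *dst, const char *buf,
				 size_t len, size_t hdr_len)
	{
		size_t n = len - hdr_len;

		if (copy_to_user(dst, buf + hdr_len, n))
			return -EFAULT;
		return n;
	}
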
384 diff -urNp linux-2.6.32.43/arch/arm/kernel/armksyms.c linux-2.6.32.43/arch/arm/kernel/armksyms.c
385 --- linux-2.6.32.43/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
386 +++ linux-2.6.32.43/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
387 @@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
388 #ifdef CONFIG_MMU
389 EXPORT_SYMBOL(copy_page);
390
391 -EXPORT_SYMBOL(__copy_from_user);
392 -EXPORT_SYMBOL(__copy_to_user);
393 +EXPORT_SYMBOL(___copy_from_user);
394 +EXPORT_SYMBOL(___copy_to_user);
395 EXPORT_SYMBOL(__clear_user);
396
397 EXPORT_SYMBOL(__get_user_1);
398 diff -urNp linux-2.6.32.43/arch/arm/kernel/kgdb.c linux-2.6.32.43/arch/arm/kernel/kgdb.c
399 --- linux-2.6.32.43/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
400 +++ linux-2.6.32.43/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
401 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
402 * and we handle the normal undef case within the do_undefinstr
403 * handler.
404 */
405 -struct kgdb_arch arch_kgdb_ops = {
406 +const struct kgdb_arch arch_kgdb_ops = {
407 #ifndef __ARMEB__
408 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
409 #else /* ! __ARMEB__ */
410 diff -urNp linux-2.6.32.43/arch/arm/kernel/traps.c linux-2.6.32.43/arch/arm/kernel/traps.c
411 --- linux-2.6.32.43/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
412 +++ linux-2.6.32.43/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
413 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
414
415 DEFINE_SPINLOCK(die_lock);
416
417 +extern void gr_handle_kernel_exploit(void);
418 +
419 /*
420 * This function is protected against re-entrancy.
421 */
422 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
423 if (panic_on_oops)
424 panic("Fatal exception");
425
426 + gr_handle_kernel_exploit();
427 +
428 do_exit(SIGSEGV);
429 }
430
431 diff -urNp linux-2.6.32.43/arch/arm/lib/copy_from_user.S linux-2.6.32.43/arch/arm/lib/copy_from_user.S
432 --- linux-2.6.32.43/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
433 +++ linux-2.6.32.43/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
434 @@ -16,7 +16,7 @@
435 /*
436 * Prototype:
437 *
438 - * size_t __copy_from_user(void *to, const void *from, size_t n)
439 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
440 *
441 * Purpose:
442 *
443 @@ -84,11 +84,11 @@
444
445 .text
446
447 -ENTRY(__copy_from_user)
448 +ENTRY(___copy_from_user)
449
450 #include "copy_template.S"
451
452 -ENDPROC(__copy_from_user)
453 +ENDPROC(___copy_from_user)
454
455 .section .fixup,"ax"
456 .align 0
457 diff -urNp linux-2.6.32.43/arch/arm/lib/copy_to_user.S linux-2.6.32.43/arch/arm/lib/copy_to_user.S
458 --- linux-2.6.32.43/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
459 +++ linux-2.6.32.43/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
460 @@ -16,7 +16,7 @@
461 /*
462 * Prototype:
463 *
464 - * size_t __copy_to_user(void *to, const void *from, size_t n)
465 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
466 *
467 * Purpose:
468 *
469 @@ -88,11 +88,11 @@
470 .text
471
472 ENTRY(__copy_to_user_std)
473 -WEAK(__copy_to_user)
474 +WEAK(___copy_to_user)
475
476 #include "copy_template.S"
477
478 -ENDPROC(__copy_to_user)
479 +ENDPROC(___copy_to_user)
480
481 .section .fixup,"ax"
482 .align 0
483 diff -urNp linux-2.6.32.43/arch/arm/lib/uaccess.S linux-2.6.32.43/arch/arm/lib/uaccess.S
484 --- linux-2.6.32.43/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
485 +++ linux-2.6.32.43/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
486 @@ -19,7 +19,7 @@
487
488 #define PAGE_SHIFT 12
489
490 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
491 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
492 * Purpose : copy a block to user memory from kernel memory
493 * Params : to - user memory
494 * : from - kernel memory
495 @@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
496 sub r2, r2, ip
497 b .Lc2u_dest_aligned
498
499 -ENTRY(__copy_to_user)
500 +ENTRY(___copy_to_user)
501 stmfd sp!, {r2, r4 - r7, lr}
502 cmp r2, #4
503 blt .Lc2u_not_enough
504 @@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
505 ldrgtb r3, [r1], #0
506 USER( strgtbt r3, [r0], #1) @ May fault
507 b .Lc2u_finished
508 -ENDPROC(__copy_to_user)
509 +ENDPROC(___copy_to_user)
510
511 .section .fixup,"ax"
512 .align 0
513 9001: ldmfd sp!, {r0, r4 - r7, pc}
514 .previous
515
516 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
517 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
518 * Purpose : copy a block from user memory to kernel memory
519 * Params : to - kernel memory
520 * : from - user memory
521 @@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
522 sub r2, r2, ip
523 b .Lcfu_dest_aligned
524
525 -ENTRY(__copy_from_user)
526 +ENTRY(___copy_from_user)
527 stmfd sp!, {r0, r2, r4 - r7, lr}
528 cmp r2, #4
529 blt .Lcfu_not_enough
530 @@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
531 USER( ldrgtbt r3, [r1], #1) @ May fault
532 strgtb r3, [r0], #1
533 b .Lcfu_finished
534 -ENDPROC(__copy_from_user)
535 +ENDPROC(___copy_from_user)
536
537 .section .fixup,"ax"
538 .align 0
539 diff -urNp linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c
540 --- linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
541 +++ linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
542 @@ -97,7 +97,7 @@ out:
543 }
544
545 unsigned long
546 -__copy_to_user(void __user *to, const void *from, unsigned long n)
547 +___copy_to_user(void __user *to, const void *from, unsigned long n)
548 {
549 /*
550 * This test is stubbed out of the main function above to keep
551 diff -urNp linux-2.6.32.43/arch/arm/mach-at91/pm.c linux-2.6.32.43/arch/arm/mach-at91/pm.c
552 --- linux-2.6.32.43/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
553 +++ linux-2.6.32.43/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
554 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
555 }
556
557
558 -static struct platform_suspend_ops at91_pm_ops ={
559 +static const struct platform_suspend_ops at91_pm_ops ={
560 .valid = at91_pm_valid_state,
561 .begin = at91_pm_begin,
562 .enter = at91_pm_enter,
563 diff -urNp linux-2.6.32.43/arch/arm/mach-omap1/pm.c linux-2.6.32.43/arch/arm/mach-omap1/pm.c
564 --- linux-2.6.32.43/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
565 +++ linux-2.6.32.43/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
566 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
567
568
569
570 -static struct platform_suspend_ops omap_pm_ops ={
571 +static const struct platform_suspend_ops omap_pm_ops ={
572 .prepare = omap_pm_prepare,
573 .enter = omap_pm_enter,
574 .finish = omap_pm_finish,
575 diff -urNp linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c
576 --- linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
577 +++ linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
578 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
579 enable_hlt();
580 }
581
582 -static struct platform_suspend_ops omap_pm_ops = {
583 +static const struct platform_suspend_ops omap_pm_ops = {
584 .prepare = omap2_pm_prepare,
585 .enter = omap2_pm_enter,
586 .finish = omap2_pm_finish,
587 diff -urNp linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c
588 --- linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
589 +++ linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
590 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
591 return;
592 }
593
594 -static struct platform_suspend_ops omap_pm_ops = {
595 +static const struct platform_suspend_ops omap_pm_ops = {
596 .begin = omap3_pm_begin,
597 .end = omap3_pm_end,
598 .prepare = omap3_pm_prepare,
599 diff -urNp linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c
600 --- linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
601 +++ linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
602 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
603 (state == PM_SUSPEND_MEM);
604 }
605
606 -static struct platform_suspend_ops pnx4008_pm_ops = {
607 +static const struct platform_suspend_ops pnx4008_pm_ops = {
608 .enter = pnx4008_pm_enter,
609 .valid = pnx4008_pm_valid,
610 };
611 diff -urNp linux-2.6.32.43/arch/arm/mach-pxa/pm.c linux-2.6.32.43/arch/arm/mach-pxa/pm.c
612 --- linux-2.6.32.43/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
613 +++ linux-2.6.32.43/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
614 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
615 pxa_cpu_pm_fns->finish();
616 }
617
618 -static struct platform_suspend_ops pxa_pm_ops = {
619 +static const struct platform_suspend_ops pxa_pm_ops = {
620 .valid = pxa_pm_valid,
621 .enter = pxa_pm_enter,
622 .prepare = pxa_pm_prepare,
623 diff -urNp linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c
624 --- linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
625 +++ linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
626 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
627 }
628
629 #ifdef CONFIG_PM
630 -static struct platform_suspend_ops sharpsl_pm_ops = {
631 +static const struct platform_suspend_ops sharpsl_pm_ops = {
632 .prepare = pxa_pm_prepare,
633 .finish = pxa_pm_finish,
634 .enter = corgi_pxa_pm_enter,
635 diff -urNp linux-2.6.32.43/arch/arm/mach-sa1100/pm.c linux-2.6.32.43/arch/arm/mach-sa1100/pm.c
636 --- linux-2.6.32.43/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
637 +++ linux-2.6.32.43/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
638 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
639 return virt_to_phys(sp);
640 }
641
642 -static struct platform_suspend_ops sa11x0_pm_ops = {
643 +static const struct platform_suspend_ops sa11x0_pm_ops = {
644 .enter = sa11x0_pm_enter,
645 .valid = suspend_valid_only_mem,
646 };
647 diff -urNp linux-2.6.32.43/arch/arm/mm/fault.c linux-2.6.32.43/arch/arm/mm/fault.c
648 --- linux-2.6.32.43/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
649 +++ linux-2.6.32.43/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
650 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
651 }
652 #endif
653
654 +#ifdef CONFIG_PAX_PAGEEXEC
655 + if (fsr & FSR_LNX_PF) {
656 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
657 + do_group_exit(SIGKILL);
658 + }
659 +#endif
660 +
661 tsk->thread.address = addr;
662 tsk->thread.error_code = fsr;
663 tsk->thread.trap_no = 14;
664 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
665 }
666 #endif /* CONFIG_MMU */
667
668 +#ifdef CONFIG_PAX_PAGEEXEC
669 +void pax_report_insns(void *pc, void *sp)
670 +{
671 + long i;
672 +
673 + printk(KERN_ERR "PAX: bytes at PC: ");
674 + for (i = 0; i < 20; i++) {
675 + unsigned char c;
676 + if (get_user(c, (__force unsigned char __user *)pc+i))
677 + printk(KERN_CONT "?? ");
678 + else
679 + printk(KERN_CONT "%02x ", c);
680 + }
681 + printk("\n");
682 +
683 + printk(KERN_ERR "PAX: bytes at SP-4: ");
684 + for (i = -1; i < 20; i++) {
685 + unsigned long c;
686 + if (get_user(c, (__force unsigned long __user *)sp+i))
687 + printk(KERN_CONT "???????? ");
688 + else
689 + printk(KERN_CONT "%08lx ", c);
690 + }
691 + printk("\n");
692 +}
693 +#endif
694 +
695 /*
696 * First Level Translation Fault Handler
697 *
698 diff -urNp linux-2.6.32.43/arch/arm/mm/mmap.c linux-2.6.32.43/arch/arm/mm/mmap.c
699 --- linux-2.6.32.43/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
700 +++ linux-2.6.32.43/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
701 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
702 if (len > TASK_SIZE)
703 return -ENOMEM;
704
705 +#ifdef CONFIG_PAX_RANDMMAP
706 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
707 +#endif
708 +
709 if (addr) {
710 if (do_align)
711 addr = COLOUR_ALIGN(addr, pgoff);
712 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
713 addr = PAGE_ALIGN(addr);
714
715 vma = find_vma(mm, addr);
716 - if (TASK_SIZE - len >= addr &&
717 - (!vma || addr + len <= vma->vm_start))
718 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
719 return addr;
720 }
721 if (len > mm->cached_hole_size) {
722 - start_addr = addr = mm->free_area_cache;
723 + start_addr = addr = mm->free_area_cache;
724 } else {
725 - start_addr = addr = TASK_UNMAPPED_BASE;
726 - mm->cached_hole_size = 0;
727 + start_addr = addr = mm->mmap_base;
728 + mm->cached_hole_size = 0;
729 }
730
731 full_search:
732 @@ -94,14 +97,14 @@ full_search:
733 * Start a new search - just in case we missed
734 * some holes.
735 */
736 - if (start_addr != TASK_UNMAPPED_BASE) {
737 - start_addr = addr = TASK_UNMAPPED_BASE;
738 + if (start_addr != mm->mmap_base) {
739 + start_addr = addr = mm->mmap_base;
740 mm->cached_hole_size = 0;
741 goto full_search;
742 }
743 return -ENOMEM;
744 }
745 - if (!vma || addr + len <= vma->vm_start) {
746 + if (check_heap_stack_gap(vma, addr, len)) {
747 /*
748 * Remember the place where we stopped the search:
749 */
750 diff -urNp linux-2.6.32.43/arch/arm/plat-s3c/pm.c linux-2.6.32.43/arch/arm/plat-s3c/pm.c
751 --- linux-2.6.32.43/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
752 +++ linux-2.6.32.43/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
753 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
754 s3c_pm_check_cleanup();
755 }
756
757 -static struct platform_suspend_ops s3c_pm_ops = {
758 +static const struct platform_suspend_ops s3c_pm_ops = {
759 .enter = s3c_pm_enter,
760 .prepare = s3c_pm_prepare,
761 .finish = s3c_pm_finish,
762 diff -urNp linux-2.6.32.43/arch/avr32/include/asm/elf.h linux-2.6.32.43/arch/avr32/include/asm/elf.h
763 --- linux-2.6.32.43/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
764 +++ linux-2.6.32.43/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
765 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
766 the loader. We need to make sure that it is out of the way of the program
767 that it will "exec", and that there is sufficient room for the brk. */
768
769 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
770 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
771
772 +#ifdef CONFIG_PAX_ASLR
773 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
774 +
775 +#define PAX_DELTA_MMAP_LEN 15
776 +#define PAX_DELTA_STACK_LEN 15
777 +#endif
778
779 /* This yields a mask that user programs can use to figure out what
780 instruction set this CPU supports. This could be done in user space,
781 diff -urNp linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h
782 --- linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
783 +++ linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
784 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
785 D(11) KM_IRQ1,
786 D(12) KM_SOFTIRQ0,
787 D(13) KM_SOFTIRQ1,
788 -D(14) KM_TYPE_NR
789 +D(14) KM_CLEARPAGE,
790 +D(15) KM_TYPE_NR
791 };
792
793 #undef D
794 diff -urNp linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c
795 --- linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
796 +++ linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
797 @@ -176,7 +176,7 @@ out:
798 return 0;
799 }
800
801 -static struct platform_suspend_ops avr32_pm_ops = {
802 +static const struct platform_suspend_ops avr32_pm_ops = {
803 .valid = avr32_pm_valid_state,
804 .enter = avr32_pm_enter,
805 };
806 diff -urNp linux-2.6.32.43/arch/avr32/mm/fault.c linux-2.6.32.43/arch/avr32/mm/fault.c
807 --- linux-2.6.32.43/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
808 +++ linux-2.6.32.43/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
809 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
810
811 int exception_trace = 1;
812
813 +#ifdef CONFIG_PAX_PAGEEXEC
814 +void pax_report_insns(void *pc, void *sp)
815 +{
816 + unsigned long i;
817 +
818 + printk(KERN_ERR "PAX: bytes at PC: ");
819 + for (i = 0; i < 20; i++) {
820 + unsigned char c;
821 + if (get_user(c, (unsigned char *)pc+i))
822 + printk(KERN_CONT "???????? ");
823 + else
824 + printk(KERN_CONT "%02x ", c);
825 + }
826 + printk("\n");
827 +}
828 +#endif
829 +
830 /*
831 * This routine handles page faults. It determines the address and the
832 * problem, and then passes it off to one of the appropriate routines.
833 @@ -157,6 +174,16 @@ bad_area:
834 up_read(&mm->mmap_sem);
835
836 if (user_mode(regs)) {
837 +
838 +#ifdef CONFIG_PAX_PAGEEXEC
839 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
840 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
841 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
842 + do_group_exit(SIGKILL);
843 + }
844 + }
845 +#endif
846 +
847 if (exception_trace && printk_ratelimit())
848 printk("%s%s[%d]: segfault at %08lx pc %08lx "
849 "sp %08lx ecr %lu\n",
850 diff -urNp linux-2.6.32.43/arch/blackfin/kernel/kgdb.c linux-2.6.32.43/arch/blackfin/kernel/kgdb.c
851 --- linux-2.6.32.43/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
852 +++ linux-2.6.32.43/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
853 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
854 return -1; /* this means that we do not want to exit from the handler */
855 }
856
857 -struct kgdb_arch arch_kgdb_ops = {
858 +const struct kgdb_arch arch_kgdb_ops = {
859 .gdb_bpt_instr = {0xa1},
860 #ifdef CONFIG_SMP
861 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
862 diff -urNp linux-2.6.32.43/arch/blackfin/mach-common/pm.c linux-2.6.32.43/arch/blackfin/mach-common/pm.c
863 --- linux-2.6.32.43/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
864 +++ linux-2.6.32.43/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
865 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
866 return 0;
867 }
868
869 -struct platform_suspend_ops bfin_pm_ops = {
870 +const struct platform_suspend_ops bfin_pm_ops = {
871 .enter = bfin_pm_enter,
872 .valid = bfin_pm_valid,
873 };
874 diff -urNp linux-2.6.32.43/arch/frv/include/asm/kmap_types.h linux-2.6.32.43/arch/frv/include/asm/kmap_types.h
875 --- linux-2.6.32.43/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
876 +++ linux-2.6.32.43/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
877 @@ -23,6 +23,7 @@ enum km_type {
878 KM_IRQ1,
879 KM_SOFTIRQ0,
880 KM_SOFTIRQ1,
881 + KM_CLEARPAGE,
882 KM_TYPE_NR
883 };
884
885 diff -urNp linux-2.6.32.43/arch/frv/mm/elf-fdpic.c linux-2.6.32.43/arch/frv/mm/elf-fdpic.c
886 --- linux-2.6.32.43/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
887 +++ linux-2.6.32.43/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
888 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
889 if (addr) {
890 addr = PAGE_ALIGN(addr);
891 vma = find_vma(current->mm, addr);
892 - if (TASK_SIZE - len >= addr &&
893 - (!vma || addr + len <= vma->vm_start))
894 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
895 goto success;
896 }
897
898 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
899 for (; vma; vma = vma->vm_next) {
900 if (addr > limit)
901 break;
902 - if (addr + len <= vma->vm_start)
903 + if (check_heap_stack_gap(vma, addr, len))
904 goto success;
905 addr = vma->vm_end;
906 }
907 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
908 for (; vma; vma = vma->vm_next) {
909 if (addr > limit)
910 break;
911 - if (addr + len <= vma->vm_start)
912 + if (check_heap_stack_gap(vma, addr, len))
913 goto success;
914 addr = vma->vm_end;
915 }
916 diff -urNp linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c
917 --- linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
918 +++ linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
919 @@ -17,7 +17,7 @@
920 #include <linux/swiotlb.h>
921 #include <asm/machvec.h>
922
923 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
924 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
925
926 /* swiotlb declarations & definitions: */
927 extern int swiotlb_late_init_with_default_size (size_t size);
928 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
929 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
930 }
931
932 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
933 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
934 {
935 if (use_swiotlb(dev))
936 return &swiotlb_dma_ops;
937 diff -urNp linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c
938 --- linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
939 +++ linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
940 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
941 },
942 };
943
944 -extern struct dma_map_ops swiotlb_dma_ops;
945 +extern const struct dma_map_ops swiotlb_dma_ops;
946
947 static int __init
948 sba_init(void)
949 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
950
951 __setup("sbapagesize=",sba_page_override);
952
953 -struct dma_map_ops sba_dma_ops = {
954 +const struct dma_map_ops sba_dma_ops = {
955 .alloc_coherent = sba_alloc_coherent,
956 .free_coherent = sba_free_coherent,
957 .map_page = sba_map_page,
958 diff -urNp linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c
959 --- linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
960 +++ linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
961 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
962
963 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
964
965 +#ifdef CONFIG_PAX_ASLR
966 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
967 +
968 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
969 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
970 +#endif
971 +
972 /* Ugly but avoids duplication */
973 #include "../../../fs/binfmt_elf.c"
974
975 diff -urNp linux-2.6.32.43/arch/ia64/ia32/ia32priv.h linux-2.6.32.43/arch/ia64/ia32/ia32priv.h
976 --- linux-2.6.32.43/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
977 +++ linux-2.6.32.43/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
978 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
979 #define ELF_DATA ELFDATA2LSB
980 #define ELF_ARCH EM_386
981
982 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
983 +#ifdef CONFIG_PAX_RANDUSTACK
984 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
985 +#else
986 +#define __IA32_DELTA_STACK 0UL
987 +#endif
988 +
989 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
990 +
991 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
992 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
993
994 diff -urNp linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h
995 --- linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
996 +++ linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
997 @@ -12,7 +12,7 @@
998
999 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1000
1001 -extern struct dma_map_ops *dma_ops;
1002 +extern const struct dma_map_ops *dma_ops;
1003 extern struct ia64_machine_vector ia64_mv;
1004 extern void set_iommu_machvec(void);
1005
1006 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
1007 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1008 dma_addr_t *daddr, gfp_t gfp)
1009 {
1010 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1011 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1012 void *caddr;
1013
1014 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1015 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
1016 static inline void dma_free_coherent(struct device *dev, size_t size,
1017 void *caddr, dma_addr_t daddr)
1018 {
1019 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1020 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1021 debug_dma_free_coherent(dev, size, caddr, daddr);
1022 ops->free_coherent(dev, size, caddr, daddr);
1023 }
1024 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
1025
1026 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1027 {
1028 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1029 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1030 return ops->mapping_error(dev, daddr);
1031 }
1032
1033 static inline int dma_supported(struct device *dev, u64 mask)
1034 {
1035 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
1036 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1037 return ops->dma_supported(dev, mask);
1038 }
1039
1040 diff -urNp linux-2.6.32.43/arch/ia64/include/asm/elf.h linux-2.6.32.43/arch/ia64/include/asm/elf.h
1041 --- linux-2.6.32.43/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1042 +++ linux-2.6.32.43/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1043 @@ -43,6 +43,13 @@
1044 */
1045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1046
1047 +#ifdef CONFIG_PAX_ASLR
1048 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1049 +
1050 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1051 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1052 +#endif
1053 +
1054 #define PT_IA_64_UNWIND 0x70000001
1055
1056 /* IA-64 relocations: */
1057 diff -urNp linux-2.6.32.43/arch/ia64/include/asm/machvec.h linux-2.6.32.43/arch/ia64/include/asm/machvec.h
1058 --- linux-2.6.32.43/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1059 +++ linux-2.6.32.43/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1060 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1061 /* DMA-mapping interface: */
1062 typedef void ia64_mv_dma_init (void);
1063 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1064 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1065 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1066
1067 /*
1068 * WARNING: The legacy I/O space is _architected_. Platforms are
1069 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1070 # endif /* CONFIG_IA64_GENERIC */
1071
1072 extern void swiotlb_dma_init(void);
1073 -extern struct dma_map_ops *dma_get_ops(struct device *);
1074 +extern const struct dma_map_ops *dma_get_ops(struct device *);
1075
1076 /*
1077 * Define default versions so we can extend machvec for new platforms without having
1078 diff -urNp linux-2.6.32.43/arch/ia64/include/asm/pgtable.h linux-2.6.32.43/arch/ia64/include/asm/pgtable.h
1079 --- linux-2.6.32.43/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1080 +++ linux-2.6.32.43/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1081 @@ -12,7 +12,7 @@
1082 * David Mosberger-Tang <davidm@hpl.hp.com>
1083 */
1084
1085 -
1086 +#include <linux/const.h>
1087 #include <asm/mman.h>
1088 #include <asm/page.h>
1089 #include <asm/processor.h>
1090 @@ -143,6 +143,17 @@
1091 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1092 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1093 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1094 +
1095 +#ifdef CONFIG_PAX_PAGEEXEC
1096 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1097 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1098 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1099 +#else
1100 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1101 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1102 +# define PAGE_COPY_NOEXEC PAGE_COPY
1103 +#endif
1104 +
1105 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1106 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1107 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1108 diff -urNp linux-2.6.32.43/arch/ia64/include/asm/spinlock.h linux-2.6.32.43/arch/ia64/include/asm/spinlock.h
1109 --- linux-2.6.32.43/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1110 +++ linux-2.6.32.43/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1111 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1112 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1113
1114 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1115 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1116 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1117 }
1118
1119 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1120 diff -urNp linux-2.6.32.43/arch/ia64/include/asm/uaccess.h linux-2.6.32.43/arch/ia64/include/asm/uaccess.h
1121 --- linux-2.6.32.43/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1122 +++ linux-2.6.32.43/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1123 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1124 const void *__cu_from = (from); \
1125 long __cu_len = (n); \
1126 \
1127 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1128 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1129 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1130 __cu_len; \
1131 })
1132 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1133 long __cu_len = (n); \
1134 \
1135 __chk_user_ptr(__cu_from); \
1136 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1137 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1138 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1139 __cu_len; \
1140 })
1141 diff -urNp linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c
1142 --- linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1143 +++ linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1144 @@ -3,7 +3,7 @@
1145 /* Set this to 1 if there is a HW IOMMU in the system */
1146 int iommu_detected __read_mostly;
1147
1148 -struct dma_map_ops *dma_ops;
1149 +const struct dma_map_ops *dma_ops;
1150 EXPORT_SYMBOL(dma_ops);
1151
1152 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1153 @@ -16,7 +16,7 @@ static int __init dma_init(void)
1154 }
1155 fs_initcall(dma_init);
1156
1157 -struct dma_map_ops *dma_get_ops(struct device *dev)
1158 +const struct dma_map_ops *dma_get_ops(struct device *dev)
1159 {
1160 return dma_ops;
1161 }
1162 diff -urNp linux-2.6.32.43/arch/ia64/kernel/module.c linux-2.6.32.43/arch/ia64/kernel/module.c
1163 --- linux-2.6.32.43/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1164 +++ linux-2.6.32.43/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1165 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1166 void
1167 module_free (struct module *mod, void *module_region)
1168 {
1169 - if (mod && mod->arch.init_unw_table &&
1170 - module_region == mod->module_init) {
1171 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1172 unw_remove_unwind_table(mod->arch.init_unw_table);
1173 mod->arch.init_unw_table = NULL;
1174 }
1175 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1176 }
1177
1178 static inline int
1179 +in_init_rx (const struct module *mod, uint64_t addr)
1180 +{
1181 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1182 +}
1183 +
1184 +static inline int
1185 +in_init_rw (const struct module *mod, uint64_t addr)
1186 +{
1187 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1188 +}
1189 +
1190 +static inline int
1191 in_init (const struct module *mod, uint64_t addr)
1192 {
1193 - return addr - (uint64_t) mod->module_init < mod->init_size;
1194 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1195 +}
1196 +
1197 +static inline int
1198 +in_core_rx (const struct module *mod, uint64_t addr)
1199 +{
1200 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1201 +}
1202 +
1203 +static inline int
1204 +in_core_rw (const struct module *mod, uint64_t addr)
1205 +{
1206 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1207 }
1208
1209 static inline int
1210 in_core (const struct module *mod, uint64_t addr)
1211 {
1212 - return addr - (uint64_t) mod->module_core < mod->core_size;
1213 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1214 }
1215
1216 static inline int
1217 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1218 break;
1219
1220 case RV_BDREL:
1221 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1222 + if (in_init_rx(mod, val))
1223 + val -= (uint64_t) mod->module_init_rx;
1224 + else if (in_init_rw(mod, val))
1225 + val -= (uint64_t) mod->module_init_rw;
1226 + else if (in_core_rx(mod, val))
1227 + val -= (uint64_t) mod->module_core_rx;
1228 + else if (in_core_rw(mod, val))
1229 + val -= (uint64_t) mod->module_core_rw;
1230 break;
1231
1232 case RV_LTV:
1233 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1234 * addresses have been selected...
1235 */
1236 uint64_t gp;
1237 - if (mod->core_size > MAX_LTOFF)
1238 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1239 /*
1240 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1241 * at the end of the module.
1242 */
1243 - gp = mod->core_size - MAX_LTOFF / 2;
1244 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1245 else
1246 - gp = mod->core_size / 2;
1247 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1248 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1249 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1250 mod->arch.gp = gp;
1251 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1252 }
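The module.c hunks above assume struct module has been split so that module code and data live in separate regions with separate sizes, letting the loader give them different page protections; that structure change is made elsewhere in the full patch. The fields the relocation code relies on, as inferred from the identifiers used in the hunks:

	/* assumed fields, inferred from the identifiers used above; the actual
	 * struct module change is not part of this excerpt */
	void *module_init_rx, *module_init_rw;	/* init-time code / data */
	void *module_core_rx, *module_core_rw;	/* resident code / data  */
	unsigned long init_size_rx, init_size_rw;
	unsigned long core_size_rx, core_size_rw;
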
1253 diff -urNp linux-2.6.32.43/arch/ia64/kernel/pci-dma.c linux-2.6.32.43/arch/ia64/kernel/pci-dma.c
1254 --- linux-2.6.32.43/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1255 +++ linux-2.6.32.43/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1256 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1257 .dma_mask = &fallback_dev.coherent_dma_mask,
1258 };
1259
1260 -extern struct dma_map_ops intel_dma_ops;
1261 +extern const struct dma_map_ops intel_dma_ops;
1262
1263 static int __init pci_iommu_init(void)
1264 {
1265 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1266 }
1267 EXPORT_SYMBOL(iommu_dma_supported);
1268
1269 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1270 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1271 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1272 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1273 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1274 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1275 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1276 +
1277 +static const struct dma_map_ops intel_iommu_dma_ops = {
1278 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1279 + .alloc_coherent = intel_alloc_coherent,
1280 + .free_coherent = intel_free_coherent,
1281 + .map_sg = intel_map_sg,
1282 + .unmap_sg = intel_unmap_sg,
1283 + .map_page = intel_map_page,
1284 + .unmap_page = intel_unmap_page,
1285 + .mapping_error = intel_mapping_error,
1286 +
1287 + .sync_single_for_cpu = machvec_dma_sync_single,
1288 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1289 + .sync_single_for_device = machvec_dma_sync_single,
1290 + .sync_sg_for_device = machvec_dma_sync_sg,
1291 + .dma_supported = iommu_dma_supported,
1292 +};
1293 +
1294 void __init pci_iommu_alloc(void)
1295 {
1296 - dma_ops = &intel_dma_ops;
1297 -
1298 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1299 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1300 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1301 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1302 - dma_ops->dma_supported = iommu_dma_supported;
1303 + dma_ops = &intel_iommu_dma_ops;
1304
1305 /*
1306 * The order of these functions is important for
1307 diff -urNp linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c
1308 --- linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1309 +++ linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1310 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1311 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1312 }
1313
1314 -struct dma_map_ops swiotlb_dma_ops = {
1315 +const struct dma_map_ops swiotlb_dma_ops = {
1316 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1317 .free_coherent = swiotlb_free_coherent,
1318 .map_page = swiotlb_map_page,
1319 diff -urNp linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c
1320 --- linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1321 +++ linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1322 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1323 if (REGION_NUMBER(addr) == RGN_HPAGE)
1324 addr = 0;
1325 #endif
1326 +
1327 +#ifdef CONFIG_PAX_RANDMMAP
1328 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1329 + addr = mm->free_area_cache;
1330 + else
1331 +#endif
1332 +
1333 if (!addr)
1334 addr = mm->free_area_cache;
1335
1336 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1337 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1338 /* At this point: (!vma || addr < vma->vm_end). */
1339 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1340 - if (start_addr != TASK_UNMAPPED_BASE) {
1341 + if (start_addr != mm->mmap_base) {
1342 /* Start a new search --- just in case we missed some holes. */
1343 - addr = TASK_UNMAPPED_BASE;
1344 + addr = mm->mmap_base;
1345 goto full_search;
1346 }
1347 return -ENOMEM;
1348 }
1349 - if (!vma || addr + len <= vma->vm_start) {
1350 + if (check_heap_stack_gap(vma, addr, len)) {
1351 /* Remember the address where we stopped this search: */
1352 mm->free_area_cache = addr + len;
1353 return addr;
1354 diff -urNp linux-2.6.32.43/arch/ia64/kernel/topology.c linux-2.6.32.43/arch/ia64/kernel/topology.c
1355 --- linux-2.6.32.43/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1356 +++ linux-2.6.32.43/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1357 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1358 return ret;
1359 }
1360
1361 -static struct sysfs_ops cache_sysfs_ops = {
1362 +static const struct sysfs_ops cache_sysfs_ops = {
1363 .show = cache_show
1364 };
1365
1366 diff -urNp linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S
1367 --- linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1368 +++ linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1369 @@ -190,7 +190,7 @@ SECTIONS
1370 /* Per-cpu data: */
1371 . = ALIGN(PERCPU_PAGE_SIZE);
1372 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1373 - __phys_per_cpu_start = __per_cpu_load;
1374 + __phys_per_cpu_start = per_cpu_load;
1375 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1376 * into percpu page size
1377 */
1378 diff -urNp linux-2.6.32.43/arch/ia64/mm/fault.c linux-2.6.32.43/arch/ia64/mm/fault.c
1379 --- linux-2.6.32.43/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1380 +++ linux-2.6.32.43/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1381 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1382 return pte_present(pte);
1383 }
1384
1385 +#ifdef CONFIG_PAX_PAGEEXEC
1386 +void pax_report_insns(void *pc, void *sp)
1387 +{
1388 + unsigned long i;
1389 +
1390 + printk(KERN_ERR "PAX: bytes at PC: ");
1391 + for (i = 0; i < 8; i++) {
1392 + unsigned int c;
1393 + if (get_user(c, (unsigned int *)pc+i))
1394 + printk(KERN_CONT "???????? ");
1395 + else
1396 + printk(KERN_CONT "%08x ", c);
1397 + }
1398 + printk("\n");
1399 +}
1400 +#endif
1401 +
1402 void __kprobes
1403 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1404 {
1405 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1406 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1407 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1408
1409 - if ((vma->vm_flags & mask) != mask)
1410 + if ((vma->vm_flags & mask) != mask) {
1411 +
1412 +#ifdef CONFIG_PAX_PAGEEXEC
1413 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1414 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1415 + goto bad_area;
1416 +
1417 + up_read(&mm->mmap_sem);
1418 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1419 + do_group_exit(SIGKILL);
1420 + }
1421 +#endif
1422 +
1423 goto bad_area;
1424
1425 + }
1426 +
1427 survive:
1428 /*
1429 * If for any reason at all we couldn't handle the fault, make
1430 diff -urNp linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c
1431 --- linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1432 +++ linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1433 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1434 /* At this point: (!vmm || addr < vmm->vm_end). */
1435 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1436 return -ENOMEM;
1437 - if (!vmm || (addr + len) <= vmm->vm_start)
1438 + if (check_heap_stack_gap(vmm, addr, len))
1439 return addr;
1440 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1441 }
1442 diff -urNp linux-2.6.32.43/arch/ia64/mm/init.c linux-2.6.32.43/arch/ia64/mm/init.c
1443 --- linux-2.6.32.43/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1444 +++ linux-2.6.32.43/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1445 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1446 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1447 vma->vm_end = vma->vm_start + PAGE_SIZE;
1448 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1449 +
1450 +#ifdef CONFIG_PAX_PAGEEXEC
1451 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1452 + vma->vm_flags &= ~VM_EXEC;
1453 +
1454 +#ifdef CONFIG_PAX_MPROTECT
1455 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1456 + vma->vm_flags &= ~VM_MAYEXEC;
1457 +#endif
1458 +
1459 + }
1460 +#endif
1461 +
1462 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1463 down_write(&current->mm->mmap_sem);
1464 if (insert_vm_struct(current->mm, vma)) {
1465 diff -urNp linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c
1466 --- linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1467 +++ linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1468 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1469 return ret;
1470 }
1471
1472 -static struct dma_map_ops sn_dma_ops = {
1473 +static const struct dma_map_ops sn_dma_ops = {
1474 .alloc_coherent = sn_dma_alloc_coherent,
1475 .free_coherent = sn_dma_free_coherent,
1476 .map_page = sn_dma_map_page,
1477 diff -urNp linux-2.6.32.43/arch/m32r/lib/usercopy.c linux-2.6.32.43/arch/m32r/lib/usercopy.c
1478 --- linux-2.6.32.43/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1479 +++ linux-2.6.32.43/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1480 @@ -14,6 +14,9 @@
1481 unsigned long
1482 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1483 {
1484 + if ((long)n < 0)
1485 + return n;
1486 +
1487 prefetch(from);
1488 if (access_ok(VERIFY_WRITE, to, n))
1489 __copy_user(to,from,n);
1490 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1491 unsigned long
1492 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1493 {
1494 + if ((long)n < 0)
1495 + return n;
1496 +
1497 prefetchw(to);
1498 if (access_ok(VERIFY_READ, from, n))
1499 __copy_user_zeroing(to,from,n);
1500 diff -urNp linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c
1501 --- linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1502 +++ linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1503 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1504
1505 }
1506
1507 -static struct platform_suspend_ops db1x_pm_ops = {
1508 +static const struct platform_suspend_ops db1x_pm_ops = {
1509 .valid = suspend_valid_only_mem,
1510 .begin = db1x_pm_begin,
1511 .enter = db1x_pm_enter,
1512 diff -urNp linux-2.6.32.43/arch/mips/include/asm/elf.h linux-2.6.32.43/arch/mips/include/asm/elf.h
1513 --- linux-2.6.32.43/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1514 +++ linux-2.6.32.43/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1515 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1516 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1517 #endif
1518
1519 +#ifdef CONFIG_PAX_ASLR
1520 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1521 +
1522 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1524 +#endif
1525 +
1526 #endif /* _ASM_ELF_H */
1527 diff -urNp linux-2.6.32.43/arch/mips/include/asm/page.h linux-2.6.32.43/arch/mips/include/asm/page.h
1528 --- linux-2.6.32.43/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1529 +++ linux-2.6.32.43/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1530 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1531 #ifdef CONFIG_CPU_MIPS32
1532 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1533 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1534 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1535 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1536 #else
1537 typedef struct { unsigned long long pte; } pte_t;
1538 #define pte_val(x) ((x).pte)
1539 diff -urNp linux-2.6.32.43/arch/mips/include/asm/system.h linux-2.6.32.43/arch/mips/include/asm/system.h
1540 --- linux-2.6.32.43/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1541 +++ linux-2.6.32.43/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1542 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1543 */
1544 #define __ARCH_WANT_UNLOCKED_CTXSW
1545
1546 -extern unsigned long arch_align_stack(unsigned long sp);
1547 +#define arch_align_stack(x) ((x) & ~0xfUL)
1548
1549 #endif /* _ASM_SYSTEM_H */
1550 diff -urNp linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c
1551 --- linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1552 +++ linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1553 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1554 #undef ELF_ET_DYN_BASE
1555 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1556
1557 +#ifdef CONFIG_PAX_ASLR
1558 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1559 +
1560 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1561 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1562 +#endif
1563 +
1564 #include <asm/processor.h>
1565 #include <linux/module.h>
1566 #include <linux/elfcore.h>
1567 diff -urNp linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c
1568 --- linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1569 +++ linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1570 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1571 #undef ELF_ET_DYN_BASE
1572 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1573
1574 +#ifdef CONFIG_PAX_ASLR
1575 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1576 +
1577 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1578 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1579 +#endif
1580 +
1581 #include <asm/processor.h>
1582
1583 /*
1584 diff -urNp linux-2.6.32.43/arch/mips/kernel/kgdb.c linux-2.6.32.43/arch/mips/kernel/kgdb.c
1585 --- linux-2.6.32.43/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1586 +++ linux-2.6.32.43/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1587 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1588 return -1;
1589 }
1590
1591 +/* cannot be const */
1592 struct kgdb_arch arch_kgdb_ops;
1593
1594 /*
1595 diff -urNp linux-2.6.32.43/arch/mips/kernel/process.c linux-2.6.32.43/arch/mips/kernel/process.c
1596 --- linux-2.6.32.43/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1597 +++ linux-2.6.32.43/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1598 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1599 out:
1600 return pc;
1601 }
1602 -
1603 -/*
1604 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1605 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1606 - */
1607 -unsigned long arch_align_stack(unsigned long sp)
1608 -{
1609 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1610 - sp -= get_random_int() & ~PAGE_MASK;
1611 -
1612 - return sp & ALMASK;
1613 -}
1614 diff -urNp linux-2.6.32.43/arch/mips/kernel/syscall.c linux-2.6.32.43/arch/mips/kernel/syscall.c
1615 --- linux-2.6.32.43/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1616 +++ linux-2.6.32.43/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1617 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1618 do_color_align = 0;
1619 if (filp || (flags & MAP_SHARED))
1620 do_color_align = 1;
1621 +
1622 +#ifdef CONFIG_PAX_RANDMMAP
1623 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1624 +#endif
1625 +
1626 if (addr) {
1627 if (do_color_align)
1628 addr = COLOUR_ALIGN(addr, pgoff);
1629 else
1630 addr = PAGE_ALIGN(addr);
1631 vmm = find_vma(current->mm, addr);
1632 - if (task_size - len >= addr &&
1633 - (!vmm || addr + len <= vmm->vm_start))
1634 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1635 return addr;
1636 }
1637 - addr = TASK_UNMAPPED_BASE;
1638 + addr = current->mm->mmap_base;
1639 if (do_color_align)
1640 addr = COLOUR_ALIGN(addr, pgoff);
1641 else
1642 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1643 /* At this point: (!vmm || addr < vmm->vm_end). */
1644 if (task_size - len < addr)
1645 return -ENOMEM;
1646 - if (!vmm || addr + len <= vmm->vm_start)
1647 + if (check_heap_stack_gap(vmm, addr, len))
1648 return addr;
1649 addr = vmm->vm_end;
1650 if (do_color_align)
1651 diff -urNp linux-2.6.32.43/arch/mips/mm/fault.c linux-2.6.32.43/arch/mips/mm/fault.c
1652 --- linux-2.6.32.43/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1653 +++ linux-2.6.32.43/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1654 @@ -26,6 +26,23 @@
1655 #include <asm/ptrace.h>
1656 #include <asm/highmem.h> /* For VMALLOC_END */
1657
1658 +#ifdef CONFIG_PAX_PAGEEXEC
1659 +void pax_report_insns(void *pc, void *sp)
1660 +{
1661 + unsigned long i;
1662 +
1663 + printk(KERN_ERR "PAX: bytes at PC: ");
1664 + for (i = 0; i < 5; i++) {
1665 + unsigned int c;
1666 + if (get_user(c, (unsigned int *)pc+i))
1667 + printk(KERN_CONT "???????? ");
1668 + else
1669 + printk(KERN_CONT "%08x ", c);
1670 + }
1671 + printk("\n");
1672 +}
1673 +#endif
1674 +
1675 /*
1676 * This routine handles page faults. It determines the address,
1677 * and the problem, and then passes it off to one of the appropriate
1678 diff -urNp linux-2.6.32.43/arch/parisc/include/asm/elf.h linux-2.6.32.43/arch/parisc/include/asm/elf.h
1679 --- linux-2.6.32.43/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1680 +++ linux-2.6.32.43/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1681 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1682
1683 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1684
1685 +#ifdef CONFIG_PAX_ASLR
1686 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1687 +
1688 +#define PAX_DELTA_MMAP_LEN 16
1689 +#define PAX_DELTA_STACK_LEN 16
1690 +#endif
1691 +
1692 /* This yields a mask that user programs can use to figure out what
1693 instruction set this CPU supports. This could be done in user space,
1694 but it's not easy, and we've already done it here. */
1695 diff -urNp linux-2.6.32.43/arch/parisc/include/asm/pgtable.h linux-2.6.32.43/arch/parisc/include/asm/pgtable.h
1696 --- linux-2.6.32.43/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1697 +++ linux-2.6.32.43/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1698 @@ -207,6 +207,17 @@
1699 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1700 #define PAGE_COPY PAGE_EXECREAD
1701 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1702 +
1703 +#ifdef CONFIG_PAX_PAGEEXEC
1704 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1705 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1706 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1707 +#else
1708 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1709 +# define PAGE_COPY_NOEXEC PAGE_COPY
1710 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1711 +#endif
1712 +
1713 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1714 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1715 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1716 diff -urNp linux-2.6.32.43/arch/parisc/kernel/module.c linux-2.6.32.43/arch/parisc/kernel/module.c
1717 --- linux-2.6.32.43/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1718 +++ linux-2.6.32.43/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1719 @@ -95,16 +95,38 @@
1720
1721 /* three functions to determine where in the module core
1722 * or init pieces the location is */
1723 +static inline int in_init_rx(struct module *me, void *loc)
1724 +{
1725 + return (loc >= me->module_init_rx &&
1726 + loc < (me->module_init_rx + me->init_size_rx));
1727 +}
1728 +
1729 +static inline int in_init_rw(struct module *me, void *loc)
1730 +{
1731 + return (loc >= me->module_init_rw &&
1732 + loc < (me->module_init_rw + me->init_size_rw));
1733 +}
1734 +
1735 static inline int in_init(struct module *me, void *loc)
1736 {
1737 - return (loc >= me->module_init &&
1738 - loc <= (me->module_init + me->init_size));
1739 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1740 +}
1741 +
1742 +static inline int in_core_rx(struct module *me, void *loc)
1743 +{
1744 + return (loc >= me->module_core_rx &&
1745 + loc < (me->module_core_rx + me->core_size_rx));
1746 +}
1747 +
1748 +static inline int in_core_rw(struct module *me, void *loc)
1749 +{
1750 + return (loc >= me->module_core_rw &&
1751 + loc < (me->module_core_rw + me->core_size_rw));
1752 }
1753
1754 static inline int in_core(struct module *me, void *loc)
1755 {
1756 - return (loc >= me->module_core &&
1757 - loc <= (me->module_core + me->core_size));
1758 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1759 }
1760
1761 static inline int in_local(struct module *me, void *loc)
1762 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1763 }
1764
1765 /* align things a bit */
1766 - me->core_size = ALIGN(me->core_size, 16);
1767 - me->arch.got_offset = me->core_size;
1768 - me->core_size += gots * sizeof(struct got_entry);
1769 -
1770 - me->core_size = ALIGN(me->core_size, 16);
1771 - me->arch.fdesc_offset = me->core_size;
1772 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1773 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1774 + me->arch.got_offset = me->core_size_rw;
1775 + me->core_size_rw += gots * sizeof(struct got_entry);
1776 +
1777 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1778 + me->arch.fdesc_offset = me->core_size_rw;
1779 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1780
1781 me->arch.got_max = gots;
1782 me->arch.fdesc_max = fdescs;
1783 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1784
1785 BUG_ON(value == 0);
1786
1787 - got = me->module_core + me->arch.got_offset;
1788 + got = me->module_core_rw + me->arch.got_offset;
1789 for (i = 0; got[i].addr; i++)
1790 if (got[i].addr == value)
1791 goto out;
1792 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1793 #ifdef CONFIG_64BIT
1794 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1795 {
1796 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1797 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1798
1799 if (!value) {
1800 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1801 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1802
1803 /* Create new one */
1804 fdesc->addr = value;
1805 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1806 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1807 return (Elf_Addr)fdesc;
1808 }
1809 #endif /* CONFIG_64BIT */
1810 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1811
1812 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1813 end = table + sechdrs[me->arch.unwind_section].sh_size;
1814 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1815 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1816
1817 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1818 me->arch.unwind_section, table, end, gp);
1819 diff -urNp linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c
1820 --- linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1821 +++ linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1822 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1823 /* At this point: (!vma || addr < vma->vm_end). */
1824 if (TASK_SIZE - len < addr)
1825 return -ENOMEM;
1826 - if (!vma || addr + len <= vma->vm_start)
1827 + if (check_heap_stack_gap(vma, addr, len))
1828 return addr;
1829 addr = vma->vm_end;
1830 }
1831 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1832 /* At this point: (!vma || addr < vma->vm_end). */
1833 if (TASK_SIZE - len < addr)
1834 return -ENOMEM;
1835 - if (!vma || addr + len <= vma->vm_start)
1836 + if (check_heap_stack_gap(vma, addr, len))
1837 return addr;
1838 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1839 if (addr < vma->vm_end) /* handle wraparound */
1840 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1841 if (flags & MAP_FIXED)
1842 return addr;
1843 if (!addr)
1844 - addr = TASK_UNMAPPED_BASE;
1845 + addr = current->mm->mmap_base;
1846
1847 if (filp) {
1848 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1849 diff -urNp linux-2.6.32.43/arch/parisc/kernel/traps.c linux-2.6.32.43/arch/parisc/kernel/traps.c
1850 --- linux-2.6.32.43/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1851 +++ linux-2.6.32.43/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1852 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1853
1854 down_read(&current->mm->mmap_sem);
1855 vma = find_vma(current->mm,regs->iaoq[0]);
1856 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1857 - && (vma->vm_flags & VM_EXEC)) {
1858 -
1859 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1860 fault_address = regs->iaoq[0];
1861 fault_space = regs->iasq[0];
1862
1863 diff -urNp linux-2.6.32.43/arch/parisc/mm/fault.c linux-2.6.32.43/arch/parisc/mm/fault.c
1864 --- linux-2.6.32.43/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1865 +++ linux-2.6.32.43/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1866 @@ -15,6 +15,7 @@
1867 #include <linux/sched.h>
1868 #include <linux/interrupt.h>
1869 #include <linux/module.h>
1870 +#include <linux/unistd.h>
1871
1872 #include <asm/uaccess.h>
1873 #include <asm/traps.h>
1874 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1875 static unsigned long
1876 parisc_acctyp(unsigned long code, unsigned int inst)
1877 {
1878 - if (code == 6 || code == 16)
1879 + if (code == 6 || code == 7 || code == 16)
1880 return VM_EXEC;
1881
1882 switch (inst & 0xf0000000) {
1883 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1884 }
1885 #endif
1886
1887 +#ifdef CONFIG_PAX_PAGEEXEC
1888 +/*
1889 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1890 + *
1891 + * returns 1 when task should be killed
1892 + * 2 when rt_sigreturn trampoline was detected
1893 + * 3 when unpatched PLT trampoline was detected
1894 + */
1895 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1896 +{
1897 +
1898 +#ifdef CONFIG_PAX_EMUPLT
1899 + int err;
1900 +
1901 + do { /* PaX: unpatched PLT emulation */
1902 + unsigned int bl, depwi;
1903 +
1904 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1905 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1906 +
1907 + if (err)
1908 + break;
1909 +
1910 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1911 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1912 +
1913 + err = get_user(ldw, (unsigned int *)addr);
1914 + err |= get_user(bv, (unsigned int *)(addr+4));
1915 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1916 +
1917 + if (err)
1918 + break;
1919 +
1920 + if (ldw == 0x0E801096U &&
1921 + bv == 0xEAC0C000U &&
1922 + ldw2 == 0x0E881095U)
1923 + {
1924 + unsigned int resolver, map;
1925 +
1926 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1927 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1928 + if (err)
1929 + break;
1930 +
1931 + regs->gr[20] = instruction_pointer(regs)+8;
1932 + regs->gr[21] = map;
1933 + regs->gr[22] = resolver;
1934 + regs->iaoq[0] = resolver | 3UL;
1935 + regs->iaoq[1] = regs->iaoq[0] + 4;
1936 + return 3;
1937 + }
1938 + }
1939 + } while (0);
1940 +#endif
1941 +
1942 +#ifdef CONFIG_PAX_EMUTRAMP
1943 +
1944 +#ifndef CONFIG_PAX_EMUSIGRT
1945 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1946 + return 1;
1947 +#endif
1948 +
1949 + do { /* PaX: rt_sigreturn emulation */
1950 + unsigned int ldi1, ldi2, bel, nop;
1951 +
1952 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1953 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1954 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1955 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1956 +
1957 + if (err)
1958 + break;
1959 +
1960 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1961 + ldi2 == 0x3414015AU &&
1962 + bel == 0xE4008200U &&
1963 + nop == 0x08000240U)
1964 + {
1965 + regs->gr[25] = (ldi1 & 2) >> 1;
1966 + regs->gr[20] = __NR_rt_sigreturn;
1967 + regs->gr[31] = regs->iaoq[1] + 16;
1968 + regs->sr[0] = regs->iasq[1];
1969 + regs->iaoq[0] = 0x100UL;
1970 + regs->iaoq[1] = regs->iaoq[0] + 4;
1971 + regs->iasq[0] = regs->sr[2];
1972 + regs->iasq[1] = regs->sr[2];
1973 + return 2;
1974 + }
1975 + } while (0);
1976 +#endif
1977 +
1978 + return 1;
1979 +}
1980 +
1981 +void pax_report_insns(void *pc, void *sp)
1982 +{
1983 + unsigned long i;
1984 +
1985 + printk(KERN_ERR "PAX: bytes at PC: ");
1986 + for (i = 0; i < 5; i++) {
1987 + unsigned int c;
1988 + if (get_user(c, (unsigned int *)pc+i))
1989 + printk(KERN_CONT "???????? ");
1990 + else
1991 + printk(KERN_CONT "%08x ", c);
1992 + }
1993 + printk("\n");
1994 +}
1995 +#endif
1996 +
1997 int fixup_exception(struct pt_regs *regs)
1998 {
1999 const struct exception_table_entry *fix;
2000 @@ -192,8 +303,33 @@ good_area:
2001
2002 acc_type = parisc_acctyp(code,regs->iir);
2003
2004 - if ((vma->vm_flags & acc_type) != acc_type)
2005 + if ((vma->vm_flags & acc_type) != acc_type) {
2006 +
2007 +#ifdef CONFIG_PAX_PAGEEXEC
2008 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2009 + (address & ~3UL) == instruction_pointer(regs))
2010 + {
2011 + up_read(&mm->mmap_sem);
2012 + switch (pax_handle_fetch_fault(regs)) {
2013 +
2014 +#ifdef CONFIG_PAX_EMUPLT
2015 + case 3:
2016 + return;
2017 +#endif
2018 +
2019 +#ifdef CONFIG_PAX_EMUTRAMP
2020 + case 2:
2021 + return;
2022 +#endif
2023 +
2024 + }
2025 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2026 + do_group_exit(SIGKILL);
2027 + }
2028 +#endif
2029 +
2030 goto bad_area;
2031 + }
2032
2033 /*
2034 * If for any reason at all we couldn't handle the fault, make
2035 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/device.h linux-2.6.32.43/arch/powerpc/include/asm/device.h
2036 --- linux-2.6.32.43/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2037 +++ linux-2.6.32.43/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2038 @@ -14,7 +14,7 @@ struct dev_archdata {
2039 struct device_node *of_node;
2040
2041 /* DMA operations on that device */
2042 - struct dma_map_ops *dma_ops;
2043 + const struct dma_map_ops *dma_ops;
2044
2045 /*
2046 * When an iommu is in use, dma_data is used as a ptr to the base of the
2047 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h
2048 --- linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2049 +++ linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2050 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2051 #ifdef CONFIG_PPC64
2052 extern struct dma_map_ops dma_iommu_ops;
2053 #endif
2054 -extern struct dma_map_ops dma_direct_ops;
2055 +extern const struct dma_map_ops dma_direct_ops;
2056
2057 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2058 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2059 {
2060 /* We don't handle the NULL dev case for ISA for now. We could
2061 * do it via an out of line call but it is not needed for now. The
2062 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2063 return dev->archdata.dma_ops;
2064 }
2065
2066 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2067 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2068 {
2069 dev->archdata.dma_ops = ops;
2070 }
2071 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2072
2073 static inline int dma_supported(struct device *dev, u64 mask)
2074 {
2075 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2076 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2077
2078 if (unlikely(dma_ops == NULL))
2079 return 0;
2080 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2081
2082 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2083 {
2084 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2085 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2086
2087 if (unlikely(dma_ops == NULL))
2088 return -EIO;
2089 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2090 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2091 dma_addr_t *dma_handle, gfp_t flag)
2092 {
2093 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2094 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2095 void *cpu_addr;
2096
2097 BUG_ON(!dma_ops);
2098 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2099 static inline void dma_free_coherent(struct device *dev, size_t size,
2100 void *cpu_addr, dma_addr_t dma_handle)
2101 {
2102 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2103 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2104
2105 BUG_ON(!dma_ops);
2106
2107 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2108
2109 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2110 {
2111 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2112 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2113
2114 if (dma_ops->mapping_error)
2115 return dma_ops->mapping_error(dev, dma_addr);
2116 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/elf.h linux-2.6.32.43/arch/powerpc/include/asm/elf.h
2117 --- linux-2.6.32.43/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2118 +++ linux-2.6.32.43/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2119 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2120 the loader. We need to make sure that it is out of the way of the program
2121 that it will "exec", and that there is sufficient room for the brk. */
2122
2123 -extern unsigned long randomize_et_dyn(unsigned long base);
2124 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2125 +#define ELF_ET_DYN_BASE (0x20000000)
2126 +
2127 +#ifdef CONFIG_PAX_ASLR
2128 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2129 +
2130 +#ifdef __powerpc64__
2131 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2132 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2133 +#else
2134 +#define PAX_DELTA_MMAP_LEN 15
2135 +#define PAX_DELTA_STACK_LEN 15
2136 +#endif
2137 +#endif
2138
2139 /*
2140 * Our registers are always unsigned longs, whether we're a 32 bit
2141 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2142 (0x7ff >> (PAGE_SHIFT - 12)) : \
2143 (0x3ffff >> (PAGE_SHIFT - 12)))
2144
2145 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2146 -#define arch_randomize_brk arch_randomize_brk
2147 -
2148 #endif /* __KERNEL__ */
2149
2150 /*
2151 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/iommu.h linux-2.6.32.43/arch/powerpc/include/asm/iommu.h
2152 --- linux-2.6.32.43/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2153 +++ linux-2.6.32.43/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2154 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2155 extern void iommu_init_early_dart(void);
2156 extern void iommu_init_early_pasemi(void);
2157
2158 +/* dma-iommu.c */
2159 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2160 +
2161 #ifdef CONFIG_PCI
2162 extern void pci_iommu_init(void);
2163 extern void pci_direct_iommu_init(void);
2164 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h
2165 --- linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2166 +++ linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2167 @@ -26,6 +26,7 @@ enum km_type {
2168 KM_SOFTIRQ1,
2169 KM_PPC_SYNC_PAGE,
2170 KM_PPC_SYNC_ICACHE,
2171 + KM_CLEARPAGE,
2172 KM_TYPE_NR
2173 };
2174
2175 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/page_64.h linux-2.6.32.43/arch/powerpc/include/asm/page_64.h
2176 --- linux-2.6.32.43/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2177 +++ linux-2.6.32.43/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2178 @@ -180,15 +180,18 @@ do { \
2179 * stack by default, so in the absense of a PT_GNU_STACK program header
2180 * we turn execute permission off.
2181 */
2182 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2183 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2184 +#define VM_STACK_DEFAULT_FLAGS32 \
2185 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2186 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2187
2188 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2190
2191 +#ifndef CONFIG_PAX_PAGEEXEC
2192 #define VM_STACK_DEFAULT_FLAGS \
2193 (test_thread_flag(TIF_32BIT) ? \
2194 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2195 +#endif
2196
2197 #include <asm-generic/getorder.h>
2198
2199 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/page.h linux-2.6.32.43/arch/powerpc/include/asm/page.h
2200 --- linux-2.6.32.43/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2201 +++ linux-2.6.32.43/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2202 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2203 * and needs to be executable. This means the whole heap ends
2204 * up being executable.
2205 */
2206 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2207 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2208 +#define VM_DATA_DEFAULT_FLAGS32 \
2209 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2210 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2211
2212 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2213 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2214 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2215 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2216 #endif
2217
2218 +#define ktla_ktva(addr) (addr)
2219 +#define ktva_ktla(addr) (addr)
2220 +
2221 #ifndef __ASSEMBLY__
2222
2223 #undef STRICT_MM_TYPECHECKS
2224 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pci.h linux-2.6.32.43/arch/powerpc/include/asm/pci.h
2225 --- linux-2.6.32.43/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2226 +++ linux-2.6.32.43/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2227 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2228 }
2229
2230 #ifdef CONFIG_PCI
2231 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2232 -extern struct dma_map_ops *get_pci_dma_ops(void);
2233 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2234 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2235 #else /* CONFIG_PCI */
2236 #define set_pci_dma_ops(d)
2237 #define get_pci_dma_ops() NULL
2238 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h
2239 --- linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2240 +++ linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2241 @@ -2,6 +2,7 @@
2242 #define _ASM_POWERPC_PGTABLE_H
2243 #ifdef __KERNEL__
2244
2245 +#include <linux/const.h>
2246 #ifndef __ASSEMBLY__
2247 #include <asm/processor.h> /* For TASK_SIZE */
2248 #include <asm/mmu.h>
2249 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h
2250 --- linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2251 +++ linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2252 @@ -21,6 +21,7 @@
2253 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2254 #define _PAGE_USER 0x004 /* usermode access allowed */
2255 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2256 +#define _PAGE_EXEC _PAGE_GUARDED
2257 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2258 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2259 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2260 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/reg.h linux-2.6.32.43/arch/powerpc/include/asm/reg.h
2261 --- linux-2.6.32.43/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2262 +++ linux-2.6.32.43/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2263 @@ -191,6 +191,7 @@
2264 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2265 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2266 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2267 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2268 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2269 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2270 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2271 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h
2272 --- linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2273 +++ linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2274 @@ -13,7 +13,7 @@
2275
2276 #include <linux/swiotlb.h>
2277
2278 -extern struct dma_map_ops swiotlb_dma_ops;
2279 +extern const struct dma_map_ops swiotlb_dma_ops;
2280
2281 static inline void dma_mark_clean(void *addr, size_t size) {}
2282
2283 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/system.h linux-2.6.32.43/arch/powerpc/include/asm/system.h
2284 --- linux-2.6.32.43/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2285 +++ linux-2.6.32.43/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2286 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2287 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2288 #endif
2289
2290 -extern unsigned long arch_align_stack(unsigned long sp);
2291 +#define arch_align_stack(x) ((x) & ~0xfUL)
2292
2293 /* Used in very early kernel initialization. */
2294 extern unsigned long reloc_offset(void);
2295 diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h
2296 --- linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2297 +++ linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2298 @@ -13,6 +13,8 @@
2299 #define VERIFY_READ 0
2300 #define VERIFY_WRITE 1
2301
2302 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2303 +
2304 /*
2305 * The fs value determines whether argument validity checking should be
2306 * performed or not. If get_fs() == USER_DS, checking is performed, with
2307 @@ -327,52 +329,6 @@ do { \
2308 extern unsigned long __copy_tofrom_user(void __user *to,
2309 const void __user *from, unsigned long size);
2310
2311 -#ifndef __powerpc64__
2312 -
2313 -static inline unsigned long copy_from_user(void *to,
2314 - const void __user *from, unsigned long n)
2315 -{
2316 - unsigned long over;
2317 -
2318 - if (access_ok(VERIFY_READ, from, n))
2319 - return __copy_tofrom_user((__force void __user *)to, from, n);
2320 - if ((unsigned long)from < TASK_SIZE) {
2321 - over = (unsigned long)from + n - TASK_SIZE;
2322 - return __copy_tofrom_user((__force void __user *)to, from,
2323 - n - over) + over;
2324 - }
2325 - return n;
2326 -}
2327 -
2328 -static inline unsigned long copy_to_user(void __user *to,
2329 - const void *from, unsigned long n)
2330 -{
2331 - unsigned long over;
2332 -
2333 - if (access_ok(VERIFY_WRITE, to, n))
2334 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2335 - if ((unsigned long)to < TASK_SIZE) {
2336 - over = (unsigned long)to + n - TASK_SIZE;
2337 - return __copy_tofrom_user(to, (__force void __user *)from,
2338 - n - over) + over;
2339 - }
2340 - return n;
2341 -}
2342 -
2343 -#else /* __powerpc64__ */
2344 -
2345 -#define __copy_in_user(to, from, size) \
2346 - __copy_tofrom_user((to), (from), (size))
2347 -
2348 -extern unsigned long copy_from_user(void *to, const void __user *from,
2349 - unsigned long n);
2350 -extern unsigned long copy_to_user(void __user *to, const void *from,
2351 - unsigned long n);
2352 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2353 - unsigned long n);
2354 -
2355 -#endif /* __powerpc64__ */
2356 -
2357 static inline unsigned long __copy_from_user_inatomic(void *to,
2358 const void __user *from, unsigned long n)
2359 {
2360 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2361 if (ret == 0)
2362 return 0;
2363 }
2364 +
2365 + if (!__builtin_constant_p(n))
2366 + check_object_size(to, n, false);
2367 +
2368 return __copy_tofrom_user((__force void __user *)to, from, n);
2369 }
2370
2371 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2372 if (ret == 0)
2373 return 0;
2374 }
2375 +
2376 + if (!__builtin_constant_p(n))
2377 + check_object_size(from, n, true);
2378 +
2379 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2380 }
2381
2382 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2383 return __copy_to_user_inatomic(to, from, size);
2384 }
2385
2386 +#ifndef __powerpc64__
2387 +
2388 +static inline unsigned long __must_check copy_from_user(void *to,
2389 + const void __user *from, unsigned long n)
2390 +{
2391 + unsigned long over;
2392 +
2393 + if ((long)n < 0)
2394 + return n;
2395 +
2396 + if (access_ok(VERIFY_READ, from, n)) {
2397 + if (!__builtin_constant_p(n))
2398 + check_object_size(to, n, false);
2399 + return __copy_tofrom_user((__force void __user *)to, from, n);
2400 + }
2401 + if ((unsigned long)from < TASK_SIZE) {
2402 + over = (unsigned long)from + n - TASK_SIZE;
2403 + if (!__builtin_constant_p(n - over))
2404 + check_object_size(to, n - over, false);
2405 + return __copy_tofrom_user((__force void __user *)to, from,
2406 + n - over) + over;
2407 + }
2408 + return n;
2409 +}
2410 +
2411 +static inline unsigned long __must_check copy_to_user(void __user *to,
2412 + const void *from, unsigned long n)
2413 +{
2414 + unsigned long over;
2415 +
2416 + if ((long)n < 0)
2417 + return n;
2418 +
2419 + if (access_ok(VERIFY_WRITE, to, n)) {
2420 + if (!__builtin_constant_p(n))
2421 + check_object_size(from, n, true);
2422 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2423 + }
2424 + if ((unsigned long)to < TASK_SIZE) {
2425 + over = (unsigned long)to + n - TASK_SIZE;
2426 + if (!__builtin_constant_p(n))
2427 + check_object_size(from, n - over, true);
2428 + return __copy_tofrom_user(to, (__force void __user *)from,
2429 + n - over) + over;
2430 + }
2431 + return n;
2432 +}
2433 +
2434 +#else /* __powerpc64__ */
2435 +
2436 +#define __copy_in_user(to, from, size) \
2437 + __copy_tofrom_user((to), (from), (size))
2438 +
2439 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2440 +{
2441 + if ((long)n < 0 || n > INT_MAX)
2442 + return n;
2443 +
2444 + if (!__builtin_constant_p(n))
2445 + check_object_size(to, n, false);
2446 +
2447 + if (likely(access_ok(VERIFY_READ, from, n)))
2448 + n = __copy_from_user(to, from, n);
2449 + else
2450 + memset(to, 0, n);
2451 + return n;
2452 +}
2453 +
2454 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2455 +{
2456 + if ((long)n < 0 || n > INT_MAX)
2457 + return n;
2458 +
2459 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2460 + if (!__builtin_constant_p(n))
2461 + check_object_size(from, n, true);
2462 + n = __copy_to_user(to, from, n);
2463 + }
2464 + return n;
2465 +}
2466 +
2467 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2468 + unsigned long n);
2469 +
2470 +#endif /* __powerpc64__ */
2471 +
2472 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2473
2474 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2475 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c
2476 --- linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2477 +++ linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2478 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2479 &cache_assoc_attr,
2480 };
2481
2482 -static struct sysfs_ops cache_index_ops = {
2483 +static const struct sysfs_ops cache_index_ops = {
2484 .show = cache_index_show,
2485 };
2486
2487 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma.c linux-2.6.32.43/arch/powerpc/kernel/dma.c
2488 --- linux-2.6.32.43/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2489 +++ linux-2.6.32.43/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2490 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2491 }
2492 #endif
2493
2494 -struct dma_map_ops dma_direct_ops = {
2495 +const struct dma_map_ops dma_direct_ops = {
2496 .alloc_coherent = dma_direct_alloc_coherent,
2497 .free_coherent = dma_direct_free_coherent,
2498 .map_sg = dma_direct_map_sg,
2499 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c
2500 --- linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2501 +++ linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2502 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2503 }
2504
2505 /* We support DMA to/from any memory page via the iommu */
2506 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2507 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2508 {
2509 struct iommu_table *tbl = get_iommu_table_base(dev);
2510
2511 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c
2512 --- linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2513 +++ linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2514 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2515 * map_page, and unmap_page on highmem, use normal dma_ops
2516 * for everything else.
2517 */
2518 -struct dma_map_ops swiotlb_dma_ops = {
2519 +const struct dma_map_ops swiotlb_dma_ops = {
2520 .alloc_coherent = dma_direct_alloc_coherent,
2521 .free_coherent = dma_direct_free_coherent,
2522 .map_sg = swiotlb_map_sg_attrs,
2523 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S
2524 --- linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2525 +++ linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2526 @@ -455,6 +455,7 @@ storage_fault_common:
2527 std r14,_DAR(r1)
2528 std r15,_DSISR(r1)
2529 addi r3,r1,STACK_FRAME_OVERHEAD
2530 + bl .save_nvgprs
2531 mr r4,r14
2532 mr r5,r15
2533 ld r14,PACA_EXGEN+EX_R14(r13)
2534 @@ -464,8 +465,7 @@ storage_fault_common:
2535 cmpdi r3,0
2536 bne- 1f
2537 b .ret_from_except_lite
2538 -1: bl .save_nvgprs
2539 - mr r5,r3
2540 +1: mr r5,r3
2541 addi r3,r1,STACK_FRAME_OVERHEAD
2542 ld r4,_DAR(r1)
2543 bl .bad_page_fault
2544 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S
2545 --- linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2546 +++ linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2547 @@ -818,10 +818,10 @@ handle_page_fault:
2548 11: ld r4,_DAR(r1)
2549 ld r5,_DSISR(r1)
2550 addi r3,r1,STACK_FRAME_OVERHEAD
2551 + bl .save_nvgprs
2552 bl .do_page_fault
2553 cmpdi r3,0
2554 beq+ 13f
2555 - bl .save_nvgprs
2556 mr r5,r3
2557 addi r3,r1,STACK_FRAME_OVERHEAD
2558 lwz r4,_DAR(r1)
2559 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c
2560 --- linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2561 +++ linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2562 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2563 return 1;
2564 }
2565
2566 -static struct dma_map_ops ibmebus_dma_ops = {
2567 +static const struct dma_map_ops ibmebus_dma_ops = {
2568 .alloc_coherent = ibmebus_alloc_coherent,
2569 .free_coherent = ibmebus_free_coherent,
2570 .map_sg = ibmebus_map_sg,
2571 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/kgdb.c linux-2.6.32.43/arch/powerpc/kernel/kgdb.c
2572 --- linux-2.6.32.43/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2573 +++ linux-2.6.32.43/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2574 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2575 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2576 return 0;
2577
2578 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2579 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2580 regs->nip += 4;
2581
2582 return 1;
2583 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2584 /*
2585 * Global data
2586 */
2587 -struct kgdb_arch arch_kgdb_ops = {
2588 +const struct kgdb_arch arch_kgdb_ops = {
2589 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2590 };
2591
2592 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/module_32.c linux-2.6.32.43/arch/powerpc/kernel/module_32.c
2593 --- linux-2.6.32.43/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2594 +++ linux-2.6.32.43/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2595 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2596 me->arch.core_plt_section = i;
2597 }
2598 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2599 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2600 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2601 return -ENOEXEC;
2602 }
2603
2604 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2605
2606 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2607 /* Init, or core PLT? */
2608 - if (location >= mod->module_core
2609 - && location < mod->module_core + mod->core_size)
2610 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2611 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2612 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2613 - else
2614 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2615 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2616 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2617 + else {
2618 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2619 + return ~0UL;
2620 + }
2621
2622 /* Find this entry, or if that fails, the next avail. entry */
2623 while (entry->jump[0]) {
2624 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/module.c linux-2.6.32.43/arch/powerpc/kernel/module.c
2625 --- linux-2.6.32.43/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2626 +++ linux-2.6.32.43/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2627 @@ -31,11 +31,24 @@
2628
2629 LIST_HEAD(module_bug_list);
2630
2631 +#ifdef CONFIG_PAX_KERNEXEC
2632 void *module_alloc(unsigned long size)
2633 {
2634 if (size == 0)
2635 return NULL;
2636
2637 + return vmalloc(size);
2638 +}
2639 +
2640 +void *module_alloc_exec(unsigned long size)
2641 +#else
2642 +void *module_alloc(unsigned long size)
2643 +#endif
2644 +
2645 +{
2646 + if (size == 0)
2647 + return NULL;
2648 +
2649 return vmalloc_exec(size);
2650 }
2651
2652 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2653 vfree(module_region);
2654 }
2655
2656 +#ifdef CONFIG_PAX_KERNEXEC
2657 +void module_free_exec(struct module *mod, void *module_region)
2658 +{
2659 + module_free(mod, module_region);
2660 +}
2661 +#endif
2662 +
2663 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2664 const Elf_Shdr *sechdrs,
2665 const char *name)
2666 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/pci-common.c linux-2.6.32.43/arch/powerpc/kernel/pci-common.c
2667 --- linux-2.6.32.43/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2668 +++ linux-2.6.32.43/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2669 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2670 unsigned int ppc_pci_flags = 0;
2671
2672
2673 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2674 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2675
2676 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2677 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2678 {
2679 pci_dma_ops = dma_ops;
2680 }
2681
2682 -struct dma_map_ops *get_pci_dma_ops(void)
2683 +const struct dma_map_ops *get_pci_dma_ops(void)
2684 {
2685 return pci_dma_ops;
2686 }
2687 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/process.c linux-2.6.32.43/arch/powerpc/kernel/process.c
2688 --- linux-2.6.32.43/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2689 +++ linux-2.6.32.43/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2690 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2691 * Lookup NIP late so we have the best change of getting the
2692 * above info out without failing
2693 */
2694 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2695 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2696 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2697 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2698 #endif
2699 show_stack(current, (unsigned long *) regs->gpr[1]);
2700 if (!user_mode(regs))
2701 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2702 newsp = stack[0];
2703 ip = stack[STACK_FRAME_LR_SAVE];
2704 if (!firstframe || ip != lr) {
2705 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2706 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2707 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2708 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2709 - printk(" (%pS)",
2710 + printk(" (%pA)",
2711 (void *)current->ret_stack[curr_frame].ret);
2712 curr_frame--;
2713 }
2714 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2715 struct pt_regs *regs = (struct pt_regs *)
2716 (sp + STACK_FRAME_OVERHEAD);
2717 lr = regs->link;
2718 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2719 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2720 regs->trap, (void *)regs->nip, (void *)lr);
2721 firstframe = 1;
2722 }
2723 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2724 }
2725
2726 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2727 -
2728 -unsigned long arch_align_stack(unsigned long sp)
2729 -{
2730 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2731 - sp -= get_random_int() & ~PAGE_MASK;
2732 - return sp & ~0xf;
2733 -}
2734 -
2735 -static inline unsigned long brk_rnd(void)
2736 -{
2737 - unsigned long rnd = 0;
2738 -
2739 - /* 8MB for 32bit, 1GB for 64bit */
2740 - if (is_32bit_task())
2741 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2742 - else
2743 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2744 -
2745 - return rnd << PAGE_SHIFT;
2746 -}
2747 -
2748 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2749 -{
2750 - unsigned long base = mm->brk;
2751 - unsigned long ret;
2752 -
2753 -#ifdef CONFIG_PPC_STD_MMU_64
2754 - /*
2755 - * If we are using 1TB segments and we are allowed to randomise
2756 - * the heap, we can put it above 1TB so it is backed by a 1TB
2757 - * segment. Otherwise the heap will be in the bottom 1TB
2758 - * which always uses 256MB segments and this may result in a
2759 - * performance penalty.
2760 - */
2761 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2762 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2763 -#endif
2764 -
2765 - ret = PAGE_ALIGN(base + brk_rnd());
2766 -
2767 - if (ret < mm->brk)
2768 - return mm->brk;
2769 -
2770 - return ret;
2771 -}
2772 -
2773 -unsigned long randomize_et_dyn(unsigned long base)
2774 -{
2775 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2776 -
2777 - if (ret < base)
2778 - return base;
2779 -
2780 - return ret;
2781 -}
2782 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/signal_32.c linux-2.6.32.43/arch/powerpc/kernel/signal_32.c
2783 --- linux-2.6.32.43/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2784 +++ linux-2.6.32.43/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2785 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2786 /* Save user registers on the stack */
2787 frame = &rt_sf->uc.uc_mcontext;
2788 addr = frame;
2789 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2790 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 if (save_user_regs(regs, frame, 0, 1))
2792 goto badframe;
2793 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2794 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/signal_64.c linux-2.6.32.43/arch/powerpc/kernel/signal_64.c
2795 --- linux-2.6.32.43/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2796 +++ linux-2.6.32.43/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2797 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2798 current->thread.fpscr.val = 0;
2799
2800 /* Set up to return from userspace. */
2801 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2802 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2803 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2804 } else {
2805 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2806 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c
2807 --- linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2808 +++ linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2809 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2810 if (oldlenp) {
2811 if (!error) {
2812 if (get_user(oldlen, oldlenp) ||
2813 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2814 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2815 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2816 error = -EFAULT;
2817 }
2818 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2819 }
2820 return error;
2821 }
2822 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/traps.c linux-2.6.32.43/arch/powerpc/kernel/traps.c
2823 --- linux-2.6.32.43/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2824 +++ linux-2.6.32.43/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2825 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2826 static inline void pmac_backlight_unblank(void) { }
2827 #endif
2828
2829 +extern void gr_handle_kernel_exploit(void);
2830 +
2831 int die(const char *str, struct pt_regs *regs, long err)
2832 {
2833 static struct {
2834 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2835 if (panic_on_oops)
2836 panic("Fatal exception");
2837
2838 + gr_handle_kernel_exploit();
2839 +
2840 oops_exit();
2841 do_exit(err);
2842
2843 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/vdso.c linux-2.6.32.43/arch/powerpc/kernel/vdso.c
2844 --- linux-2.6.32.43/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2845 +++ linux-2.6.32.43/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2846 @@ -36,6 +36,7 @@
2847 #include <asm/firmware.h>
2848 #include <asm/vdso.h>
2849 #include <asm/vdso_datapage.h>
2850 +#include <asm/mman.h>
2851
2852 #include "setup.h"
2853
2854 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2855 vdso_base = VDSO32_MBASE;
2856 #endif
2857
2858 - current->mm->context.vdso_base = 0;
2859 + current->mm->context.vdso_base = ~0UL;
2860
2861 /* vDSO has a problem and was disabled, just don't "enable" it for the
2862 * process
2863 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2864 vdso_base = get_unmapped_area(NULL, vdso_base,
2865 (vdso_pages << PAGE_SHIFT) +
2866 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2867 - 0, 0);
2868 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2869 if (IS_ERR_VALUE(vdso_base)) {
2870 rc = vdso_base;
2871 goto fail_mmapsem;
2872 diff -urNp linux-2.6.32.43/arch/powerpc/kernel/vio.c linux-2.6.32.43/arch/powerpc/kernel/vio.c
2873 --- linux-2.6.32.43/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2874 +++ linux-2.6.32.43/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2875 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2876 vio_cmo_dealloc(viodev, alloc_size);
2877 }
2878
2879 -struct dma_map_ops vio_dma_mapping_ops = {
2880 +static const struct dma_map_ops vio_dma_mapping_ops = {
2881 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2882 .free_coherent = vio_dma_iommu_free_coherent,
2883 .map_sg = vio_dma_iommu_map_sg,
2884 .unmap_sg = vio_dma_iommu_unmap_sg,
2885 + .dma_supported = dma_iommu_dma_supported,
2886 .map_page = vio_dma_iommu_map_page,
2887 .unmap_page = vio_dma_iommu_unmap_page,
2888
2889 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2890
2891 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2892 {
2893 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2894 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2895 }
2896
2897 diff -urNp linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c
2898 --- linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2899 +++ linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2900 @@ -9,22 +9,6 @@
2901 #include <linux/module.h>
2902 #include <asm/uaccess.h>
2903
2904 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2905 -{
2906 - if (likely(access_ok(VERIFY_READ, from, n)))
2907 - n = __copy_from_user(to, from, n);
2908 - else
2909 - memset(to, 0, n);
2910 - return n;
2911 -}
2912 -
2913 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2914 -{
2915 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2916 - n = __copy_to_user(to, from, n);
2917 - return n;
2918 -}
2919 -
2920 unsigned long copy_in_user(void __user *to, const void __user *from,
2921 unsigned long n)
2922 {
2923 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2924 return n;
2925 }
2926
2927 -EXPORT_SYMBOL(copy_from_user);
2928 -EXPORT_SYMBOL(copy_to_user);
2929 EXPORT_SYMBOL(copy_in_user);
2930
2931 diff -urNp linux-2.6.32.43/arch/powerpc/mm/fault.c linux-2.6.32.43/arch/powerpc/mm/fault.c
2932 --- linux-2.6.32.43/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2933 +++ linux-2.6.32.43/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2934 @@ -30,6 +30,10 @@
2935 #include <linux/kprobes.h>
2936 #include <linux/kdebug.h>
2937 #include <linux/perf_event.h>
2938 +#include <linux/slab.h>
2939 +#include <linux/pagemap.h>
2940 +#include <linux/compiler.h>
2941 +#include <linux/unistd.h>
2942
2943 #include <asm/firmware.h>
2944 #include <asm/page.h>
2945 @@ -40,6 +44,7 @@
2946 #include <asm/uaccess.h>
2947 #include <asm/tlbflush.h>
2948 #include <asm/siginfo.h>
2949 +#include <asm/ptrace.h>
2950
2951
2952 #ifdef CONFIG_KPROBES
2953 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2954 }
2955 #endif
2956
2957 +#ifdef CONFIG_PAX_PAGEEXEC
2958 +/*
2959 + * PaX: decide what to do with offenders (regs->nip = fault address)
2960 + *
2961 + * returns 1 when task should be killed
2962 + */
2963 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2964 +{
2965 + return 1;
2966 +}
2967 +
2968 +void pax_report_insns(void *pc, void *sp)
2969 +{
2970 + unsigned long i;
2971 +
2972 + printk(KERN_ERR "PAX: bytes at PC: ");
2973 + for (i = 0; i < 5; i++) {
2974 + unsigned int c;
2975 + if (get_user(c, (unsigned int __user *)pc+i))
2976 + printk(KERN_CONT "???????? ");
2977 + else
2978 + printk(KERN_CONT "%08x ", c);
2979 + }
2980 + printk("\n");
2981 +}
2982 +#endif
2983 +
2984 /*
2985 * Check whether the instruction at regs->nip is a store using
2986 * an update addressing form which will update r1.
2987 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2988 * indicate errors in DSISR but can validly be set in SRR1.
2989 */
2990 if (trap == 0x400)
2991 - error_code &= 0x48200000;
2992 + error_code &= 0x58200000;
2993 else
2994 is_write = error_code & DSISR_ISSTORE;
2995 #else
2996 @@ -250,7 +282,7 @@ good_area:
2997 * "undefined". Of those that can be set, this is the only
2998 * one which seems bad.
2999 */
3000 - if (error_code & 0x10000000)
3001 + if (error_code & DSISR_GUARDED)
3002 /* Guarded storage error. */
3003 goto bad_area;
3004 #endif /* CONFIG_8xx */
3005 @@ -265,7 +297,7 @@ good_area:
3006 * processors use the same I/D cache coherency mechanism
3007 * as embedded.
3008 */
3009 - if (error_code & DSISR_PROTFAULT)
3010 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3011 goto bad_area;
3012 #endif /* CONFIG_PPC_STD_MMU */
3013
3014 @@ -335,6 +367,23 @@ bad_area:
3015 bad_area_nosemaphore:
3016 /* User mode accesses cause a SIGSEGV */
3017 if (user_mode(regs)) {
3018 +
3019 +#ifdef CONFIG_PAX_PAGEEXEC
3020 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3021 +#ifdef CONFIG_PPC_STD_MMU
3022 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3023 +#else
3024 + if (is_exec && regs->nip == address) {
3025 +#endif
3026 + switch (pax_handle_fetch_fault(regs)) {
3027 + }
3028 +
3029 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3030 + do_group_exit(SIGKILL);
3031 + }
3032 + }
3033 +#endif
3034 +
3035 _exception(SIGSEGV, regs, code, address);
3036 return 0;
3037 }
3038 diff -urNp linux-2.6.32.43/arch/powerpc/mm/mmap_64.c linux-2.6.32.43/arch/powerpc/mm/mmap_64.c
3039 --- linux-2.6.32.43/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3040 +++ linux-2.6.32.43/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3041 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3042 */
3043 if (mmap_is_legacy()) {
3044 mm->mmap_base = TASK_UNMAPPED_BASE;
3045 +
3046 +#ifdef CONFIG_PAX_RANDMMAP
3047 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3048 + mm->mmap_base += mm->delta_mmap;
3049 +#endif
3050 +
3051 mm->get_unmapped_area = arch_get_unmapped_area;
3052 mm->unmap_area = arch_unmap_area;
3053 } else {
3054 mm->mmap_base = mmap_base();
3055 +
3056 +#ifdef CONFIG_PAX_RANDMMAP
3057 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3058 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3059 +#endif
3060 +
3061 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3062 mm->unmap_area = arch_unmap_area_topdown;
3063 }
3064 diff -urNp linux-2.6.32.43/arch/powerpc/mm/slice.c linux-2.6.32.43/arch/powerpc/mm/slice.c
3065 --- linux-2.6.32.43/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3066 +++ linux-2.6.32.43/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3067 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3068 if ((mm->task_size - len) < addr)
3069 return 0;
3070 vma = find_vma(mm, addr);
3071 - return (!vma || (addr + len) <= vma->vm_start);
3072 + return check_heap_stack_gap(vma, addr, len);
3073 }
3074
3075 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3076 @@ -256,7 +256,7 @@ full_search:
3077 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3078 continue;
3079 }
3080 - if (!vma || addr + len <= vma->vm_start) {
3081 + if (check_heap_stack_gap(vma, addr, len)) {
3082 /*
3083 * Remember the place where we stopped the search:
3084 */
3085 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3086 }
3087 }
3088
3089 - addr = mm->mmap_base;
3090 - while (addr > len) {
3091 + if (mm->mmap_base < len)
3092 + addr = -ENOMEM;
3093 + else
3094 + addr = mm->mmap_base - len;
3095 +
3096 + while (!IS_ERR_VALUE(addr)) {
3097 /* Go down by chunk size */
3098 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3099 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
3100
3101 /* Check for hit with different page size */
3102 mask = slice_range_to_mask(addr, len);
3103 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3104 * return with success:
3105 */
3106 vma = find_vma(mm, addr);
3107 - if (!vma || (addr + len) <= vma->vm_start) {
3108 + if (check_heap_stack_gap(vma, addr, len)) {
3109 /* remember the address as a hint for next time */
3110 if (use_cache)
3111 mm->free_area_cache = addr;
3112 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3113 mm->cached_hole_size = vma->vm_start - addr;
3114
3115 /* try just below the current vma->vm_start */
3116 - addr = vma->vm_start;
3117 + addr = skip_heap_stack_gap(vma, len);
3118 }
3119
3120 /*
3121 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3122 if (fixed && addr > (mm->task_size - len))
3123 return -EINVAL;
3124
3125 +#ifdef CONFIG_PAX_RANDMMAP
3126 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3127 + addr = 0;
3128 +#endif
3129 +
3130 /* If hint, make sure it matches our alignment restrictions */
3131 if (!fixed && addr) {
3132 addr = _ALIGN_UP(addr, 1ul << pshift);
3133 diff -urNp linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c
3134 --- linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3135 +++ linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3136 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3137 lite5200_pm_target_state = PM_SUSPEND_ON;
3138 }
3139
3140 -static struct platform_suspend_ops lite5200_pm_ops = {
3141 +static const struct platform_suspend_ops lite5200_pm_ops = {
3142 .valid = lite5200_pm_valid,
3143 .begin = lite5200_pm_begin,
3144 .prepare = lite5200_pm_prepare,
3145 diff -urNp linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3146 --- linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3147 +++ linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3148 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3149 iounmap(mbar);
3150 }
3151
3152 -static struct platform_suspend_ops mpc52xx_pm_ops = {
3153 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
3154 .valid = mpc52xx_pm_valid,
3155 .prepare = mpc52xx_pm_prepare,
3156 .enter = mpc52xx_pm_enter,
3157 diff -urNp linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c
3158 --- linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3159 +++ linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3160 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3161 return ret;
3162 }
3163
3164 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
3165 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3166 .valid = mpc83xx_suspend_valid,
3167 .begin = mpc83xx_suspend_begin,
3168 .enter = mpc83xx_suspend_enter,
3169 diff -urNp linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c
3170 --- linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3171 +++ linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3172 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3173
3174 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3175
3176 -struct dma_map_ops dma_iommu_fixed_ops = {
3177 +const struct dma_map_ops dma_iommu_fixed_ops = {
3178 .alloc_coherent = dma_fixed_alloc_coherent,
3179 .free_coherent = dma_fixed_free_coherent,
3180 .map_sg = dma_fixed_map_sg,
3181 diff -urNp linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c
3182 --- linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3183 +++ linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3184 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3185 return mask >= DMA_BIT_MASK(32);
3186 }
3187
3188 -static struct dma_map_ops ps3_sb_dma_ops = {
3189 +static const struct dma_map_ops ps3_sb_dma_ops = {
3190 .alloc_coherent = ps3_alloc_coherent,
3191 .free_coherent = ps3_free_coherent,
3192 .map_sg = ps3_sb_map_sg,
3193 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3194 .unmap_page = ps3_unmap_page,
3195 };
3196
3197 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3198 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3199 .alloc_coherent = ps3_alloc_coherent,
3200 .free_coherent = ps3_free_coherent,
3201 .map_sg = ps3_ioc0_map_sg,
3202 diff -urNp linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig
3203 --- linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3204 +++ linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3205 @@ -2,6 +2,8 @@ config PPC_PSERIES
3206 depends on PPC64 && PPC_BOOK3S
3207 bool "IBM pSeries & new (POWER5-based) iSeries"
3208 select MPIC
3209 + select PCI_MSI
3210 + select XICS
3211 select PPC_I8259
3212 select PPC_RTAS
3213 select RTAS_ERROR_LOGGING
3214 diff -urNp linux-2.6.32.43/arch/s390/include/asm/elf.h linux-2.6.32.43/arch/s390/include/asm/elf.h
3215 --- linux-2.6.32.43/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3216 +++ linux-2.6.32.43/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3217 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3218 that it will "exec", and that there is sufficient room for the brk. */
3219 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3220
3221 +#ifdef CONFIG_PAX_ASLR
3222 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3223 +
3224 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3225 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3226 +#endif
3227 +
3228 /* This yields a mask that user programs can use to figure out what
3229 instruction set this CPU supports. */
3230
3231 diff -urNp linux-2.6.32.43/arch/s390/include/asm/setup.h linux-2.6.32.43/arch/s390/include/asm/setup.h
3232 --- linux-2.6.32.43/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3233 +++ linux-2.6.32.43/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3234 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3235 void detect_memory_layout(struct mem_chunk chunk[]);
3236
3237 #ifdef CONFIG_S390_SWITCH_AMODE
3238 -extern unsigned int switch_amode;
3239 +#define switch_amode (1)
3240 #else
3241 #define switch_amode (0)
3242 #endif
3243
3244 #ifdef CONFIG_S390_EXEC_PROTECT
3245 -extern unsigned int s390_noexec;
3246 +#define s390_noexec (1)
3247 #else
3248 #define s390_noexec (0)
3249 #endif
3250 diff -urNp linux-2.6.32.43/arch/s390/include/asm/uaccess.h linux-2.6.32.43/arch/s390/include/asm/uaccess.h
3251 --- linux-2.6.32.43/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3252 +++ linux-2.6.32.43/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3253 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3254 copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 might_fault();
3257 +
3258 + if ((long)n < 0)
3259 + return n;
3260 +
3261 if (access_ok(VERIFY_WRITE, to, n))
3262 n = __copy_to_user(to, from, n);
3263 return n;
3264 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3265 static inline unsigned long __must_check
3266 __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268 + if ((long)n < 0)
3269 + return n;
3270 +
3271 if (__builtin_constant_p(n) && (n <= 256))
3272 return uaccess.copy_from_user_small(n, from, to);
3273 else
3274 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3275 copy_from_user(void *to, const void __user *from, unsigned long n)
3276 {
3277 might_fault();
3278 +
3279 + if ((long)n < 0)
3280 + return n;
3281 +
3282 if (access_ok(VERIFY_READ, from, n))
3283 n = __copy_from_user(to, from, n);
3284 else
3285 diff -urNp linux-2.6.32.43/arch/s390/Kconfig linux-2.6.32.43/arch/s390/Kconfig
3286 --- linux-2.6.32.43/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3287 +++ linux-2.6.32.43/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3288 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3289
3290 config S390_SWITCH_AMODE
3291 bool "Switch kernel/user addressing modes"
3292 + default y
3293 help
3294 This option allows to switch the addressing modes of kernel and user
3295 - space. The kernel parameter switch_amode=on will enable this feature,
3296 - default is disabled. Enabling this (via kernel parameter) on machines
3297 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3298 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3299 + will reduce system performance.
3300
3301 Note that this option will also be selected by selecting the execute
3302 - protection option below. Enabling the execute protection via the
3303 - noexec kernel parameter will also switch the addressing modes,
3304 - independent of the switch_amode kernel parameter.
3305 + protection option below. Enabling the execute protection will also
3306 + switch the addressing modes, independent of this option.
3307
3308
3309 config S390_EXEC_PROTECT
3310 bool "Data execute protection"
3311 + default y
3312 select S390_SWITCH_AMODE
3313 help
3314 This option allows to enable a buffer overflow protection for user
3315 space programs and it also selects the addressing mode option above.
3316 - The kernel parameter noexec=on will enable this feature and also
3317 - switch the addressing modes, default is disabled. Enabling this (via
3318 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3319 - will reduce system performance.
3320 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3321 + reduce system performance.
3322
3323 comment "Code generation options"
3324
3325 diff -urNp linux-2.6.32.43/arch/s390/kernel/module.c linux-2.6.32.43/arch/s390/kernel/module.c
3326 --- linux-2.6.32.43/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3327 +++ linux-2.6.32.43/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3328 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3329
3330 /* Increase core size by size of got & plt and set start
3331 offsets for got and plt. */
3332 - me->core_size = ALIGN(me->core_size, 4);
3333 - me->arch.got_offset = me->core_size;
3334 - me->core_size += me->arch.got_size;
3335 - me->arch.plt_offset = me->core_size;
3336 - me->core_size += me->arch.plt_size;
3337 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3338 + me->arch.got_offset = me->core_size_rw;
3339 + me->core_size_rw += me->arch.got_size;
3340 + me->arch.plt_offset = me->core_size_rx;
3341 + me->core_size_rx += me->arch.plt_size;
3342 return 0;
3343 }
3344
3345 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3346 if (info->got_initialized == 0) {
3347 Elf_Addr *gotent;
3348
3349 - gotent = me->module_core + me->arch.got_offset +
3350 + gotent = me->module_core_rw + me->arch.got_offset +
3351 info->got_offset;
3352 *gotent = val;
3353 info->got_initialized = 1;
3354 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3355 else if (r_type == R_390_GOTENT ||
3356 r_type == R_390_GOTPLTENT)
3357 *(unsigned int *) loc =
3358 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3359 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3360 else if (r_type == R_390_GOT64 ||
3361 r_type == R_390_GOTPLT64)
3362 *(unsigned long *) loc = val;
3363 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3364 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3365 if (info->plt_initialized == 0) {
3366 unsigned int *ip;
3367 - ip = me->module_core + me->arch.plt_offset +
3368 + ip = me->module_core_rx + me->arch.plt_offset +
3369 info->plt_offset;
3370 #ifndef CONFIG_64BIT
3371 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3372 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3373 val - loc + 0xffffUL < 0x1ffffeUL) ||
3374 (r_type == R_390_PLT32DBL &&
3375 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3376 - val = (Elf_Addr) me->module_core +
3377 + val = (Elf_Addr) me->module_core_rx +
3378 me->arch.plt_offset +
3379 info->plt_offset;
3380 val += rela->r_addend - loc;
3381 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3382 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3383 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3384 val = val + rela->r_addend -
3385 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3386 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3387 if (r_type == R_390_GOTOFF16)
3388 *(unsigned short *) loc = val;
3389 else if (r_type == R_390_GOTOFF32)
3390 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3391 break;
3392 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3393 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3394 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3395 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3396 rela->r_addend - loc;
3397 if (r_type == R_390_GOTPC)
3398 *(unsigned int *) loc = val;
3399 diff -urNp linux-2.6.32.43/arch/s390/kernel/setup.c linux-2.6.32.43/arch/s390/kernel/setup.c
3400 --- linux-2.6.32.43/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3401 +++ linux-2.6.32.43/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3402 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3403 early_param("mem", early_parse_mem);
3404
3405 #ifdef CONFIG_S390_SWITCH_AMODE
3406 -unsigned int switch_amode = 0;
3407 -EXPORT_SYMBOL_GPL(switch_amode);
3408 -
3409 static int set_amode_and_uaccess(unsigned long user_amode,
3410 unsigned long user32_amode)
3411 {
3412 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3413 return 0;
3414 }
3415 }
3416 -
3417 -/*
3418 - * Switch kernel/user addressing modes?
3419 - */
3420 -static int __init early_parse_switch_amode(char *p)
3421 -{
3422 - switch_amode = 1;
3423 - return 0;
3424 -}
3425 -early_param("switch_amode", early_parse_switch_amode);
3426 -
3427 #else /* CONFIG_S390_SWITCH_AMODE */
3428 static inline int set_amode_and_uaccess(unsigned long user_amode,
3429 unsigned long user32_amode)
3430 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3431 }
3432 #endif /* CONFIG_S390_SWITCH_AMODE */
3433
3434 -#ifdef CONFIG_S390_EXEC_PROTECT
3435 -unsigned int s390_noexec = 0;
3436 -EXPORT_SYMBOL_GPL(s390_noexec);
3437 -
3438 -/*
3439 - * Enable execute protection?
3440 - */
3441 -static int __init early_parse_noexec(char *p)
3442 -{
3443 - if (!strncmp(p, "off", 3))
3444 - return 0;
3445 - switch_amode = 1;
3446 - s390_noexec = 1;
3447 - return 0;
3448 -}
3449 -early_param("noexec", early_parse_noexec);
3450 -#endif /* CONFIG_S390_EXEC_PROTECT */
3451 -
3452 static void setup_addressing_mode(void)
3453 {
3454 if (s390_noexec) {
3455 diff -urNp linux-2.6.32.43/arch/s390/mm/mmap.c linux-2.6.32.43/arch/s390/mm/mmap.c
3456 --- linux-2.6.32.43/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3457 +++ linux-2.6.32.43/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3458 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3459 */
3460 if (mmap_is_legacy()) {
3461 mm->mmap_base = TASK_UNMAPPED_BASE;
3462 +
3463 +#ifdef CONFIG_PAX_RANDMMAP
3464 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3465 + mm->mmap_base += mm->delta_mmap;
3466 +#endif
3467 +
3468 mm->get_unmapped_area = arch_get_unmapped_area;
3469 mm->unmap_area = arch_unmap_area;
3470 } else {
3471 mm->mmap_base = mmap_base();
3472 +
3473 +#ifdef CONFIG_PAX_RANDMMAP
3474 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3475 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3476 +#endif
3477 +
3478 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3479 mm->unmap_area = arch_unmap_area_topdown;
3480 }
3481 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3482 */
3483 if (mmap_is_legacy()) {
3484 mm->mmap_base = TASK_UNMAPPED_BASE;
3485 +
3486 +#ifdef CONFIG_PAX_RANDMMAP
3487 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3488 + mm->mmap_base += mm->delta_mmap;
3489 +#endif
3490 +
3491 mm->get_unmapped_area = s390_get_unmapped_area;
3492 mm->unmap_area = arch_unmap_area;
3493 } else {
3494 mm->mmap_base = mmap_base();
3495 +
3496 +#ifdef CONFIG_PAX_RANDMMAP
3497 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3498 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3499 +#endif
3500 +
3501 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3502 mm->unmap_area = arch_unmap_area_topdown;
3503 }
3504 diff -urNp linux-2.6.32.43/arch/score/include/asm/system.h linux-2.6.32.43/arch/score/include/asm/system.h
3505 --- linux-2.6.32.43/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3506 +++ linux-2.6.32.43/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3507 @@ -17,7 +17,7 @@ do { \
3508 #define finish_arch_switch(prev) do {} while (0)
3509
3510 typedef void (*vi_handler_t)(void);
3511 -extern unsigned long arch_align_stack(unsigned long sp);
3512 +#define arch_align_stack(x) (x)
3513
3514 #define mb() barrier()
3515 #define rmb() barrier()
3516 diff -urNp linux-2.6.32.43/arch/score/kernel/process.c linux-2.6.32.43/arch/score/kernel/process.c
3517 --- linux-2.6.32.43/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3518 +++ linux-2.6.32.43/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3519 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3520
3521 return task_pt_regs(task)->cp0_epc;
3522 }
3523 -
3524 -unsigned long arch_align_stack(unsigned long sp)
3525 -{
3526 - return sp;
3527 -}
3528 diff -urNp linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c
3529 --- linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3530 +++ linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3531 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3532 return 0;
3533 }
3534
3535 -static struct platform_suspend_ops hp6x0_pm_ops = {
3536 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3537 .enter = hp6x0_pm_enter,
3538 .valid = suspend_valid_only_mem,
3539 };
3540 diff -urNp linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c
3541 --- linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3542 +++ linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3543 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3544 NULL,
3545 };
3546
3547 -static struct sysfs_ops sq_sysfs_ops = {
3548 +static const struct sysfs_ops sq_sysfs_ops = {
3549 .show = sq_sysfs_show,
3550 .store = sq_sysfs_store,
3551 };
3552 diff -urNp linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c
3553 --- linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3554 +++ linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3555 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3556 return 0;
3557 }
3558
3559 -static struct platform_suspend_ops sh_pm_ops = {
3560 +static const struct platform_suspend_ops sh_pm_ops = {
3561 .enter = sh_pm_enter,
3562 .valid = suspend_valid_only_mem,
3563 };
3564 diff -urNp linux-2.6.32.43/arch/sh/kernel/kgdb.c linux-2.6.32.43/arch/sh/kernel/kgdb.c
3565 --- linux-2.6.32.43/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3566 +++ linux-2.6.32.43/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3567 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3568 {
3569 }
3570
3571 -struct kgdb_arch arch_kgdb_ops = {
3572 +const struct kgdb_arch arch_kgdb_ops = {
3573 /* Breakpoint instruction: trapa #0x3c */
3574 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3575 .gdb_bpt_instr = { 0x3c, 0xc3 },
3576 diff -urNp linux-2.6.32.43/arch/sh/mm/mmap.c linux-2.6.32.43/arch/sh/mm/mmap.c
3577 --- linux-2.6.32.43/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3578 +++ linux-2.6.32.43/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3579 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3580 addr = PAGE_ALIGN(addr);
3581
3582 vma = find_vma(mm, addr);
3583 - if (TASK_SIZE - len >= addr &&
3584 - (!vma || addr + len <= vma->vm_start))
3585 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3586 return addr;
3587 }
3588
3589 @@ -106,7 +105,7 @@ full_search:
3590 }
3591 return -ENOMEM;
3592 }
3593 - if (likely(!vma || addr + len <= vma->vm_start)) {
3594 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3595 /*
3596 * Remember the place where we stopped the search:
3597 */
3598 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3599 addr = PAGE_ALIGN(addr);
3600
3601 vma = find_vma(mm, addr);
3602 - if (TASK_SIZE - len >= addr &&
3603 - (!vma || addr + len <= vma->vm_start))
3604 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3605 return addr;
3606 }
3607
3608 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3609 /* make sure it can fit in the remaining address space */
3610 if (likely(addr > len)) {
3611 vma = find_vma(mm, addr-len);
3612 - if (!vma || addr <= vma->vm_start) {
3613 + if (check_heap_stack_gap(vma, addr - len, len)) {
3614 /* remember the address as a hint for next time */
3615 return (mm->free_area_cache = addr-len);
3616 }
3617 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3618 if (unlikely(mm->mmap_base < len))
3619 goto bottomup;
3620
3621 - addr = mm->mmap_base-len;
3622 - if (do_colour_align)
3623 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3624 + addr = mm->mmap_base - len;
3625
3626 do {
3627 + if (do_colour_align)
3628 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3629 /*
3630 * Lookup failure means no vma is above this address,
3631 * else if new region fits below vma->vm_start,
3632 * return with success:
3633 */
3634 vma = find_vma(mm, addr);
3635 - if (likely(!vma || addr+len <= vma->vm_start)) {
3636 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3637 /* remember the address as a hint for next time */
3638 return (mm->free_area_cache = addr);
3639 }
3640 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3641 mm->cached_hole_size = vma->vm_start - addr;
3642
3643 /* try just below the current vma->vm_start */
3644 - addr = vma->vm_start-len;
3645 - if (do_colour_align)
3646 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3647 - } while (likely(len < vma->vm_start));
3648 + addr = skip_heap_stack_gap(vma, len);
3649 + } while (!IS_ERR_VALUE(addr));
3650
3651 bottomup:
3652 /*
3653 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h
3654 --- linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3655 +++ linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h 2011-07-13 22:22:56.000000000 -0400
3656 @@ -14,18 +14,40 @@
3657 #define ATOMIC64_INIT(i) { (i) }
3658
3659 #define atomic_read(v) ((v)->counter)
3660 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3661 +{
3662 + return v->counter;
3663 +}
3664 #define atomic64_read(v) ((v)->counter)
3665 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3666 +{
3667 + return v->counter;
3668 +}
3669
3670 #define atomic_set(v, i) (((v)->counter) = i)
3671 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3672 +{
3673 + v->counter = i;
3674 +}
3675 #define atomic64_set(v, i) (((v)->counter) = i)
3676 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3677 +{
3678 + v->counter = i;
3679 +}
3680
3681 extern void atomic_add(int, atomic_t *);
3682 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3683 extern void atomic64_add(long, atomic64_t *);
3684 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3685 extern void atomic_sub(int, atomic_t *);
3686 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3687 extern void atomic64_sub(long, atomic64_t *);
3688 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3689
3690 extern int atomic_add_ret(int, atomic_t *);
3691 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3692 extern long atomic64_add_ret(long, atomic64_t *);
3693 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3694 extern int atomic_sub_ret(int, atomic_t *);
3695 extern long atomic64_sub_ret(long, atomic64_t *);
3696
3697 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3698 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3699
3700 #define atomic_inc_return(v) atomic_add_ret(1, v)
3701 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3702 +{
3703 + return atomic_add_ret_unchecked(1, v);
3704 +}
3705 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3706 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3707 +{
3708 + return atomic64_add_ret_unchecked(1, v);
3709 +}
3710
3711 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3712 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3713
3714 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3715 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3716 +{
3717 + return atomic_add_ret_unchecked(i, v);
3718 +}
3719 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3720 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3721 +{
3722 + return atomic64_add_ret_unchecked(i, v);
3723 +}
3724
3725 /*
3726 * atomic_inc_and_test - increment and test
3727 @@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
3728 * other cases.
3729 */
3730 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3731 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3732 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3733
3734 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3735 @@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
3736 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3737
3738 #define atomic_inc(v) atomic_add(1, v)
3739 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3740 +{
3741 + atomic_add_unchecked(1, v);
3742 +}
3743 #define atomic64_inc(v) atomic64_add(1, v)
3744 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3745 +{
3746 + atomic64_add_unchecked(1, v);
3747 +}
3748
3749 #define atomic_dec(v) atomic_sub(1, v)
3750 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3751 +{
3752 + atomic_sub_unchecked(1, v);
3753 +}
3754 #define atomic64_dec(v) atomic64_sub(1, v)
3755 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3756 +{
3757 + atomic64_sub_unchecked(1, v);
3758 +}
3759
3760 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3761 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3762
3763 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3764 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3765 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3766 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3767
3768 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3769 {
3770 - int c, old;
3771 + int c, old, new;
3772 c = atomic_read(v);
3773 for (;;) {
3774 - if (unlikely(c == (u)))
3775 + if (unlikely(c == u))
3776 break;
3777 - old = atomic_cmpxchg((v), c, c + (a));
3778 +
3779 + asm volatile("addcc %2, %0, %0\n"
3780 +
3781 +#ifdef CONFIG_PAX_REFCOUNT
3782 + "tvs %%icc, 6\n"
3783 +#endif
3784 +
3785 + : "=r" (new)
3786 + : "0" (c), "ir" (a)
3787 + : "cc");
3788 +
3789 + old = atomic_cmpxchg(v, c, new);
3790 if (likely(old == c))
3791 break;
3792 c = old;
3793 }
3794 - return c != (u);
3795 + return c != u;
3796 }
3797
3798 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3799 @@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3800
3801 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3802 {
3803 - long c, old;
3804 + long c, old, new;
3805 c = atomic64_read(v);
3806 for (;;) {
3807 - if (unlikely(c == (u)))
3808 + if (unlikely(c == u))
3809 break;
3810 - old = atomic64_cmpxchg((v), c, c + (a));
3811 +
3812 + asm volatile("addcc %2, %0, %0\n"
3813 +
3814 +#ifdef CONFIG_PAX_REFCOUNT
3815 + "tvs %%xcc, 6\n"
3816 +#endif
3817 +
3818 + : "=r" (new)
3819 + : "0" (c), "ir" (a)
3820 + : "cc");
3821 +
3822 + old = atomic64_cmpxchg(v, c, new);
3823 if (likely(old == c))
3824 break;
3825 c = old;
3826 }
3827 - return c != (u);
3828 + return c != u;
3829 }
3830
3831 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3832 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/cache.h linux-2.6.32.43/arch/sparc/include/asm/cache.h
3833 --- linux-2.6.32.43/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3834 +++ linux-2.6.32.43/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3835 @@ -8,7 +8,7 @@
3836 #define _SPARC_CACHE_H
3837
3838 #define L1_CACHE_SHIFT 5
3839 -#define L1_CACHE_BYTES 32
3840 +#define L1_CACHE_BYTES 32UL
3841 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3842
3843 #ifdef CONFIG_SPARC32
3844 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h
3845 --- linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3846 +++ linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3847 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3848 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3849 #define dma_is_consistent(d, h) (1)
3850
3851 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3852 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3853 extern struct bus_type pci_bus_type;
3854
3855 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3856 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3857 {
3858 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3859 if (dev->bus == &pci_bus_type)
3860 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3861 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3862 dma_addr_t *dma_handle, gfp_t flag)
3863 {
3864 - struct dma_map_ops *ops = get_dma_ops(dev);
3865 + const struct dma_map_ops *ops = get_dma_ops(dev);
3866 void *cpu_addr;
3867
3868 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3869 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3870 static inline void dma_free_coherent(struct device *dev, size_t size,
3871 void *cpu_addr, dma_addr_t dma_handle)
3872 {
3873 - struct dma_map_ops *ops = get_dma_ops(dev);
3874 + const struct dma_map_ops *ops = get_dma_ops(dev);
3875
3876 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3877 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3878 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/elf_32.h linux-2.6.32.43/arch/sparc/include/asm/elf_32.h
3879 --- linux-2.6.32.43/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3880 +++ linux-2.6.32.43/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3881 @@ -116,6 +116,13 @@ typedef struct {
3882
3883 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3884
3885 +#ifdef CONFIG_PAX_ASLR
3886 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3887 +
3888 +#define PAX_DELTA_MMAP_LEN 16
3889 +#define PAX_DELTA_STACK_LEN 16
3890 +#endif
3891 +
3892 /* This yields a mask that user programs can use to figure out what
3893 instruction set this cpu supports. This can NOT be done in userspace
3894 on Sparc. */
3895 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/elf_64.h linux-2.6.32.43/arch/sparc/include/asm/elf_64.h
3896 --- linux-2.6.32.43/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3897 +++ linux-2.6.32.43/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3898 @@ -163,6 +163,12 @@ typedef struct {
3899 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3900 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3901
3902 +#ifdef CONFIG_PAX_ASLR
3903 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3904 +
3905 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3906 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3907 +#endif
3908
3909 /* This yields a mask that user programs can use to figure out what
3910 instruction set this cpu supports. */
3911 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h
3912 --- linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3913 +++ linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3914 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3915 BTFIXUPDEF_INT(page_none)
3916 BTFIXUPDEF_INT(page_copy)
3917 BTFIXUPDEF_INT(page_readonly)
3918 +
3919 +#ifdef CONFIG_PAX_PAGEEXEC
3920 +BTFIXUPDEF_INT(page_shared_noexec)
3921 +BTFIXUPDEF_INT(page_copy_noexec)
3922 +BTFIXUPDEF_INT(page_readonly_noexec)
3923 +#endif
3924 +
3925 BTFIXUPDEF_INT(page_kernel)
3926
3927 #define PMD_SHIFT SUN4C_PMD_SHIFT
3928 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3929 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3930 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3931
3932 +#ifdef CONFIG_PAX_PAGEEXEC
3933 +extern pgprot_t PAGE_SHARED_NOEXEC;
3934 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3935 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3936 +#else
3937 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3938 +# define PAGE_COPY_NOEXEC PAGE_COPY
3939 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3940 +#endif
3941 +
3942 extern unsigned long page_kernel;
3943
3944 #ifdef MODULE
3945 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h
3946 --- linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3947 +++ linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3948 @@ -115,6 +115,13 @@
3949 SRMMU_EXEC | SRMMU_REF)
3950 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3951 SRMMU_EXEC | SRMMU_REF)
3952 +
3953 +#ifdef CONFIG_PAX_PAGEEXEC
3954 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3955 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3956 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3957 +#endif
3958 +
3959 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3960 SRMMU_DIRTY | SRMMU_REF)
3961
3962 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h
3963 --- linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3964 +++ linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3965 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3966
3967 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3968
3969 -static void inline arch_read_lock(raw_rwlock_t *lock)
3970 +static inline void arch_read_lock(raw_rwlock_t *lock)
3971 {
3972 unsigned long tmp1, tmp2;
3973
3974 __asm__ __volatile__ (
3975 "1: ldsw [%2], %0\n"
3976 " brlz,pn %0, 2f\n"
3977 -"4: add %0, 1, %1\n"
3978 +"4: addcc %0, 1, %1\n"
3979 +
3980 +#ifdef CONFIG_PAX_REFCOUNT
3981 +" tvs %%icc, 6\n"
3982 +#endif
3983 +
3984 " cas [%2], %0, %1\n"
3985 " cmp %0, %1\n"
3986 " bne,pn %%icc, 1b\n"
3987 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3988 " .previous"
3989 : "=&r" (tmp1), "=&r" (tmp2)
3990 : "r" (lock)
3991 - : "memory");
3992 + : "memory", "cc");
3993 }
3994
3995 static int inline arch_read_trylock(raw_rwlock_t *lock)
3996 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3997 "1: ldsw [%2], %0\n"
3998 " brlz,a,pn %0, 2f\n"
3999 " mov 0, %0\n"
4000 -" add %0, 1, %1\n"
4001 +" addcc %0, 1, %1\n"
4002 +
4003 +#ifdef CONFIG_PAX_REFCOUNT
4004 +" tvs %%icc, 6\n"
4005 +#endif
4006 +
4007 " cas [%2], %0, %1\n"
4008 " cmp %0, %1\n"
4009 " bne,pn %%icc, 1b\n"
4010 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4011 return tmp1;
4012 }
4013
4014 -static void inline arch_read_unlock(raw_rwlock_t *lock)
4015 +static inline void arch_read_unlock(raw_rwlock_t *lock)
4016 {
4017 unsigned long tmp1, tmp2;
4018
4019 __asm__ __volatile__(
4020 "1: lduw [%2], %0\n"
4021 -" sub %0, 1, %1\n"
4022 +" subcc %0, 1, %1\n"
4023 +
4024 +#ifdef CONFIG_PAX_REFCOUNT
4025 +" tvs %%icc, 6\n"
4026 +#endif
4027 +
4028 " cas [%2], %0, %1\n"
4029 " cmp %0, %1\n"
4030 " bne,pn %%xcc, 1b\n"
4031 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4032 : "memory");
4033 }
4034
4035 -static void inline arch_write_lock(raw_rwlock_t *lock)
4036 +static inline void arch_write_lock(raw_rwlock_t *lock)
4037 {
4038 unsigned long mask, tmp1, tmp2;
4039
4040 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4041 : "memory");
4042 }
4043
4044 -static void inline arch_write_unlock(raw_rwlock_t *lock)
4045 +static inline void arch_write_unlock(raw_rwlock_t *lock)
4046 {
4047 __asm__ __volatile__(
4048 " stw %%g0, [%0]"
4049 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h
4050 --- linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4051 +++ linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4052 @@ -50,6 +50,8 @@ struct thread_info {
4053 unsigned long w_saved;
4054
4055 struct restart_block restart_block;
4056 +
4057 + unsigned long lowest_stack;
4058 };
4059
4060 /*
4061 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h
4062 --- linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4063 +++ linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4064 @@ -68,6 +68,8 @@ struct thread_info {
4065 struct pt_regs *kern_una_regs;
4066 unsigned int kern_una_insn;
4067
4068 + unsigned long lowest_stack;
4069 +
4070 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4071 };
4072
4073 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h
4074 --- linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4075 +++ linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4076 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4077
4078 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4079 {
4080 - if (n && __access_ok((unsigned long) to, n))
4081 + if ((long)n < 0)
4082 + return n;
4083 +
4084 + if (n && __access_ok((unsigned long) to, n)) {
4085 + if (!__builtin_constant_p(n))
4086 + check_object_size(from, n, true);
4087 return __copy_user(to, (__force void __user *) from, n);
4088 - else
4089 + } else
4090 return n;
4091 }
4092
4093 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4094 {
4095 + if ((long)n < 0)
4096 + return n;
4097 +
4098 + if (!__builtin_constant_p(n))
4099 + check_object_size(from, n, true);
4100 +
4101 return __copy_user(to, (__force void __user *) from, n);
4102 }
4103
4104 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4105 {
4106 - if (n && __access_ok((unsigned long) from, n))
4107 + if ((long)n < 0)
4108 + return n;
4109 +
4110 + if (n && __access_ok((unsigned long) from, n)) {
4111 + if (!__builtin_constant_p(n))
4112 + check_object_size(to, n, false);
4113 return __copy_user((__force void __user *) to, from, n);
4114 - else
4115 + } else
4116 return n;
4117 }
4118
4119 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4120 {
4121 + if ((long)n < 0)
4122 + return n;
4123 +
4124 return __copy_user((__force void __user *) to, from, n);
4125 }
4126
4127 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h
4128 --- linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4129 +++ linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4130 @@ -9,6 +9,7 @@
4131 #include <linux/compiler.h>
4132 #include <linux/string.h>
4133 #include <linux/thread_info.h>
4134 +#include <linux/kernel.h>
4135 #include <asm/asi.h>
4136 #include <asm/system.h>
4137 #include <asm/spitfire.h>
4138 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4139 static inline unsigned long __must_check
4140 copy_from_user(void *to, const void __user *from, unsigned long size)
4141 {
4142 - unsigned long ret = ___copy_from_user(to, from, size);
4143 + unsigned long ret;
4144
4145 + if ((long)size < 0 || size > INT_MAX)
4146 + return size;
4147 +
4148 + if (!__builtin_constant_p(size))
4149 + check_object_size(to, size, false);
4150 +
4151 + ret = ___copy_from_user(to, from, size);
4152 if (unlikely(ret))
4153 ret = copy_from_user_fixup(to, from, size);
4154 return ret;
4155 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4156 static inline unsigned long __must_check
4157 copy_to_user(void __user *to, const void *from, unsigned long size)
4158 {
4159 - unsigned long ret = ___copy_to_user(to, from, size);
4160 + unsigned long ret;
4161 +
4162 + if ((long)size < 0 || size > INT_MAX)
4163 + return size;
4164 +
4165 + if (!__builtin_constant_p(size))
4166 + check_object_size(from, size, true);
4167
4168 + ret = ___copy_to_user(to, from, size);
4169 if (unlikely(ret))
4170 ret = copy_to_user_fixup(to, from, size);
4171 return ret;
4172 diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess.h linux-2.6.32.43/arch/sparc/include/asm/uaccess.h
4173 --- linux-2.6.32.43/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4174 +++ linux-2.6.32.43/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4175 @@ -1,5 +1,13 @@
4176 #ifndef ___ASM_SPARC_UACCESS_H
4177 #define ___ASM_SPARC_UACCESS_H
4178 +
4179 +#ifdef __KERNEL__
4180 +#ifndef __ASSEMBLY__
4181 +#include <linux/types.h>
4182 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4183 +#endif
4184 +#endif
4185 +
4186 #if defined(__sparc__) && defined(__arch64__)
4187 #include <asm/uaccess_64.h>
4188 #else
4189 diff -urNp linux-2.6.32.43/arch/sparc/kernel/iommu.c linux-2.6.32.43/arch/sparc/kernel/iommu.c
4190 --- linux-2.6.32.43/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4191 +++ linux-2.6.32.43/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4192 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4193 spin_unlock_irqrestore(&iommu->lock, flags);
4194 }
4195
4196 -static struct dma_map_ops sun4u_dma_ops = {
4197 +static const struct dma_map_ops sun4u_dma_ops = {
4198 .alloc_coherent = dma_4u_alloc_coherent,
4199 .free_coherent = dma_4u_free_coherent,
4200 .map_page = dma_4u_map_page,
4201 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4202 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4203 };
4204
4205 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4206 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4207 EXPORT_SYMBOL(dma_ops);
4208
4209 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4210 diff -urNp linux-2.6.32.43/arch/sparc/kernel/ioport.c linux-2.6.32.43/arch/sparc/kernel/ioport.c
4211 --- linux-2.6.32.43/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4212 +++ linux-2.6.32.43/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4213 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4214 BUG();
4215 }
4216
4217 -struct dma_map_ops sbus_dma_ops = {
4218 +const struct dma_map_ops sbus_dma_ops = {
4219 .alloc_coherent = sbus_alloc_coherent,
4220 .free_coherent = sbus_free_coherent,
4221 .map_page = sbus_map_page,
4222 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4223 .sync_sg_for_device = sbus_sync_sg_for_device,
4224 };
4225
4226 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4227 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4228 EXPORT_SYMBOL(dma_ops);
4229
4230 static int __init sparc_register_ioport(void)
4231 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4232 }
4233 }
4234
4235 -struct dma_map_ops pci32_dma_ops = {
4236 +const struct dma_map_ops pci32_dma_ops = {
4237 .alloc_coherent = pci32_alloc_coherent,
4238 .free_coherent = pci32_free_coherent,
4239 .map_page = pci32_map_page,
4240 diff -urNp linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c
4241 --- linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4242 +++ linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4243 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4244 {
4245 }
4246
4247 -struct kgdb_arch arch_kgdb_ops = {
4248 +const struct kgdb_arch arch_kgdb_ops = {
4249 /* Breakpoint instruction: ta 0x7d */
4250 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4251 };
4252 diff -urNp linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c
4253 --- linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4254 +++ linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4255 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4256 {
4257 }
4258
4259 -struct kgdb_arch arch_kgdb_ops = {
4260 +const struct kgdb_arch arch_kgdb_ops = {
4261 /* Breakpoint instruction: ta 0x72 */
4262 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4263 };
4264 diff -urNp linux-2.6.32.43/arch/sparc/kernel/Makefile linux-2.6.32.43/arch/sparc/kernel/Makefile
4265 --- linux-2.6.32.43/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4266 +++ linux-2.6.32.43/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4267 @@ -3,7 +3,7 @@
4268 #
4269
4270 asflags-y := -ansi
4271 -ccflags-y := -Werror
4272 +#ccflags-y := -Werror
4273
4274 extra-y := head_$(BITS).o
4275 extra-y += init_task.o
4276 diff -urNp linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c
4277 --- linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4278 +++ linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4279 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4280 spin_unlock_irqrestore(&iommu->lock, flags);
4281 }
4282
4283 -static struct dma_map_ops sun4v_dma_ops = {
4284 +static const struct dma_map_ops sun4v_dma_ops = {
4285 .alloc_coherent = dma_4v_alloc_coherent,
4286 .free_coherent = dma_4v_free_coherent,
4287 .map_page = dma_4v_map_page,
4288 diff -urNp linux-2.6.32.43/arch/sparc/kernel/process_32.c linux-2.6.32.43/arch/sparc/kernel/process_32.c
4289 --- linux-2.6.32.43/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4290 +++ linux-2.6.32.43/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4291 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4292 rw->ins[4], rw->ins[5],
4293 rw->ins[6],
4294 rw->ins[7]);
4295 - printk("%pS\n", (void *) rw->ins[7]);
4296 + printk("%pA\n", (void *) rw->ins[7]);
4297 rw = (struct reg_window32 *) rw->ins[6];
4298 }
4299 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4300 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4301
4302 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4303 r->psr, r->pc, r->npc, r->y, print_tainted());
4304 - printk("PC: <%pS>\n", (void *) r->pc);
4305 + printk("PC: <%pA>\n", (void *) r->pc);
4306 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4307 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4308 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4309 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4310 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4311 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4312 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4313 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4314
4315 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4316 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4317 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4318 rw = (struct reg_window32 *) fp;
4319 pc = rw->ins[7];
4320 printk("[%08lx : ", pc);
4321 - printk("%pS ] ", (void *) pc);
4322 + printk("%pA ] ", (void *) pc);
4323 fp = rw->ins[6];
4324 } while (++count < 16);
4325 printk("\n");
4326 diff -urNp linux-2.6.32.43/arch/sparc/kernel/process_64.c linux-2.6.32.43/arch/sparc/kernel/process_64.c
4327 --- linux-2.6.32.43/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4328 +++ linux-2.6.32.43/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4329 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4330 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4331 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4332 if (regs->tstate & TSTATE_PRIV)
4333 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4334 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4335 }
4336
4337 void show_regs(struct pt_regs *regs)
4338 {
4339 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4340 regs->tpc, regs->tnpc, regs->y, print_tainted());
4341 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4342 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4343 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4344 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4345 regs->u_regs[3]);
4346 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4347 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4348 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4349 regs->u_regs[15]);
4350 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4351 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4352 show_regwindow(regs);
4353 }
4354
4355 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4356 ((tp && tp->task) ? tp->task->pid : -1));
4357
4358 if (gp->tstate & TSTATE_PRIV) {
4359 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4360 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4361 (void *) gp->tpc,
4362 (void *) gp->o7,
4363 (void *) gp->i7,
4364 diff -urNp linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c
4365 --- linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4366 +++ linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4367 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4368 if (ARCH_SUN4C && len > 0x20000000)
4369 return -ENOMEM;
4370 if (!addr)
4371 - addr = TASK_UNMAPPED_BASE;
4372 + addr = current->mm->mmap_base;
4373
4374 if (flags & MAP_SHARED)
4375 addr = COLOUR_ALIGN(addr);
4376 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4377 }
4378 if (TASK_SIZE - PAGE_SIZE - len < addr)
4379 return -ENOMEM;
4380 - if (!vmm || addr + len <= vmm->vm_start)
4381 + if (check_heap_stack_gap(vmm, addr, len))
4382 return addr;
4383 addr = vmm->vm_end;
4384 if (flags & MAP_SHARED)
4385 diff -urNp linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c
4386 --- linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4387 +++ linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4388 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4389 /* We do not accept a shared mapping if it would violate
4390 * cache aliasing constraints.
4391 */
4392 - if ((flags & MAP_SHARED) &&
4393 + if ((filp || (flags & MAP_SHARED)) &&
4394 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4395 return -EINVAL;
4396 return addr;
4397 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4398 if (filp || (flags & MAP_SHARED))
4399 do_color_align = 1;
4400
4401 +#ifdef CONFIG_PAX_RANDMMAP
4402 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4403 +#endif
4404 +
4405 if (addr) {
4406 if (do_color_align)
4407 addr = COLOUR_ALIGN(addr, pgoff);
4408 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4409 addr = PAGE_ALIGN(addr);
4410
4411 vma = find_vma(mm, addr);
4412 - if (task_size - len >= addr &&
4413 - (!vma || addr + len <= vma->vm_start))
4414 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4415 return addr;
4416 }
4417
4418 if (len > mm->cached_hole_size) {
4419 - start_addr = addr = mm->free_area_cache;
4420 + start_addr = addr = mm->free_area_cache;
4421 } else {
4422 - start_addr = addr = TASK_UNMAPPED_BASE;
4423 + start_addr = addr = mm->mmap_base;
4424 mm->cached_hole_size = 0;
4425 }
4426
4427 @@ -175,14 +178,14 @@ full_search:
4428 vma = find_vma(mm, VA_EXCLUDE_END);
4429 }
4430 if (unlikely(task_size < addr)) {
4431 - if (start_addr != TASK_UNMAPPED_BASE) {
4432 - start_addr = addr = TASK_UNMAPPED_BASE;
4433 + if (start_addr != mm->mmap_base) {
4434 + start_addr = addr = mm->mmap_base;
4435 mm->cached_hole_size = 0;
4436 goto full_search;
4437 }
4438 return -ENOMEM;
4439 }
4440 - if (likely(!vma || addr + len <= vma->vm_start)) {
4441 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4442 /*
4443 * Remember the place where we stopped the search:
4444 */
4445 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4446 /* We do not accept a shared mapping if it would violate
4447 * cache aliasing constraints.
4448 */
4449 - if ((flags & MAP_SHARED) &&
4450 + if ((filp || (flags & MAP_SHARED)) &&
4451 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4452 return -EINVAL;
4453 return addr;
4454 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4455 addr = PAGE_ALIGN(addr);
4456
4457 vma = find_vma(mm, addr);
4458 - if (task_size - len >= addr &&
4459 - (!vma || addr + len <= vma->vm_start))
4460 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4461 return addr;
4462 }
4463
4464 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4465 /* make sure it can fit in the remaining address space */
4466 if (likely(addr > len)) {
4467 vma = find_vma(mm, addr-len);
4468 - if (!vma || addr <= vma->vm_start) {
4469 + if (check_heap_stack_gap(vma, addr - len, len)) {
4470 /* remember the address as a hint for next time */
4471 return (mm->free_area_cache = addr-len);
4472 }
4473 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4474 if (unlikely(mm->mmap_base < len))
4475 goto bottomup;
4476
4477 - addr = mm->mmap_base-len;
4478 - if (do_color_align)
4479 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4480 + addr = mm->mmap_base - len;
4481
4482 do {
4483 + if (do_color_align)
4484 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4485 /*
4486 * Lookup failure means no vma is above this address,
4487 * else if new region fits below vma->vm_start,
4488 * return with success:
4489 */
4490 vma = find_vma(mm, addr);
4491 - if (likely(!vma || addr+len <= vma->vm_start)) {
4492 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4493 /* remember the address as a hint for next time */
4494 return (mm->free_area_cache = addr);
4495 }
4496 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4497 mm->cached_hole_size = vma->vm_start - addr;
4498
4499 /* try just below the current vma->vm_start */
4500 - addr = vma->vm_start-len;
4501 - if (do_color_align)
4502 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4503 - } while (likely(len < vma->vm_start));
4504 + addr = skip_heap_stack_gap(vma, len);
4505 + } while (!IS_ERR_VALUE(addr));
4506
4507 bottomup:
4508 /*
4509 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4510 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4511 sysctl_legacy_va_layout) {
4512 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4513 +
4514 +#ifdef CONFIG_PAX_RANDMMAP
4515 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4516 + mm->mmap_base += mm->delta_mmap;
4517 +#endif
4518 +
4519 mm->get_unmapped_area = arch_get_unmapped_area;
4520 mm->unmap_area = arch_unmap_area;
4521 } else {
4522 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4523 gap = (task_size / 6 * 5);
4524
4525 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4526 +
4527 +#ifdef CONFIG_PAX_RANDMMAP
4528 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4529 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4530 +#endif
4531 +
4532 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4533 mm->unmap_area = arch_unmap_area_topdown;
4534 }
4535 diff -urNp linux-2.6.32.43/arch/sparc/kernel/traps_32.c linux-2.6.32.43/arch/sparc/kernel/traps_32.c
4536 --- linux-2.6.32.43/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4537 +++ linux-2.6.32.43/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4538 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4539 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4540 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4541
4542 +extern void gr_handle_kernel_exploit(void);
4543 +
4544 void die_if_kernel(char *str, struct pt_regs *regs)
4545 {
4546 static int die_counter;
4547 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4548 count++ < 30 &&
4549 (((unsigned long) rw) >= PAGE_OFFSET) &&
4550 !(((unsigned long) rw) & 0x7)) {
4551 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4552 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4553 (void *) rw->ins[7]);
4554 rw = (struct reg_window32 *)rw->ins[6];
4555 }
4556 }
4557 printk("Instruction DUMP:");
4558 instruction_dump ((unsigned long *) regs->pc);
4559 - if(regs->psr & PSR_PS)
4560 + if(regs->psr & PSR_PS) {
4561 + gr_handle_kernel_exploit();
4562 do_exit(SIGKILL);
4563 + }
4564 do_exit(SIGSEGV);
4565 }
4566
4567 diff -urNp linux-2.6.32.43/arch/sparc/kernel/traps_64.c linux-2.6.32.43/arch/sparc/kernel/traps_64.c
4568 --- linux-2.6.32.43/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4569 +++ linux-2.6.32.43/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4570 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4571 i + 1,
4572 p->trapstack[i].tstate, p->trapstack[i].tpc,
4573 p->trapstack[i].tnpc, p->trapstack[i].tt);
4574 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4575 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4576 }
4577 }
4578
4579 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4580
4581 lvl -= 0x100;
4582 if (regs->tstate & TSTATE_PRIV) {
4583 +
4584 +#ifdef CONFIG_PAX_REFCOUNT
4585 + if (lvl == 6)
4586 + pax_report_refcount_overflow(regs);
4587 +#endif
4588 +
4589 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4590 die_if_kernel(buffer, regs);
4591 }
4592 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4593 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4594 {
4595 char buffer[32];
4596 -
4597 +
4598 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4599 0, lvl, SIGTRAP) == NOTIFY_STOP)
4600 return;
4601
4602 +#ifdef CONFIG_PAX_REFCOUNT
4603 + if (lvl == 6)
4604 + pax_report_refcount_overflow(regs);
4605 +#endif
4606 +
4607 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4608
4609 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4610 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4611 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4612 printk("%s" "ERROR(%d): ",
4613 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4614 - printk("TPC<%pS>\n", (void *) regs->tpc);
4615 + printk("TPC<%pA>\n", (void *) regs->tpc);
4616 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4617 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4618 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4619 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4620 smp_processor_id(),
4621 (type & 0x1) ? 'I' : 'D',
4622 regs->tpc);
4623 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4624 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4625 panic("Irrecoverable Cheetah+ parity error.");
4626 }
4627
4628 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4629 smp_processor_id(),
4630 (type & 0x1) ? 'I' : 'D',
4631 regs->tpc);
4632 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4633 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4634 }
4635
4636 struct sun4v_error_entry {
4637 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4638
4639 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4640 regs->tpc, tl);
4641 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4642 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4643 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4644 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4645 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4646 (void *) regs->u_regs[UREG_I7]);
4647 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4648 "pte[%lx] error[%lx]\n",
4649 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4650
4651 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4652 regs->tpc, tl);
4653 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4654 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4655 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4656 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4657 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4658 (void *) regs->u_regs[UREG_I7]);
4659 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4660 "pte[%lx] error[%lx]\n",
4661 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4662 fp = (unsigned long)sf->fp + STACK_BIAS;
4663 }
4664
4665 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4666 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4667 } while (++count < 16);
4668 }
4669
4670 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4671 return (struct reg_window *) (fp + STACK_BIAS);
4672 }
4673
4674 +extern void gr_handle_kernel_exploit(void);
4675 +
4676 void die_if_kernel(char *str, struct pt_regs *regs)
4677 {
4678 static int die_counter;
4679 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4680 while (rw &&
4681 count++ < 30&&
4682 is_kernel_stack(current, rw)) {
4683 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4684 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4685 (void *) rw->ins[7]);
4686
4687 rw = kernel_stack_up(rw);
4688 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4689 }
4690 user_instruction_dump ((unsigned int __user *) regs->tpc);
4691 }
4692 - if (regs->tstate & TSTATE_PRIV)
4693 + if (regs->tstate & TSTATE_PRIV) {
4694 + gr_handle_kernel_exploit();
4695 do_exit(SIGKILL);
4696 + }
4697 +
4698 do_exit(SIGSEGV);
4699 }
4700 EXPORT_SYMBOL(die_if_kernel);
4701 diff -urNp linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S
4702 --- linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4703 +++ linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4704 @@ -127,7 +127,7 @@ do_int_load:
4705 wr %o5, 0x0, %asi
4706 retl
4707 mov 0, %o0
4708 - .size __do_int_load, .-__do_int_load
4709 + .size do_int_load, .-do_int_load
4710
4711 .section __ex_table,"a"
4712 .word 4b, __retl_efault
4713 diff -urNp linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c
4714 --- linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4715 +++ linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4716 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4717 if (count < 5) {
4718 last_time = jiffies;
4719 count++;
4720 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4721 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4722 regs->tpc, (void *) regs->tpc);
4723 }
4724 }
4725 diff -urNp linux-2.6.32.43/arch/sparc/lib/atomic_64.S linux-2.6.32.43/arch/sparc/lib/atomic_64.S
4726 --- linux-2.6.32.43/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4727 +++ linux-2.6.32.43/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4728 @@ -18,7 +18,12 @@
4729 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4730 BACKOFF_SETUP(%o2)
4731 1: lduw [%o1], %g1
4732 - add %g1, %o0, %g7
4733 + addcc %g1, %o0, %g7
4734 +
4735 +#ifdef CONFIG_PAX_REFCOUNT
4736 + tvs %icc, 6
4737 +#endif
4738 +
4739 cas [%o1], %g1, %g7
4740 cmp %g1, %g7
4741 bne,pn %icc, 2f
4742 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4743 2: BACKOFF_SPIN(%o2, %o3, 1b)
4744 .size atomic_add, .-atomic_add
4745
4746 + .globl atomic_add_unchecked
4747 + .type atomic_add_unchecked,#function
4748 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4749 + BACKOFF_SETUP(%o2)
4750 +1: lduw [%o1], %g1
4751 + add %g1, %o0, %g7
4752 + cas [%o1], %g1, %g7
4753 + cmp %g1, %g7
4754 + bne,pn %icc, 2f
4755 + nop
4756 + retl
4757 + nop
4758 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4759 + .size atomic_add_unchecked, .-atomic_add_unchecked
4760 +
4761 .globl atomic_sub
4762 .type atomic_sub,#function
4763 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4764 BACKOFF_SETUP(%o2)
4765 1: lduw [%o1], %g1
4766 - sub %g1, %o0, %g7
4767 + subcc %g1, %o0, %g7
4768 +
4769 +#ifdef CONFIG_PAX_REFCOUNT
4770 + tvs %icc, 6
4771 +#endif
4772 +
4773 cas [%o1], %g1, %g7
4774 cmp %g1, %g7
4775 bne,pn %icc, 2f
4776 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4777 2: BACKOFF_SPIN(%o2, %o3, 1b)
4778 .size atomic_sub, .-atomic_sub
4779
4780 + .globl atomic_sub_unchecked
4781 + .type atomic_sub_unchecked,#function
4782 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4783 + BACKOFF_SETUP(%o2)
4784 +1: lduw [%o1], %g1
4785 + sub %g1, %o0, %g7
4786 + cas [%o1], %g1, %g7
4787 + cmp %g1, %g7
4788 + bne,pn %icc, 2f
4789 + nop
4790 + retl
4791 + nop
4792 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4793 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4794 +
4795 .globl atomic_add_ret
4796 .type atomic_add_ret,#function
4797 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4798 BACKOFF_SETUP(%o2)
4799 1: lduw [%o1], %g1
4800 - add %g1, %o0, %g7
4801 + addcc %g1, %o0, %g7
4802 +
4803 +#ifdef CONFIG_PAX_REFCOUNT
4804 + tvs %icc, 6
4805 +#endif
4806 +
4807 cas [%o1], %g1, %g7
4808 cmp %g1, %g7
4809 bne,pn %icc, 2f
4810 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4811 2: BACKOFF_SPIN(%o2, %o3, 1b)
4812 .size atomic_add_ret, .-atomic_add_ret
4813
4814 + .globl atomic_add_ret_unchecked
4815 + .type atomic_add_ret_unchecked,#function
4816 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4817 + BACKOFF_SETUP(%o2)
4818 +1: lduw [%o1], %g1
4819 + addcc %g1, %o0, %g7
4820 + cas [%o1], %g1, %g7
4821 + cmp %g1, %g7
4822 + bne,pn %icc, 2f
4823 + add %g7, %o0, %g7
4824 + sra %g7, 0, %o0
4825 + retl
4826 + nop
4827 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4828 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4829 +
4830 .globl atomic_sub_ret
4831 .type atomic_sub_ret,#function
4832 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4833 BACKOFF_SETUP(%o2)
4834 1: lduw [%o1], %g1
4835 - sub %g1, %o0, %g7
4836 + subcc %g1, %o0, %g7
4837 +
4838 +#ifdef CONFIG_PAX_REFCOUNT
4839 + tvs %icc, 6
4840 +#endif
4841 +
4842 cas [%o1], %g1, %g7
4843 cmp %g1, %g7
4844 bne,pn %icc, 2f
4845 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4846 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4847 BACKOFF_SETUP(%o2)
4848 1: ldx [%o1], %g1
4849 - add %g1, %o0, %g7
4850 + addcc %g1, %o0, %g7
4851 +
4852 +#ifdef CONFIG_PAX_REFCOUNT
4853 + tvs %xcc, 6
4854 +#endif
4855 +
4856 casx [%o1], %g1, %g7
4857 cmp %g1, %g7
4858 bne,pn %xcc, 2f
4859 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4860 2: BACKOFF_SPIN(%o2, %o3, 1b)
4861 .size atomic64_add, .-atomic64_add
4862
4863 + .globl atomic64_add_unchecked
4864 + .type atomic64_add_unchecked,#function
4865 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4866 + BACKOFF_SETUP(%o2)
4867 +1: ldx [%o1], %g1
4868 + addcc %g1, %o0, %g7
4869 + casx [%o1], %g1, %g7
4870 + cmp %g1, %g7
4871 + bne,pn %xcc, 2f
4872 + nop
4873 + retl
4874 + nop
4875 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4876 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4877 +
4878 .globl atomic64_sub
4879 .type atomic64_sub,#function
4880 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4881 BACKOFF_SETUP(%o2)
4882 1: ldx [%o1], %g1
4883 - sub %g1, %o0, %g7
4884 + subcc %g1, %o0, %g7
4885 +
4886 +#ifdef CONFIG_PAX_REFCOUNT
4887 + tvs %xcc, 6
4888 +#endif
4889 +
4890 casx [%o1], %g1, %g7
4891 cmp %g1, %g7
4892 bne,pn %xcc, 2f
4893 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4894 2: BACKOFF_SPIN(%o2, %o3, 1b)
4895 .size atomic64_sub, .-atomic64_sub
4896
4897 + .globl atomic64_sub_unchecked
4898 + .type atomic64_sub_unchecked,#function
4899 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4900 + BACKOFF_SETUP(%o2)
4901 +1: ldx [%o1], %g1
4902 + subcc %g1, %o0, %g7
4903 + casx [%o1], %g1, %g7
4904 + cmp %g1, %g7
4905 + bne,pn %xcc, 2f
4906 + nop
4907 + retl
4908 + nop
4909 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4910 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4911 +
4912 .globl atomic64_add_ret
4913 .type atomic64_add_ret,#function
4914 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4915 BACKOFF_SETUP(%o2)
4916 1: ldx [%o1], %g1
4917 - add %g1, %o0, %g7
4918 + addcc %g1, %o0, %g7
4919 +
4920 +#ifdef CONFIG_PAX_REFCOUNT
4921 + tvs %xcc, 6
4922 +#endif
4923 +
4924 casx [%o1], %g1, %g7
4925 cmp %g1, %g7
4926 bne,pn %xcc, 2f
4927 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4928 2: BACKOFF_SPIN(%o2, %o3, 1b)
4929 .size atomic64_add_ret, .-atomic64_add_ret
4930
4931 + .globl atomic64_add_ret_unchecked
4932 + .type atomic64_add_ret_unchecked,#function
4933 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4934 + BACKOFF_SETUP(%o2)
4935 +1: ldx [%o1], %g1
4936 + addcc %g1, %o0, %g7
4937 + casx [%o1], %g1, %g7
4938 + cmp %g1, %g7
4939 + bne,pn %xcc, 2f
4940 + add %g7, %o0, %g7
4941 + mov %g7, %o0
4942 + retl
4943 + nop
4944 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4945 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4946 +
4947 .globl atomic64_sub_ret
4948 .type atomic64_sub_ret,#function
4949 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4950 BACKOFF_SETUP(%o2)
4951 1: ldx [%o1], %g1
4952 - sub %g1, %o0, %g7
4953 + subcc %g1, %o0, %g7
4954 +
4955 +#ifdef CONFIG_PAX_REFCOUNT
4956 + tvs %xcc, 6
4957 +#endif
4958 +
4959 casx [%o1], %g1, %g7
4960 cmp %g1, %g7
4961 bne,pn %xcc, 2f
4962 diff -urNp linux-2.6.32.43/arch/sparc/lib/ksyms.c linux-2.6.32.43/arch/sparc/lib/ksyms.c
4963 --- linux-2.6.32.43/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4964 +++ linux-2.6.32.43/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4965 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4966
4967 /* Atomic counter implementation. */
4968 EXPORT_SYMBOL(atomic_add);
4969 +EXPORT_SYMBOL(atomic_add_unchecked);
4970 EXPORT_SYMBOL(atomic_add_ret);
4971 EXPORT_SYMBOL(atomic_sub);
4972 +EXPORT_SYMBOL(atomic_sub_unchecked);
4973 EXPORT_SYMBOL(atomic_sub_ret);
4974 EXPORT_SYMBOL(atomic64_add);
4975 +EXPORT_SYMBOL(atomic64_add_unchecked);
4976 EXPORT_SYMBOL(atomic64_add_ret);
4977 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4978 EXPORT_SYMBOL(atomic64_sub);
4979 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4980 EXPORT_SYMBOL(atomic64_sub_ret);
4981
4982 /* Atomic bit operations. */
4983 diff -urNp linux-2.6.32.43/arch/sparc/lib/Makefile linux-2.6.32.43/arch/sparc/lib/Makefile
4984 --- linux-2.6.32.43/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4985 +++ linux-2.6.32.43/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4986 @@ -2,7 +2,7 @@
4987 #
4988
4989 asflags-y := -ansi -DST_DIV0=0x02
4990 -ccflags-y := -Werror
4991 +#ccflags-y := -Werror
4992
4993 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4994 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4995 diff -urNp linux-2.6.32.43/arch/sparc/lib/rwsem_64.S linux-2.6.32.43/arch/sparc/lib/rwsem_64.S
4996 --- linux-2.6.32.43/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4997 +++ linux-2.6.32.43/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4998 @@ -11,7 +11,12 @@
4999 .globl __down_read
5000 __down_read:
5001 1: lduw [%o0], %g1
5002 - add %g1, 1, %g7
5003 + addcc %g1, 1, %g7
5004 +
5005 +#ifdef CONFIG_PAX_REFCOUNT
5006 + tvs %icc, 6
5007 +#endif
5008 +
5009 cas [%o0], %g1, %g7
5010 cmp %g1, %g7
5011 bne,pn %icc, 1b
5012 @@ -33,7 +38,12 @@ __down_read:
5013 .globl __down_read_trylock
5014 __down_read_trylock:
5015 1: lduw [%o0], %g1
5016 - add %g1, 1, %g7
5017 + addcc %g1, 1, %g7
5018 +
5019 +#ifdef CONFIG_PAX_REFCOUNT
5020 + tvs %icc, 6
5021 +#endif
5022 +
5023 cmp %g7, 0
5024 bl,pn %icc, 2f
5025 mov 0, %o1
5026 @@ -51,7 +61,12 @@ __down_write:
5027 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5028 1:
5029 lduw [%o0], %g3
5030 - add %g3, %g1, %g7
5031 + addcc %g3, %g1, %g7
5032 +
5033 +#ifdef CONFIG_PAX_REFCOUNT
5034 + tvs %icc, 6
5035 +#endif
5036 +
5037 cas [%o0], %g3, %g7
5038 cmp %g3, %g7
5039 bne,pn %icc, 1b
5040 @@ -77,7 +92,12 @@ __down_write_trylock:
5041 cmp %g3, 0
5042 bne,pn %icc, 2f
5043 mov 0, %o1
5044 - add %g3, %g1, %g7
5045 + addcc %g3, %g1, %g7
5046 +
5047 +#ifdef CONFIG_PAX_REFCOUNT
5048 + tvs %icc, 6
5049 +#endif
5050 +
5051 cas [%o0], %g3, %g7
5052 cmp %g3, %g7
5053 bne,pn %icc, 1b
5054 @@ -90,7 +110,12 @@ __down_write_trylock:
5055 __up_read:
5056 1:
5057 lduw [%o0], %g1
5058 - sub %g1, 1, %g7
5059 + subcc %g1, 1, %g7
5060 +
5061 +#ifdef CONFIG_PAX_REFCOUNT
5062 + tvs %icc, 6
5063 +#endif
5064 +
5065 cas [%o0], %g1, %g7
5066 cmp %g1, %g7
5067 bne,pn %icc, 1b
5068 @@ -118,7 +143,12 @@ __up_write:
5069 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5070 1:
5071 lduw [%o0], %g3
5072 - sub %g3, %g1, %g7
5073 + subcc %g3, %g1, %g7
5074 +
5075 +#ifdef CONFIG_PAX_REFCOUNT
5076 + tvs %icc, 6
5077 +#endif
5078 +
5079 cas [%o0], %g3, %g7
5080 cmp %g3, %g7
5081 bne,pn %icc, 1b
5082 @@ -143,7 +173,12 @@ __downgrade_write:
5083 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5084 1:
5085 lduw [%o0], %g3
5086 - sub %g3, %g1, %g7
5087 + subcc %g3, %g1, %g7
5088 +
5089 +#ifdef CONFIG_PAX_REFCOUNT
5090 + tvs %icc, 6
5091 +#endif
5092 +
5093 cas [%o0], %g3, %g7
5094 cmp %g3, %g7
5095 bne,pn %icc, 1b
5096 diff -urNp linux-2.6.32.43/arch/sparc/Makefile linux-2.6.32.43/arch/sparc/Makefile
5097 --- linux-2.6.32.43/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5098 +++ linux-2.6.32.43/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5099 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5100 # Export what is needed by arch/sparc/boot/Makefile
5101 export VMLINUX_INIT VMLINUX_MAIN
5102 VMLINUX_INIT := $(head-y) $(init-y)
5103 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5104 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5105 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5106 VMLINUX_MAIN += $(drivers-y) $(net-y)
5107
5108 diff -urNp linux-2.6.32.43/arch/sparc/mm/fault_32.c linux-2.6.32.43/arch/sparc/mm/fault_32.c
5109 --- linux-2.6.32.43/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5110 +++ linux-2.6.32.43/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5111 @@ -21,6 +21,9 @@
5112 #include <linux/interrupt.h>
5113 #include <linux/module.h>
5114 #include <linux/kdebug.h>
5115 +#include <linux/slab.h>
5116 +#include <linux/pagemap.h>
5117 +#include <linux/compiler.h>
5118
5119 #include <asm/system.h>
5120 #include <asm/page.h>
5121 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5122 return safe_compute_effective_address(regs, insn);
5123 }
5124
5125 +#ifdef CONFIG_PAX_PAGEEXEC
5126 +#ifdef CONFIG_PAX_DLRESOLVE
5127 +static void pax_emuplt_close(struct vm_area_struct *vma)
5128 +{
5129 + vma->vm_mm->call_dl_resolve = 0UL;
5130 +}
5131 +
5132 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5133 +{
5134 + unsigned int *kaddr;
5135 +
5136 + vmf->page = alloc_page(GFP_HIGHUSER);
5137 + if (!vmf->page)
5138 + return VM_FAULT_OOM;
5139 +
5140 + kaddr = kmap(vmf->page);
5141 + memset(kaddr, 0, PAGE_SIZE);
5142 + kaddr[0] = 0x9DE3BFA8U; /* save */
5143 + flush_dcache_page(vmf->page);
5144 + kunmap(vmf->page);
5145 + return VM_FAULT_MAJOR;
5146 +}
5147 +
5148 +static const struct vm_operations_struct pax_vm_ops = {
5149 + .close = pax_emuplt_close,
5150 + .fault = pax_emuplt_fault
5151 +};
5152 +
5153 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5154 +{
5155 + int ret;
5156 +
5157 + vma->vm_mm = current->mm;
5158 + vma->vm_start = addr;
5159 + vma->vm_end = addr + PAGE_SIZE;
5160 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5161 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5162 + vma->vm_ops = &pax_vm_ops;
5163 +
5164 + ret = insert_vm_struct(current->mm, vma);
5165 + if (ret)
5166 + return ret;
5167 +
5168 + ++current->mm->total_vm;
5169 + return 0;
5170 +}
5171 +#endif
5172 +
5173 +/*
5174 + * PaX: decide what to do with offenders (regs->pc = fault address)
5175 + *
5176 + * returns 1 when task should be killed
5177 + * 2 when patched PLT trampoline was detected
5178 + * 3 when unpatched PLT trampoline was detected
5179 + */
5180 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5181 +{
5182 +
5183 +#ifdef CONFIG_PAX_EMUPLT
5184 + int err;
5185 +
5186 + do { /* PaX: patched PLT emulation #1 */
5187 + unsigned int sethi1, sethi2, jmpl;
5188 +
5189 + err = get_user(sethi1, (unsigned int *)regs->pc);
5190 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5191 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5192 +
5193 + if (err)
5194 + break;
5195 +
5196 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5197 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5198 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5199 + {
5200 + unsigned int addr;
5201 +
5202 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5203 + addr = regs->u_regs[UREG_G1];
5204 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5205 + regs->pc = addr;
5206 + regs->npc = addr+4;
5207 + return 2;
5208 + }
5209 + } while (0);
5210 +
5211 + { /* PaX: patched PLT emulation #2 */
5212 + unsigned int ba;
5213 +
5214 + err = get_user(ba, (unsigned int *)regs->pc);
5215 +
5216 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5217 + unsigned int addr;
5218 +
5219 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5220 + regs->pc = addr;
5221 + regs->npc = addr+4;
5222 + return 2;
5223 + }
5224 + }
5225 +
5226 + do { /* PaX: patched PLT emulation #3 */
5227 + unsigned int sethi, jmpl, nop;
5228 +
5229 + err = get_user(sethi, (unsigned int *)regs->pc);
5230 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5231 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5232 +
5233 + if (err)
5234 + break;
5235 +
5236 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5237 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5238 + nop == 0x01000000U)
5239 + {
5240 + unsigned int addr;
5241 +
5242 + addr = (sethi & 0x003FFFFFU) << 10;
5243 + regs->u_regs[UREG_G1] = addr;
5244 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5245 + regs->pc = addr;
5246 + regs->npc = addr+4;
5247 + return 2;
5248 + }
5249 + } while (0);
5250 +
5251 + do { /* PaX: unpatched PLT emulation step 1 */
5252 + unsigned int sethi, ba, nop;
5253 +
5254 + err = get_user(sethi, (unsigned int *)regs->pc);
5255 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5256 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5257 +
5258 + if (err)
5259 + break;
5260 +
5261 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5262 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5263 + nop == 0x01000000U)
5264 + {
5265 + unsigned int addr, save, call;
5266 +
5267 + if ((ba & 0xFFC00000U) == 0x30800000U)
5268 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5269 + else
5270 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5271 +
5272 + err = get_user(save, (unsigned int *)addr);
5273 + err |= get_user(call, (unsigned int *)(addr+4));
5274 + err |= get_user(nop, (unsigned int *)(addr+8));
5275 + if (err)
5276 + break;
5277 +
5278 +#ifdef CONFIG_PAX_DLRESOLVE
5279 + if (save == 0x9DE3BFA8U &&
5280 + (call & 0xC0000000U) == 0x40000000U &&
5281 + nop == 0x01000000U)
5282 + {
5283 + struct vm_area_struct *vma;
5284 + unsigned long call_dl_resolve;
5285 +
5286 + down_read(&current->mm->mmap_sem);
5287 + call_dl_resolve = current->mm->call_dl_resolve;
5288 + up_read(&current->mm->mmap_sem);
5289 + if (likely(call_dl_resolve))
5290 + goto emulate;
5291 +
5292 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5293 +
5294 + down_write(&current->mm->mmap_sem);
5295 + if (current->mm->call_dl_resolve) {
5296 + call_dl_resolve = current->mm->call_dl_resolve;
5297 + up_write(&current->mm->mmap_sem);
5298 + if (vma)
5299 + kmem_cache_free(vm_area_cachep, vma);
5300 + goto emulate;
5301 + }
5302 +
5303 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5304 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5305 + up_write(&current->mm->mmap_sem);
5306 + if (vma)
5307 + kmem_cache_free(vm_area_cachep, vma);
5308 + return 1;
5309 + }
5310 +
5311 + if (pax_insert_vma(vma, call_dl_resolve)) {
5312 + up_write(&current->mm->mmap_sem);
5313 + kmem_cache_free(vm_area_cachep, vma);
5314 + return 1;
5315 + }
5316 +
5317 + current->mm->call_dl_resolve = call_dl_resolve;
5318 + up_write(&current->mm->mmap_sem);
5319 +
5320 +emulate:
5321 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5322 + regs->pc = call_dl_resolve;
5323 + regs->npc = addr+4;
5324 + return 3;
5325 + }
5326 +#endif
5327 +
5328 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5329 + if ((save & 0xFFC00000U) == 0x05000000U &&
5330 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5331 + nop == 0x01000000U)
5332 + {
5333 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5334 + regs->u_regs[UREG_G2] = addr + 4;
5335 + addr = (save & 0x003FFFFFU) << 10;
5336 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5337 + regs->pc = addr;
5338 + regs->npc = addr+4;
5339 + return 3;
5340 + }
5341 + }
5342 + } while (0);
5343 +
5344 + do { /* PaX: unpatched PLT emulation step 2 */
5345 + unsigned int save, call, nop;
5346 +
5347 + err = get_user(save, (unsigned int *)(regs->pc-4));
5348 + err |= get_user(call, (unsigned int *)regs->pc);
5349 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5350 + if (err)
5351 + break;
5352 +
5353 + if (save == 0x9DE3BFA8U &&
5354 + (call & 0xC0000000U) == 0x40000000U &&
5355 + nop == 0x01000000U)
5356 + {
5357 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5358 +
5359 + regs->u_regs[UREG_RETPC] = regs->pc;
5360 + regs->pc = dl_resolve;
5361 + regs->npc = dl_resolve+4;
5362 + return 3;
5363 + }
5364 + } while (0);
5365 +#endif
5366 +
5367 + return 1;
5368 +}
5369 +
5370 +void pax_report_insns(void *pc, void *sp)
5371 +{
5372 + unsigned long i;
5373 +
5374 + printk(KERN_ERR "PAX: bytes at PC: ");
5375 + for (i = 0; i < 8; i++) {
5376 + unsigned int c;
5377 + if (get_user(c, (unsigned int *)pc+i))
5378 + printk(KERN_CONT "???????? ");
5379 + else
5380 + printk(KERN_CONT "%08x ", c);
5381 + }
5382 + printk("\n");
5383 +}
5384 +#endif
5385 +
5386 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5387 unsigned long address)
5388 {
5389 @@ -231,6 +495,24 @@ good_area:
5390 if(!(vma->vm_flags & VM_WRITE))
5391 goto bad_area;
5392 } else {
5393 +
5394 +#ifdef CONFIG_PAX_PAGEEXEC
5395 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5396 + up_read(&mm->mmap_sem);
5397 + switch (pax_handle_fetch_fault(regs)) {
5398 +
5399 +#ifdef CONFIG_PAX_EMUPLT
5400 + case 2:
5401 + case 3:
5402 + return;
5403 +#endif
5404 +
5405 + }
5406 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5407 + do_group_exit(SIGKILL);
5408 + }
5409 +#endif
5410 +
5411 /* Allow reads even for write-only mappings */
5412 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5413 goto bad_area;
5414 diff -urNp linux-2.6.32.43/arch/sparc/mm/fault_64.c linux-2.6.32.43/arch/sparc/mm/fault_64.c
5415 --- linux-2.6.32.43/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5416 +++ linux-2.6.32.43/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5417 @@ -20,6 +20,9 @@
5418 #include <linux/kprobes.h>
5419 #include <linux/kdebug.h>
5420 #include <linux/percpu.h>
5421 +#include <linux/slab.h>
5422 +#include <linux/pagemap.h>
5423 +#include <linux/compiler.h>
5424
5425 #include <asm/page.h>
5426 #include <asm/pgtable.h>
5427 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5428 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5429 regs->tpc);
5430 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5431 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5432 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5433 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5434 dump_stack();
5435 unhandled_fault(regs->tpc, current, regs);
5436 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5437 show_regs(regs);
5438 }
5439
5440 +#ifdef CONFIG_PAX_PAGEEXEC
5441 +#ifdef CONFIG_PAX_DLRESOLVE
5442 +static void pax_emuplt_close(struct vm_area_struct *vma)
5443 +{
5444 + vma->vm_mm->call_dl_resolve = 0UL;
5445 +}
5446 +
5447 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5448 +{
5449 + unsigned int *kaddr;
5450 +
5451 + vmf->page = alloc_page(GFP_HIGHUSER);
5452 + if (!vmf->page)
5453 + return VM_FAULT_OOM;
5454 +
5455 + kaddr = kmap(vmf->page);
5456 + memset(kaddr, 0, PAGE_SIZE);
5457 + kaddr[0] = 0x9DE3BFA8U; /* save */
5458 + flush_dcache_page(vmf->page);
5459 + kunmap(vmf->page);
5460 + return VM_FAULT_MAJOR;
5461 +}
5462 +
5463 +static const struct vm_operations_struct pax_vm_ops = {
5464 + .close = pax_emuplt_close,
5465 + .fault = pax_emuplt_fault
5466 +};
5467 +
5468 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5469 +{
5470 + int ret;
5471 +
5472 + vma->vm_mm = current->mm;
5473 + vma->vm_start = addr;
5474 + vma->vm_end = addr + PAGE_SIZE;
5475 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5476 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5477 + vma->vm_ops = &pax_vm_ops;
5478 +
5479 + ret = insert_vm_struct(current->mm, vma);
5480 + if (ret)
5481 + return ret;
5482 +
5483 + ++current->mm->total_vm;
5484 + return 0;
5485 +}
5486 +#endif
5487 +
5488 +/*
5489 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5490 + *
5491 + * returns 1 when task should be killed
5492 + * 2 when patched PLT trampoline was detected
5493 + * 3 when unpatched PLT trampoline was detected
5494 + */
5495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5496 +{
5497 +
5498 +#ifdef CONFIG_PAX_EMUPLT
5499 + int err;
5500 +
5501 + do { /* PaX: patched PLT emulation #1 */
5502 + unsigned int sethi1, sethi2, jmpl;
5503 +
5504 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5505 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5506 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5507 +
5508 + if (err)
5509 + break;
5510 +
5511 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5512 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5513 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5514 + {
5515 + unsigned long addr;
5516 +
5517 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5518 + addr = regs->u_regs[UREG_G1];
5519 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5520 +
5521 + if (test_thread_flag(TIF_32BIT))
5522 + addr &= 0xFFFFFFFFUL;
5523 +
5524 + regs->tpc = addr;
5525 + regs->tnpc = addr+4;
5526 + return 2;
5527 + }
5528 + } while (0);
5529 +
5530 + { /* PaX: patched PLT emulation #2 */
5531 + unsigned int ba;
5532 +
5533 + err = get_user(ba, (unsigned int *)regs->tpc);
5534 +
5535 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5536 + unsigned long addr;
5537 +
5538 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5539 +
5540 + if (test_thread_flag(TIF_32BIT))
5541 + addr &= 0xFFFFFFFFUL;
5542 +
5543 + regs->tpc = addr;
5544 + regs->tnpc = addr+4;
5545 + return 2;
5546 + }
5547 + }
5548 +
5549 + do { /* PaX: patched PLT emulation #3 */
5550 + unsigned int sethi, jmpl, nop;
5551 +
5552 + err = get_user(sethi, (unsigned int *)regs->tpc);
5553 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5554 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5555 +
5556 + if (err)
5557 + break;
5558 +
5559 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5560 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5561 + nop == 0x01000000U)
5562 + {
5563 + unsigned long addr;
5564 +
5565 + addr = (sethi & 0x003FFFFFU) << 10;
5566 + regs->u_regs[UREG_G1] = addr;
5567 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5568 +
5569 + if (test_thread_flag(TIF_32BIT))
5570 + addr &= 0xFFFFFFFFUL;
5571 +
5572 + regs->tpc = addr;
5573 + regs->tnpc = addr+4;
5574 + return 2;
5575 + }
5576 + } while (0);
5577 +
5578 + do { /* PaX: patched PLT emulation #4 */
5579 + unsigned int sethi, mov1, call, mov2;
5580 +
5581 + err = get_user(sethi, (unsigned int *)regs->tpc);
5582 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5583 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5584 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5585 +
5586 + if (err)
5587 + break;
5588 +
5589 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5590 + mov1 == 0x8210000FU &&
5591 + (call & 0xC0000000U) == 0x40000000U &&
5592 + mov2 == 0x9E100001U)
5593 + {
5594 + unsigned long addr;
5595 +
5596 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5597 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5598 +
5599 + if (test_thread_flag(TIF_32BIT))
5600 + addr &= 0xFFFFFFFFUL;
5601 +
5602 + regs->tpc = addr;
5603 + regs->tnpc = addr+4;
5604 + return 2;
5605 + }
5606 + } while (0);
5607 +
5608 + do { /* PaX: patched PLT emulation #5 */
5609 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5610 +
5611 + err = get_user(sethi, (unsigned int *)regs->tpc);
5612 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5613 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5614 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5615 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5616 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5617 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5618 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5619 +
5620 + if (err)
5621 + break;
5622 +
5623 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5624 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5625 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5626 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5627 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5628 + sllx == 0x83287020U &&
5629 + jmpl == 0x81C04005U &&
5630 + nop == 0x01000000U)
5631 + {
5632 + unsigned long addr;
5633 +
5634 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5635 + regs->u_regs[UREG_G1] <<= 32;
5636 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5637 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5638 + regs->tpc = addr;
5639 + regs->tnpc = addr+4;
5640 + return 2;
5641 + }
5642 + } while (0);
5643 +
5644 + do { /* PaX: patched PLT emulation #6 */
5645 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5646 +
5647 + err = get_user(sethi, (unsigned int *)regs->tpc);
5648 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5649 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5650 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5651 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5652 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5653 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5654 +
5655 + if (err)
5656 + break;
5657 +
5658 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5659 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5660 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5661 + sllx == 0x83287020U &&
5662 + (or & 0xFFFFE000U) == 0x8A116000U &&
5663 + jmpl == 0x81C04005U &&
5664 + nop == 0x01000000U)
5665 + {
5666 + unsigned long addr;
5667 +
5668 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5669 + regs->u_regs[UREG_G1] <<= 32;
5670 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5671 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5672 + regs->tpc = addr;
5673 + regs->tnpc = addr+4;
5674 + return 2;
5675 + }
5676 + } while (0);
5677 +
5678 + do { /* PaX: unpatched PLT emulation step 1 */
5679 + unsigned int sethi, ba, nop;
5680 +
5681 + err = get_user(sethi, (unsigned int *)regs->tpc);
5682 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5683 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5684 +
5685 + if (err)
5686 + break;
5687 +
5688 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5689 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5690 + nop == 0x01000000U)
5691 + {
5692 + unsigned long addr;
5693 + unsigned int save, call;
5694 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5695 +
5696 + if ((ba & 0xFFC00000U) == 0x30800000U)
5697 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5698 + else
5699 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5700 +
5701 + if (test_thread_flag(TIF_32BIT))
5702 + addr &= 0xFFFFFFFFUL;
5703 +
5704 + err = get_user(save, (unsigned int *)addr);
5705 + err |= get_user(call, (unsigned int *)(addr+4));
5706 + err |= get_user(nop, (unsigned int *)(addr+8));
5707 + if (err)
5708 + break;
5709 +
5710 +#ifdef CONFIG_PAX_DLRESOLVE
5711 + if (save == 0x9DE3BFA8U &&
5712 + (call & 0xC0000000U) == 0x40000000U &&
5713 + nop == 0x01000000U)
5714 + {
5715 + struct vm_area_struct *vma;
5716 + unsigned long call_dl_resolve;
5717 +
5718 + down_read(&current->mm->mmap_sem);
5719 + call_dl_resolve = current->mm->call_dl_resolve;
5720 + up_read(&current->mm->mmap_sem);
5721 + if (likely(call_dl_resolve))
5722 + goto emulate;
5723 +
5724 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5725 +
5726 + down_write(&current->mm->mmap_sem);
5727 + if (current->mm->call_dl_resolve) {
5728 + call_dl_resolve = current->mm->call_dl_resolve;
5729 + up_write(&current->mm->mmap_sem);
5730 + if (vma)
5731 + kmem_cache_free(vm_area_cachep, vma);
5732 + goto emulate;
5733 + }
5734 +
5735 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5736 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5737 + up_write(&current->mm->mmap_sem);
5738 + if (vma)
5739 + kmem_cache_free(vm_area_cachep, vma);
5740 + return 1;
5741 + }
5742 +
5743 + if (pax_insert_vma(vma, call_dl_resolve)) {
5744 + up_write(&current->mm->mmap_sem);
5745 + kmem_cache_free(vm_area_cachep, vma);
5746 + return 1;
5747 + }
5748 +
5749 + current->mm->call_dl_resolve = call_dl_resolve;
5750 + up_write(&current->mm->mmap_sem);
5751 +
5752 +emulate:
5753 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5754 + regs->tpc = call_dl_resolve;
5755 + regs->tnpc = addr+4;
5756 + return 3;
5757 + }
5758 +#endif
5759 +
5760 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5761 + if ((save & 0xFFC00000U) == 0x05000000U &&
5762 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5763 + nop == 0x01000000U)
5764 + {
5765 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5766 + regs->u_regs[UREG_G2] = addr + 4;
5767 + addr = (save & 0x003FFFFFU) << 10;
5768 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5769 +
5770 + if (test_thread_flag(TIF_32BIT))
5771 + addr &= 0xFFFFFFFFUL;
5772 +
5773 + regs->tpc = addr;
5774 + regs->tnpc = addr+4;
5775 + return 3;
5776 + }
5777 +
5778 + /* PaX: 64-bit PLT stub */
5779 + err = get_user(sethi1, (unsigned int *)addr);
5780 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5781 + err |= get_user(or1, (unsigned int *)(addr+8));
5782 + err |= get_user(or2, (unsigned int *)(addr+12));
5783 + err |= get_user(sllx, (unsigned int *)(addr+16));
5784 + err |= get_user(add, (unsigned int *)(addr+20));
5785 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5786 + err |= get_user(nop, (unsigned int *)(addr+28));
5787 + if (err)
5788 + break;
5789 +
5790 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5791 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5792 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5793 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5794 + sllx == 0x89293020U &&
5795 + add == 0x8A010005U &&
5796 + jmpl == 0x89C14000U &&
5797 + nop == 0x01000000U)
5798 + {
5799 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5800 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5801 + regs->u_regs[UREG_G4] <<= 32;
5802 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5803 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5804 + regs->u_regs[UREG_G4] = addr + 24;
5805 + addr = regs->u_regs[UREG_G5];
5806 + regs->tpc = addr;
5807 + regs->tnpc = addr+4;
5808 + return 3;
5809 + }
5810 + }
5811 + } while (0);
5812 +
5813 +#ifdef CONFIG_PAX_DLRESOLVE
5814 + do { /* PaX: unpatched PLT emulation step 2 */
5815 + unsigned int save, call, nop;
5816 +
5817 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5818 + err |= get_user(call, (unsigned int *)regs->tpc);
5819 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5820 + if (err)
5821 + break;
5822 +
5823 + if (save == 0x9DE3BFA8U &&
5824 + (call & 0xC0000000U) == 0x40000000U &&
5825 + nop == 0x01000000U)
5826 + {
5827 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5828 +
5829 + if (test_thread_flag(TIF_32BIT))
5830 + dl_resolve &= 0xFFFFFFFFUL;
5831 +
5832 + regs->u_regs[UREG_RETPC] = regs->tpc;
5833 + regs->tpc = dl_resolve;
5834 + regs->tnpc = dl_resolve+4;
5835 + return 3;
5836 + }
5837 + } while (0);
5838 +#endif
5839 +
5840 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5841 + unsigned int sethi, ba, nop;
5842 +
5843 + err = get_user(sethi, (unsigned int *)regs->tpc);
5844 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5845 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5846 +
5847 + if (err)
5848 + break;
5849 +
5850 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5851 + (ba & 0xFFF00000U) == 0x30600000U &&
5852 + nop == 0x01000000U)
5853 + {
5854 + unsigned long addr;
5855 +
5856 + addr = (sethi & 0x003FFFFFU) << 10;
5857 + regs->u_regs[UREG_G1] = addr;
5858 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5859 +
5860 + if (test_thread_flag(TIF_32BIT))
5861 + addr &= 0xFFFFFFFFUL;
5862 +
5863 + regs->tpc = addr;
5864 + regs->tnpc = addr+4;
5865 + return 2;
5866 + }
5867 + } while (0);
5868 +
5869 +#endif
5870 +
5871 + return 1;
5872 +}
5873 +
5874 +void pax_report_insns(void *pc, void *sp)
5875 +{
5876 + unsigned long i;
5877 +
5878 + printk(KERN_ERR "PAX: bytes at PC: ");
5879 + for (i = 0; i < 8; i++) {
5880 + unsigned int c;
5881 + if (get_user(c, (unsigned int *)pc+i))
5882 + printk(KERN_CONT "???????? ");
5883 + else
5884 + printk(KERN_CONT "%08x ", c);
5885 + }
5886 + printk("\n");
5887 +}
5888 +#endif
5889 +
5890 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5891 {
5892 struct mm_struct *mm = current->mm;
5893 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5894 if (!vma)
5895 goto bad_area;
5896
5897 +#ifdef CONFIG_PAX_PAGEEXEC
5898 + /* PaX: detect ITLB misses on non-exec pages */
5899 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5900 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5901 + {
5902 + if (address != regs->tpc)
5903 + goto good_area;
5904 +
5905 + up_read(&mm->mmap_sem);
5906 + switch (pax_handle_fetch_fault(regs)) {
5907 +
5908 +#ifdef CONFIG_PAX_EMUPLT
5909 + case 2:
5910 + case 3:
5911 + return;
5912 +#endif
5913 +
5914 + }
5915 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5916 + do_group_exit(SIGKILL);
5917 + }
5918 +#endif
5919 +
5920 /* Pure DTLB misses do not tell us whether the fault causing
5921 * load/store/atomic was a write or not, it only says that there
5922 * was no match. So in such a case we (carefully) read the
5923 diff -urNp linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c
5924 --- linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5925 +++ linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5926 @@ -69,7 +69,7 @@ full_search:
5927 }
5928 return -ENOMEM;
5929 }
5930 - if (likely(!vma || addr + len <= vma->vm_start)) {
5931 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5932 /*
5933 * Remember the place where we stopped the search:
5934 */
5935 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5936 /* make sure it can fit in the remaining address space */
5937 if (likely(addr > len)) {
5938 vma = find_vma(mm, addr-len);
5939 - if (!vma || addr <= vma->vm_start) {
5940 + if (check_heap_stack_gap(vma, addr - len, len)) {
5941 /* remember the address as a hint for next time */
5942 return (mm->free_area_cache = addr-len);
5943 }
5944 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5945 if (unlikely(mm->mmap_base < len))
5946 goto bottomup;
5947
5948 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5949 + addr = mm->mmap_base - len;
5950
5951 do {
5952 + addr &= HPAGE_MASK;
5953 /*
5954 * Lookup failure means no vma is above this address,
5955 * else if new region fits below vma->vm_start,
5956 * return with success:
5957 */
5958 vma = find_vma(mm, addr);
5959 - if (likely(!vma || addr+len <= vma->vm_start)) {
5960 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5961 /* remember the address as a hint for next time */
5962 return (mm->free_area_cache = addr);
5963 }
5964 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5965 mm->cached_hole_size = vma->vm_start - addr;
5966
5967 /* try just below the current vma->vm_start */
5968 - addr = (vma->vm_start-len) & HPAGE_MASK;
5969 - } while (likely(len < vma->vm_start));
5970 + addr = skip_heap_stack_gap(vma, len);
5971 + } while (!IS_ERR_VALUE(addr));
5972
5973 bottomup:
5974 /*
5975 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5976 if (addr) {
5977 addr = ALIGN(addr, HPAGE_SIZE);
5978 vma = find_vma(mm, addr);
5979 - if (task_size - len >= addr &&
5980 - (!vma || addr + len <= vma->vm_start))
5981 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5982 return addr;
5983 }
5984 if (mm->get_unmapped_area == arch_get_unmapped_area)
5985 diff -urNp linux-2.6.32.43/arch/sparc/mm/init_32.c linux-2.6.32.43/arch/sparc/mm/init_32.c
5986 --- linux-2.6.32.43/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5987 +++ linux-2.6.32.43/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5988 @@ -317,6 +317,9 @@ extern void device_scan(void);
5989 pgprot_t PAGE_SHARED __read_mostly;
5990 EXPORT_SYMBOL(PAGE_SHARED);
5991
5992 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5993 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5994 +
5995 void __init paging_init(void)
5996 {
5997 switch(sparc_cpu_model) {
5998 @@ -345,17 +348,17 @@ void __init paging_init(void)
5999
6000 /* Initialize the protection map with non-constant, MMU dependent values. */
6001 protection_map[0] = PAGE_NONE;
6002 - protection_map[1] = PAGE_READONLY;
6003 - protection_map[2] = PAGE_COPY;
6004 - protection_map[3] = PAGE_COPY;
6005 + protection_map[1] = PAGE_READONLY_NOEXEC;
6006 + protection_map[2] = PAGE_COPY_NOEXEC;
6007 + protection_map[3] = PAGE_COPY_NOEXEC;
6008 protection_map[4] = PAGE_READONLY;
6009 protection_map[5] = PAGE_READONLY;
6010 protection_map[6] = PAGE_COPY;
6011 protection_map[7] = PAGE_COPY;
6012 protection_map[8] = PAGE_NONE;
6013 - protection_map[9] = PAGE_READONLY;
6014 - protection_map[10] = PAGE_SHARED;
6015 - protection_map[11] = PAGE_SHARED;
6016 + protection_map[9] = PAGE_READONLY_NOEXEC;
6017 + protection_map[10] = PAGE_SHARED_NOEXEC;
6018 + protection_map[11] = PAGE_SHARED_NOEXEC;
6019 protection_map[12] = PAGE_READONLY;
6020 protection_map[13] = PAGE_READONLY;
6021 protection_map[14] = PAGE_SHARED;
6022 diff -urNp linux-2.6.32.43/arch/sparc/mm/Makefile linux-2.6.32.43/arch/sparc/mm/Makefile
6023 --- linux-2.6.32.43/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6024 +++ linux-2.6.32.43/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6025 @@ -2,7 +2,7 @@
6026 #
6027
6028 asflags-y := -ansi
6029 -ccflags-y := -Werror
6030 +#ccflags-y := -Werror
6031
6032 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6033 obj-y += fault_$(BITS).o
6034 diff -urNp linux-2.6.32.43/arch/sparc/mm/srmmu.c linux-2.6.32.43/arch/sparc/mm/srmmu.c
6035 --- linux-2.6.32.43/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6036 +++ linux-2.6.32.43/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6037 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6038 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6039 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6040 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6041 +
6042 +#ifdef CONFIG_PAX_PAGEEXEC
6043 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6044 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6045 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6046 +#endif
6047 +
6048 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6049 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6050
6051 diff -urNp linux-2.6.32.43/arch/um/include/asm/kmap_types.h linux-2.6.32.43/arch/um/include/asm/kmap_types.h
6052 --- linux-2.6.32.43/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6053 +++ linux-2.6.32.43/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6054 @@ -23,6 +23,7 @@ enum km_type {
6055 KM_IRQ1,
6056 KM_SOFTIRQ0,
6057 KM_SOFTIRQ1,
6058 + KM_CLEARPAGE,
6059 KM_TYPE_NR
6060 };
6061
6062 diff -urNp linux-2.6.32.43/arch/um/include/asm/page.h linux-2.6.32.43/arch/um/include/asm/page.h
6063 --- linux-2.6.32.43/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6064 +++ linux-2.6.32.43/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6065 @@ -14,6 +14,9 @@
6066 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6067 #define PAGE_MASK (~(PAGE_SIZE-1))
6068
6069 +#define ktla_ktva(addr) (addr)
6070 +#define ktva_ktla(addr) (addr)
6071 +
6072 #ifndef __ASSEMBLY__
6073
6074 struct page;
6075 diff -urNp linux-2.6.32.43/arch/um/kernel/process.c linux-2.6.32.43/arch/um/kernel/process.c
6076 --- linux-2.6.32.43/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6077 +++ linux-2.6.32.43/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6078 @@ -393,22 +393,6 @@ int singlestepping(void * t)
6079 return 2;
6080 }
6081
6082 -/*
6083 - * Only x86 and x86_64 have an arch_align_stack().
6084 - * All other arches have "#define arch_align_stack(x) (x)"
6085 - * in their asm/system.h
6086 - * As this is included in UML from asm-um/system-generic.h,
6087 - * we can use it to behave as the subarch does.
6088 - */
6089 -#ifndef arch_align_stack
6090 -unsigned long arch_align_stack(unsigned long sp)
6091 -{
6092 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6093 - sp -= get_random_int() % 8192;
6094 - return sp & ~0xf;
6095 -}
6096 -#endif
6097 -
6098 unsigned long get_wchan(struct task_struct *p)
6099 {
6100 unsigned long stack_page, sp, ip;
6101 diff -urNp linux-2.6.32.43/arch/um/sys-i386/syscalls.c linux-2.6.32.43/arch/um/sys-i386/syscalls.c
6102 --- linux-2.6.32.43/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6103 +++ linux-2.6.32.43/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6104 @@ -11,6 +11,21 @@
6105 #include "asm/uaccess.h"
6106 #include "asm/unistd.h"
6107
6108 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6109 +{
6110 + unsigned long pax_task_size = TASK_SIZE;
6111 +
6112 +#ifdef CONFIG_PAX_SEGMEXEC
6113 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6114 + pax_task_size = SEGMEXEC_TASK_SIZE;
6115 +#endif
6116 +
6117 + if (len > pax_task_size || addr > pax_task_size - len)
6118 + return -EINVAL;
6119 +
6120 + return 0;
6121 +}
6122 +
6123 /*
6124 * Perform the select(nd, in, out, ex, tv) and mmap() system
6125 * calls. Linux/i386 didn't use to be able to handle more than
6126 diff -urNp linux-2.6.32.43/arch/x86/boot/bitops.h linux-2.6.32.43/arch/x86/boot/bitops.h
6127 --- linux-2.6.32.43/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6128 +++ linux-2.6.32.43/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6129 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6130 u8 v;
6131 const u32 *p = (const u32 *)addr;
6132
6133 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6134 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6135 return v;
6136 }
6137
6138 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6139
6140 static inline void set_bit(int nr, void *addr)
6141 {
6142 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6143 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6144 }
6145
6146 #endif /* BOOT_BITOPS_H */
6147 diff -urNp linux-2.6.32.43/arch/x86/boot/boot.h linux-2.6.32.43/arch/x86/boot/boot.h
6148 --- linux-2.6.32.43/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6149 +++ linux-2.6.32.43/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6150 @@ -82,7 +82,7 @@ static inline void io_delay(void)
6151 static inline u16 ds(void)
6152 {
6153 u16 seg;
6154 - asm("movw %%ds,%0" : "=rm" (seg));
6155 + asm volatile("movw %%ds,%0" : "=rm" (seg));
6156 return seg;
6157 }
6158
6159 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6160 static inline int memcmp(const void *s1, const void *s2, size_t len)
6161 {
6162 u8 diff;
6163 - asm("repe; cmpsb; setnz %0"
6164 + asm volatile("repe; cmpsb; setnz %0"
6165 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6166 return diff;
6167 }
6168 diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/head_32.S linux-2.6.32.43/arch/x86/boot/compressed/head_32.S
6169 --- linux-2.6.32.43/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6170 +++ linux-2.6.32.43/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6171 @@ -76,7 +76,7 @@ ENTRY(startup_32)
6172 notl %eax
6173 andl %eax, %ebx
6174 #else
6175 - movl $LOAD_PHYSICAL_ADDR, %ebx
6176 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6177 #endif
6178
6179 /* Target address to relocate to for decompression */
6180 @@ -149,7 +149,7 @@ relocated:
6181 * and where it was actually loaded.
6182 */
6183 movl %ebp, %ebx
6184 - subl $LOAD_PHYSICAL_ADDR, %ebx
6185 + subl $____LOAD_PHYSICAL_ADDR, %ebx
6186 jz 2f /* Nothing to be done if loaded at compiled addr. */
6187 /*
6188 * Process relocations.
6189 @@ -157,8 +157,7 @@ relocated:
6190
6191 1: subl $4, %edi
6192 movl (%edi), %ecx
6193 - testl %ecx, %ecx
6194 - jz 2f
6195 + jecxz 2f
6196 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6197 jmp 1b
6198 2:
6199 diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/head_64.S linux-2.6.32.43/arch/x86/boot/compressed/head_64.S
6200 --- linux-2.6.32.43/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6201 +++ linux-2.6.32.43/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6202 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6203 notl %eax
6204 andl %eax, %ebx
6205 #else
6206 - movl $LOAD_PHYSICAL_ADDR, %ebx
6207 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6208 #endif
6209
6210 /* Target address to relocate to for decompression */
6211 @@ -183,7 +183,7 @@ no_longmode:
6212 hlt
6213 jmp 1b
6214
6215 -#include "../../kernel/verify_cpu_64.S"
6216 +#include "../../kernel/verify_cpu.S"
6217
6218 /*
6219 * Be careful here startup_64 needs to be at a predictable
6220 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6221 notq %rax
6222 andq %rax, %rbp
6223 #else
6224 - movq $LOAD_PHYSICAL_ADDR, %rbp
6225 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6226 #endif
6227
6228 /* Target address to relocate to for decompression */
6229 diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/misc.c linux-2.6.32.43/arch/x86/boot/compressed/misc.c
6230 --- linux-2.6.32.43/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6231 +++ linux-2.6.32.43/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6232 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6233 case PT_LOAD:
6234 #ifdef CONFIG_RELOCATABLE
6235 dest = output;
6236 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6237 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6238 #else
6239 dest = (void *)(phdr->p_paddr);
6240 #endif
6241 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6242 error("Destination address too large");
6243 #endif
6244 #ifndef CONFIG_RELOCATABLE
6245 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6246 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6247 error("Wrong destination address");
6248 #endif
6249
6250 diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c
6251 --- linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6252 +++ linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6253 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6254
6255 offs = (olen > ilen) ? olen - ilen : 0;
6256 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6257 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6258 + offs += 64*1024; /* Add 64K bytes slack */
6259 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6260
6261 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6262 diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/relocs.c linux-2.6.32.43/arch/x86/boot/compressed/relocs.c
6263 --- linux-2.6.32.43/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6264 +++ linux-2.6.32.43/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6265 @@ -10,8 +10,11 @@
6266 #define USE_BSD
6267 #include <endian.h>
6268
6269 +#include "../../../../include/linux/autoconf.h"
6270 +
6271 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6272 static Elf32_Ehdr ehdr;
6273 +static Elf32_Phdr *phdr;
6274 static unsigned long reloc_count, reloc_idx;
6275 static unsigned long *relocs;
6276
6277 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6278
6279 static int is_safe_abs_reloc(const char* sym_name)
6280 {
6281 - int i;
6282 + unsigned int i;
6283
6284 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6285 if (!strcmp(sym_name, safe_abs_relocs[i]))
6286 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6287 }
6288 }
6289
6290 +static void read_phdrs(FILE *fp)
6291 +{
6292 + unsigned int i;
6293 +
6294 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6295 + if (!phdr) {
6296 + die("Unable to allocate %d program headers\n",
6297 + ehdr.e_phnum);
6298 + }
6299 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6300 + die("Seek to %d failed: %s\n",
6301 + ehdr.e_phoff, strerror(errno));
6302 + }
6303 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6304 + die("Cannot read ELF program headers: %s\n",
6305 + strerror(errno));
6306 + }
6307 + for(i = 0; i < ehdr.e_phnum; i++) {
6308 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6309 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6310 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6311 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6312 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6313 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6314 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6315 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6316 + }
6317 +
6318 +}
6319 +
6320 static void read_shdrs(FILE *fp)
6321 {
6322 - int i;
6323 + unsigned int i;
6324 Elf32_Shdr shdr;
6325
6326 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6327 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6328
6329 static void read_strtabs(FILE *fp)
6330 {
6331 - int i;
6332 + unsigned int i;
6333 for (i = 0; i < ehdr.e_shnum; i++) {
6334 struct section *sec = &secs[i];
6335 if (sec->shdr.sh_type != SHT_STRTAB) {
6336 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6337
6338 static void read_symtabs(FILE *fp)
6339 {
6340 - int i,j;
6341 + unsigned int i,j;
6342 for (i = 0; i < ehdr.e_shnum; i++) {
6343 struct section *sec = &secs[i];
6344 if (sec->shdr.sh_type != SHT_SYMTAB) {
6345 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6346
6347 static void read_relocs(FILE *fp)
6348 {
6349 - int i,j;
6350 + unsigned int i,j;
6351 + uint32_t base;
6352 +
6353 for (i = 0; i < ehdr.e_shnum; i++) {
6354 struct section *sec = &secs[i];
6355 if (sec->shdr.sh_type != SHT_REL) {
6356 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6357 die("Cannot read symbol table: %s\n",
6358 strerror(errno));
6359 }
6360 + base = 0;
6361 + for (j = 0; j < ehdr.e_phnum; j++) {
6362 + if (phdr[j].p_type != PT_LOAD )
6363 + continue;
6364 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6365 + continue;
6366 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6367 + break;
6368 + }
6369 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6370 Elf32_Rel *rel = &sec->reltab[j];
6371 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6372 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6373 rel->r_info = elf32_to_cpu(rel->r_info);
6374 }
6375 }
6376 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6377
6378 static void print_absolute_symbols(void)
6379 {
6380 - int i;
6381 + unsigned int i;
6382 printf("Absolute symbols\n");
6383 printf(" Num: Value Size Type Bind Visibility Name\n");
6384 for (i = 0; i < ehdr.e_shnum; i++) {
6385 struct section *sec = &secs[i];
6386 char *sym_strtab;
6387 Elf32_Sym *sh_symtab;
6388 - int j;
6389 + unsigned int j;
6390
6391 if (sec->shdr.sh_type != SHT_SYMTAB) {
6392 continue;
6393 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6394
6395 static void print_absolute_relocs(void)
6396 {
6397 - int i, printed = 0;
6398 + unsigned int i, printed = 0;
6399
6400 for (i = 0; i < ehdr.e_shnum; i++) {
6401 struct section *sec = &secs[i];
6402 struct section *sec_applies, *sec_symtab;
6403 char *sym_strtab;
6404 Elf32_Sym *sh_symtab;
6405 - int j;
6406 + unsigned int j;
6407 if (sec->shdr.sh_type != SHT_REL) {
6408 continue;
6409 }
6410 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6411
6412 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6413 {
6414 - int i;
6415 + unsigned int i;
6416 /* Walk through the relocations */
6417 for (i = 0; i < ehdr.e_shnum; i++) {
6418 char *sym_strtab;
6419 Elf32_Sym *sh_symtab;
6420 struct section *sec_applies, *sec_symtab;
6421 - int j;
6422 + unsigned int j;
6423 struct section *sec = &secs[i];
6424
6425 if (sec->shdr.sh_type != SHT_REL) {
6426 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6427 if (sym->st_shndx == SHN_ABS) {
6428 continue;
6429 }
6430 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6431 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6432 + continue;
6433 +
6434 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6435 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6436 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6437 + continue;
6438 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6439 + continue;
6440 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6441 + continue;
6442 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6443 + continue;
6444 +#endif
6445 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6446 /*
6447 * NONE can be ignored and and PC relative
6448 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6449
6450 static void emit_relocs(int as_text)
6451 {
6452 - int i;
6453 + unsigned int i;
6454 /* Count how many relocations I have and allocate space for them. */
6455 reloc_count = 0;
6456 walk_relocs(count_reloc);
6457 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6458 fname, strerror(errno));
6459 }
6460 read_ehdr(fp);
6461 + read_phdrs(fp);
6462 read_shdrs(fp);
6463 read_strtabs(fp);
6464 read_symtabs(fp);
6465 diff -urNp linux-2.6.32.43/arch/x86/boot/cpucheck.c linux-2.6.32.43/arch/x86/boot/cpucheck.c
6466 --- linux-2.6.32.43/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6467 +++ linux-2.6.32.43/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6468 @@ -74,7 +74,7 @@ static int has_fpu(void)
6469 u16 fcw = -1, fsw = -1;
6470 u32 cr0;
6471
6472 - asm("movl %%cr0,%0" : "=r" (cr0));
6473 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6474 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6475 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6476 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6477 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6478 {
6479 u32 f0, f1;
6480
6481 - asm("pushfl ; "
6482 + asm volatile("pushfl ; "
6483 "pushfl ; "
6484 "popl %0 ; "
6485 "movl %0,%1 ; "
6486 @@ -115,7 +115,7 @@ static void get_flags(void)
6487 set_bit(X86_FEATURE_FPU, cpu.flags);
6488
6489 if (has_eflag(X86_EFLAGS_ID)) {
6490 - asm("cpuid"
6491 + asm volatile("cpuid"
6492 : "=a" (max_intel_level),
6493 "=b" (cpu_vendor[0]),
6494 "=d" (cpu_vendor[1]),
6495 @@ -124,7 +124,7 @@ static void get_flags(void)
6496
6497 if (max_intel_level >= 0x00000001 &&
6498 max_intel_level <= 0x0000ffff) {
6499 - asm("cpuid"
6500 + asm volatile("cpuid"
6501 : "=a" (tfms),
6502 "=c" (cpu.flags[4]),
6503 "=d" (cpu.flags[0])
6504 @@ -136,7 +136,7 @@ static void get_flags(void)
6505 cpu.model += ((tfms >> 16) & 0xf) << 4;
6506 }
6507
6508 - asm("cpuid"
6509 + asm volatile("cpuid"
6510 : "=a" (max_amd_level)
6511 : "a" (0x80000000)
6512 : "ebx", "ecx", "edx");
6513 @@ -144,7 +144,7 @@ static void get_flags(void)
6514 if (max_amd_level >= 0x80000001 &&
6515 max_amd_level <= 0x8000ffff) {
6516 u32 eax = 0x80000001;
6517 - asm("cpuid"
6518 + asm volatile("cpuid"
6519 : "+a" (eax),
6520 "=c" (cpu.flags[6]),
6521 "=d" (cpu.flags[1])
6522 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6523 u32 ecx = MSR_K7_HWCR;
6524 u32 eax, edx;
6525
6526 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6527 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6528 eax &= ~(1 << 15);
6529 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6530 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6531
6532 get_flags(); /* Make sure it really did something */
6533 err = check_flags();
6534 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6535 u32 ecx = MSR_VIA_FCR;
6536 u32 eax, edx;
6537
6538 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6539 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6540 eax |= (1<<1)|(1<<7);
6541 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6542 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6543
6544 set_bit(X86_FEATURE_CX8, cpu.flags);
6545 err = check_flags();
6546 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6547 u32 eax, edx;
6548 u32 level = 1;
6549
6550 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6551 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6552 - asm("cpuid"
6553 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6554 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6555 + asm volatile("cpuid"
6556 : "+a" (level), "=d" (cpu.flags[0])
6557 : : "ecx", "ebx");
6558 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6559 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6560
6561 err = check_flags();
6562 }
6563 diff -urNp linux-2.6.32.43/arch/x86/boot/header.S linux-2.6.32.43/arch/x86/boot/header.S
6564 --- linux-2.6.32.43/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6565 +++ linux-2.6.32.43/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6566 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6567 # single linked list of
6568 # struct setup_data
6569
6570 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6571 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6572
6573 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6574 #define VO_INIT_SIZE (VO__end - VO__text)
6575 diff -urNp linux-2.6.32.43/arch/x86/boot/memory.c linux-2.6.32.43/arch/x86/boot/memory.c
6576 --- linux-2.6.32.43/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6577 +++ linux-2.6.32.43/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6578 @@ -19,7 +19,7 @@
6579
6580 static int detect_memory_e820(void)
6581 {
6582 - int count = 0;
6583 + unsigned int count = 0;
6584 struct biosregs ireg, oreg;
6585 struct e820entry *desc = boot_params.e820_map;
6586 static struct e820entry buf; /* static so it is zeroed */
6587 diff -urNp linux-2.6.32.43/arch/x86/boot/video.c linux-2.6.32.43/arch/x86/boot/video.c
6588 --- linux-2.6.32.43/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6589 +++ linux-2.6.32.43/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6590 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6591 static unsigned int get_entry(void)
6592 {
6593 char entry_buf[4];
6594 - int i, len = 0;
6595 + unsigned int i, len = 0;
6596 int key;
6597 unsigned int v;
6598
6599 diff -urNp linux-2.6.32.43/arch/x86/boot/video-vesa.c linux-2.6.32.43/arch/x86/boot/video-vesa.c
6600 --- linux-2.6.32.43/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6601 +++ linux-2.6.32.43/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6602 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6603
6604 boot_params.screen_info.vesapm_seg = oreg.es;
6605 boot_params.screen_info.vesapm_off = oreg.di;
6606 + boot_params.screen_info.vesapm_size = oreg.cx;
6607 }
6608
6609 /*
6610 diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32_aout.c linux-2.6.32.43/arch/x86/ia32/ia32_aout.c
6611 --- linux-2.6.32.43/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6612 +++ linux-2.6.32.43/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6613 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6614 unsigned long dump_start, dump_size;
6615 struct user32 dump;
6616
6617 + memset(&dump, 0, sizeof(dump));
6618 +
6619 fs = get_fs();
6620 set_fs(KERNEL_DS);
6621 has_dumped = 1;
6622 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6623 dump_size = dump.u_ssize << PAGE_SHIFT;
6624 DUMP_WRITE(dump_start, dump_size);
6625 }
6626 - /*
6627 - * Finally dump the task struct. Not be used by gdb, but
6628 - * could be useful
6629 - */
6630 - set_fs(KERNEL_DS);
6631 - DUMP_WRITE(current, sizeof(*current));
6632 end_coredump:
6633 set_fs(fs);
6634 return has_dumped;
6635 diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32entry.S linux-2.6.32.43/arch/x86/ia32/ia32entry.S
6636 --- linux-2.6.32.43/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6637 +++ linux-2.6.32.43/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6638 @@ -13,6 +13,7 @@
6639 #include <asm/thread_info.h>
6640 #include <asm/segment.h>
6641 #include <asm/irqflags.h>
6642 +#include <asm/pgtable.h>
6643 #include <linux/linkage.h>
6644
6645 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6646 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6647 ENDPROC(native_irq_enable_sysexit)
6648 #endif
6649
6650 + .macro pax_enter_kernel_user
6651 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6652 + call pax_enter_kernel_user
6653 +#endif
6654 + .endm
6655 +
6656 + .macro pax_exit_kernel_user
6657 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6658 + call pax_exit_kernel_user
6659 +#endif
6660 +#ifdef CONFIG_PAX_RANDKSTACK
6661 + pushq %rax
6662 + call pax_randomize_kstack
6663 + popq %rax
6664 +#endif
6665 + pax_erase_kstack
6666 + .endm
6667 +
6668 +.macro pax_erase_kstack
6669 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6670 + call pax_erase_kstack
6671 +#endif
6672 +.endm
6673 +
6674 /*
6675 * 32bit SYSENTER instruction entry.
6676 *
6677 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6678 CFI_REGISTER rsp,rbp
6679 SWAPGS_UNSAFE_STACK
6680 movq PER_CPU_VAR(kernel_stack), %rsp
6681 - addq $(KERNEL_STACK_OFFSET),%rsp
6682 + pax_enter_kernel_user
6683 /*
6684 * No need to follow this irqs on/off section: the syscall
6685 * disabled irqs, here we enable it straight after entry:
6686 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6687 pushfq
6688 CFI_ADJUST_CFA_OFFSET 8
6689 /*CFI_REL_OFFSET rflags,0*/
6690 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6691 + GET_THREAD_INFO(%r10)
6692 + movl TI_sysenter_return(%r10), %r10d
6693 CFI_REGISTER rip,r10
6694 pushq $__USER32_CS
6695 CFI_ADJUST_CFA_OFFSET 8
6696 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6697 SAVE_ARGS 0,0,1
6698 /* no need to do an access_ok check here because rbp has been
6699 32bit zero extended */
6700 +
6701 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6702 + mov $PAX_USER_SHADOW_BASE,%r10
6703 + add %r10,%rbp
6704 +#endif
6705 +
6706 1: movl (%rbp),%ebp
6707 .section __ex_table,"a"
6708 .quad 1b,ia32_badarg
6709 @@ -172,6 +204,7 @@ sysenter_dispatch:
6710 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6711 jnz sysexit_audit
6712 sysexit_from_sys_call:
6713 + pax_exit_kernel_user
6714 andl $~TS_COMPAT,TI_status(%r10)
6715 /* clear IF, that popfq doesn't enable interrupts early */
6716 andl $~0x200,EFLAGS-R11(%rsp)
6717 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6718 movl %eax,%esi /* 2nd arg: syscall number */
6719 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6720 call audit_syscall_entry
6721 +
6722 + pax_erase_kstack
6723 +
6724 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6725 cmpq $(IA32_NR_syscalls-1),%rax
6726 ja ia32_badsys
6727 @@ -252,6 +288,9 @@ sysenter_tracesys:
6728 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6729 movq %rsp,%rdi /* &pt_regs -> arg1 */
6730 call syscall_trace_enter
6731 +
6732 + pax_erase_kstack
6733 +
6734 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6735 RESTORE_REST
6736 cmpq $(IA32_NR_syscalls-1),%rax
6737 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6738 ENTRY(ia32_cstar_target)
6739 CFI_STARTPROC32 simple
6740 CFI_SIGNAL_FRAME
6741 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6742 + CFI_DEF_CFA rsp,0
6743 CFI_REGISTER rip,rcx
6744 /*CFI_REGISTER rflags,r11*/
6745 SWAPGS_UNSAFE_STACK
6746 movl %esp,%r8d
6747 CFI_REGISTER rsp,r8
6748 movq PER_CPU_VAR(kernel_stack),%rsp
6749 +
6750 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6751 + pax_enter_kernel_user
6752 +#endif
6753 +
6754 /*
6755 * No need to follow this irqs on/off section: the syscall
6756 * disabled irqs and here we enable it straight after entry:
6757 */
6758 ENABLE_INTERRUPTS(CLBR_NONE)
6759 - SAVE_ARGS 8,1,1
6760 + SAVE_ARGS 8*6,1,1
6761 movl %eax,%eax /* zero extension */
6762 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6763 movq %rcx,RIP-ARGOFFSET(%rsp)
6764 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6765 /* no need to do an access_ok check here because r8 has been
6766 32bit zero extended */
6767 /* hardware stack frame is complete now */
6768 +
6769 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6770 + mov $PAX_USER_SHADOW_BASE,%r10
6771 + add %r10,%r8
6772 +#endif
6773 +
6774 1: movl (%r8),%r9d
6775 .section __ex_table,"a"
6776 .quad 1b,ia32_badarg
6777 @@ -333,6 +383,7 @@ cstar_dispatch:
6778 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6779 jnz sysretl_audit
6780 sysretl_from_sys_call:
6781 + pax_exit_kernel_user
6782 andl $~TS_COMPAT,TI_status(%r10)
6783 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6784 movl RIP-ARGOFFSET(%rsp),%ecx
6785 @@ -370,6 +421,9 @@ cstar_tracesys:
6786 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6787 movq %rsp,%rdi /* &pt_regs -> arg1 */
6788 call syscall_trace_enter
6789 +
6790 + pax_erase_kstack
6791 +
6792 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6793 RESTORE_REST
6794 xchgl %ebp,%r9d
6795 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6796 CFI_REL_OFFSET rip,RIP-RIP
6797 PARAVIRT_ADJUST_EXCEPTION_FRAME
6798 SWAPGS
6799 + pax_enter_kernel_user
6800 /*
6801 * No need to follow this irqs on/off section: the syscall
6802 * disabled irqs and here we enable it straight after entry:
6803 @@ -448,6 +503,9 @@ ia32_tracesys:
6804 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6805 movq %rsp,%rdi /* &pt_regs -> arg1 */
6806 call syscall_trace_enter
6807 +
6808 + pax_erase_kstack
6809 +
6810 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6811 RESTORE_REST
6812 cmpq $(IA32_NR_syscalls-1),%rax
6813 diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32_signal.c linux-2.6.32.43/arch/x86/ia32/ia32_signal.c
6814 --- linux-2.6.32.43/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6815 +++ linux-2.6.32.43/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6816 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6817 sp -= frame_size;
6818 /* Align the stack pointer according to the i386 ABI,
6819 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6820 - sp = ((sp + 4) & -16ul) - 4;
6821 + sp = ((sp - 12) & -16ul) - 4;
6822 return (void __user *) sp;
6823 }
6824
6825 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6826 * These are actually not used anymore, but left because some
6827 * gdb versions depend on them as a marker.
6828 */
6829 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6830 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6831 } put_user_catch(err);
6832
6833 if (err)
6834 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6835 0xb8,
6836 __NR_ia32_rt_sigreturn,
6837 0x80cd,
6838 - 0,
6839 + 0
6840 };
6841
6842 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6843 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6844
6845 if (ka->sa.sa_flags & SA_RESTORER)
6846 restorer = ka->sa.sa_restorer;
6847 + else if (current->mm->context.vdso)
6848 + /* Return stub is in 32bit vsyscall page */
6849 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6850 else
6851 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6852 - rt_sigreturn);
6853 + restorer = &frame->retcode;
6854 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6855
6856 /*
6857 * Not actually used anymore, but left because some gdb
6858 * versions need it.
6859 */
6860 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6861 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6862 } put_user_catch(err);
6863
6864 if (err)
6865 diff -urNp linux-2.6.32.43/arch/x86/include/asm/alternative.h linux-2.6.32.43/arch/x86/include/asm/alternative.h
6866 --- linux-2.6.32.43/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6867 +++ linux-2.6.32.43/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6868 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6869 " .byte 662b-661b\n" /* sourcelen */ \
6870 " .byte 664f-663f\n" /* replacementlen */ \
6871 ".previous\n" \
6872 - ".section .altinstr_replacement, \"ax\"\n" \
6873 + ".section .altinstr_replacement, \"a\"\n" \
6874 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6875 ".previous"
6876
6877 diff -urNp linux-2.6.32.43/arch/x86/include/asm/apm.h linux-2.6.32.43/arch/x86/include/asm/apm.h
6878 --- linux-2.6.32.43/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6879 +++ linux-2.6.32.43/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6880 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6881 __asm__ __volatile__(APM_DO_ZERO_SEGS
6882 "pushl %%edi\n\t"
6883 "pushl %%ebp\n\t"
6884 - "lcall *%%cs:apm_bios_entry\n\t"
6885 + "lcall *%%ss:apm_bios_entry\n\t"
6886 "setc %%al\n\t"
6887 "popl %%ebp\n\t"
6888 "popl %%edi\n\t"
6889 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6890 __asm__ __volatile__(APM_DO_ZERO_SEGS
6891 "pushl %%edi\n\t"
6892 "pushl %%ebp\n\t"
6893 - "lcall *%%cs:apm_bios_entry\n\t"
6894 + "lcall *%%ss:apm_bios_entry\n\t"
6895 "setc %%bl\n\t"
6896 "popl %%ebp\n\t"
6897 "popl %%edi\n\t"
6898 diff -urNp linux-2.6.32.43/arch/x86/include/asm/atomic_32.h linux-2.6.32.43/arch/x86/include/asm/atomic_32.h
6899 --- linux-2.6.32.43/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6900 +++ linux-2.6.32.43/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6901 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6902 }
6903
6904 /**
6905 + * atomic_read_unchecked - read atomic variable
6906 + * @v: pointer of type atomic_unchecked_t
6907 + *
6908 + * Atomically reads the value of @v.
6909 + */
6910 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6911 +{
6912 + return v->counter;
6913 +}
6914 +
6915 +/**
6916 * atomic_set - set atomic variable
6917 * @v: pointer of type atomic_t
6918 * @i: required value
6919 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6920 }
6921
6922 /**
6923 + * atomic_set_unchecked - set atomic variable
6924 + * @v: pointer of type atomic_unchecked_t
6925 + * @i: required value
6926 + *
6927 + * Atomically sets the value of @v to @i.
6928 + */
6929 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6930 +{
6931 + v->counter = i;
6932 +}
6933 +
6934 +/**
6935 * atomic_add - add integer to atomic variable
6936 * @i: integer value to add
6937 * @v: pointer of type atomic_t
6938 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6939 */
6940 static inline void atomic_add(int i, atomic_t *v)
6941 {
6942 - asm volatile(LOCK_PREFIX "addl %1,%0"
6943 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6944 +
6945 +#ifdef CONFIG_PAX_REFCOUNT
6946 + "jno 0f\n"
6947 + LOCK_PREFIX "subl %1,%0\n"
6948 + "int $4\n0:\n"
6949 + _ASM_EXTABLE(0b, 0b)
6950 +#endif
6951 +
6952 + : "+m" (v->counter)
6953 + : "ir" (i));
6954 +}
6955 +
6956 +/**
6957 + * atomic_add_unchecked - add integer to atomic variable
6958 + * @i: integer value to add
6959 + * @v: pointer of type atomic_unchecked_t
6960 + *
6961 + * Atomically adds @i to @v.
6962 + */
6963 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6964 +{
6965 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6966 : "+m" (v->counter)
6967 : "ir" (i));
6968 }
6969 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6970 */
6971 static inline void atomic_sub(int i, atomic_t *v)
6972 {
6973 - asm volatile(LOCK_PREFIX "subl %1,%0"
6974 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6975 +
6976 +#ifdef CONFIG_PAX_REFCOUNT
6977 + "jno 0f\n"
6978 + LOCK_PREFIX "addl %1,%0\n"
6979 + "int $4\n0:\n"
6980 + _ASM_EXTABLE(0b, 0b)
6981 +#endif
6982 +
6983 + : "+m" (v->counter)
6984 + : "ir" (i));
6985 +}
6986 +
6987 +/**
6988 + * atomic_sub_unchecked - subtract integer from atomic variable
6989 + * @i: integer value to subtract
6990 + * @v: pointer of type atomic_unchecked_t
6991 + *
6992 + * Atomically subtracts @i from @v.
6993 + */
6994 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6995 +{
6996 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6997 : "+m" (v->counter)
6998 : "ir" (i));
6999 }
7000 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7001 {
7002 unsigned char c;
7003
7004 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7005 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7006 +
7007 +#ifdef CONFIG_PAX_REFCOUNT
7008 + "jno 0f\n"
7009 + LOCK_PREFIX "addl %2,%0\n"
7010 + "int $4\n0:\n"
7011 + _ASM_EXTABLE(0b, 0b)
7012 +#endif
7013 +
7014 + "sete %1\n"
7015 : "+m" (v->counter), "=qm" (c)
7016 : "ir" (i) : "memory");
7017 return c;
7018 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7019 */
7020 static inline void atomic_inc(atomic_t *v)
7021 {
7022 - asm volatile(LOCK_PREFIX "incl %0"
7023 + asm volatile(LOCK_PREFIX "incl %0\n"
7024 +
7025 +#ifdef CONFIG_PAX_REFCOUNT
7026 + "jno 0f\n"
7027 + LOCK_PREFIX "decl %0\n"
7028 + "int $4\n0:\n"
7029 + _ASM_EXTABLE(0b, 0b)
7030 +#endif
7031 +
7032 + : "+m" (v->counter));
7033 +}
7034 +
7035 +/**
7036 + * atomic_inc_unchecked - increment atomic variable
7037 + * @v: pointer of type atomic_unchecked_t
7038 + *
7039 + * Atomically increments @v by 1.
7040 + */
7041 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7042 +{
7043 + asm volatile(LOCK_PREFIX "incl %0\n"
7044 : "+m" (v->counter));
7045 }
7046
7047 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7048 */
7049 static inline void atomic_dec(atomic_t *v)
7050 {
7051 - asm volatile(LOCK_PREFIX "decl %0"
7052 + asm volatile(LOCK_PREFIX "decl %0\n"
7053 +
7054 +#ifdef CONFIG_PAX_REFCOUNT
7055 + "jno 0f\n"
7056 + LOCK_PREFIX "incl %0\n"
7057 + "int $4\n0:\n"
7058 + _ASM_EXTABLE(0b, 0b)
7059 +#endif
7060 +
7061 + : "+m" (v->counter));
7062 +}
7063 +
7064 +/**
7065 + * atomic_dec_unchecked - decrement atomic variable
7066 + * @v: pointer of type atomic_unchecked_t
7067 + *
7068 + * Atomically decrements @v by 1.
7069 + */
7070 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7071 +{
7072 + asm volatile(LOCK_PREFIX "decl %0\n"
7073 : "+m" (v->counter));
7074 }
7075
7076 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7077 {
7078 unsigned char c;
7079
7080 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7081 + asm volatile(LOCK_PREFIX "decl %0\n"
7082 +
7083 +#ifdef CONFIG_PAX_REFCOUNT
7084 + "jno 0f\n"
7085 + LOCK_PREFIX "incl %0\n"
7086 + "int $4\n0:\n"
7087 + _ASM_EXTABLE(0b, 0b)
7088 +#endif
7089 +
7090 + "sete %1\n"
7091 : "+m" (v->counter), "=qm" (c)
7092 : : "memory");
7093 return c != 0;
7094 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7095 {
7096 unsigned char c;
7097
7098 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7099 + asm volatile(LOCK_PREFIX "incl %0\n"
7100 +
7101 +#ifdef CONFIG_PAX_REFCOUNT
7102 + "jno 0f\n"
7103 + LOCK_PREFIX "decl %0\n"
7104 + "into\n0:\n"
7105 + _ASM_EXTABLE(0b, 0b)
7106 +#endif
7107 +
7108 + "sete %1\n"
7109 + : "+m" (v->counter), "=qm" (c)
7110 + : : "memory");
7111 + return c != 0;
7112 +}
7113 +
7114 +/**
7115 + * atomic_inc_and_test_unchecked - increment and test
7116 + * @v: pointer of type atomic_unchecked_t
7117 + *
7118 + * Atomically increments @v by 1
7119 + * and returns true if the result is zero, or false for all
7120 + * other cases.
7121 + */
7122 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7123 +{
7124 + unsigned char c;
7125 +
7126 + asm volatile(LOCK_PREFIX "incl %0\n"
7127 + "sete %1\n"
7128 : "+m" (v->counter), "=qm" (c)
7129 : : "memory");
7130 return c != 0;
7131 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7132 {
7133 unsigned char c;
7134
7135 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7136 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7137 +
7138 +#ifdef CONFIG_PAX_REFCOUNT
7139 + "jno 0f\n"
7140 + LOCK_PREFIX "subl %2,%0\n"
7141 + "int $4\n0:\n"
7142 + _ASM_EXTABLE(0b, 0b)
7143 +#endif
7144 +
7145 + "sets %1\n"
7146 : "+m" (v->counter), "=qm" (c)
7147 : "ir" (i) : "memory");
7148 return c;
7149 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7150 #endif
7151 /* Modern 486+ processor */
7152 __i = i;
7153 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7154 +
7155 +#ifdef CONFIG_PAX_REFCOUNT
7156 + "jno 0f\n"
7157 + "movl %0, %1\n"
7158 + "int $4\n0:\n"
7159 + _ASM_EXTABLE(0b, 0b)
7160 +#endif
7161 +
7162 + : "+r" (i), "+m" (v->counter)
7163 + : : "memory");
7164 + return i + __i;
7165 +
7166 +#ifdef CONFIG_M386
7167 +no_xadd: /* Legacy 386 processor */
7168 + local_irq_save(flags);
7169 + __i = atomic_read(v);
7170 + atomic_set(v, i + __i);
7171 + local_irq_restore(flags);
7172 + return i + __i;
7173 +#endif
7174 +}
7175 +
7176 +/**
7177 + * atomic_add_return_unchecked - add integer and return
7178 + * @v: pointer of type atomic_unchecked_t
7179 + * @i: integer value to add
7180 + *
7181 + * Atomically adds @i to @v and returns @i + @v
7182 + */
7183 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7184 +{
7185 + int __i;
7186 +#ifdef CONFIG_M386
7187 + unsigned long flags;
7188 + if (unlikely(boot_cpu_data.x86 <= 3))
7189 + goto no_xadd;
7190 +#endif
7191 + /* Modern 486+ processor */
7192 + __i = i;
7193 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7194 : "+r" (i), "+m" (v->counter)
7195 : : "memory");
7196 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7197 return cmpxchg(&v->counter, old, new);
7198 }
7199
7200 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7201 +{
7202 + return cmpxchg(&v->counter, old, new);
7203 +}
7204 +
7205 static inline int atomic_xchg(atomic_t *v, int new)
7206 {
7207 return xchg(&v->counter, new);
7208 }
7209
7210 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7211 +{
7212 + return xchg(&v->counter, new);
7213 +}
7214 +
7215 /**
7216 * atomic_add_unless - add unless the number is already a given value
7217 * @v: pointer of type atomic_t
7218 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7219 */
7220 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7221 {
7222 - int c, old;
7223 + int c, old, new;
7224 c = atomic_read(v);
7225 for (;;) {
7226 - if (unlikely(c == (u)))
7227 + if (unlikely(c == u))
7228 break;
7229 - old = atomic_cmpxchg((v), c, c + (a));
7230 +
7231 + asm volatile("addl %2,%0\n"
7232 +
7233 +#ifdef CONFIG_PAX_REFCOUNT
7234 + "jno 0f\n"
7235 + "subl %2,%0\n"
7236 + "int $4\n0:\n"
7237 + _ASM_EXTABLE(0b, 0b)
7238 +#endif
7239 +
7240 + : "=r" (new)
7241 + : "0" (c), "ir" (a));
7242 +
7243 + old = atomic_cmpxchg(v, c, new);
7244 if (likely(old == c))
7245 break;
7246 c = old;
7247 }
7248 - return c != (u);
7249 + return c != u;
7250 }
7251
7252 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7253
7254 #define atomic_inc_return(v) (atomic_add_return(1, v))
7255 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7256 +{
7257 + return atomic_add_return_unchecked(1, v);
7258 +}
7259 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7260
7261 /* These are x86-specific, used by some header files */
7262 @@ -266,9 +495,18 @@ typedef struct {
7263 u64 __aligned(8) counter;
7264 } atomic64_t;
7265
7266 +#ifdef CONFIG_PAX_REFCOUNT
7267 +typedef struct {
7268 + u64 __aligned(8) counter;
7269 +} atomic64_unchecked_t;
7270 +#else
7271 +typedef atomic64_t atomic64_unchecked_t;
7272 +#endif
7273 +
7274 #define ATOMIC64_INIT(val) { (val) }
7275
7276 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7277 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7278
7279 /**
7280 * atomic64_xchg - xchg atomic64 variable
7281 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7282 * the old value.
7283 */
7284 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7285 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7286
7287 /**
7288 * atomic64_set - set atomic64 variable
7289 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7290 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7291
7292 /**
7293 + * atomic64_unchecked_set - set atomic64 variable
7294 + * @ptr: pointer to type atomic64_unchecked_t
7295 + * @new_val: value to assign
7296 + *
7297 + * Atomically sets the value of @ptr to @new_val.
7298 + */
7299 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7300 +
7301 +/**
7302 * atomic64_read - read atomic64 variable
7303 * @ptr: pointer to type atomic64_t
7304 *
7305 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7306 return res;
7307 }
7308
7309 -extern u64 atomic64_read(atomic64_t *ptr);
7310 +/**
7311 + * atomic64_read_unchecked - read atomic64 variable
7312 + * @ptr: pointer to type atomic64_unchecked_t
7313 + *
7314 + * Atomically reads the value of @ptr and returns it.
7315 + */
7316 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7317 +{
7318 + u64 res;
7319 +
7320 + /*
7321 + * Note, we inline this atomic64_unchecked_t primitive because
7322 + * it only clobbers EAX/EDX and leaves the others
7323 + * untouched. We also (somewhat subtly) rely on the
7324 + * fact that cmpxchg8b returns the current 64-bit value
7325 + * of the memory location we are touching:
7326 + */
7327 + asm volatile(
7328 + "mov %%ebx, %%eax\n\t"
7329 + "mov %%ecx, %%edx\n\t"
7330 + LOCK_PREFIX "cmpxchg8b %1\n"
7331 + : "=&A" (res)
7332 + : "m" (*ptr)
7333 + );
7334 +
7335 + return res;
7336 +}
7337
7338 /**
7339 * atomic64_add_return - add and return
7340 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7341 * Other variants with different arithmetic operators:
7342 */
7343 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7344 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7345 extern u64 atomic64_inc_return(atomic64_t *ptr);
7346 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7347 extern u64 atomic64_dec_return(atomic64_t *ptr);
7348 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7349
7350 /**
7351 * atomic64_add - add integer to atomic64 variable
7352 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7353 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7354
7355 /**
7356 + * atomic64_add_unchecked - add integer to atomic64 variable
7357 + * @delta: integer value to add
7358 + * @ptr: pointer to type atomic64_unchecked_t
7359 + *
7360 + * Atomically adds @delta to @ptr.
7361 + */
7362 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7363 +
7364 +/**
7365 * atomic64_sub - subtract the atomic64 variable
7366 * @delta: integer value to subtract
7367 * @ptr: pointer to type atomic64_t
7368 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7369 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7370
7371 /**
7372 + * atomic64_sub_unchecked - subtract the atomic64 variable
7373 + * @delta: integer value to subtract
7374 + * @ptr: pointer to type atomic64_unchecked_t
7375 + *
7376 + * Atomically subtracts @delta from @ptr.
7377 + */
7378 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7379 +
7380 +/**
7381 * atomic64_sub_and_test - subtract value from variable and test result
7382 * @delta: integer value to subtract
7383 * @ptr: pointer to type atomic64_t
7384 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7385 extern void atomic64_inc(atomic64_t *ptr);
7386
7387 /**
7388 + * atomic64_inc_unchecked - increment atomic64 variable
7389 + * @ptr: pointer to type atomic64_unchecked_t
7390 + *
7391 + * Atomically increments @ptr by 1.
7392 + */
7393 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7394 +
7395 +/**
7396 * atomic64_dec - decrement atomic64 variable
7397 * @ptr: pointer to type atomic64_t
7398 *
7399 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7400 extern void atomic64_dec(atomic64_t *ptr);
7401
7402 /**
7403 + * atomic64_dec_unchecked - decrement atomic64 variable
7404 + * @ptr: pointer to type atomic64_unchecked_t
7405 + *
7406 + * Atomically decrements @ptr by 1.
7407 + */
7408 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7409 +
7410 +/**
7411 * atomic64_dec_and_test - decrement and test
7412 * @ptr: pointer to type atomic64_t
7413 *
7414 diff -urNp linux-2.6.32.43/arch/x86/include/asm/atomic_64.h linux-2.6.32.43/arch/x86/include/asm/atomic_64.h
7415 --- linux-2.6.32.43/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7416 +++ linux-2.6.32.43/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7417 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7418 }
7419
7420 /**
7421 + * atomic_read_unchecked - read atomic variable
7422 + * @v: pointer of type atomic_unchecked_t
7423 + *
7424 + * Atomically reads the value of @v.
7425 + */
7426 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7427 +{
7428 + return v->counter;
7429 +}
7430 +
7431 +/**
7432 * atomic_set - set atomic variable
7433 * @v: pointer of type atomic_t
7434 * @i: required value
7435 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7436 }
7437
7438 /**
7439 + * atomic_set_unchecked - set atomic variable
7440 + * @v: pointer of type atomic_unchecked_t
7441 + * @i: required value
7442 + *
7443 + * Atomically sets the value of @v to @i.
7444 + */
7445 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7446 +{
7447 + v->counter = i;
7448 +}
7449 +
7450 +/**
7451 * atomic_add - add integer to atomic variable
7452 * @i: integer value to add
7453 * @v: pointer of type atomic_t
7454 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7455 */
7456 static inline void atomic_add(int i, atomic_t *v)
7457 {
7458 - asm volatile(LOCK_PREFIX "addl %1,%0"
7459 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7460 +
7461 +#ifdef CONFIG_PAX_REFCOUNT
7462 + "jno 0f\n"
7463 + LOCK_PREFIX "subl %1,%0\n"
7464 + "int $4\n0:\n"
7465 + _ASM_EXTABLE(0b, 0b)
7466 +#endif
7467 +
7468 + : "=m" (v->counter)
7469 + : "ir" (i), "m" (v->counter));
7470 +}
7471 +
7472 +/**
7473 + * atomic_add_unchecked - add integer to atomic variable
7474 + * @i: integer value to add
7475 + * @v: pointer of type atomic_unchecked_t
7476 + *
7477 + * Atomically adds @i to @v.
7478 + */
7479 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7480 +{
7481 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7482 : "=m" (v->counter)
7483 : "ir" (i), "m" (v->counter));
7484 }
7485 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7486 */
7487 static inline void atomic_sub(int i, atomic_t *v)
7488 {
7489 - asm volatile(LOCK_PREFIX "subl %1,%0"
7490 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7491 +
7492 +#ifdef CONFIG_PAX_REFCOUNT
7493 + "jno 0f\n"
7494 + LOCK_PREFIX "addl %1,%0\n"
7495 + "int $4\n0:\n"
7496 + _ASM_EXTABLE(0b, 0b)
7497 +#endif
7498 +
7499 + : "=m" (v->counter)
7500 + : "ir" (i), "m" (v->counter));
7501 +}
7502 +
7503 +/**
7504 + * atomic_sub_unchecked - subtract the atomic variable
7505 + * @i: integer value to subtract
7506 + * @v: pointer of type atomic_unchecked_t
7507 + *
7508 + * Atomically subtracts @i from @v.
7509 + */
7510 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7511 +{
7512 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7513 : "=m" (v->counter)
7514 : "ir" (i), "m" (v->counter));
7515 }
7516 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7517 {
7518 unsigned char c;
7519
7520 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7521 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7522 +
7523 +#ifdef CONFIG_PAX_REFCOUNT
7524 + "jno 0f\n"
7525 + LOCK_PREFIX "addl %2,%0\n"
7526 + "int $4\n0:\n"
7527 + _ASM_EXTABLE(0b, 0b)
7528 +#endif
7529 +
7530 + "sete %1\n"
7531 : "=m" (v->counter), "=qm" (c)
7532 : "ir" (i), "m" (v->counter) : "memory");
7533 return c;
7534 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7535 */
7536 static inline void atomic_inc(atomic_t *v)
7537 {
7538 - asm volatile(LOCK_PREFIX "incl %0"
7539 + asm volatile(LOCK_PREFIX "incl %0\n"
7540 +
7541 +#ifdef CONFIG_PAX_REFCOUNT
7542 + "jno 0f\n"
7543 + LOCK_PREFIX "decl %0\n"
7544 + "int $4\n0:\n"
7545 + _ASM_EXTABLE(0b, 0b)
7546 +#endif
7547 +
7548 + : "=m" (v->counter)
7549 + : "m" (v->counter));
7550 +}
7551 +
7552 +/**
7553 + * atomic_inc_unchecked - increment atomic variable
7554 + * @v: pointer of type atomic_unchecked_t
7555 + *
7556 + * Atomically increments @v by 1.
7557 + */
7558 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7559 +{
7560 + asm volatile(LOCK_PREFIX "incl %0\n"
7561 : "=m" (v->counter)
7562 : "m" (v->counter));
7563 }
7564 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7565 */
7566 static inline void atomic_dec(atomic_t *v)
7567 {
7568 - asm volatile(LOCK_PREFIX "decl %0"
7569 + asm volatile(LOCK_PREFIX "decl %0\n"
7570 +
7571 +#ifdef CONFIG_PAX_REFCOUNT
7572 + "jno 0f\n"
7573 + LOCK_PREFIX "incl %0\n"
7574 + "int $4\n0:\n"
7575 + _ASM_EXTABLE(0b, 0b)
7576 +#endif
7577 +
7578 + : "=m" (v->counter)
7579 + : "m" (v->counter));
7580 +}
7581 +
7582 +/**
7583 + * atomic_dec_unchecked - decrement atomic variable
7584 + * @v: pointer of type atomic_unchecked_t
7585 + *
7586 + * Atomically decrements @v by 1.
7587 + */
7588 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7589 +{
7590 + asm volatile(LOCK_PREFIX "decl %0\n"
7591 : "=m" (v->counter)
7592 : "m" (v->counter));
7593 }
7594 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7595 {
7596 unsigned char c;
7597
7598 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7599 + asm volatile(LOCK_PREFIX "decl %0\n"
7600 +
7601 +#ifdef CONFIG_PAX_REFCOUNT
7602 + "jno 0f\n"
7603 + LOCK_PREFIX "incl %0\n"
7604 + "int $4\n0:\n"
7605 + _ASM_EXTABLE(0b, 0b)
7606 +#endif
7607 +
7608 + "sete %1\n"
7609 : "=m" (v->counter), "=qm" (c)
7610 : "m" (v->counter) : "memory");
7611 return c != 0;
7612 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7613 {
7614 unsigned char c;
7615
7616 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7617 + asm volatile(LOCK_PREFIX "incl %0\n"
7618 +
7619 +#ifdef CONFIG_PAX_REFCOUNT
7620 + "jno 0f\n"
7621 + LOCK_PREFIX "decl %0\n"
7622 + "int $4\n0:\n"
7623 + _ASM_EXTABLE(0b, 0b)
7624 +#endif
7625 +
7626 + "sete %1\n"
7627 + : "=m" (v->counter), "=qm" (c)
7628 + : "m" (v->counter) : "memory");
7629 + return c != 0;
7630 +}
7631 +
7632 +/**
7633 + * atomic_inc_and_test_unchecked - increment and test
7634 + * @v: pointer of type atomic_unchecked_t
7635 + *
7636 + * Atomically increments @v by 1
7637 + * and returns true if the result is zero, or false for all
7638 + * other cases.
7639 + */
7640 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7641 +{
7642 + unsigned char c;
7643 +
7644 + asm volatile(LOCK_PREFIX "incl %0\n"
7645 + "sete %1\n"
7646 : "=m" (v->counter), "=qm" (c)
7647 : "m" (v->counter) : "memory");
7648 return c != 0;
7649 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7650 {
7651 unsigned char c;
7652
7653 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7654 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7655 +
7656 +#ifdef CONFIG_PAX_REFCOUNT
7657 + "jno 0f\n"
7658 + LOCK_PREFIX "subl %2,%0\n"
7659 + "int $4\n0:\n"
7660 + _ASM_EXTABLE(0b, 0b)
7661 +#endif
7662 +
7663 + "sets %1\n"
7664 : "=m" (v->counter), "=qm" (c)
7665 : "ir" (i), "m" (v->counter) : "memory");
7666 return c;
7667 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7668 static inline int atomic_add_return(int i, atomic_t *v)
7669 {
7670 int __i = i;
7671 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7672 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7673 +
7674 +#ifdef CONFIG_PAX_REFCOUNT
7675 + "jno 0f\n"
7676 + "movl %0, %1\n"
7677 + "int $4\n0:\n"
7678 + _ASM_EXTABLE(0b, 0b)
7679 +#endif
7680 +
7681 + : "+r" (i), "+m" (v->counter)
7682 + : : "memory");
7683 + return i + __i;
7684 +}
7685 +
7686 +/**
7687 + * atomic_add_return_unchecked - add and return
7688 + * @i: integer value to add
7689 + * @v: pointer of type atomic_unchecked_t
7690 + *
7691 + * Atomically adds @i to @v and returns @i + @v
7692 + */
7693 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7694 +{
7695 + int __i = i;
7696 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7697 : "+r" (i), "+m" (v->counter)
7698 : : "memory");
7699 return i + __i;
7700 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7701 }
7702
7703 #define atomic_inc_return(v) (atomic_add_return(1, v))
7704 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7705 +{
7706 + return atomic_add_return_unchecked(1, v);
7707 +}
7708 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7709
7710 /* The 64-bit atomic type */
7711 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7712 }
7713
7714 /**
7715 + * atomic64_read_unchecked - read atomic64 variable
7716 + * @v: pointer of type atomic64_unchecked_t
7717 + *
7718 + * Atomically reads the value of @v.
7719 + * Doesn't imply a read memory barrier.
7720 + */
7721 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7722 +{
7723 + return v->counter;
7724 +}
7725 +
7726 +/**
7727 * atomic64_set - set atomic64 variable
7728 * @v: pointer to type atomic64_t
7729 * @i: required value
7730 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7731 }
7732
7733 /**
7734 + * atomic64_set_unchecked - set atomic64 variable
7735 + * @v: pointer to type atomic64_unchecked_t
7736 + * @i: required value
7737 + *
7738 + * Atomically sets the value of @v to @i.
7739 + */
7740 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7741 +{
7742 + v->counter = i;
7743 +}
7744 +
7745 +/**
7746 * atomic64_add - add integer to atomic64 variable
7747 * @i: integer value to add
7748 * @v: pointer to type atomic64_t
7749 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7750 */
7751 static inline void atomic64_add(long i, atomic64_t *v)
7752 {
7753 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7754 +
7755 +#ifdef CONFIG_PAX_REFCOUNT
7756 + "jno 0f\n"
7757 + LOCK_PREFIX "subq %1,%0\n"
7758 + "int $4\n0:\n"
7759 + _ASM_EXTABLE(0b, 0b)
7760 +#endif
7761 +
7762 + : "=m" (v->counter)
7763 + : "er" (i), "m" (v->counter));
7764 +}
7765 +
7766 +/**
7767 + * atomic64_add_unchecked - add integer to atomic64 variable
7768 + * @i: integer value to add
7769 + * @v: pointer to type atomic64_unchecked_t
7770 + *
7771 + * Atomically adds @i to @v.
7772 + */
7773 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7774 +{
7775 asm volatile(LOCK_PREFIX "addq %1,%0"
7776 : "=m" (v->counter)
7777 : "er" (i), "m" (v->counter));
7778 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7779 */
7780 static inline void atomic64_sub(long i, atomic64_t *v)
7781 {
7782 - asm volatile(LOCK_PREFIX "subq %1,%0"
7783 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7784 +
7785 +#ifdef CONFIG_PAX_REFCOUNT
7786 + "jno 0f\n"
7787 + LOCK_PREFIX "addq %1,%0\n"
7788 + "int $4\n0:\n"
7789 + _ASM_EXTABLE(0b, 0b)
7790 +#endif
7791 +
7792 : "=m" (v->counter)
7793 : "er" (i), "m" (v->counter));
7794 }
7795 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7796 {
7797 unsigned char c;
7798
7799 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7800 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7801 +
7802 +#ifdef CONFIG_PAX_REFCOUNT
7803 + "jno 0f\n"
7804 + LOCK_PREFIX "addq %2,%0\n"
7805 + "int $4\n0:\n"
7806 + _ASM_EXTABLE(0b, 0b)
7807 +#endif
7808 +
7809 + "sete %1\n"
7810 : "=m" (v->counter), "=qm" (c)
7811 : "er" (i), "m" (v->counter) : "memory");
7812 return c;
7813 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7814 */
7815 static inline void atomic64_inc(atomic64_t *v)
7816 {
7817 + asm volatile(LOCK_PREFIX "incq %0\n"
7818 +
7819 +#ifdef CONFIG_PAX_REFCOUNT
7820 + "jno 0f\n"
7821 + LOCK_PREFIX "decq %0\n"
7822 + "int $4\n0:\n"
7823 + _ASM_EXTABLE(0b, 0b)
7824 +#endif
7825 +
7826 + : "=m" (v->counter)
7827 + : "m" (v->counter));
7828 +}
7829 +
7830 +/**
7831 + * atomic64_inc_unchecked - increment atomic64 variable
7832 + * @v: pointer to type atomic64_unchecked_t
7833 + *
7834 + * Atomically increments @v by 1.
7835 + */
7836 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7837 +{
7838 asm volatile(LOCK_PREFIX "incq %0"
7839 : "=m" (v->counter)
7840 : "m" (v->counter));
7841 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7842 */
7843 static inline void atomic64_dec(atomic64_t *v)
7844 {
7845 - asm volatile(LOCK_PREFIX "decq %0"
7846 + asm volatile(LOCK_PREFIX "decq %0\n"
7847 +
7848 +#ifdef CONFIG_PAX_REFCOUNT
7849 + "jno 0f\n"
7850 + LOCK_PREFIX "incq %0\n"
7851 + "int $4\n0:\n"
7852 + _ASM_EXTABLE(0b, 0b)
7853 +#endif
7854 +
7855 + : "=m" (v->counter)
7856 + : "m" (v->counter));
7857 +}
7858 +
7859 +/**
7860 + * atomic64_dec_unchecked - decrement atomic64 variable
7861 + * @v: pointer to type atomic64_t
7862 + *
7863 + * Atomically decrements @v by 1.
7864 + */
7865 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7866 +{
7867 + asm volatile(LOCK_PREFIX "decq %0\n"
7868 : "=m" (v->counter)
7869 : "m" (v->counter));
7870 }
7871 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7872 {
7873 unsigned char c;
7874
7875 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7876 + asm volatile(LOCK_PREFIX "decq %0\n"
7877 +
7878 +#ifdef CONFIG_PAX_REFCOUNT
7879 + "jno 0f\n"
7880 + LOCK_PREFIX "incq %0\n"
7881 + "int $4\n0:\n"
7882 + _ASM_EXTABLE(0b, 0b)
7883 +#endif
7884 +
7885 + "sete %1\n"
7886 : "=m" (v->counter), "=qm" (c)
7887 : "m" (v->counter) : "memory");
7888 return c != 0;
7889 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7890 {
7891 unsigned char c;
7892
7893 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7894 + asm volatile(LOCK_PREFIX "incq %0\n"
7895 +
7896 +#ifdef CONFIG_PAX_REFCOUNT
7897 + "jno 0f\n"
7898 + LOCK_PREFIX "decq %0\n"
7899 + "int $4\n0:\n"
7900 + _ASM_EXTABLE(0b, 0b)
7901 +#endif
7902 +
7903 + "sete %1\n"
7904 : "=m" (v->counter), "=qm" (c)
7905 : "m" (v->counter) : "memory");
7906 return c != 0;
7907 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7908 {
7909 unsigned char c;
7910
7911 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7912 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7913 +
7914 +#ifdef CONFIG_PAX_REFCOUNT
7915 + "jno 0f\n"
7916 + LOCK_PREFIX "subq %2,%0\n"
7917 + "int $4\n0:\n"
7918 + _ASM_EXTABLE(0b, 0b)
7919 +#endif
7920 +
7921 + "sets %1\n"
7922 : "=m" (v->counter), "=qm" (c)
7923 : "er" (i), "m" (v->counter) : "memory");
7924 return c;
7925 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7926 static inline long atomic64_add_return(long i, atomic64_t *v)
7927 {
7928 long __i = i;
7929 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7930 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7931 +
7932 +#ifdef CONFIG_PAX_REFCOUNT
7933 + "jno 0f\n"
7934 + "movq %0, %1\n"
7935 + "int $4\n0:\n"
7936 + _ASM_EXTABLE(0b, 0b)
7937 +#endif
7938 +
7939 + : "+r" (i), "+m" (v->counter)
7940 + : : "memory");
7941 + return i + __i;
7942 +}
7943 +
7944 +/**
7945 + * atomic64_add_return_unchecked - add and return
7946 + * @i: integer value to add
7947 + * @v: pointer to type atomic64_unchecked_t
7948 + *
7949 + * Atomically adds @i to @v and returns @i + @v
7950 + */
7951 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7952 +{
7953 + long __i = i;
7954 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7955 : "+r" (i), "+m" (v->counter)
7956 : : "memory");
7957 return i + __i;
7958 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7959 }
7960
7961 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7962 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7963 +{
7964 + return atomic64_add_return_unchecked(1, v);
7965 +}
7966 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7967
7968 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7969 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7970 return cmpxchg(&v->counter, old, new);
7971 }
7972
7973 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7974 +{
7975 + return cmpxchg(&v->counter, old, new);
7976 +}
7977 +
7978 static inline long atomic64_xchg(atomic64_t *v, long new)
7979 {
7980 return xchg(&v->counter, new);
7981 }
7982
7983 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7984 +{
7985 + return xchg(&v->counter, new);
7986 +}
7987 +
7988 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7989 {
7990 return cmpxchg(&v->counter, old, new);
7991 }
7992
7993 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7994 +{
7995 + return cmpxchg(&v->counter, old, new);
7996 +}
7997 +
7998 static inline long atomic_xchg(atomic_t *v, int new)
7999 {
8000 return xchg(&v->counter, new);
8001 }
8002
8003 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8004 +{
8005 + return xchg(&v->counter, new);
8006 +}
8007 +
8008 /**
8009 * atomic_add_unless - add unless the number is a given value
8010 * @v: pointer of type atomic_t
8011 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8012 */
8013 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8014 {
8015 - int c, old;
8016 + int c, old, new;
8017 c = atomic_read(v);
8018 for (;;) {
8019 - if (unlikely(c == (u)))
8020 + if (unlikely(c == u))
8021 break;
8022 - old = atomic_cmpxchg((v), c, c + (a));
8023 +
8024 + asm volatile("addl %2,%0\n"
8025 +
8026 +#ifdef CONFIG_PAX_REFCOUNT
8027 + "jno 0f\n"
8028 + "subl %2,%0\n"
8029 + "int $4\n0:\n"
8030 + _ASM_EXTABLE(0b, 0b)
8031 +#endif
8032 +
8033 + : "=r" (new)
8034 + : "0" (c), "ir" (a));
8035 +
8036 + old = atomic_cmpxchg(v, c, new);
8037 if (likely(old == c))
8038 break;
8039 c = old;
8040 }
8041 - return c != (u);
8042 + return c != u;
8043 }
8044
8045 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8046 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8047 */
8048 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8049 {
8050 - long c, old;
8051 + long c, old, new;
8052 c = atomic64_read(v);
8053 for (;;) {
8054 - if (unlikely(c == (u)))
8055 + if (unlikely(c == u))
8056 break;
8057 - old = atomic64_cmpxchg((v), c, c + (a));
8058 +
8059 + asm volatile("addq %2,%0\n"
8060 +
8061 +#ifdef CONFIG_PAX_REFCOUNT
8062 + "jno 0f\n"
8063 + "subq %2,%0\n"
8064 + "int $4\n0:\n"
8065 + _ASM_EXTABLE(0b, 0b)
8066 +#endif
8067 +
8068 + : "=r" (new)
8069 + : "0" (c), "er" (a));
8070 +
8071 + old = atomic64_cmpxchg(v, c, new);
8072 if (likely(old == c))
8073 break;
8074 c = old;
8075 }
8076 - return c != (u);
8077 + return c != u;
8078 }
8079
8080 /**
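
The atomic.h hunks above all apply the same CONFIG_PAX_REFCOUNT guard: each LOCK-prefixed arithmetic instruction is followed by a "jno" that skips a recovery path when the signed result did not overflow; on overflow the operation is undone, "int $4" raises the x86 overflow exception, and the _ASM_EXTABLE entry lets the kernel resume past the trap once the PaX handler has reported the refcount overflow. The *_unchecked variants on atomic_unchecked_t keep the original unguarded behaviour for counters that are allowed to wrap. What follows is a minimal, self-contained userspace sketch of the same structure, illustrative only (checked_add() is not a kernel function), buildable with a plain gcc invocation.

/*
 * Minimal userspace sketch of the PAX_REFCOUNT guard used in the atomic_*()
 * hunks above (illustrative only; checked_add() is not a kernel function).
 * The locked add is followed by "jno": if the signed result did not
 * overflow, execution skips to label 0.  On overflow the add is undone and
 * "int $4" raises the overflow exception; in the patched kernel the
 * _ASM_EXTABLE entry steers that trap to the PaX handler, while in this
 * standalone demo it would simply terminate the process, so the demo value
 * is chosen not to overflow.  Build with: gcc -O2 -o refcount-demo demo.c
 */
#include <stdio.h>

static void checked_add(int i, int *counter)
{
	asm volatile("lock addl %1, %0\n"
		     "jno 0f\n"			/* no overflow: skip recovery */
		     "lock subl %1, %0\n"	/* overflow: undo the add     */
		     "int $4\n"			/* and raise the overflow trap */
		     "0:\n"
		     : "+m" (*counter)
		     : "ir" (i));
}

int main(void)
{
	int counter = 40;

	checked_add(2, &counter);
	printf("counter = %d\n", counter);	/* prints 42 */
	return 0;
}
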
8081 diff -urNp linux-2.6.32.43/arch/x86/include/asm/bitops.h linux-2.6.32.43/arch/x86/include/asm/bitops.h
8082 --- linux-2.6.32.43/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8083 +++ linux-2.6.32.43/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8084 @@ -38,7 +38,7 @@
8085 * a mask operation on a byte.
8086 */
8087 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8088 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8089 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8090 #define CONST_MASK(nr) (1 << ((nr) & 7))
8091
8092 /**
8093 diff -urNp linux-2.6.32.43/arch/x86/include/asm/boot.h linux-2.6.32.43/arch/x86/include/asm/boot.h
8094 --- linux-2.6.32.43/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8095 +++ linux-2.6.32.43/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8096 @@ -11,10 +11,15 @@
8097 #include <asm/pgtable_types.h>
8098
8099 /* Physical address where kernel should be loaded. */
8100 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8101 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8102 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8103 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8104
8105 +#ifndef __ASSEMBLY__
8106 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8107 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8108 +#endif
8109 +
8110 /* Minimum kernel alignment, as a power of two */
8111 #ifdef CONFIG_X86_64
8112 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8113 diff -urNp linux-2.6.32.43/arch/x86/include/asm/cacheflush.h linux-2.6.32.43/arch/x86/include/asm/cacheflush.h
8114 --- linux-2.6.32.43/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8115 +++ linux-2.6.32.43/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8116 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8117 static inline unsigned long get_page_memtype(struct page *pg)
8118 {
8119 if (!PageUncached(pg) && !PageWC(pg))
8120 - return -1;
8121 + return ~0UL;
8122 else if (!PageUncached(pg) && PageWC(pg))
8123 return _PAGE_CACHE_WC;
8124 else if (PageUncached(pg) && !PageWC(pg))
8125 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8126 SetPageWC(pg);
8127 break;
8128 default:
8129 - case -1:
8130 + case ~0UL:
8131 ClearPageUncached(pg);
8132 ClearPageWC(pg);
8133 break;
8134 diff -urNp linux-2.6.32.43/arch/x86/include/asm/cache.h linux-2.6.32.43/arch/x86/include/asm/cache.h
8135 --- linux-2.6.32.43/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8136 +++ linux-2.6.32.43/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8137 @@ -5,9 +5,10 @@
8138
8139 /* L1 cache line size */
8140 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8141 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8142 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8143
8144 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8145 +#define __read_only __attribute__((__section__(".data.read_only")))
8146
8147 #ifdef CONFIG_X86_VSMP
8148 /* vSMP Internode cacheline shift */
8149 diff -urNp linux-2.6.32.43/arch/x86/include/asm/checksum_32.h linux-2.6.32.43/arch/x86/include/asm/checksum_32.h
8150 --- linux-2.6.32.43/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8151 +++ linux-2.6.32.43/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8152 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8153 int len, __wsum sum,
8154 int *src_err_ptr, int *dst_err_ptr);
8155
8156 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8157 + int len, __wsum sum,
8158 + int *src_err_ptr, int *dst_err_ptr);
8159 +
8160 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8161 + int len, __wsum sum,
8162 + int *src_err_ptr, int *dst_err_ptr);
8163 +
8164 /*
8165 * Note: when you get a NULL pointer exception here this means someone
8166 * passed in an incorrect kernel address to one of these functions.
8167 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8168 int *err_ptr)
8169 {
8170 might_sleep();
8171 - return csum_partial_copy_generic((__force void *)src, dst,
8172 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8173 len, sum, err_ptr, NULL);
8174 }
8175
8176 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8177 {
8178 might_sleep();
8179 if (access_ok(VERIFY_WRITE, dst, len))
8180 - return csum_partial_copy_generic(src, (__force void *)dst,
8181 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8182 len, sum, NULL, err_ptr);
8183
8184 if (len)
8185 diff -urNp linux-2.6.32.43/arch/x86/include/asm/desc_defs.h linux-2.6.32.43/arch/x86/include/asm/desc_defs.h
8186 --- linux-2.6.32.43/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8187 +++ linux-2.6.32.43/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8188 @@ -31,6 +31,12 @@ struct desc_struct {
8189 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8190 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8191 };
8192 + struct {
8193 + u16 offset_low;
8194 + u16 seg;
8195 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8196 + unsigned offset_high: 16;
8197 + } gate;
8198 };
8199 } __attribute__((packed));
8200
8201 diff -urNp linux-2.6.32.43/arch/x86/include/asm/desc.h linux-2.6.32.43/arch/x86/include/asm/desc.h
8202 --- linux-2.6.32.43/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8203 +++ linux-2.6.32.43/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8204 @@ -4,6 +4,7 @@
8205 #include <asm/desc_defs.h>
8206 #include <asm/ldt.h>
8207 #include <asm/mmu.h>
8208 +#include <asm/pgtable.h>
8209 #include <linux/smp.h>
8210
8211 static inline void fill_ldt(struct desc_struct *desc,
8212 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8213 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8214 desc->type = (info->read_exec_only ^ 1) << 1;
8215 desc->type |= info->contents << 2;
8216 + desc->type |= info->seg_not_present ^ 1;
8217 desc->s = 1;
8218 desc->dpl = 0x3;
8219 desc->p = info->seg_not_present ^ 1;
8220 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8221 }
8222
8223 extern struct desc_ptr idt_descr;
8224 -extern gate_desc idt_table[];
8225 -
8226 -struct gdt_page {
8227 - struct desc_struct gdt[GDT_ENTRIES];
8228 -} __attribute__((aligned(PAGE_SIZE)));
8229 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8230 +extern gate_desc idt_table[256];
8231
8232 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8233 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8234 {
8235 - return per_cpu(gdt_page, cpu).gdt;
8236 + return cpu_gdt_table[cpu];
8237 }
8238
8239 #ifdef CONFIG_X86_64
8240 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8241 unsigned long base, unsigned dpl, unsigned flags,
8242 unsigned short seg)
8243 {
8244 - gate->a = (seg << 16) | (base & 0xffff);
8245 - gate->b = (base & 0xffff0000) |
8246 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8247 + gate->gate.offset_low = base;
8248 + gate->gate.seg = seg;
8249 + gate->gate.reserved = 0;
8250 + gate->gate.type = type;
8251 + gate->gate.s = 0;
8252 + gate->gate.dpl = dpl;
8253 + gate->gate.p = 1;
8254 + gate->gate.offset_high = base >> 16;
8255 }
8256
8257 #endif
8258 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8259 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8260 const gate_desc *gate)
8261 {
8262 + pax_open_kernel();
8263 memcpy(&idt[entry], gate, sizeof(*gate));
8264 + pax_close_kernel();
8265 }
8266
8267 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8268 const void *desc)
8269 {
8270 + pax_open_kernel();
8271 memcpy(&ldt[entry], desc, 8);
8272 + pax_close_kernel();
8273 }
8274
8275 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8276 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8277 size = sizeof(struct desc_struct);
8278 break;
8279 }
8280 +
8281 + pax_open_kernel();
8282 memcpy(&gdt[entry], desc, size);
8283 + pax_close_kernel();
8284 }
8285
8286 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8287 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8288
8289 static inline void native_load_tr_desc(void)
8290 {
8291 + pax_open_kernel();
8292 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8293 + pax_close_kernel();
8294 }
8295
8296 static inline void native_load_gdt(const struct desc_ptr *dtr)
8297 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8298 unsigned int i;
8299 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8300
8301 + pax_open_kernel();
8302 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8303 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8304 + pax_close_kernel();
8305 }
8306
8307 #define _LDT_empty(info) \
8308 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8309 desc->limit = (limit >> 16) & 0xf;
8310 }
8311
8312 -static inline void _set_gate(int gate, unsigned type, void *addr,
8313 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8314 unsigned dpl, unsigned ist, unsigned seg)
8315 {
8316 gate_desc s;
8317 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8318 * Pentium F0 0F bugfix can have resulted in the mapped
8319 * IDT being write-protected.
8320 */
8321 -static inline void set_intr_gate(unsigned int n, void *addr)
8322 +static inline void set_intr_gate(unsigned int n, const void *addr)
8323 {
8324 BUG_ON((unsigned)n > 0xFF);
8325 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8326 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8327 /*
8328 * This routine sets up an interrupt gate at directory privilege level 3.
8329 */
8330 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8331 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8332 {
8333 BUG_ON((unsigned)n > 0xFF);
8334 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8335 }
8336
8337 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8338 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8339 {
8340 BUG_ON((unsigned)n > 0xFF);
8341 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8342 }
8343
8344 -static inline void set_trap_gate(unsigned int n, void *addr)
8345 +static inline void set_trap_gate(unsigned int n, const void *addr)
8346 {
8347 BUG_ON((unsigned)n > 0xFF);
8348 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8349 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8350 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8351 {
8352 BUG_ON((unsigned)n > 0xFF);
8353 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8354 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8355 }
8356
8357 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8358 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8359 {
8360 BUG_ON((unsigned)n > 0xFF);
8361 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8362 }
8363
8364 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8365 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8366 {
8367 BUG_ON((unsigned)n > 0xFF);
8368 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8369 }
8370
8371 +#ifdef CONFIG_X86_32
8372 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8373 +{
8374 + struct desc_struct d;
8375 +
8376 + if (likely(limit))
8377 + limit = (limit - 1UL) >> PAGE_SHIFT;
8378 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8379 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8380 +}
8381 +#endif
8382 +
8383 #endif /* _ASM_X86_DESC_H */
8384 diff -urNp linux-2.6.32.43/arch/x86/include/asm/device.h linux-2.6.32.43/arch/x86/include/asm/device.h
8385 --- linux-2.6.32.43/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8386 +++ linux-2.6.32.43/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8387 @@ -6,7 +6,7 @@ struct dev_archdata {
8388 void *acpi_handle;
8389 #endif
8390 #ifdef CONFIG_X86_64
8391 -struct dma_map_ops *dma_ops;
8392 + const struct dma_map_ops *dma_ops;
8393 #endif
8394 #ifdef CONFIG_DMAR
8395 void *iommu; /* hook for IOMMU specific extension */
8396 diff -urNp linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h
8397 --- linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8398 +++ linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8399 @@ -25,9 +25,9 @@ extern int iommu_merge;
8400 extern struct device x86_dma_fallback_dev;
8401 extern int panic_on_overflow;
8402
8403 -extern struct dma_map_ops *dma_ops;
8404 +extern const struct dma_map_ops *dma_ops;
8405
8406 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8407 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8408 {
8409 #ifdef CONFIG_X86_32
8410 return dma_ops;
8411 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8412 /* Make sure we keep the same behaviour */
8413 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8414 {
8415 - struct dma_map_ops *ops = get_dma_ops(dev);
8416 + const struct dma_map_ops *ops = get_dma_ops(dev);
8417 if (ops->mapping_error)
8418 return ops->mapping_error(dev, dma_addr);
8419
8420 @@ -122,7 +122,7 @@ static inline void *
8421 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8422 gfp_t gfp)
8423 {
8424 - struct dma_map_ops *ops = get_dma_ops(dev);
8425 + const struct dma_map_ops *ops = get_dma_ops(dev);
8426 void *memory;
8427
8428 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8429 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8430 static inline void dma_free_coherent(struct device *dev, size_t size,
8431 void *vaddr, dma_addr_t bus)
8432 {
8433 - struct dma_map_ops *ops = get_dma_ops(dev);
8434 + const struct dma_map_ops *ops = get_dma_ops(dev);
8435
8436 WARN_ON(irqs_disabled()); /* for portability */
8437
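
The device.h and dma-mapping.h hunks above (like the iommu.h, microcode.h, pci_x86.h and kvm_host.h ones further down) constify the ops tables: pointers become "const struct dma_map_ops *" so the tables of function pointers can live in read-only memory and a stray write to them faults instead of silently redirecting indirect calls. A hedged, standalone sketch of that general pattern, not kernel code, follows.

/*
 * Hedged sketch of the "constify the ops table" pattern these hunks apply
 * to dma_map_ops (and, further below, to microcode_ops, pci_raw_ops and
 * kvm_x86_ops).  Nothing here is kernel code; it only shows why the const
 * qualifier matters: the table of function pointers ends up in .rodata,
 * so writes to it fault instead of redirecting calls.
 */
#include <stdio.h>

struct ops {
	int (*read)(int reg);
	void (*write)(int reg, int val);
};

static int demo_read(int reg)            { return reg * 2; }
static void demo_write(int reg, int val) { printf("write %d <- %d\n", reg, val); }

/* const: the pointer table itself is read-only after link time */
static const struct ops demo_ops = {
	.read  = demo_read,
	.write = demo_write,
};

int main(void)
{
	const struct ops *ops = &demo_ops;	/* callers take a const pointer */

	ops->write(1, ops->read(3));
	/* demo_ops.read = NULL; */		/* would not compile: assignment
						   to member of read-only object */
	return 0;
}
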
8438 diff -urNp linux-2.6.32.43/arch/x86/include/asm/e820.h linux-2.6.32.43/arch/x86/include/asm/e820.h
8439 --- linux-2.6.32.43/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8440 +++ linux-2.6.32.43/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8441 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8442 #define ISA_END_ADDRESS 0x100000
8443 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8444
8445 -#define BIOS_BEGIN 0x000a0000
8446 +#define BIOS_BEGIN 0x000c0000
8447 #define BIOS_END 0x00100000
8448
8449 #ifdef __KERNEL__
8450 diff -urNp linux-2.6.32.43/arch/x86/include/asm/elf.h linux-2.6.32.43/arch/x86/include/asm/elf.h
8451 --- linux-2.6.32.43/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8452 +++ linux-2.6.32.43/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8453 @@ -257,7 +257,25 @@ extern int force_personality32;
8454 the loader. We need to make sure that it is out of the way of the program
8455 that it will "exec", and that there is sufficient room for the brk. */
8456
8457 +#ifdef CONFIG_PAX_SEGMEXEC
8458 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8459 +#else
8460 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8461 +#endif
8462 +
8463 +#ifdef CONFIG_PAX_ASLR
8464 +#ifdef CONFIG_X86_32
8465 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8466 +
8467 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8468 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8469 +#else
8470 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8471 +
8472 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8473 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8474 +#endif
8475 +#endif
8476
8477 /* This yields a mask that user programs can use to figure out what
8478 instruction set this CPU supports. This could be done in user space,
8479 @@ -311,8 +329,7 @@ do { \
8480 #define ARCH_DLINFO \
8481 do { \
8482 if (vdso_enabled) \
8483 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8484 - (unsigned long)current->mm->context.vdso); \
8485 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8486 } while (0)
8487
8488 #define AT_SYSINFO 32
8489 @@ -323,7 +340,7 @@ do { \
8490
8491 #endif /* !CONFIG_X86_32 */
8492
8493 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8494 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8495
8496 #define VDSO_ENTRY \
8497 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8498 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8499 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8500 #define compat_arch_setup_additional_pages syscall32_setup_pages
8501
8502 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8503 -#define arch_randomize_brk arch_randomize_brk
8504 -
8505 #endif /* _ASM_X86_ELF_H */
8506 diff -urNp linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h
8507 --- linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8508 +++ linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8509 @@ -15,6 +15,6 @@ enum reboot_type {
8510
8511 extern enum reboot_type reboot_type;
8512
8513 -extern void machine_emergency_restart(void);
8514 +extern void machine_emergency_restart(void) __noreturn;
8515
8516 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8517 diff -urNp linux-2.6.32.43/arch/x86/include/asm/futex.h linux-2.6.32.43/arch/x86/include/asm/futex.h
8518 --- linux-2.6.32.43/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8519 +++ linux-2.6.32.43/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8520 @@ -12,16 +12,18 @@
8521 #include <asm/system.h>
8522
8523 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8524 + typecheck(u32 *, uaddr); \
8525 asm volatile("1:\t" insn "\n" \
8526 "2:\t.section .fixup,\"ax\"\n" \
8527 "3:\tmov\t%3, %1\n" \
8528 "\tjmp\t2b\n" \
8529 "\t.previous\n" \
8530 _ASM_EXTABLE(1b, 3b) \
8531 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8532 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8533 : "i" (-EFAULT), "0" (oparg), "1" (0))
8534
8535 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8536 + typecheck(u32 *, uaddr); \
8537 asm volatile("1:\tmovl %2, %0\n" \
8538 "\tmovl\t%0, %3\n" \
8539 "\t" insn "\n" \
8540 @@ -34,10 +36,10 @@
8541 _ASM_EXTABLE(1b, 4b) \
8542 _ASM_EXTABLE(2b, 4b) \
8543 : "=&a" (oldval), "=&r" (ret), \
8544 - "+m" (*uaddr), "=&r" (tem) \
8545 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8546 : "r" (oparg), "i" (-EFAULT), "1" (0))
8547
8548 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8549 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8550 {
8551 int op = (encoded_op >> 28) & 7;
8552 int cmp = (encoded_op >> 24) & 15;
8553 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8554
8555 switch (op) {
8556 case FUTEX_OP_SET:
8557 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8558 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8559 break;
8560 case FUTEX_OP_ADD:
8561 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8562 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8563 uaddr, oparg);
8564 break;
8565 case FUTEX_OP_OR:
8566 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8567 return ret;
8568 }
8569
8570 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8571 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8572 int newval)
8573 {
8574
8575 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8576 return -ENOSYS;
8577 #endif
8578
8579 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8580 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8581 return -EFAULT;
8582
8583 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8584 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8585 "2:\t.section .fixup, \"ax\"\n"
8586 "3:\tmov %2, %0\n"
8587 "\tjmp 2b\n"
8588 "\t.previous\n"
8589 _ASM_EXTABLE(1b, 3b)
8590 - : "=a" (oldval), "+m" (*uaddr)
8591 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8592 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8593 : "memory"
8594 );
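
The futex hunks above narrow uaddr from int __user * to u32 __user * and open both __futex_atomic_op macros with typecheck(u32 *, uaddr), so a caller that still passes the old pointer type is caught at compile time instead of being silently accepted through the macro. typecheck() comes from include/linux/typecheck.h; its definition is approximately the compare-two-dummy-addresses idiom sketched below (reproduced from memory, for illustration only).

/*
 * Hedged sketch of the typecheck() idiom the futex hunks rely on; the real
 * macro lives in include/linux/typecheck.h and looks roughly like this.
 * Comparing the addresses of two dummies makes the compiler warn
 * ("comparison of distinct pointer types") whenever x is not of the
 * requested type, while generating no code at all.
 */
#include <stdint.h>
#include <stdio.h>

#define typecheck(type, x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

int main(void)
{
	uint32_t val = 7;
	uint32_t *uaddr = &val;

	typecheck(uint32_t *, uaddr);	/* fine: types match */
	/* typecheck(uint64_t *, uaddr); */	/* would warn: distinct pointer types */
	printf("*uaddr = %u\n", *uaddr);
	return 0;
}
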
8595 diff -urNp linux-2.6.32.43/arch/x86/include/asm/hw_irq.h linux-2.6.32.43/arch/x86/include/asm/hw_irq.h
8596 --- linux-2.6.32.43/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8597 +++ linux-2.6.32.43/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8598 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8599 extern void enable_IO_APIC(void);
8600
8601 /* Statistics */
8602 -extern atomic_t irq_err_count;
8603 -extern atomic_t irq_mis_count;
8604 +extern atomic_unchecked_t irq_err_count;
8605 +extern atomic_unchecked_t irq_mis_count;
8606
8607 /* EISA */
8608 extern void eisa_set_level_irq(unsigned int irq);
8609 diff -urNp linux-2.6.32.43/arch/x86/include/asm/i387.h linux-2.6.32.43/arch/x86/include/asm/i387.h
8610 --- linux-2.6.32.43/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8611 +++ linux-2.6.32.43/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8612 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8613 {
8614 int err;
8615
8616 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8617 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8618 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8619 +#endif
8620 +
8621 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8622 "2:\n"
8623 ".section .fixup,\"ax\"\n"
8624 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8625 {
8626 int err;
8627
8628 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8629 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8630 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8631 +#endif
8632 +
8633 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8634 "2:\n"
8635 ".section .fixup,\"ax\"\n"
8636 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8637 }
8638
8639 /* We need a safe address that is cheap to find and that is already
8640 - in L1 during context switch. The best choices are unfortunately
8641 - different for UP and SMP */
8642 -#ifdef CONFIG_SMP
8643 -#define safe_address (__per_cpu_offset[0])
8644 -#else
8645 -#define safe_address (kstat_cpu(0).cpustat.user)
8646 -#endif
8647 + in L1 during context switch. */
8648 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8649
8650 /*
8651 * These must be called with preempt disabled
8652 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8653 struct thread_info *me = current_thread_info();
8654 preempt_disable();
8655 if (me->status & TS_USEDFPU)
8656 - __save_init_fpu(me->task);
8657 + __save_init_fpu(current);
8658 else
8659 clts();
8660 }
8661 diff -urNp linux-2.6.32.43/arch/x86/include/asm/io_32.h linux-2.6.32.43/arch/x86/include/asm/io_32.h
8662 --- linux-2.6.32.43/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8663 +++ linux-2.6.32.43/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8664 @@ -3,6 +3,7 @@
8665
8666 #include <linux/string.h>
8667 #include <linux/compiler.h>
8668 +#include <asm/processor.h>
8669
8670 /*
8671 * This file contains the definitions for the x86 IO instructions
8672 @@ -42,6 +43,17 @@
8673
8674 #ifdef __KERNEL__
8675
8676 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8677 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8678 +{
8679 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8680 +}
8681 +
8682 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8683 +{
8684 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8685 +}
8686 +
8687 #include <asm-generic/iomap.h>
8688
8689 #include <linux/vmalloc.h>
8690 diff -urNp linux-2.6.32.43/arch/x86/include/asm/io_64.h linux-2.6.32.43/arch/x86/include/asm/io_64.h
8691 --- linux-2.6.32.43/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8692 +++ linux-2.6.32.43/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8693 @@ -140,6 +140,17 @@ __OUTS(l)
8694
8695 #include <linux/vmalloc.h>
8696
8697 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8698 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8699 +{
8700 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8701 +}
8702 +
8703 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8704 +{
8705 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8706 +}
8707 +
8708 #include <asm-generic/iomap.h>
8709
8710 void __memcpy_fromio(void *, unsigned long, unsigned);
8711 diff -urNp linux-2.6.32.43/arch/x86/include/asm/iommu.h linux-2.6.32.43/arch/x86/include/asm/iommu.h
8712 --- linux-2.6.32.43/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8713 +++ linux-2.6.32.43/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8714 @@ -3,7 +3,7 @@
8715
8716 extern void pci_iommu_shutdown(void);
8717 extern void no_iommu_init(void);
8718 -extern struct dma_map_ops nommu_dma_ops;
8719 +extern const struct dma_map_ops nommu_dma_ops;
8720 extern int force_iommu, no_iommu;
8721 extern int iommu_detected;
8722 extern int iommu_pass_through;
8723 diff -urNp linux-2.6.32.43/arch/x86/include/asm/irqflags.h linux-2.6.32.43/arch/x86/include/asm/irqflags.h
8724 --- linux-2.6.32.43/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8725 +++ linux-2.6.32.43/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8726 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8727 sti; \
8728 sysexit
8729
8730 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8731 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8732 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8733 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8734 +
8735 #else
8736 #define INTERRUPT_RETURN iret
8737 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8738 diff -urNp linux-2.6.32.43/arch/x86/include/asm/kprobes.h linux-2.6.32.43/arch/x86/include/asm/kprobes.h
8739 --- linux-2.6.32.43/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8740 +++ linux-2.6.32.43/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8741 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8742 #define BREAKPOINT_INSTRUCTION 0xcc
8743 #define RELATIVEJUMP_INSTRUCTION 0xe9
8744 #define MAX_INSN_SIZE 16
8745 -#define MAX_STACK_SIZE 64
8746 -#define MIN_STACK_SIZE(ADDR) \
8747 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8748 - THREAD_SIZE - (unsigned long)(ADDR))) \
8749 - ? (MAX_STACK_SIZE) \
8750 - : (((unsigned long)current_thread_info()) + \
8751 - THREAD_SIZE - (unsigned long)(ADDR)))
8752 +#define MAX_STACK_SIZE 64UL
8753 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8754
8755 #define flush_insn_slot(p) do { } while (0)
8756
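
In the kprobes.h hunk above, the open-coded MIN_STACK_SIZE ternary is replaced with the kernel's min() macro and MAX_STACK_SIZE becomes 64UL rather than 64. The UL suffix matters because min() insists that both arguments share a type, and the other operand, current->thread.sp0 - (unsigned long)(ADDR), is an unsigned long. A hedged sketch follows; the min() definition below is an approximation of the one in include/linux/kernel.h.

/*
 * Why MAX_STACK_SIZE becomes 64UL above: the kernel's min() macro
 * (approximated here) deliberately compares the addresses of two dummies
 * of the argument types, so mixing a plain int 64 with an unsigned long
 * operand would trip the distinct-pointer-types warning.
 */
#include <stdio.h>

#define min(x, y) \
({	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	(void)(&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2; \
})

int main(void)
{
	unsigned long depth = 100;

	printf("%lu\n", min(64UL, depth));	/* 64; types match            */
	/* printf("%lu\n", min(64, depth)); */	/* would warn: distinct types */
	return 0;
}
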
8757 diff -urNp linux-2.6.32.43/arch/x86/include/asm/kvm_host.h linux-2.6.32.43/arch/x86/include/asm/kvm_host.h
8758 --- linux-2.6.32.43/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8759 +++ linux-2.6.32.43/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8760 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8761 const struct trace_print_flags *exit_reasons_str;
8762 };
8763
8764 -extern struct kvm_x86_ops *kvm_x86_ops;
8765 +extern const struct kvm_x86_ops *kvm_x86_ops;
8766
8767 int kvm_mmu_module_init(void);
8768 void kvm_mmu_module_exit(void);
8769 diff -urNp linux-2.6.32.43/arch/x86/include/asm/local.h linux-2.6.32.43/arch/x86/include/asm/local.h
8770 --- linux-2.6.32.43/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8771 +++ linux-2.6.32.43/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8772 @@ -18,26 +18,58 @@ typedef struct {
8773
8774 static inline void local_inc(local_t *l)
8775 {
8776 - asm volatile(_ASM_INC "%0"
8777 + asm volatile(_ASM_INC "%0\n"
8778 +
8779 +#ifdef CONFIG_PAX_REFCOUNT
8780 + "jno 0f\n"
8781 + _ASM_DEC "%0\n"
8782 + "int $4\n0:\n"
8783 + _ASM_EXTABLE(0b, 0b)
8784 +#endif
8785 +
8786 : "+m" (l->a.counter));
8787 }
8788
8789 static inline void local_dec(local_t *l)
8790 {
8791 - asm volatile(_ASM_DEC "%0"
8792 + asm volatile(_ASM_DEC "%0\n"
8793 +
8794 +#ifdef CONFIG_PAX_REFCOUNT
8795 + "jno 0f\n"
8796 + _ASM_INC "%0\n"
8797 + "int $4\n0:\n"
8798 + _ASM_EXTABLE(0b, 0b)
8799 +#endif
8800 +
8801 : "+m" (l->a.counter));
8802 }
8803
8804 static inline void local_add(long i, local_t *l)
8805 {
8806 - asm volatile(_ASM_ADD "%1,%0"
8807 + asm volatile(_ASM_ADD "%1,%0\n"
8808 +
8809 +#ifdef CONFIG_PAX_REFCOUNT
8810 + "jno 0f\n"
8811 + _ASM_SUB "%1,%0\n"
8812 + "int $4\n0:\n"
8813 + _ASM_EXTABLE(0b, 0b)
8814 +#endif
8815 +
8816 : "+m" (l->a.counter)
8817 : "ir" (i));
8818 }
8819
8820 static inline void local_sub(long i, local_t *l)
8821 {
8822 - asm volatile(_ASM_SUB "%1,%0"
8823 + asm volatile(_ASM_SUB "%1,%0\n"
8824 +
8825 +#ifdef CONFIG_PAX_REFCOUNT
8826 + "jno 0f\n"
8827 + _ASM_ADD "%1,%0\n"
8828 + "int $4\n0:\n"
8829 + _ASM_EXTABLE(0b, 0b)
8830 +#endif
8831 +
8832 : "+m" (l->a.counter)
8833 : "ir" (i));
8834 }
8835 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8836 {
8837 unsigned char c;
8838
8839 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8840 + asm volatile(_ASM_SUB "%2,%0\n"
8841 +
8842 +#ifdef CONFIG_PAX_REFCOUNT
8843 + "jno 0f\n"
8844 + _ASM_ADD "%2,%0\n"
8845 + "int $4\n0:\n"
8846 + _ASM_EXTABLE(0b, 0b)
8847 +#endif
8848 +
8849 + "sete %1\n"
8850 : "+m" (l->a.counter), "=qm" (c)
8851 : "ir" (i) : "memory");
8852 return c;
8853 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8854 {
8855 unsigned char c;
8856
8857 - asm volatile(_ASM_DEC "%0; sete %1"
8858 + asm volatile(_ASM_DEC "%0\n"
8859 +
8860 +#ifdef CONFIG_PAX_REFCOUNT
8861 + "jno 0f\n"
8862 + _ASM_INC "%0\n"
8863 + "int $4\n0:\n"
8864 + _ASM_EXTABLE(0b, 0b)
8865 +#endif
8866 +
8867 + "sete %1\n"
8868 : "+m" (l->a.counter), "=qm" (c)
8869 : : "memory");
8870 return c != 0;
8871 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8872 {
8873 unsigned char c;
8874
8875 - asm volatile(_ASM_INC "%0; sete %1"
8876 + asm volatile(_ASM_INC "%0\n"
8877 +
8878 +#ifdef CONFIG_PAX_REFCOUNT
8879 + "jno 0f\n"
8880 + _ASM_DEC "%0\n"
8881 + "int $4\n0:\n"
8882 + _ASM_EXTABLE(0b, 0b)
8883 +#endif
8884 +
8885 + "sete %1\n"
8886 : "+m" (l->a.counter), "=qm" (c)
8887 : : "memory");
8888 return c != 0;
8889 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8890 {
8891 unsigned char c;
8892
8893 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8894 + asm volatile(_ASM_ADD "%2,%0\n"
8895 +
8896 +#ifdef CONFIG_PAX_REFCOUNT
8897 + "jno 0f\n"
8898 + _ASM_SUB "%2,%0\n"
8899 + "int $4\n0:\n"
8900 + _ASM_EXTABLE(0b, 0b)
8901 +#endif
8902 +
8903 + "sets %1\n"
8904 : "+m" (l->a.counter), "=qm" (c)
8905 : "ir" (i) : "memory");
8906 return c;
8907 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8908 #endif
8909 /* Modern 486+ processor */
8910 __i = i;
8911 - asm volatile(_ASM_XADD "%0, %1;"
8912 + asm volatile(_ASM_XADD "%0, %1\n"
8913 +
8914 +#ifdef CONFIG_PAX_REFCOUNT
8915 + "jno 0f\n"
8916 + _ASM_MOV "%0,%1\n"
8917 + "int $4\n0:\n"
8918 + _ASM_EXTABLE(0b, 0b)
8919 +#endif
8920 +
8921 : "+r" (i), "+m" (l->a.counter)
8922 : : "memory");
8923 return i + __i;
8924 diff -urNp linux-2.6.32.43/arch/x86/include/asm/microcode.h linux-2.6.32.43/arch/x86/include/asm/microcode.h
8925 --- linux-2.6.32.43/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8926 +++ linux-2.6.32.43/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8927 @@ -12,13 +12,13 @@ struct device;
8928 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8929
8930 struct microcode_ops {
8931 - enum ucode_state (*request_microcode_user) (int cpu,
8932 + enum ucode_state (* const request_microcode_user) (int cpu,
8933 const void __user *buf, size_t size);
8934
8935 - enum ucode_state (*request_microcode_fw) (int cpu,
8936 + enum ucode_state (* const request_microcode_fw) (int cpu,
8937 struct device *device);
8938
8939 - void (*microcode_fini_cpu) (int cpu);
8940 + void (* const microcode_fini_cpu) (int cpu);
8941
8942 /*
8943 * The generic 'microcode_core' part guarantees that
8944 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8945 extern struct ucode_cpu_info ucode_cpu_info[];
8946
8947 #ifdef CONFIG_MICROCODE_INTEL
8948 -extern struct microcode_ops * __init init_intel_microcode(void);
8949 +extern const struct microcode_ops * __init init_intel_microcode(void);
8950 #else
8951 -static inline struct microcode_ops * __init init_intel_microcode(void)
8952 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8953 {
8954 return NULL;
8955 }
8956 #endif /* CONFIG_MICROCODE_INTEL */
8957
8958 #ifdef CONFIG_MICROCODE_AMD
8959 -extern struct microcode_ops * __init init_amd_microcode(void);
8960 +extern const struct microcode_ops * __init init_amd_microcode(void);
8961 #else
8962 -static inline struct microcode_ops * __init init_amd_microcode(void)
8963 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8964 {
8965 return NULL;
8966 }
8967 diff -urNp linux-2.6.32.43/arch/x86/include/asm/mman.h linux-2.6.32.43/arch/x86/include/asm/mman.h
8968 --- linux-2.6.32.43/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8969 +++ linux-2.6.32.43/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8970 @@ -5,4 +5,14 @@
8971
8972 #include <asm-generic/mman.h>
8973
8974 +#ifdef __KERNEL__
8975 +#ifndef __ASSEMBLY__
8976 +#ifdef CONFIG_X86_32
8977 +#define arch_mmap_check i386_mmap_check
8978 +int i386_mmap_check(unsigned long addr, unsigned long len,
8979 + unsigned long flags);
8980 +#endif
8981 +#endif
8982 +#endif
8983 +
8984 #endif /* _ASM_X86_MMAN_H */
8985 diff -urNp linux-2.6.32.43/arch/x86/include/asm/mmu_context.h linux-2.6.32.43/arch/x86/include/asm/mmu_context.h
8986 --- linux-2.6.32.43/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8987 +++ linux-2.6.32.43/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8988 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8989
8990 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8991 {
8992 +
8993 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8994 + unsigned int i;
8995 + pgd_t *pgd;
8996 +
8997 + pax_open_kernel();
8998 + pgd = get_cpu_pgd(smp_processor_id());
8999 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9000 + if (paravirt_enabled())
9001 + set_pgd(pgd+i, native_make_pgd(0));
9002 + else
9003 + pgd[i] = native_make_pgd(0);
9004 + pax_close_kernel();
9005 +#endif
9006 +
9007 #ifdef CONFIG_SMP
9008 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9009 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9010 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
9011 struct task_struct *tsk)
9012 {
9013 unsigned cpu = smp_processor_id();
9014 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
9015 + int tlbstate = TLBSTATE_OK;
9016 +#endif
9017
9018 if (likely(prev != next)) {
9019 #ifdef CONFIG_SMP
9020 +#ifdef CONFIG_X86_32
9021 + tlbstate = percpu_read(cpu_tlbstate.state);
9022 +#endif
9023 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9024 percpu_write(cpu_tlbstate.active_mm, next);
9025 #endif
9026 cpumask_set_cpu(cpu, mm_cpumask(next));
9027
9028 /* Re-load page tables */
9029 +#ifdef CONFIG_PAX_PER_CPU_PGD
9030 + pax_open_kernel();
9031 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9032 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9033 + pax_close_kernel();
9034 + load_cr3(get_cpu_pgd(cpu));
9035 +#else
9036 load_cr3(next->pgd);
9037 +#endif
9038
9039 /* stop flush ipis for the previous mm */
9040 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9041 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9042 */
9043 if (unlikely(prev->context.ldt != next->context.ldt))
9044 load_LDT_nolock(&next->context);
9045 - }
9046 +
9047 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9048 + if (!nx_enabled) {
9049 + smp_mb__before_clear_bit();
9050 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9051 + smp_mb__after_clear_bit();
9052 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9053 + }
9054 +#endif
9055 +
9056 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9057 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9058 + prev->context.user_cs_limit != next->context.user_cs_limit))
9059 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9060 #ifdef CONFIG_SMP
9061 + else if (unlikely(tlbstate != TLBSTATE_OK))
9062 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9063 +#endif
9064 +#endif
9065 +
9066 + }
9067 else {
9068 +
9069 +#ifdef CONFIG_PAX_PER_CPU_PGD
9070 + pax_open_kernel();
9071 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9072 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9073 + pax_close_kernel();
9074 + load_cr3(get_cpu_pgd(cpu));
9075 +#endif
9076 +
9077 +#ifdef CONFIG_SMP
9078 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9079 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9080
9081 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9082 * tlb flush IPI delivery. We must reload CR3
9083 * to make sure to use no freed page tables.
9084 */
9085 +
9086 +#ifndef CONFIG_PAX_PER_CPU_PGD
9087 load_cr3(next->pgd);
9088 +#endif
9089 +
9090 load_LDT_nolock(&next->context);
9091 +
9092 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9093 + if (!nx_enabled)
9094 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9095 +#endif
9096 +
9097 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9098 +#ifdef CONFIG_PAX_PAGEEXEC
9099 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9100 +#endif
9101 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9102 +#endif
9103 +
9104 }
9105 - }
9106 #endif
9107 + }
9108 }
9109
9110 #define activate_mm(prev, next) \
9111 diff -urNp linux-2.6.32.43/arch/x86/include/asm/mmu.h linux-2.6.32.43/arch/x86/include/asm/mmu.h
9112 --- linux-2.6.32.43/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9113 +++ linux-2.6.32.43/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9114 @@ -9,10 +9,23 @@
9115 * we put the segment information here.
9116 */
9117 typedef struct {
9118 - void *ldt;
9119 + struct desc_struct *ldt;
9120 int size;
9121 struct mutex lock;
9122 - void *vdso;
9123 + unsigned long vdso;
9124 +
9125 +#ifdef CONFIG_X86_32
9126 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9127 + unsigned long user_cs_base;
9128 + unsigned long user_cs_limit;
9129 +
9130 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9131 + cpumask_t cpu_user_cs_mask;
9132 +#endif
9133 +
9134 +#endif
9135 +#endif
9136 +
9137 } mm_context_t;
9138
9139 #ifdef CONFIG_SMP
9140 diff -urNp linux-2.6.32.43/arch/x86/include/asm/module.h linux-2.6.32.43/arch/x86/include/asm/module.h
9141 --- linux-2.6.32.43/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9142 +++ linux-2.6.32.43/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9143 @@ -5,6 +5,7 @@
9144
9145 #ifdef CONFIG_X86_64
9146 /* X86_64 does not define MODULE_PROC_FAMILY */
9147 +#define MODULE_PROC_FAMILY ""
9148 #elif defined CONFIG_M386
9149 #define MODULE_PROC_FAMILY "386 "
9150 #elif defined CONFIG_M486
9151 @@ -59,13 +60,36 @@
9152 #error unknown processor family
9153 #endif
9154
9155 -#ifdef CONFIG_X86_32
9156 -# ifdef CONFIG_4KSTACKS
9157 -# define MODULE_STACKSIZE "4KSTACKS "
9158 -# else
9159 -# define MODULE_STACKSIZE ""
9160 -# endif
9161 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9163 +#define MODULE_PAX_UDEREF "UDEREF "
9164 +#else
9165 +#define MODULE_PAX_UDEREF ""
9166 +#endif
9167 +
9168 +#ifdef CONFIG_PAX_KERNEXEC
9169 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
9170 +#else
9171 +#define MODULE_PAX_KERNEXEC ""
9172 +#endif
9173 +
9174 +#ifdef CONFIG_PAX_REFCOUNT
9175 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
9176 +#else
9177 +#define MODULE_PAX_REFCOUNT ""
9178 #endif
9179
9180 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9181 +#define MODULE_STACKSIZE "4KSTACKS "
9182 +#else
9183 +#define MODULE_STACKSIZE ""
9184 +#endif
9185 +
9186 +#ifdef CONFIG_GRKERNSEC
9187 +#define MODULE_GRSEC "GRSECURITY "
9188 +#else
9189 +#define MODULE_GRSEC ""
9190 +#endif
9191 +
9192 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9193 +
9194 #endif /* _ASM_X86_MODULE_H */
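
The module.h hunk above folds the enabled hardening options into MODULE_ARCH_VERMAGIC, so a module built without, say, UDEREF or KERNEXEC carries a different vermagic string and is rejected when loaded into a kernel that has them. The string is built purely by preprocessor concatenation of adjacent literals; the sketch below shows the expansion under one assumed configuration (x86_64, so MODULE_PROC_FAMILY and MODULE_STACKSIZE are empty, with GRKERNSEC, KERNEXEC, UDEREF and REFCOUNT enabled).

/*
 * Hedged illustration of the MODULE_ARCH_VERMAGIC assembly above, assuming
 * an x86_64 build with CONFIG_GRKERNSEC, CONFIG_PAX_KERNEXEC,
 * CONFIG_PAX_MEMORY_UDEREF and CONFIG_PAX_REFCOUNT all enabled.  Adjacent
 * string literals concatenate, so vermagic grows one token per enabled
 * feature and the module loader rejects modules whose vermagic does not
 * match the kernel's.
 */
#include <stdio.h>

#define MODULE_PROC_FAMILY  ""
#define MODULE_STACKSIZE    ""
#define MODULE_GRSEC        "GRSECURITY "
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_PAX_REFCOUNT "REFCOUNT "

#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
	MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
	/* prints: GRSECURITY KERNEXEC UDEREF REFCOUNT */
	puts(MODULE_ARCH_VERMAGIC);
	return 0;
}
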
9195 diff -urNp linux-2.6.32.43/arch/x86/include/asm/page_64_types.h linux-2.6.32.43/arch/x86/include/asm/page_64_types.h
9196 --- linux-2.6.32.43/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9197 +++ linux-2.6.32.43/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9198 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9199
9200 /* duplicated to the one in bootmem.h */
9201 extern unsigned long max_pfn;
9202 -extern unsigned long phys_base;
9203 +extern const unsigned long phys_base;
9204
9205 extern unsigned long __phys_addr(unsigned long);
9206 #define __phys_reloc_hide(x) (x)
9207 diff -urNp linux-2.6.32.43/arch/x86/include/asm/paravirt.h linux-2.6.32.43/arch/x86/include/asm/paravirt.h
9208 --- linux-2.6.32.43/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9209 +++ linux-2.6.32.43/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9210 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9211 pv_mmu_ops.set_fixmap(idx, phys, flags);
9212 }
9213
9214 +#ifdef CONFIG_PAX_KERNEXEC
9215 +static inline unsigned long pax_open_kernel(void)
9216 +{
9217 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9218 +}
9219 +
9220 +static inline unsigned long pax_close_kernel(void)
9221 +{
9222 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9223 +}
9224 +#else
9225 +static inline unsigned long pax_open_kernel(void) { return 0; }
9226 +static inline unsigned long pax_close_kernel(void) { return 0; }
9227 +#endif
9228 +
9229 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9230
9231 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9232 @@ -945,7 +960,7 @@ extern void default_banner(void);
9233
9234 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9235 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9236 -#define PARA_INDIRECT(addr) *%cs:addr
9237 +#define PARA_INDIRECT(addr) *%ss:addr
9238 #endif
9239
9240 #define INTERRUPT_RETURN \
9241 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9242 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9243 CLBR_NONE, \
9244 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9245 +
9246 +#define GET_CR0_INTO_RDI \
9247 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9248 + mov %rax,%rdi
9249 +
9250 +#define SET_RDI_INTO_CR0 \
9251 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9252 +
9253 +#define GET_CR3_INTO_RDI \
9254 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9255 + mov %rax,%rdi
9256 +
9257 +#define SET_RDI_INTO_CR3 \
9258 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9259 +
9260 #endif /* CONFIG_X86_32 */
9261
9262 #endif /* __ASSEMBLY__ */
9263 diff -urNp linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h
9264 --- linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9265 +++ linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
9266 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9267 an mfn. We can tell which is which from the index. */
9268 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9269 phys_addr_t phys, pgprot_t flags);
9270 +
9271 +#ifdef CONFIG_PAX_KERNEXEC
9272 + unsigned long (*pax_open_kernel)(void);
9273 + unsigned long (*pax_close_kernel)(void);
9274 +#endif
9275 +
9276 };
9277
9278 struct raw_spinlock;
9279 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pci_x86.h linux-2.6.32.43/arch/x86/include/asm/pci_x86.h
9280 --- linux-2.6.32.43/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9281 +++ linux-2.6.32.43/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9282 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9283 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9284
9285 struct pci_raw_ops {
9286 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9287 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9288 int reg, int len, u32 *val);
9289 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9290 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9291 int reg, int len, u32 val);
9292 };
9293
9294 -extern struct pci_raw_ops *raw_pci_ops;
9295 -extern struct pci_raw_ops *raw_pci_ext_ops;
9296 +extern const struct pci_raw_ops *raw_pci_ops;
9297 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9298
9299 -extern struct pci_raw_ops pci_direct_conf1;
9300 +extern const struct pci_raw_ops pci_direct_conf1;
9301 extern bool port_cf9_safe;
9302
9303 /* arch_initcall level */
9304 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgalloc.h linux-2.6.32.43/arch/x86/include/asm/pgalloc.h
9305 --- linux-2.6.32.43/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9306 +++ linux-2.6.32.43/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9307 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9308 pmd_t *pmd, pte_t *pte)
9309 {
9310 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9311 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9312 +}
9313 +
9314 +static inline void pmd_populate_user(struct mm_struct *mm,
9315 + pmd_t *pmd, pte_t *pte)
9316 +{
9317 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9318 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9319 }
9320
9321 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h
9322 --- linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9323 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9324 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9325
9326 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9327 {
9328 + pax_open_kernel();
9329 *pmdp = pmd;
9330 + pax_close_kernel();
9331 }
9332
9333 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9334 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h
9335 --- linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9336 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9337 @@ -26,9 +26,6 @@
9338 struct mm_struct;
9339 struct vm_area_struct;
9340
9341 -extern pgd_t swapper_pg_dir[1024];
9342 -extern pgd_t trampoline_pg_dir[1024];
9343 -
9344 static inline void pgtable_cache_init(void) { }
9345 static inline void check_pgt_cache(void) { }
9346 void paging_init(void);
9347 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9348 # include <asm/pgtable-2level.h>
9349 #endif
9350
9351 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9352 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9353 +#ifdef CONFIG_X86_PAE
9354 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9355 +#endif
9356 +
9357 #if defined(CONFIG_HIGHPTE)
9358 #define __KM_PTE \
9359 (in_nmi() ? KM_NMI_PTE : \
9360 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9361 /* Clear a kernel PTE and flush it from the TLB */
9362 #define kpte_clear_flush(ptep, vaddr) \
9363 do { \
9364 + pax_open_kernel(); \
9365 pte_clear(&init_mm, (vaddr), (ptep)); \
9366 + pax_close_kernel(); \
9367 __flush_tlb_one((vaddr)); \
9368 } while (0)
9369
9370 @@ -85,6 +90,9 @@ do { \
9371
9372 #endif /* !__ASSEMBLY__ */
9373
9374 +#define HAVE_ARCH_UNMAPPED_AREA
9375 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9376 +
9377 /*
9378 * kern_addr_valid() is (1) for FLATMEM and (0) for
9379 * SPARSEMEM and DISCONTIGMEM
9380 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h
9381 --- linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9382 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9383 @@ -8,7 +8,7 @@
9384 */
9385 #ifdef CONFIG_X86_PAE
9386 # include <asm/pgtable-3level_types.h>
9387 -# define PMD_SIZE (1UL << PMD_SHIFT)
9388 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9389 # define PMD_MASK (~(PMD_SIZE - 1))
9390 #else
9391 # include <asm/pgtable-2level_types.h>
9392 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9393 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9394 #endif
9395
9396 +#ifdef CONFIG_PAX_KERNEXEC
9397 +#ifndef __ASSEMBLY__
9398 +extern unsigned char MODULES_EXEC_VADDR[];
9399 +extern unsigned char MODULES_EXEC_END[];
9400 +#endif
9401 +#include <asm/boot.h>
9402 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9403 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9404 +#else
9405 +#define ktla_ktva(addr) (addr)
9406 +#define ktva_ktla(addr) (addr)
9407 +#endif
9408 +
9409 #define MODULES_VADDR VMALLOC_START
9410 #define MODULES_END VMALLOC_END
9411 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9412 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h
9413 --- linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9414 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9415 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9416
9417 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9418 {
9419 + pax_open_kernel();
9420 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9421 + pax_close_kernel();
9422 }
9423
9424 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9425 {
9426 + pax_open_kernel();
9427 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9428 + pax_close_kernel();
9429 }
9430
9431 /*
9432 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h
9433 --- linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9434 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9435 @@ -16,10 +16,13 @@
9436
9437 extern pud_t level3_kernel_pgt[512];
9438 extern pud_t level3_ident_pgt[512];
9439 +extern pud_t level3_vmalloc_pgt[512];
9440 +extern pud_t level3_vmemmap_pgt[512];
9441 +extern pud_t level2_vmemmap_pgt[512];
9442 extern pmd_t level2_kernel_pgt[512];
9443 extern pmd_t level2_fixmap_pgt[512];
9444 -extern pmd_t level2_ident_pgt[512];
9445 -extern pgd_t init_level4_pgt[];
9446 +extern pmd_t level2_ident_pgt[512*2];
9447 +extern pgd_t init_level4_pgt[512];
9448
9449 #define swapper_pg_dir init_level4_pgt
9450
9451 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9452
9453 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9454 {
9455 + pax_open_kernel();
9456 *pmdp = pmd;
9457 + pax_close_kernel();
9458 }
9459
9460 static inline void native_pmd_clear(pmd_t *pmd)
9461 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9462
9463 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9464 {
9465 + pax_open_kernel();
9466 *pgdp = pgd;
9467 + pax_close_kernel();
9468 }
9469
9470 static inline void native_pgd_clear(pgd_t *pgd)
9471 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h
9472 --- linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9473 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9474 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9475 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9476 #define MODULES_END _AC(0xffffffffff000000, UL)
9477 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9478 +#define MODULES_EXEC_VADDR MODULES_VADDR
9479 +#define MODULES_EXEC_END MODULES_END
9480 +
9481 +#define ktla_ktva(addr) (addr)
9482 +#define ktva_ktla(addr) (addr)
9483
9484 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9485 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable.h linux-2.6.32.43/arch/x86/include/asm/pgtable.h
9486 --- linux-2.6.32.43/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9487 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9488 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9489
9490 #define arch_end_context_switch(prev) do {} while(0)
9491
9492 +#define pax_open_kernel() native_pax_open_kernel()
9493 +#define pax_close_kernel() native_pax_close_kernel()
9494 #endif /* CONFIG_PARAVIRT */
9495
9496 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9497 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9498 +
9499 +#ifdef CONFIG_PAX_KERNEXEC
9500 +static inline unsigned long native_pax_open_kernel(void)
9501 +{
9502 + unsigned long cr0;
9503 +
9504 + preempt_disable();
9505 + barrier();
9506 + cr0 = read_cr0() ^ X86_CR0_WP;
9507 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9508 + write_cr0(cr0);
9509 + return cr0 ^ X86_CR0_WP;
9510 +}
9511 +
9512 +static inline unsigned long native_pax_close_kernel(void)
9513 +{
9514 + unsigned long cr0;
9515 +
9516 + cr0 = read_cr0() ^ X86_CR0_WP;
9517 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9518 + write_cr0(cr0);
9519 + barrier();
9520 + preempt_enable_no_resched();
9521 + return cr0 ^ X86_CR0_WP;
9522 +}
9523 +#else
9524 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9525 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9526 +#endif
9527 +
9528 /*
9529 * The following only work if pte_present() is true.
9530 * Undefined behaviour if not..
9531 */
9532 +static inline int pte_user(pte_t pte)
9533 +{
9534 + return pte_val(pte) & _PAGE_USER;
9535 +}
9536 +
9537 static inline int pte_dirty(pte_t pte)
9538 {
9539 return pte_flags(pte) & _PAGE_DIRTY;
9540 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9541 return pte_clear_flags(pte, _PAGE_RW);
9542 }
9543
9544 +static inline pte_t pte_mkread(pte_t pte)
9545 +{
9546 + return __pte(pte_val(pte) | _PAGE_USER);
9547 +}
9548 +
9549 static inline pte_t pte_mkexec(pte_t pte)
9550 {
9551 - return pte_clear_flags(pte, _PAGE_NX);
9552 +#ifdef CONFIG_X86_PAE
9553 + if (__supported_pte_mask & _PAGE_NX)
9554 + return pte_clear_flags(pte, _PAGE_NX);
9555 + else
9556 +#endif
9557 + return pte_set_flags(pte, _PAGE_USER);
9558 +}
9559 +
9560 +static inline pte_t pte_exprotect(pte_t pte)
9561 +{
9562 +#ifdef CONFIG_X86_PAE
9563 + if (__supported_pte_mask & _PAGE_NX)
9564 + return pte_set_flags(pte, _PAGE_NX);
9565 + else
9566 +#endif
9567 + return pte_clear_flags(pte, _PAGE_USER);
9568 }
9569
9570 static inline pte_t pte_mkdirty(pte_t pte)
9571 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9572 #endif
9573
9574 #ifndef __ASSEMBLY__
9575 +
9576 +#ifdef CONFIG_PAX_PER_CPU_PGD
9577 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9578 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9579 +{
9580 + return cpu_pgd[cpu];
9581 +}
9582 +#endif
9583 +
9584 #include <linux/mm_types.h>
9585
9586 static inline int pte_none(pte_t pte)
9587 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9588
9589 static inline int pgd_bad(pgd_t pgd)
9590 {
9591 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9592 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9593 }
9594
9595 static inline int pgd_none(pgd_t pgd)
9596 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9597 * pgd_offset() returns a (pgd_t *)
9598 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9599 */
9600 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9601 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9602 +
9603 +#ifdef CONFIG_PAX_PER_CPU_PGD
9604 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9605 +#endif
9606 +
9607 /*
9608 * a shortcut which implies the use of the kernel's pgd, instead
9609 * of a process's
9610 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9611 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9612 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9613
9614 +#ifdef CONFIG_X86_32
9615 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9616 +#else
9617 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9618 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9619 +
9620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9621 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9622 +#else
9623 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9624 +#endif
9625 +
9626 +#endif
9627 +
9628 #ifndef __ASSEMBLY__
9629
9630 extern int direct_gbpages;
9631 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9632 * dst and src can be on the same page, but the range must not overlap,
9633 * and must not cross a page boundary.
9634 */
9635 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9636 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9637 {
9638 - memcpy(dst, src, count * sizeof(pgd_t));
9639 + pax_open_kernel();
9640 + while (count--)
9641 + *dst++ = *src++;
9642 + pax_close_kernel();
9643 }
9644
9645 +#ifdef CONFIG_PAX_PER_CPU_PGD
9646 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9647 +#endif
9648 +
9649 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9650 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9651 +#else
9652 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9653 +#endif
9654
9655 #include <asm-generic/pgtable.h>
9656 #endif /* __ASSEMBLY__ */
9657 diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h
9658 --- linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9659 +++ linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9660 @@ -16,12 +16,11 @@
9661 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9662 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9663 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9664 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9665 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9666 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9667 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9668 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9669 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9670 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9671 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9672 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9673
9674 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9675 @@ -39,7 +38,6 @@
9676 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9677 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9678 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9679 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9680 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9681 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9682 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9683 @@ -55,8 +53,10 @@
9684
9685 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9686 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9687 -#else
9688 +#elif defined(CONFIG_KMEMCHECK)
9689 #define _PAGE_NX (_AT(pteval_t, 0))
9690 +#else
9691 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9692 #endif
9693
9694 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9695 @@ -93,6 +93,9 @@
9696 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9697 _PAGE_ACCESSED)
9698
9699 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9700 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9701 +
9702 #define __PAGE_KERNEL_EXEC \
9703 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9704 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9705 @@ -103,8 +106,8 @@
9706 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9707 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9708 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9709 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9710 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9711 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9712 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9713 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9714 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9715 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9716 @@ -163,8 +166,8 @@
9717 * bits are combined, this will alow user to access the high address mapped
9718 * VDSO in the presence of CONFIG_COMPAT_VDSO
9719 */
9720 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9721 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9722 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9723 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9724 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9725 #endif
9726
9727 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9728 {
9729 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9730 }
9731 +#endif
9732
9733 +#if PAGETABLE_LEVELS == 3
9734 +#include <asm-generic/pgtable-nopud.h>
9735 +#endif
9736 +
9737 +#if PAGETABLE_LEVELS == 2
9738 +#include <asm-generic/pgtable-nopmd.h>
9739 +#endif
9740 +
9741 +#ifndef __ASSEMBLY__
9742 #if PAGETABLE_LEVELS > 3
9743 typedef struct { pudval_t pud; } pud_t;
9744
9745 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9746 return pud.pud;
9747 }
9748 #else
9749 -#include <asm-generic/pgtable-nopud.h>
9750 -
9751 static inline pudval_t native_pud_val(pud_t pud)
9752 {
9753 return native_pgd_val(pud.pgd);
9754 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9755 return pmd.pmd;
9756 }
9757 #else
9758 -#include <asm-generic/pgtable-nopmd.h>
9759 -
9760 static inline pmdval_t native_pmd_val(pmd_t pmd)
9761 {
9762 return native_pgd_val(pmd.pud.pgd);
9763 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9764
9765 extern pteval_t __supported_pte_mask;
9766 extern void set_nx(void);
9767 +
9768 +#ifdef CONFIG_X86_32
9769 +#ifdef CONFIG_X86_PAE
9770 extern int nx_enabled;
9771 +#else
9772 +#define nx_enabled (0)
9773 +#endif
9774 +#else
9775 +#define nx_enabled (1)
9776 +#endif
9777
9778 #define pgprot_writecombine pgprot_writecombine
9779 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9780 diff -urNp linux-2.6.32.43/arch/x86/include/asm/processor.h linux-2.6.32.43/arch/x86/include/asm/processor.h
9781 --- linux-2.6.32.43/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9782 +++ linux-2.6.32.43/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9783 @@ -272,7 +272,7 @@ struct tss_struct {
9784
9785 } ____cacheline_aligned;
9786
9787 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9788 +extern struct tss_struct init_tss[NR_CPUS];
9789
9790 /*
9791 * Save the original ist values for checking stack pointers during debugging
9792 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9793 */
9794 #define TASK_SIZE PAGE_OFFSET
9795 #define TASK_SIZE_MAX TASK_SIZE
9796 +
9797 +#ifdef CONFIG_PAX_SEGMEXEC
9798 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9799 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9800 +#else
9801 #define STACK_TOP TASK_SIZE
9802 -#define STACK_TOP_MAX STACK_TOP
9803 +#endif
9804 +
9805 +#define STACK_TOP_MAX TASK_SIZE
9806
9807 #define INIT_THREAD { \
9808 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9809 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9810 .vm86_info = NULL, \
9811 .sysenter_cs = __KERNEL_CS, \
9812 .io_bitmap_ptr = NULL, \
9813 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9814 */
9815 #define INIT_TSS { \
9816 .x86_tss = { \
9817 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9818 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9819 .ss0 = __KERNEL_DS, \
9820 .ss1 = __KERNEL_CS, \
9821 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9822 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9823 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9824
9825 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9826 -#define KSTK_TOP(info) \
9827 -({ \
9828 - unsigned long *__ptr = (unsigned long *)(info); \
9829 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9830 -})
9831 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9832
9833 /*
9834 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9835 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9836 #define task_pt_regs(task) \
9837 ({ \
9838 struct pt_regs *__regs__; \
9839 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9840 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9841 __regs__ - 1; \
9842 })
9843
9844 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9845 /*
9846 * User space process size. 47bits minus one guard page.
9847 */
9848 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9849 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9850
9851 /* This decides where the kernel will search for a free chunk of vm
9852 * space during mmap's.
9853 */
9854 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9855 - 0xc0000000 : 0xFFFFe000)
9856 + 0xc0000000 : 0xFFFFf000)
9857
9858 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9859 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9860 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9861 #define STACK_TOP_MAX TASK_SIZE_MAX
9862
9863 #define INIT_THREAD { \
9864 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9865 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9866 }
9867
9868 #define INIT_TSS { \
9869 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9870 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9871 }
9872
9873 /*
9874 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9875 */
9876 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9877
9878 +#ifdef CONFIG_PAX_SEGMEXEC
9879 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9880 +#endif
9881 +
9882 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9883
9884 /* Get/set a process' ability to use the timestamp counter instruction */
9885 diff -urNp linux-2.6.32.43/arch/x86/include/asm/ptrace.h linux-2.6.32.43/arch/x86/include/asm/ptrace.h
9886 --- linux-2.6.32.43/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9887 +++ linux-2.6.32.43/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9888 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9889 }
9890
9891 /*
9892 - * user_mode_vm(regs) determines whether a register set came from user mode.
9893 + * user_mode(regs) determines whether a register set came from user mode.
9894 * This is true if V8086 mode was enabled OR if the register set was from
9895 * protected mode with RPL-3 CS value. This tricky test checks that with
9896 * one comparison. Many places in the kernel can bypass this full check
9897 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9898 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9899 + * be used.
9900 */
9901 -static inline int user_mode(struct pt_regs *regs)
9902 +static inline int user_mode_novm(struct pt_regs *regs)
9903 {
9904 #ifdef CONFIG_X86_32
9905 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9906 #else
9907 - return !!(regs->cs & 3);
9908 + return !!(regs->cs & SEGMENT_RPL_MASK);
9909 #endif
9910 }
9911
9912 -static inline int user_mode_vm(struct pt_regs *regs)
9913 +static inline int user_mode(struct pt_regs *regs)
9914 {
9915 #ifdef CONFIG_X86_32
9916 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9917 USER_RPL;
9918 #else
9919 - return user_mode(regs);
9920 + return user_mode_novm(regs);
9921 #endif
9922 }
9923
9924 diff -urNp linux-2.6.32.43/arch/x86/include/asm/reboot.h linux-2.6.32.43/arch/x86/include/asm/reboot.h
9925 --- linux-2.6.32.43/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9926 +++ linux-2.6.32.43/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9927 @@ -6,19 +6,19 @@
9928 struct pt_regs;
9929
9930 struct machine_ops {
9931 - void (*restart)(char *cmd);
9932 - void (*halt)(void);
9933 - void (*power_off)(void);
9934 + void (* __noreturn restart)(char *cmd);
9935 + void (* __noreturn halt)(void);
9936 + void (* __noreturn power_off)(void);
9937 void (*shutdown)(void);
9938 void (*crash_shutdown)(struct pt_regs *);
9939 - void (*emergency_restart)(void);
9940 + void (* __noreturn emergency_restart)(void);
9941 };
9942
9943 extern struct machine_ops machine_ops;
9944
9945 void native_machine_crash_shutdown(struct pt_regs *regs);
9946 void native_machine_shutdown(void);
9947 -void machine_real_restart(const unsigned char *code, int length);
9948 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9949
9950 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9951 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9952 diff -urNp linux-2.6.32.43/arch/x86/include/asm/rwsem.h linux-2.6.32.43/arch/x86/include/asm/rwsem.h
9953 --- linux-2.6.32.43/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9954 +++ linux-2.6.32.43/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9955 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9956 {
9957 asm volatile("# beginning down_read\n\t"
9958 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9959 +
9960 +#ifdef CONFIG_PAX_REFCOUNT
9961 + "jno 0f\n"
9962 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9963 + "int $4\n0:\n"
9964 + _ASM_EXTABLE(0b, 0b)
9965 +#endif
9966 +
9967 /* adds 0x00000001, returns the old value */
9968 " jns 1f\n"
9969 " call call_rwsem_down_read_failed\n"
9970 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9971 "1:\n\t"
9972 " mov %1,%2\n\t"
9973 " add %3,%2\n\t"
9974 +
9975 +#ifdef CONFIG_PAX_REFCOUNT
9976 + "jno 0f\n"
9977 + "sub %3,%2\n"
9978 + "int $4\n0:\n"
9979 + _ASM_EXTABLE(0b, 0b)
9980 +#endif
9981 +
9982 " jle 2f\n\t"
9983 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9984 " jnz 1b\n\t"
9985 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9986 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9987 asm volatile("# beginning down_write\n\t"
9988 LOCK_PREFIX " xadd %1,(%2)\n\t"
9989 +
9990 +#ifdef CONFIG_PAX_REFCOUNT
9991 + "jno 0f\n"
9992 + "mov %1,(%2)\n"
9993 + "int $4\n0:\n"
9994 + _ASM_EXTABLE(0b, 0b)
9995 +#endif
9996 +
9997 /* subtract 0x0000ffff, returns the old value */
9998 " test %1,%1\n\t"
9999 /* was the count 0 before? */
10000 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10001 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10002 asm volatile("# beginning __up_read\n\t"
10003 LOCK_PREFIX " xadd %1,(%2)\n\t"
10004 +
10005 +#ifdef CONFIG_PAX_REFCOUNT
10006 + "jno 0f\n"
10007 + "mov %1,(%2)\n"
10008 + "int $4\n0:\n"
10009 + _ASM_EXTABLE(0b, 0b)
10010 +#endif
10011 +
10012 /* subtracts 1, returns the old value */
10013 " jns 1f\n\t"
10014 " call call_rwsem_wake\n"
10015 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10016 rwsem_count_t tmp;
10017 asm volatile("# beginning __up_write\n\t"
10018 LOCK_PREFIX " xadd %1,(%2)\n\t"
10019 +
10020 +#ifdef CONFIG_PAX_REFCOUNT
10021 + "jno 0f\n"
10022 + "mov %1,(%2)\n"
10023 + "int $4\n0:\n"
10024 + _ASM_EXTABLE(0b, 0b)
10025 +#endif
10026 +
10027 /* tries to transition
10028 0xffff0001 -> 0x00000000 */
10029 " jz 1f\n"
10030 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10031 {
10032 asm volatile("# beginning __downgrade_write\n\t"
10033 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10034 +
10035 +#ifdef CONFIG_PAX_REFCOUNT
10036 + "jno 0f\n"
10037 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10038 + "int $4\n0:\n"
10039 + _ASM_EXTABLE(0b, 0b)
10040 +#endif
10041 +
10042 /*
10043 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10044 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10045 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10046 static inline void rwsem_atomic_add(rwsem_count_t delta,
10047 struct rw_semaphore *sem)
10048 {
10049 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10050 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10051 +
10052 +#ifdef CONFIG_PAX_REFCOUNT
10053 + "jno 0f\n"
10054 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10055 + "int $4\n0:\n"
10056 + _ASM_EXTABLE(0b, 0b)
10057 +#endif
10058 +
10059 : "+m" (sem->count)
10060 : "er" (delta));
10061 }
10062 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10063 {
10064 rwsem_count_t tmp = delta;
10065
10066 - asm volatile(LOCK_PREFIX "xadd %0,%1"
10067 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10068 +
10069 +#ifdef CONFIG_PAX_REFCOUNT
10070 + "jno 0f\n"
10071 + "mov %0,%1\n"
10072 + "int $4\n0:\n"
10073 + _ASM_EXTABLE(0b, 0b)
10074 +#endif
10075 +
10076 : "+r" (tmp), "+m" (sem->count)
10077 : : "memory");
10078
10079 diff -urNp linux-2.6.32.43/arch/x86/include/asm/segment.h linux-2.6.32.43/arch/x86/include/asm/segment.h
10080 --- linux-2.6.32.43/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10081 +++ linux-2.6.32.43/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10082 @@ -62,8 +62,8 @@
10083 * 26 - ESPFIX small SS
10084 * 27 - per-cpu [ offset to per-cpu data area ]
10085 * 28 - stack_canary-20 [ for stack protector ]
10086 - * 29 - unused
10087 - * 30 - unused
10088 + * 29 - PCI BIOS CS
10089 + * 30 - PCI BIOS DS
10090 * 31 - TSS for double fault handler
10091 */
10092 #define GDT_ENTRY_TLS_MIN 6
10093 @@ -77,6 +77,8 @@
10094
10095 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10096
10097 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10098 +
10099 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10100
10101 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10102 @@ -88,7 +90,7 @@
10103 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10104 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10105
10106 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10107 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10108 #ifdef CONFIG_SMP
10109 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10110 #else
10111 @@ -102,6 +104,12 @@
10112 #define __KERNEL_STACK_CANARY 0
10113 #endif
10114
10115 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10116 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10117 +
10118 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10119 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10120 +
10121 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10122
10123 /*
10124 @@ -139,7 +147,7 @@
10125 */
10126
10127 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10128 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10129 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10130
10131
10132 #else
10133 @@ -163,6 +171,8 @@
10134 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10135 #define __USER32_DS __USER_DS
10136
10137 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10138 +
10139 #define GDT_ENTRY_TSS 8 /* needs two entries */
10140 #define GDT_ENTRY_LDT 10 /* needs two entries */
10141 #define GDT_ENTRY_TLS_MIN 12
10142 @@ -183,6 +193,7 @@
10143 #endif
10144
10145 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10146 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10147 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10148 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10149 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10150 diff -urNp linux-2.6.32.43/arch/x86/include/asm/smp.h linux-2.6.32.43/arch/x86/include/asm/smp.h
10151 --- linux-2.6.32.43/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10152 +++ linux-2.6.32.43/arch/x86/include/asm/smp.h 2011-07-01 19:00:40.000000000 -0400
10153 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
10154 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10155 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10156 DECLARE_PER_CPU(u16, cpu_llc_id);
10157 -DECLARE_PER_CPU(int, cpu_number);
10158 +DECLARE_PER_CPU(unsigned int, cpu_number);
10159
10160 static inline struct cpumask *cpu_sibling_mask(int cpu)
10161 {
10162 @@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10163 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10164
10165 /* Static state in head.S used to set up a CPU */
10166 -extern struct {
10167 - void *sp;
10168 - unsigned short ss;
10169 -} stack_start;
10170 +extern unsigned long stack_start; /* Initial stack pointer address */
10171
10172 struct smp_ops {
10173 void (*smp_prepare_boot_cpu)(void);
10174 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10175 extern int safe_smp_processor_id(void);
10176
10177 #elif defined(CONFIG_X86_64_SMP)
10178 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10179 -
10180 -#define stack_smp_processor_id() \
10181 -({ \
10182 - struct thread_info *ti; \
10183 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10184 - ti->cpu; \
10185 -})
10186 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10187 +#define stack_smp_processor_id() raw_smp_processor_id()
10188 #define safe_smp_processor_id() smp_processor_id()
10189
10190 #endif
10191 diff -urNp linux-2.6.32.43/arch/x86/include/asm/spinlock.h linux-2.6.32.43/arch/x86/include/asm/spinlock.h
10192 --- linux-2.6.32.43/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10193 +++ linux-2.6.32.43/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10194 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10195 static inline void __raw_read_lock(raw_rwlock_t *rw)
10196 {
10197 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10198 +
10199 +#ifdef CONFIG_PAX_REFCOUNT
10200 + "jno 0f\n"
10201 + LOCK_PREFIX " addl $1,(%0)\n"
10202 + "int $4\n0:\n"
10203 + _ASM_EXTABLE(0b, 0b)
10204 +#endif
10205 +
10206 "jns 1f\n"
10207 "call __read_lock_failed\n\t"
10208 "1:\n"
10209 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10210 static inline void __raw_write_lock(raw_rwlock_t *rw)
10211 {
10212 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10213 +
10214 +#ifdef CONFIG_PAX_REFCOUNT
10215 + "jno 0f\n"
10216 + LOCK_PREFIX " addl %1,(%0)\n"
10217 + "int $4\n0:\n"
10218 + _ASM_EXTABLE(0b, 0b)
10219 +#endif
10220 +
10221 "jz 1f\n"
10222 "call __write_lock_failed\n\t"
10223 "1:\n"
10224 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10225
10226 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10227 {
10228 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10229 + asm volatile(LOCK_PREFIX "incl %0\n"
10230 +
10231 +#ifdef CONFIG_PAX_REFCOUNT
10232 + "jno 0f\n"
10233 + LOCK_PREFIX "decl %0\n"
10234 + "int $4\n0:\n"
10235 + _ASM_EXTABLE(0b, 0b)
10236 +#endif
10237 +
10238 + :"+m" (rw->lock) : : "memory");
10239 }
10240
10241 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10242 {
10243 - asm volatile(LOCK_PREFIX "addl %1, %0"
10244 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10245 +
10246 +#ifdef CONFIG_PAX_REFCOUNT
10247 + "jno 0f\n"
10248 + LOCK_PREFIX "subl %1, %0\n"
10249 + "int $4\n0:\n"
10250 + _ASM_EXTABLE(0b, 0b)
10251 +#endif
10252 +
10253 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10254 }
10255
10256 diff -urNp linux-2.6.32.43/arch/x86/include/asm/stackprotector.h linux-2.6.32.43/arch/x86/include/asm/stackprotector.h
10257 --- linux-2.6.32.43/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10258 +++ linux-2.6.32.43/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10259 @@ -48,7 +48,7 @@
10260 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10261 */
10262 #define GDT_STACK_CANARY_INIT \
10263 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10264 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10265
10266 /*
10267 * Initialize the stackprotector canary value.
10268 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10269
10270 static inline void load_stack_canary_segment(void)
10271 {
10272 -#ifdef CONFIG_X86_32
10273 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10274 asm volatile ("mov %0, %%gs" : : "r" (0));
10275 #endif
10276 }
10277 diff -urNp linux-2.6.32.43/arch/x86/include/asm/system.h linux-2.6.32.43/arch/x86/include/asm/system.h
10278 --- linux-2.6.32.43/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10279 +++ linux-2.6.32.43/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10280 @@ -132,7 +132,7 @@ do { \
10281 "thread_return:\n\t" \
10282 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10283 __switch_canary \
10284 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10285 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10286 "movq %%rax,%%rdi\n\t" \
10287 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10288 "jnz ret_from_fork\n\t" \
10289 @@ -143,7 +143,7 @@ do { \
10290 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10291 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10292 [_tif_fork] "i" (_TIF_FORK), \
10293 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10294 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10295 [current_task] "m" (per_cpu_var(current_task)) \
10296 __switch_canary_iparam \
10297 : "memory", "cc" __EXTRA_CLOBBER)
10298 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10299 {
10300 unsigned long __limit;
10301 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10302 - return __limit + 1;
10303 + return __limit;
10304 }
10305
10306 static inline void native_clts(void)
10307 @@ -340,12 +340,12 @@ void enable_hlt(void);
10308
10309 void cpu_idle_wait(void);
10310
10311 -extern unsigned long arch_align_stack(unsigned long sp);
10312 +#define arch_align_stack(x) ((x) & ~0xfUL)
10313 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10314
10315 void default_idle(void);
10316
10317 -void stop_this_cpu(void *dummy);
10318 +void stop_this_cpu(void *dummy) __noreturn;
10319
10320 /*
10321 * Force strict CPU ordering.
10322 diff -urNp linux-2.6.32.43/arch/x86/include/asm/thread_info.h linux-2.6.32.43/arch/x86/include/asm/thread_info.h
10323 --- linux-2.6.32.43/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10324 +++ linux-2.6.32.43/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10325 @@ -10,6 +10,7 @@
10326 #include <linux/compiler.h>
10327 #include <asm/page.h>
10328 #include <asm/types.h>
10329 +#include <asm/percpu.h>
10330
10331 /*
10332 * low level task data that entry.S needs immediate access to
10333 @@ -24,7 +25,6 @@ struct exec_domain;
10334 #include <asm/atomic.h>
10335
10336 struct thread_info {
10337 - struct task_struct *task; /* main task structure */
10338 struct exec_domain *exec_domain; /* execution domain */
10339 __u32 flags; /* low level flags */
10340 __u32 status; /* thread synchronous flags */
10341 @@ -34,18 +34,12 @@ struct thread_info {
10342 mm_segment_t addr_limit;
10343 struct restart_block restart_block;
10344 void __user *sysenter_return;
10345 -#ifdef CONFIG_X86_32
10346 - unsigned long previous_esp; /* ESP of the previous stack in
10347 - case of nested (IRQ) stacks
10348 - */
10349 - __u8 supervisor_stack[0];
10350 -#endif
10351 + unsigned long lowest_stack;
10352 int uaccess_err;
10353 };
10354
10355 -#define INIT_THREAD_INFO(tsk) \
10356 +#define INIT_THREAD_INFO \
10357 { \
10358 - .task = &tsk, \
10359 .exec_domain = &default_exec_domain, \
10360 .flags = 0, \
10361 .cpu = 0, \
10362 @@ -56,7 +50,7 @@ struct thread_info {
10363 }, \
10364 }
10365
10366 -#define init_thread_info (init_thread_union.thread_info)
10367 +#define init_thread_info (init_thread_union.stack)
10368 #define init_stack (init_thread_union.stack)
10369
10370 #else /* !__ASSEMBLY__ */
10371 @@ -163,6 +157,23 @@ struct thread_info {
10372 #define alloc_thread_info(tsk) \
10373 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10374
10375 +#ifdef __ASSEMBLY__
10376 +/* how to get the thread information struct from ASM */
10377 +#define GET_THREAD_INFO(reg) \
10378 + mov PER_CPU_VAR(current_tinfo), reg
10379 +
10380 +/* use this one if reg already contains %esp */
10381 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10382 +#else
10383 +/* how to get the thread information struct from C */
10384 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10385 +
10386 +static __always_inline struct thread_info *current_thread_info(void)
10387 +{
10388 + return percpu_read_stable(current_tinfo);
10389 +}
10390 +#endif
10391 +
10392 #ifdef CONFIG_X86_32
10393
10394 #define STACK_WARN (THREAD_SIZE/8)
10395 @@ -173,35 +184,13 @@ struct thread_info {
10396 */
10397 #ifndef __ASSEMBLY__
10398
10399 -
10400 /* how to get the current stack pointer from C */
10401 register unsigned long current_stack_pointer asm("esp") __used;
10402
10403 -/* how to get the thread information struct from C */
10404 -static inline struct thread_info *current_thread_info(void)
10405 -{
10406 - return (struct thread_info *)
10407 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10408 -}
10409 -
10410 -#else /* !__ASSEMBLY__ */
10411 -
10412 -/* how to get the thread information struct from ASM */
10413 -#define GET_THREAD_INFO(reg) \
10414 - movl $-THREAD_SIZE, reg; \
10415 - andl %esp, reg
10416 -
10417 -/* use this one if reg already contains %esp */
10418 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10419 - andl $-THREAD_SIZE, reg
10420 -
10421 #endif
10422
10423 #else /* X86_32 */
10424
10425 -#include <asm/percpu.h>
10426 -#define KERNEL_STACK_OFFSET (5*8)
10427 -
10428 /*
10429 * macros/functions for gaining access to the thread information structure
10430 * preempt_count needs to be 1 initially, until the scheduler is functional.
10431 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10432 #ifndef __ASSEMBLY__
10433 DECLARE_PER_CPU(unsigned long, kernel_stack);
10434
10435 -static inline struct thread_info *current_thread_info(void)
10436 -{
10437 - struct thread_info *ti;
10438 - ti = (void *)(percpu_read_stable(kernel_stack) +
10439 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10440 - return ti;
10441 -}
10442 -
10443 -#else /* !__ASSEMBLY__ */
10444 -
10445 -/* how to get the thread information struct from ASM */
10446 -#define GET_THREAD_INFO(reg) \
10447 - movq PER_CPU_VAR(kernel_stack),reg ; \
10448 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10449 -
10450 +/* how to get the current stack pointer from C */
10451 +register unsigned long current_stack_pointer asm("rsp") __used;
10452 #endif
10453
10454 #endif /* !X86_32 */
10455 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10456 extern void free_thread_info(struct thread_info *ti);
10457 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10458 #define arch_task_cache_init arch_task_cache_init
10459 +
10460 +#define __HAVE_THREAD_FUNCTIONS
10461 +#define task_thread_info(task) (&(task)->tinfo)
10462 +#define task_stack_page(task) ((task)->stack)
10463 +#define setup_thread_stack(p, org) do {} while (0)
10464 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10465 +
10466 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10467 +extern struct task_struct *alloc_task_struct(void);
10468 +extern void free_task_struct(struct task_struct *);
10469 +
10470 #endif
10471 #endif /* _ASM_X86_THREAD_INFO_H */
10472 diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h
10473 --- linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10474 +++ linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10475 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10476 static __always_inline unsigned long __must_check
10477 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10478 {
10479 + pax_track_stack();
10480 +
10481 + if ((long)n < 0)
10482 + return n;
10483 +
10484 if (__builtin_constant_p(n)) {
10485 unsigned long ret;
10486
10487 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10488 return ret;
10489 }
10490 }
10491 + if (!__builtin_constant_p(n))
10492 + check_object_size(from, n, true);
10493 return __copy_to_user_ll(to, from, n);
10494 }
10495
10496 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10497 __copy_to_user(void __user *to, const void *from, unsigned long n)
10498 {
10499 might_fault();
10500 +
10501 return __copy_to_user_inatomic(to, from, n);
10502 }
10503
10504 static __always_inline unsigned long
10505 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10506 {
10507 + if ((long)n < 0)
10508 + return n;
10509 +
10510 /* Avoid zeroing the tail if the copy fails..
10511 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10512 * but as the zeroing behaviour is only significant when n is not
10513 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10514 __copy_from_user(void *to, const void __user *from, unsigned long n)
10515 {
10516 might_fault();
10517 +
10518 + pax_track_stack();
10519 +
10520 + if ((long)n < 0)
10521 + return n;
10522 +
10523 if (__builtin_constant_p(n)) {
10524 unsigned long ret;
10525
10526 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10527 return ret;
10528 }
10529 }
10530 + if (!__builtin_constant_p(n))
10531 + check_object_size(to, n, false);
10532 return __copy_from_user_ll(to, from, n);
10533 }
10534
10535 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10536 const void __user *from, unsigned long n)
10537 {
10538 might_fault();
10539 +
10540 + if ((long)n < 0)
10541 + return n;
10542 +
10543 if (__builtin_constant_p(n)) {
10544 unsigned long ret;
10545
10546 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10547 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10548 unsigned long n)
10549 {
10550 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10551 + if ((long)n < 0)
10552 + return n;
10553 +
10554 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10555 +}
10556 +
10557 +/**
10558 + * copy_to_user: - Copy a block of data into user space.
10559 + * @to: Destination address, in user space.
10560 + * @from: Source address, in kernel space.
10561 + * @n: Number of bytes to copy.
10562 + *
10563 + * Context: User context only. This function may sleep.
10564 + *
10565 + * Copy data from kernel space to user space.
10566 + *
10567 + * Returns number of bytes that could not be copied.
10568 + * On success, this will be zero.
10569 + */
10570 +static __always_inline unsigned long __must_check
10571 +copy_to_user(void __user *to, const void *from, unsigned long n)
10572 +{
10573 + if (access_ok(VERIFY_WRITE, to, n))
10574 + n = __copy_to_user(to, from, n);
10575 + return n;
10576 +}
10577 +
10578 +/**
10579 + * copy_from_user: - Copy a block of data from user space.
10580 + * @to: Destination address, in kernel space.
10581 + * @from: Source address, in user space.
10582 + * @n: Number of bytes to copy.
10583 + *
10584 + * Context: User context only. This function may sleep.
10585 + *
10586 + * Copy data from user space to kernel space.
10587 + *
10588 + * Returns number of bytes that could not be copied.
10589 + * On success, this will be zero.
10590 + *
10591 + * If some data could not be copied, this function will pad the copied
10592 + * data to the requested size using zero bytes.
10593 + */
10594 +static __always_inline unsigned long __must_check
10595 +copy_from_user(void *to, const void __user *from, unsigned long n)
10596 +{
10597 + if (access_ok(VERIFY_READ, from, n))
10598 + n = __copy_from_user(to, from, n);
10599 + else if ((long)n > 0) {
10600 + if (!__builtin_constant_p(n))
10601 + check_object_size(to, n, false);
10602 + memset(to, 0, n);
10603 + }
10604 + return n;
10605 }
10606
10607 -unsigned long __must_check copy_to_user(void __user *to,
10608 - const void *from, unsigned long n);
10609 -unsigned long __must_check copy_from_user(void *to,
10610 - const void __user *from,
10611 - unsigned long n);
10612 long __must_check strncpy_from_user(char *dst, const char __user *src,
10613 long count);
10614 long __must_check __strncpy_from_user(char *dst,
10615 diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h
10616 --- linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10617 +++ linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10618 @@ -9,6 +9,9 @@
10619 #include <linux/prefetch.h>
10620 #include <linux/lockdep.h>
10621 #include <asm/page.h>
10622 +#include <asm/pgtable.h>
10623 +
10624 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10625
10626 /*
10627 * Copy To/From Userspace
10628 @@ -19,113 +22,203 @@ __must_check unsigned long
10629 copy_user_generic(void *to, const void *from, unsigned len);
10630
10631 __must_check unsigned long
10632 -copy_to_user(void __user *to, const void *from, unsigned len);
10633 -__must_check unsigned long
10634 -copy_from_user(void *to, const void __user *from, unsigned len);
10635 -__must_check unsigned long
10636 copy_in_user(void __user *to, const void __user *from, unsigned len);
10637
10638 static __always_inline __must_check
10639 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10640 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10641 {
10642 - int ret = 0;
10643 + unsigned ret = 0;
10644
10645 might_fault();
10646 - if (!__builtin_constant_p(size))
10647 - return copy_user_generic(dst, (__force void *)src, size);
10648 +
10649 + if ((int)size < 0)
10650 + return size;
10651 +
10652 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10653 + if (!__access_ok(VERIFY_READ, src, size))
10654 + return size;
10655 +#endif
10656 +
10657 + if (!__builtin_constant_p(size)) {
10658 + check_object_size(dst, size, false);
10659 +
10660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10661 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10662 + src += PAX_USER_SHADOW_BASE;
10663 +#endif
10664 +
10665 + return copy_user_generic(dst, (__force const void *)src, size);
10666 + }
10667 switch (size) {
10668 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10669 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10670 ret, "b", "b", "=q", 1);
10671 return ret;
10672 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10673 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10674 ret, "w", "w", "=r", 2);
10675 return ret;
10676 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10677 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10678 ret, "l", "k", "=r", 4);
10679 return ret;
10680 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10681 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10682 ret, "q", "", "=r", 8);
10683 return ret;
10684 case 10:
10685 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10686 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10687 ret, "q", "", "=r", 10);
10688 if (unlikely(ret))
10689 return ret;
10690 __get_user_asm(*(u16 *)(8 + (char *)dst),
10691 - (u16 __user *)(8 + (char __user *)src),
10692 + (const u16 __user *)(8 + (const char __user *)src),
10693 ret, "w", "w", "=r", 2);
10694 return ret;
10695 case 16:
10696 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10697 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10698 ret, "q", "", "=r", 16);
10699 if (unlikely(ret))
10700 return ret;
10701 __get_user_asm(*(u64 *)(8 + (char *)dst),
10702 - (u64 __user *)(8 + (char __user *)src),
10703 + (const u64 __user *)(8 + (const char __user *)src),
10704 ret, "q", "", "=r", 8);
10705 return ret;
10706 default:
10707 - return copy_user_generic(dst, (__force void *)src, size);
10708 +
10709 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10710 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10711 + src += PAX_USER_SHADOW_BASE;
10712 +#endif
10713 +
10714 + return copy_user_generic(dst, (__force const void *)src, size);
10715 }
10716 }
10717
10718 static __always_inline __must_check
10719 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10720 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10721 {
10722 - int ret = 0;
10723 + unsigned ret = 0;
10724
10725 might_fault();
10726 - if (!__builtin_constant_p(size))
10727 +
10728 + pax_track_stack();
10729 +
10730 + if ((int)size < 0)
10731 + return size;
10732 +
10733 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10734 + if (!__access_ok(VERIFY_WRITE, dst, size))
10735 + return size;
10736 +#endif
10737 +
10738 + if (!__builtin_constant_p(size)) {
10739 + check_object_size(src, size, true);
10740 +
10741 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10742 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10743 + dst += PAX_USER_SHADOW_BASE;
10744 +#endif
10745 +
10746 return copy_user_generic((__force void *)dst, src, size);
10747 + }
10748 switch (size) {
10749 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10750 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10751 ret, "b", "b", "iq", 1);
10752 return ret;
10753 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10754 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10755 ret, "w", "w", "ir", 2);
10756 return ret;
10757 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10758 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10759 ret, "l", "k", "ir", 4);
10760 return ret;
10761 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10762 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10763 ret, "q", "", "er", 8);
10764 return ret;
10765 case 10:
10766 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10767 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10768 ret, "q", "", "er", 10);
10769 if (unlikely(ret))
10770 return ret;
10771 asm("":::"memory");
10772 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10773 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10774 ret, "w", "w", "ir", 2);
10775 return ret;
10776 case 16:
10777 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10778 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10779 ret, "q", "", "er", 16);
10780 if (unlikely(ret))
10781 return ret;
10782 asm("":::"memory");
10783 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10784 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10785 ret, "q", "", "er", 8);
10786 return ret;
10787 default:
10788 +
10789 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10790 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10791 + dst += PAX_USER_SHADOW_BASE;
10792 +#endif
10793 +
10794 return copy_user_generic((__force void *)dst, src, size);
10795 }
10796 }
10797
10798 static __always_inline __must_check
10799 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10800 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10801 +{
10802 + if (access_ok(VERIFY_WRITE, to, len))
10803 + len = __copy_to_user(to, from, len);
10804 + return len;
10805 +}
10806 +
10807 +static __always_inline __must_check
10808 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10809 +{
10810 + if ((int)len < 0)
10811 + return len;
10812 +
10813 + if (access_ok(VERIFY_READ, from, len))
10814 + len = __copy_from_user(to, from, len);
10815 + else if ((int)len > 0) {
10816 + if (!__builtin_constant_p(len))
10817 + check_object_size(to, len, false);
10818 + memset(to, 0, len);
10819 + }
10820 + return len;
10821 +}
10822 +
10823 +static __always_inline __must_check
10824 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10825 {
10826 - int ret = 0;
10827 + unsigned ret = 0;
10828
10829 might_fault();
10830 - if (!__builtin_constant_p(size))
10831 +
10832 + pax_track_stack();
10833 +
10834 + if ((int)size < 0)
10835 + return size;
10836 +
10837 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10838 + if (!__access_ok(VERIFY_READ, src, size))
10839 + return size;
10840 + if (!__access_ok(VERIFY_WRITE, dst, size))
10841 + return size;
10842 +#endif
10843 +
10844 + if (!__builtin_constant_p(size)) {
10845 +
10846 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10847 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10848 + src += PAX_USER_SHADOW_BASE;
10849 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10850 + dst += PAX_USER_SHADOW_BASE;
10851 +#endif
10852 +
10853 return copy_user_generic((__force void *)dst,
10854 - (__force void *)src, size);
10855 + (__force const void *)src, size);
10856 + }
10857 switch (size) {
10858 case 1: {
10859 u8 tmp;
10860 - __get_user_asm(tmp, (u8 __user *)src,
10861 + __get_user_asm(tmp, (const u8 __user *)src,
10862 ret, "b", "b", "=q", 1);
10863 if (likely(!ret))
10864 __put_user_asm(tmp, (u8 __user *)dst,
10865 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10866 }
10867 case 2: {
10868 u16 tmp;
10869 - __get_user_asm(tmp, (u16 __user *)src,
10870 + __get_user_asm(tmp, (const u16 __user *)src,
10871 ret, "w", "w", "=r", 2);
10872 if (likely(!ret))
10873 __put_user_asm(tmp, (u16 __user *)dst,
10874 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10875
10876 case 4: {
10877 u32 tmp;
10878 - __get_user_asm(tmp, (u32 __user *)src,
10879 + __get_user_asm(tmp, (const u32 __user *)src,
10880 ret, "l", "k", "=r", 4);
10881 if (likely(!ret))
10882 __put_user_asm(tmp, (u32 __user *)dst,
10883 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10884 }
10885 case 8: {
10886 u64 tmp;
10887 - __get_user_asm(tmp, (u64 __user *)src,
10888 + __get_user_asm(tmp, (const u64 __user *)src,
10889 ret, "q", "", "=r", 8);
10890 if (likely(!ret))
10891 __put_user_asm(tmp, (u64 __user *)dst,
10892 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10893 return ret;
10894 }
10895 default:
10896 +
10897 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10898 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10899 + src += PAX_USER_SHADOW_BASE;
10900 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10901 + dst += PAX_USER_SHADOW_BASE;
10902 +#endif
10903 +
10904 return copy_user_generic((__force void *)dst,
10905 - (__force void *)src, size);
10906 + (__force const void *)src, size);
10907 }
10908 }
10909
10910 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10911 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10912 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10913
10914 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10915 - unsigned size);
10916 +static __must_check __always_inline unsigned long
10917 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10918 +{
10919 + pax_track_stack();
10920 +
10921 + if ((int)size < 0)
10922 + return size;
10923
10924 -static __must_check __always_inline int
10925 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10926 + if (!__access_ok(VERIFY_READ, src, size))
10927 + return size;
10928 +
10929 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10930 + src += PAX_USER_SHADOW_BASE;
10931 +#endif
10932 +
10933 + return copy_user_generic(dst, (__force const void *)src, size);
10934 +}
10935 +
10936 +static __must_check __always_inline unsigned long
10937 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10938 {
10939 + if ((int)size < 0)
10940 + return size;
10941 +
10942 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10943 + if (!__access_ok(VERIFY_WRITE, dst, size))
10944 + return size;
10945 +
10946 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10947 + dst += PAX_USER_SHADOW_BASE;
10948 +#endif
10949 +
10950 return copy_user_generic((__force void *)dst, src, size);
10951 }
10952
10953 -extern long __copy_user_nocache(void *dst, const void __user *src,
10954 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10955 unsigned size, int zerorest);
10956
10957 -static inline int
10958 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10959 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10960 {
10961 might_sleep();
10962 +
10963 + if ((int)size < 0)
10964 + return size;
10965 +
10966 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10967 + if (!__access_ok(VERIFY_READ, src, size))
10968 + return size;
10969 +#endif
10970 +
10971 return __copy_user_nocache(dst, src, size, 1);
10972 }
10973
10974 -static inline int
10975 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10976 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10977 unsigned size)
10978 {
10979 + if ((int)size < 0)
10980 + return size;
10981 +
10982 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10983 + if (!__access_ok(VERIFY_READ, src, size))
10984 + return size;
10985 +#endif
10986 +
10987 return __copy_user_nocache(dst, src, size, 0);
10988 }
10989
10990 -unsigned long
10991 +extern unsigned long
10992 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10993
10994 #endif /* _ASM_X86_UACCESS_64_H */
10995 diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess.h linux-2.6.32.43/arch/x86/include/asm/uaccess.h
10996 --- linux-2.6.32.43/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
10997 +++ linux-2.6.32.43/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
10998 @@ -8,12 +8,15 @@
10999 #include <linux/thread_info.h>
11000 #include <linux/prefetch.h>
11001 #include <linux/string.h>
11002 +#include <linux/sched.h>
11003 #include <asm/asm.h>
11004 #include <asm/page.h>
11005
11006 #define VERIFY_READ 0
11007 #define VERIFY_WRITE 1
11008
11009 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
11010 +
11011 /*
11012 * The fs value determines whether argument validity checking should be
11013 * performed or not. If get_fs() == USER_DS, checking is performed, with
11014 @@ -29,7 +32,12 @@
11015
11016 #define get_ds() (KERNEL_DS)
11017 #define get_fs() (current_thread_info()->addr_limit)
11018 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11019 +void __set_fs(mm_segment_t x);
11020 +void set_fs(mm_segment_t x);
11021 +#else
11022 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11023 +#endif
11024
11025 #define segment_eq(a, b) ((a).seg == (b).seg)
11026
11027 @@ -77,7 +85,33 @@
11028 * checks that the pointer is in the user space range - after calling
11029 * this function, memory access functions may still return -EFAULT.
11030 */
11031 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11032 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11033 +#define access_ok(type, addr, size) \
11034 +({ \
11035 + long __size = size; \
11036 + unsigned long __addr = (unsigned long)addr; \
11037 + unsigned long __addr_ao = __addr & PAGE_MASK; \
11038 + unsigned long __end_ao = __addr + __size - 1; \
11039 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11040 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11041 + while(__addr_ao <= __end_ao) { \
11042 + char __c_ao; \
11043 + __addr_ao += PAGE_SIZE; \
11044 + if (__size > PAGE_SIZE) \
11045 + cond_resched(); \
11046 + if (__get_user(__c_ao, (char __user *)__addr)) \
11047 + break; \
11048 + if (type != VERIFY_WRITE) { \
11049 + __addr = __addr_ao; \
11050 + continue; \
11051 + } \
11052 + if (__put_user(__c_ao, (char __user *)__addr)) \
11053 + break; \
11054 + __addr = __addr_ao; \
11055 + } \
11056 + } \
11057 + __ret_ao; \
11058 +})
11059
11060 /*
11061 * The exception table consists of pairs of addresses: the first is the
11062 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11063 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11064 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11065
11066 -
11067 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11068 +#define __copyuser_seg "gs;"
11069 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11070 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11071 +#else
11072 +#define __copyuser_seg
11073 +#define __COPYUSER_SET_ES
11074 +#define __COPYUSER_RESTORE_ES
11075 +#endif
11076
11077 #ifdef CONFIG_X86_32
11078 #define __put_user_asm_u64(x, addr, err, errret) \
11079 - asm volatile("1: movl %%eax,0(%2)\n" \
11080 - "2: movl %%edx,4(%2)\n" \
11081 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11082 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11083 "3:\n" \
11084 ".section .fixup,\"ax\"\n" \
11085 "4: movl %3,%0\n" \
11086 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11087 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11088
11089 #define __put_user_asm_ex_u64(x, addr) \
11090 - asm volatile("1: movl %%eax,0(%1)\n" \
11091 - "2: movl %%edx,4(%1)\n" \
11092 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11093 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11094 "3:\n" \
11095 _ASM_EXTABLE(1b, 2b - 1b) \
11096 _ASM_EXTABLE(2b, 3b - 2b) \
11097 @@ -374,7 +416,7 @@ do { \
11098 } while (0)
11099
11100 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11101 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11102 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11103 "2:\n" \
11104 ".section .fixup,\"ax\"\n" \
11105 "3: mov %3,%0\n" \
11106 @@ -382,7 +424,7 @@ do { \
11107 " jmp 2b\n" \
11108 ".previous\n" \
11109 _ASM_EXTABLE(1b, 3b) \
11110 - : "=r" (err), ltype(x) \
11111 + : "=r" (err), ltype (x) \
11112 : "m" (__m(addr)), "i" (errret), "0" (err))
11113
11114 #define __get_user_size_ex(x, ptr, size) \
11115 @@ -407,7 +449,7 @@ do { \
11116 } while (0)
11117
11118 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11119 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11120 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11121 "2:\n" \
11122 _ASM_EXTABLE(1b, 2b - 1b) \
11123 : ltype(x) : "m" (__m(addr)))
11124 @@ -424,13 +466,24 @@ do { \
11125 int __gu_err; \
11126 unsigned long __gu_val; \
11127 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11128 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
11129 + (x) = (__typeof__(*(ptr)))__gu_val; \
11130 __gu_err; \
11131 })
11132
11133 /* FIXME: this hack is definitely wrong -AK */
11134 struct __large_struct { unsigned long buf[100]; };
11135 -#define __m(x) (*(struct __large_struct __user *)(x))
11136 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11137 +#define ____m(x) \
11138 +({ \
11139 + unsigned long ____x = (unsigned long)(x); \
11140 + if (____x < PAX_USER_SHADOW_BASE) \
11141 + ____x += PAX_USER_SHADOW_BASE; \
11142 + (void __user *)____x; \
11143 +})
11144 +#else
11145 +#define ____m(x) (x)
11146 +#endif
11147 +#define __m(x) (*(struct __large_struct __user *)____m(x))
11148
11149 /*
11150 * Tell gcc we read from memory instead of writing: this is because
11151 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11152 * aliasing issues.
11153 */
11154 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11155 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11156 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11157 "2:\n" \
11158 ".section .fixup,\"ax\"\n" \
11159 "3: mov %3,%0\n" \
11160 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11161 ".previous\n" \
11162 _ASM_EXTABLE(1b, 3b) \
11163 : "=r"(err) \
11164 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11165 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11166
11167 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11168 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11169 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11170 "2:\n" \
11171 _ASM_EXTABLE(1b, 2b - 1b) \
11172 : : ltype(x), "m" (__m(addr)))
11173 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11174 * On error, the variable @x is set to zero.
11175 */
11176
11177 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11178 +#define __get_user(x, ptr) get_user((x), (ptr))
11179 +#else
11180 #define __get_user(x, ptr) \
11181 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11182 +#endif
11183
11184 /**
11185 * __put_user: - Write a simple value into user space, with less checking.
11186 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11187 * Returns zero on success, or -EFAULT on error.
11188 */
11189
11190 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11191 +#define __put_user(x, ptr) put_user((x), (ptr))
11192 +#else
11193 #define __put_user(x, ptr) \
11194 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11195 +#endif
11196
11197 #define __get_user_unaligned __get_user
11198 #define __put_user_unaligned __put_user
11199 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11200 #define get_user_ex(x, ptr) do { \
11201 unsigned long __gue_val; \
11202 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11203 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
11204 + (x) = (__typeof__(*(ptr)))__gue_val; \
11205 } while (0)
11206
11207 #ifdef CONFIG_X86_WP_WORKS_OK
11208 @@ -567,6 +628,7 @@ extern struct movsl_mask {
11209
11210 #define ARCH_HAS_NOCACHE_UACCESS 1
11211
11212 +#define ARCH_HAS_SORT_EXTABLE
11213 #ifdef CONFIG_X86_32
11214 # include "uaccess_32.h"
11215 #else
11216 diff -urNp linux-2.6.32.43/arch/x86/include/asm/vgtod.h linux-2.6.32.43/arch/x86/include/asm/vgtod.h
11217 --- linux-2.6.32.43/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11218 +++ linux-2.6.32.43/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11219 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11220 int sysctl_enabled;
11221 struct timezone sys_tz;
11222 struct { /* extract of a clocksource struct */
11223 + char name[8];
11224 cycle_t (*vread)(void);
11225 cycle_t cycle_last;
11226 cycle_t mask;
11227 diff -urNp linux-2.6.32.43/arch/x86/include/asm/vmi.h linux-2.6.32.43/arch/x86/include/asm/vmi.h
11228 --- linux-2.6.32.43/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11229 +++ linux-2.6.32.43/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11230 @@ -191,6 +191,7 @@ struct vrom_header {
11231 u8 reserved[96]; /* Reserved for headers */
11232 char vmi_init[8]; /* VMI_Init jump point */
11233 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11234 + char rom_data[8048]; /* rest of the option ROM */
11235 } __attribute__((packed));
11236
11237 struct pnp_header {
11238 diff -urNp linux-2.6.32.43/arch/x86/include/asm/vsyscall.h linux-2.6.32.43/arch/x86/include/asm/vsyscall.h
11239 --- linux-2.6.32.43/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11240 +++ linux-2.6.32.43/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11241 @@ -15,9 +15,10 @@ enum vsyscall_num {
11242
11243 #ifdef __KERNEL__
11244 #include <linux/seqlock.h>
11245 +#include <linux/getcpu.h>
11246 +#include <linux/time.h>
11247
11248 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11249 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11250
11251 /* Definitions for CONFIG_GENERIC_TIME definitions */
11252 #define __section_vsyscall_gtod_data __attribute__ \
11253 @@ -31,7 +32,6 @@ enum vsyscall_num {
11254 #define VGETCPU_LSL 2
11255
11256 extern int __vgetcpu_mode;
11257 -extern volatile unsigned long __jiffies;
11258
11259 /* kernel space (writeable) */
11260 extern int vgetcpu_mode;
11261 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11262
11263 extern void map_vsyscall(void);
11264
11265 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11266 +extern time_t vtime(time_t *t);
11267 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11268 #endif /* __KERNEL__ */
11269
11270 #endif /* _ASM_X86_VSYSCALL_H */
11271 diff -urNp linux-2.6.32.43/arch/x86/include/asm/xsave.h linux-2.6.32.43/arch/x86/include/asm/xsave.h
11272 --- linux-2.6.32.43/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11273 +++ linux-2.6.32.43/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11274 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11275 static inline int xsave_user(struct xsave_struct __user *buf)
11276 {
11277 int err;
11278 +
11279 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11280 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11281 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11282 +#endif
11283 +
11284 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11285 "2:\n"
11286 ".section .fixup,\"ax\"\n"
11287 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11288 u32 lmask = mask;
11289 u32 hmask = mask >> 32;
11290
11291 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11292 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11293 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11294 +#endif
11295 +
11296 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11297 "2:\n"
11298 ".section .fixup,\"ax\"\n"
11299 diff -urNp linux-2.6.32.43/arch/x86/Kconfig linux-2.6.32.43/arch/x86/Kconfig
11300 --- linux-2.6.32.43/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11301 +++ linux-2.6.32.43/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11302 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11303
11304 config X86_32_LAZY_GS
11305 def_bool y
11306 - depends on X86_32 && !CC_STACKPROTECTOR
11307 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11308
11309 config KTIME_SCALAR
11310 def_bool X86_32
11311 @@ -1008,7 +1008,7 @@ choice
11312
11313 config NOHIGHMEM
11314 bool "off"
11315 - depends on !X86_NUMAQ
11316 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11317 ---help---
11318 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11319 However, the address space of 32-bit x86 processors is only 4
11320 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11321
11322 config HIGHMEM4G
11323 bool "4GB"
11324 - depends on !X86_NUMAQ
11325 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11326 ---help---
11327 Select this if you have a 32-bit processor and between 1 and 4
11328 gigabytes of physical RAM.
11329 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11330 hex
11331 default 0xB0000000 if VMSPLIT_3G_OPT
11332 default 0x80000000 if VMSPLIT_2G
11333 - default 0x78000000 if VMSPLIT_2G_OPT
11334 + default 0x70000000 if VMSPLIT_2G_OPT
11335 default 0x40000000 if VMSPLIT_1G
11336 default 0xC0000000
11337 depends on X86_32
11338 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11339
11340 config EFI
11341 bool "EFI runtime service support"
11342 - depends on ACPI
11343 + depends on ACPI && !PAX_KERNEXEC
11344 ---help---
11345 This enables the kernel to use EFI runtime services that are
11346 available (such as the EFI variable services).
11347 @@ -1460,6 +1460,7 @@ config SECCOMP
11348
11349 config CC_STACKPROTECTOR
11350 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11351 + depends on X86_64 || !PAX_MEMORY_UDEREF
11352 ---help---
11353 This option turns on the -fstack-protector GCC feature. This
11354 feature puts, at the beginning of functions, a canary value on
11355 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11356 config PHYSICAL_START
11357 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11358 default "0x1000000"
11359 + range 0x400000 0x40000000
11360 ---help---
11361 This gives the physical address where the kernel is loaded.
11362
11363 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11364 hex
11365 prompt "Alignment value to which kernel should be aligned" if X86_32
11366 default "0x1000000"
11367 + range 0x400000 0x1000000 if PAX_KERNEXEC
11368 range 0x2000 0x1000000
11369 ---help---
11370 This value puts the alignment restrictions on physical address
11371 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11372 Say N if you want to disable CPU hotplug.
11373
11374 config COMPAT_VDSO
11375 - def_bool y
11376 + def_bool n
11377 prompt "Compat VDSO support"
11378 depends on X86_32 || IA32_EMULATION
11379 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11380 ---help---
11381 Map the 32-bit VDSO to the predictable old-style address too.
11382
11383 diff -urNp linux-2.6.32.43/arch/x86/Kconfig.cpu linux-2.6.32.43/arch/x86/Kconfig.cpu
11384 --- linux-2.6.32.43/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11385 +++ linux-2.6.32.43/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11386 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11387
11388 config X86_F00F_BUG
11389 def_bool y
11390 - depends on M586MMX || M586TSC || M586 || M486 || M386
11391 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11392
11393 config X86_WP_WORKS_OK
11394 def_bool y
11395 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11396
11397 config X86_ALIGNMENT_16
11398 def_bool y
11399 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11400 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11401
11402 config X86_INTEL_USERCOPY
11403 def_bool y
11404 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11405 # generates cmov.
11406 config X86_CMOV
11407 def_bool y
11408 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11409 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11410
11411 config X86_MINIMUM_CPU_FAMILY
11412 int
11413 diff -urNp linux-2.6.32.43/arch/x86/Kconfig.debug linux-2.6.32.43/arch/x86/Kconfig.debug
11414 --- linux-2.6.32.43/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11415 +++ linux-2.6.32.43/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11416 @@ -99,7 +99,7 @@ config X86_PTDUMP
11417 config DEBUG_RODATA
11418 bool "Write protect kernel read-only data structures"
11419 default y
11420 - depends on DEBUG_KERNEL
11421 + depends on DEBUG_KERNEL && BROKEN
11422 ---help---
11423 Mark the kernel read-only data as write-protected in the pagetables,
11424 in order to catch accidental (and incorrect) writes to such const
11425 diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S
11426 --- linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11427 +++ linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11428 @@ -91,6 +91,9 @@ _start:
11429 /* Do any other stuff... */
11430
11431 #ifndef CONFIG_64BIT
11432 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11433 + call verify_cpu
11434 +
11435 /* This could also be done in C code... */
11436 movl pmode_cr3, %eax
11437 movl %eax, %cr3
11438 @@ -104,7 +107,7 @@ _start:
11439 movl %eax, %ecx
11440 orl %edx, %ecx
11441 jz 1f
11442 - movl $0xc0000080, %ecx
11443 + mov $MSR_EFER, %ecx
11444 wrmsr
11445 1:
11446
11447 @@ -114,6 +117,7 @@ _start:
11448 movl pmode_cr0, %eax
11449 movl %eax, %cr0
11450 jmp pmode_return
11451 +# include "../../verify_cpu.S"
11452 #else
11453 pushw $0
11454 pushw trampoline_segment
11455 diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c
11456 --- linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11457 +++ linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11458 @@ -11,11 +11,12 @@
11459 #include <linux/cpumask.h>
11460 #include <asm/segment.h>
11461 #include <asm/desc.h>
11462 +#include <asm/e820.h>
11463
11464 #include "realmode/wakeup.h"
11465 #include "sleep.h"
11466
11467 -unsigned long acpi_wakeup_address;
11468 +unsigned long acpi_wakeup_address = 0x2000;
11469 unsigned long acpi_realmode_flags;
11470
11471 /* address in low memory of the wakeup routine. */
11472 @@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11473 #else /* CONFIG_64BIT */
11474 header->trampoline_segment = setup_trampoline() >> 4;
11475 #ifdef CONFIG_SMP
11476 - stack_start.sp = temp_stack + sizeof(temp_stack);
11477 + stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11478 +
11479 + pax_open_kernel();
11480 early_gdt_descr.address =
11481 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11482 + pax_close_kernel();
11483 +
11484 initial_gs = per_cpu_offset(smp_processor_id());
11485 #endif
11486 initial_code = (unsigned long)wakeup_long64;
11487 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11488 return;
11489 }
11490
11491 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11492 -
11493 - if (!acpi_realmode) {
11494 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11495 - return;
11496 - }
11497 -
11498 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11499 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11500 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11501 }
11502
11503
11504 diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S
11505 --- linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11506 +++ linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11507 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11508 # and restore the stack ... but you need gdt for this to work
11509 movl saved_context_esp, %esp
11510
11511 - movl %cs:saved_magic, %eax
11512 - cmpl $0x12345678, %eax
11513 + cmpl $0x12345678, saved_magic
11514 jne bogus_magic
11515
11516 # jump to place where we left off
11517 - movl saved_eip, %eax
11518 - jmp *%eax
11519 + jmp *(saved_eip)
11520
11521 bogus_magic:
11522 jmp bogus_magic
11523 diff -urNp linux-2.6.32.43/arch/x86/kernel/alternative.c linux-2.6.32.43/arch/x86/kernel/alternative.c
11524 --- linux-2.6.32.43/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11525 +++ linux-2.6.32.43/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11526 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11527
11528 BUG_ON(p->len > MAX_PATCH_LEN);
11529 /* prep the buffer with the original instructions */
11530 - memcpy(insnbuf, p->instr, p->len);
11531 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11532 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11533 (unsigned long)p->instr, p->len);
11534
11535 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11536 if (smp_alt_once)
11537 free_init_pages("SMP alternatives",
11538 (unsigned long)__smp_locks,
11539 - (unsigned long)__smp_locks_end);
11540 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11541
11542 restart_nmi();
11543 }
11544 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11545 * instructions. And on the local CPU you need to be protected again NMI or MCE
11546 * handlers seeing an inconsistent instruction while you patch.
11547 */
11548 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11549 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11550 size_t len)
11551 {
11552 unsigned long flags;
11553 local_irq_save(flags);
11554 - memcpy(addr, opcode, len);
11555 +
11556 + pax_open_kernel();
11557 + memcpy(ktla_ktva(addr), opcode, len);
11558 sync_core();
11559 + pax_close_kernel();
11560 +
11561 local_irq_restore(flags);
11562 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11563 that causes hangs on some VIA CPUs. */
11564 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11565 */
11566 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11567 {
11568 - unsigned long flags;
11569 - char *vaddr;
11570 + unsigned char *vaddr = ktla_ktva(addr);
11571 struct page *pages[2];
11572 - int i;
11573 + size_t i;
11574
11575 if (!core_kernel_text((unsigned long)addr)) {
11576 - pages[0] = vmalloc_to_page(addr);
11577 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11578 + pages[0] = vmalloc_to_page(vaddr);
11579 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11580 } else {
11581 - pages[0] = virt_to_page(addr);
11582 + pages[0] = virt_to_page(vaddr);
11583 WARN_ON(!PageReserved(pages[0]));
11584 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11585 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11586 }
11587 BUG_ON(!pages[0]);
11588 - local_irq_save(flags);
11589 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11590 - if (pages[1])
11591 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11592 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11593 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11594 - clear_fixmap(FIX_TEXT_POKE0);
11595 - if (pages[1])
11596 - clear_fixmap(FIX_TEXT_POKE1);
11597 - local_flush_tlb();
11598 - sync_core();
11599 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11600 - that causes hangs on some VIA CPUs. */
11601 + text_poke_early(addr, opcode, len);
11602 for (i = 0; i < len; i++)
11603 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11604 - local_irq_restore(flags);
11605 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11606 return addr;
11607 }
11608 diff -urNp linux-2.6.32.43/arch/x86/kernel/amd_iommu.c linux-2.6.32.43/arch/x86/kernel/amd_iommu.c
11609 --- linux-2.6.32.43/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11610 +++ linux-2.6.32.43/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11611 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11612 }
11613 }
11614
11615 -static struct dma_map_ops amd_iommu_dma_ops = {
11616 +static const struct dma_map_ops amd_iommu_dma_ops = {
11617 .alloc_coherent = alloc_coherent,
11618 .free_coherent = free_coherent,
11619 .map_page = map_page,
11620 diff -urNp linux-2.6.32.43/arch/x86/kernel/apic/apic.c linux-2.6.32.43/arch/x86/kernel/apic/apic.c
11621 --- linux-2.6.32.43/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11622 +++ linux-2.6.32.43/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11623 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11624 apic_write(APIC_ESR, 0);
11625 v1 = apic_read(APIC_ESR);
11626 ack_APIC_irq();
11627 - atomic_inc(&irq_err_count);
11628 + atomic_inc_unchecked(&irq_err_count);
11629
11630 /*
11631 * Here is what the APIC error bits mean:
11632 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11633 u16 *bios_cpu_apicid;
11634 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11635
11636 + pax_track_stack();
11637 +
11638 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11639 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11640
11641 diff -urNp linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c
11642 --- linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11643 +++ linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11644 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11645 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11646 GFP_ATOMIC);
11647 if (!ioapic_entries)
11648 - return 0;
11649 + return NULL;
11650
11651 for (apic = 0; apic < nr_ioapics; apic++) {
11652 ioapic_entries[apic] =
11653 @@ -733,7 +733,7 @@ nomem:
11654 kfree(ioapic_entries[apic]);
11655 kfree(ioapic_entries);
11656
11657 - return 0;
11658 + return NULL;
11659 }
11660
11661 /*
11662 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11663 }
11664 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11665
11666 -void lock_vector_lock(void)
11667 +void lock_vector_lock(void) __acquires(vector_lock)
11668 {
11669 /* Used to the online set of cpus does not change
11670 * during assign_irq_vector.
11671 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11672 spin_lock(&vector_lock);
11673 }
11674
11675 -void unlock_vector_lock(void)
11676 +void unlock_vector_lock(void) __releases(vector_lock)
11677 {
11678 spin_unlock(&vector_lock);
11679 }
11680 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11681 ack_APIC_irq();
11682 }
11683
11684 -atomic_t irq_mis_count;
11685 +atomic_unchecked_t irq_mis_count;
11686
11687 static void ack_apic_level(unsigned int irq)
11688 {
11689 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11690
11691 /* Tail end of version 0x11 I/O APIC bug workaround */
11692 if (!(v & (1 << (i & 0x1f)))) {
11693 - atomic_inc(&irq_mis_count);
11694 + atomic_inc_unchecked(&irq_mis_count);
11695 spin_lock(&ioapic_lock);
11696 __mask_and_edge_IO_APIC_irq(cfg);
11697 __unmask_and_level_IO_APIC_irq(cfg);
11698 diff -urNp linux-2.6.32.43/arch/x86/kernel/apm_32.c linux-2.6.32.43/arch/x86/kernel/apm_32.c
11699 --- linux-2.6.32.43/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11700 +++ linux-2.6.32.43/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11701 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11702 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11703 * even though they are called in protected mode.
11704 */
11705 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11706 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11707 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11708
11709 static const char driver_version[] = "1.16ac"; /* no spaces */
11710 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11711 BUG_ON(cpu != 0);
11712 gdt = get_cpu_gdt_table(cpu);
11713 save_desc_40 = gdt[0x40 / 8];
11714 +
11715 + pax_open_kernel();
11716 gdt[0x40 / 8] = bad_bios_desc;
11717 + pax_close_kernel();
11718
11719 apm_irq_save(flags);
11720 APM_DO_SAVE_SEGS;
11721 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11722 &call->esi);
11723 APM_DO_RESTORE_SEGS;
11724 apm_irq_restore(flags);
11725 +
11726 + pax_open_kernel();
11727 gdt[0x40 / 8] = save_desc_40;
11728 + pax_close_kernel();
11729 +
11730 put_cpu();
11731
11732 return call->eax & 0xff;
11733 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11734 BUG_ON(cpu != 0);
11735 gdt = get_cpu_gdt_table(cpu);
11736 save_desc_40 = gdt[0x40 / 8];
11737 +
11738 + pax_open_kernel();
11739 gdt[0x40 / 8] = bad_bios_desc;
11740 + pax_close_kernel();
11741
11742 apm_irq_save(flags);
11743 APM_DO_SAVE_SEGS;
11744 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11745 &call->eax);
11746 APM_DO_RESTORE_SEGS;
11747 apm_irq_restore(flags);
11748 +
11749 + pax_open_kernel();
11750 gdt[0x40 / 8] = save_desc_40;
11751 + pax_close_kernel();
11752 +
11753 put_cpu();
11754 return error;
11755 }
11756 @@ -975,7 +989,7 @@ recalc:
11757
11758 static void apm_power_off(void)
11759 {
11760 - unsigned char po_bios_call[] = {
11761 + const unsigned char po_bios_call[] = {
11762 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11763 0x8e, 0xd0, /* movw ax,ss */
11764 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11765 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11766 * code to that CPU.
11767 */
11768 gdt = get_cpu_gdt_table(0);
11769 +
11770 + pax_open_kernel();
11771 set_desc_base(&gdt[APM_CS >> 3],
11772 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11773 set_desc_base(&gdt[APM_CS_16 >> 3],
11774 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11775 set_desc_base(&gdt[APM_DS >> 3],
11776 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11777 + pax_close_kernel();
11778
11779 proc_create("apm", 0, NULL, &apm_file_ops);
11780
11781 diff -urNp linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c
11782 --- linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11783 +++ linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11784 @@ -51,7 +51,6 @@ void foo(void)
11785 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11786 BLANK();
11787
11788 - OFFSET(TI_task, thread_info, task);
11789 OFFSET(TI_exec_domain, thread_info, exec_domain);
11790 OFFSET(TI_flags, thread_info, flags);
11791 OFFSET(TI_status, thread_info, status);
11792 @@ -60,6 +59,8 @@ void foo(void)
11793 OFFSET(TI_restart_block, thread_info, restart_block);
11794 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11795 OFFSET(TI_cpu, thread_info, cpu);
11796 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11797 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11798 BLANK();
11799
11800 OFFSET(GDS_size, desc_ptr, size);
11801 @@ -99,6 +100,7 @@ void foo(void)
11802
11803 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11804 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11805 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11806 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11807 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11808 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11809 @@ -115,6 +117,11 @@ void foo(void)
11810 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11811 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11812 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11813 +
11814 +#ifdef CONFIG_PAX_KERNEXEC
11815 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11816 +#endif
11817 +
11818 #endif
11819
11820 #ifdef CONFIG_XEN
11821 diff -urNp linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c
11822 --- linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11823 +++ linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11824 @@ -44,6 +44,8 @@ int main(void)
11825 ENTRY(addr_limit);
11826 ENTRY(preempt_count);
11827 ENTRY(status);
11828 + ENTRY(lowest_stack);
11829 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11830 #ifdef CONFIG_IA32_EMULATION
11831 ENTRY(sysenter_return);
11832 #endif
11833 @@ -63,6 +65,18 @@ int main(void)
11834 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11835 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11836 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11837 +
11838 +#ifdef CONFIG_PAX_KERNEXEC
11839 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11840 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11841 +#endif
11842 +
11843 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11844 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11845 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11846 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11847 +#endif
11848 +
11849 #endif
11850
11851
11852 @@ -115,6 +129,7 @@ int main(void)
11853 ENTRY(cr8);
11854 BLANK();
11855 #undef ENTRY
11856 + DEFINE(TSS_size, sizeof(struct tss_struct));
11857 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11858 BLANK();
11859 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11860 @@ -130,6 +145,7 @@ int main(void)
11861
11862 BLANK();
11863 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11864 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11865 #ifdef CONFIG_XEN
11866 BLANK();
11867 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11868 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/amd.c linux-2.6.32.43/arch/x86/kernel/cpu/amd.c
11869 --- linux-2.6.32.43/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
11870 +++ linux-2.6.32.43/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
11871 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
11872 unsigned int size)
11873 {
11874 /* AMD errata T13 (order #21922) */
11875 - if ((c->x86 == 6)) {
11876 + if (c->x86 == 6) {
11877 /* Duron Rev A0 */
11878 if (c->x86_model == 3 && c->x86_mask == 0)
11879 size = 64;
11880 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/common.c linux-2.6.32.43/arch/x86/kernel/cpu/common.c
11881 --- linux-2.6.32.43/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11882 +++ linux-2.6.32.43/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11883 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11884
11885 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11886
11887 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11888 -#ifdef CONFIG_X86_64
11889 - /*
11890 - * We need valid kernel segments for data and code in long mode too
11891 - * IRET will check the segment types kkeil 2000/10/28
11892 - * Also sysret mandates a special GDT layout
11893 - *
11894 - * TLS descriptors are currently at a different place compared to i386.
11895 - * Hopefully nobody expects them at a fixed place (Wine?)
11896 - */
11897 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11898 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11899 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11900 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11901 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11902 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11903 -#else
11904 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11905 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11906 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11907 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11908 - /*
11909 - * Segments used for calling PnP BIOS have byte granularity.
11910 - * They code segments and data segments have fixed 64k limits,
11911 - * the transfer segment sizes are set at run time.
11912 - */
11913 - /* 32-bit code */
11914 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11915 - /* 16-bit code */
11916 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11917 - /* 16-bit data */
11918 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11919 - /* 16-bit data */
11920 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11921 - /* 16-bit data */
11922 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11923 - /*
11924 - * The APM segments have byte granularity and their bases
11925 - * are set at run time. All have 64k limits.
11926 - */
11927 - /* 32-bit code */
11928 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11929 - /* 16-bit code */
11930 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11931 - /* data */
11932 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11933 -
11934 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11935 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11936 - GDT_STACK_CANARY_INIT
11937 -#endif
11938 -} };
11939 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11940 -
11941 static int __init x86_xsave_setup(char *s)
11942 {
11943 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11944 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11945 {
11946 struct desc_ptr gdt_descr;
11947
11948 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11949 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11950 gdt_descr.size = GDT_SIZE - 1;
11951 load_gdt(&gdt_descr);
11952 /* Reload the per-cpu base */
11953 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11954 /* Filter out anything that depends on CPUID levels we don't have */
11955 filter_cpuid_features(c, true);
11956
11957 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11958 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11959 +#endif
11960 +
11961 /* If the model name is still unset, do table lookup. */
11962 if (!c->x86_model_id[0]) {
11963 const char *p;
11964 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11965 }
11966 __setup("clearcpuid=", setup_disablecpuid);
11967
11968 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11969 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11970 +
11971 #ifdef CONFIG_X86_64
11972 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11973
11974 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11975 EXPORT_PER_CPU_SYMBOL(current_task);
11976
11977 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11978 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11979 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11980 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11981
11982 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11983 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11984 {
11985 memset(regs, 0, sizeof(struct pt_regs));
11986 regs->fs = __KERNEL_PERCPU;
11987 - regs->gs = __KERNEL_STACK_CANARY;
11988 + savesegment(gs, regs->gs);
11989
11990 return regs;
11991 }
11992 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11993 int i;
11994
11995 cpu = stack_smp_processor_id();
11996 - t = &per_cpu(init_tss, cpu);
11997 + t = init_tss + cpu;
11998 orig_ist = &per_cpu(orig_ist, cpu);
11999
12000 #ifdef CONFIG_NUMA
12001 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12002 switch_to_new_gdt(cpu);
12003 loadsegment(fs, 0);
12004
12005 - load_idt((const struct desc_ptr *)&idt_descr);
12006 + load_idt(&idt_descr);
12007
12008 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12009 syscall_init();
12010 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12011 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12012 barrier();
12013
12014 - check_efer();
12015 if (cpu != 0)
12016 enable_x2apic();
12017
12018 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12019 {
12020 int cpu = smp_processor_id();
12021 struct task_struct *curr = current;
12022 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12023 + struct tss_struct *t = init_tss + cpu;
12024 struct thread_struct *thread = &curr->thread;
12025
12026 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12027 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/intel.c linux-2.6.32.43/arch/x86/kernel/cpu/intel.c
12028 --- linux-2.6.32.43/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12029 +++ linux-2.6.32.43/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12030 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12031 * Update the IDT descriptor and reload the IDT so that
12032 * it uses the read-only mapped virtual address.
12033 */
12034 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12035 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12036 load_idt(&idt_descr);
12037 }
12038 #endif
12039 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c
12040 --- linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12041 +++ linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12042 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12043 return ret;
12044 }
12045
12046 -static struct sysfs_ops sysfs_ops = {
12047 +static const struct sysfs_ops sysfs_ops = {
12048 .show = show,
12049 .store = store,
12050 };
12051 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/Makefile linux-2.6.32.43/arch/x86/kernel/cpu/Makefile
12052 --- linux-2.6.32.43/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12053 +++ linux-2.6.32.43/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12054 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12055 CFLAGS_REMOVE_common.o = -pg
12056 endif
12057
12058 -# Make sure load_percpu_segment has no stackprotector
12059 -nostackp := $(call cc-option, -fno-stack-protector)
12060 -CFLAGS_common.o := $(nostackp)
12061 -
12062 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12063 obj-y += proc.o capflags.o powerflags.o common.o
12064 obj-y += vmware.o hypervisor.o sched.o
12065 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c
12066 --- linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12067 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12068 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12069 return ret;
12070 }
12071
12072 -static struct sysfs_ops threshold_ops = {
12073 +static const struct sysfs_ops threshold_ops = {
12074 .show = show,
12075 .store = store,
12076 };
12077 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c
12078 --- linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12079 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12080 @@ -43,6 +43,7 @@
12081 #include <asm/ipi.h>
12082 #include <asm/mce.h>
12083 #include <asm/msr.h>
12084 +#include <asm/local.h>
12085
12086 #include "mce-internal.h"
12087
12088 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12089 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12090 m->cs, m->ip);
12091
12092 - if (m->cs == __KERNEL_CS)
12093 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12094 print_symbol("{%s}", m->ip);
12095 pr_cont("\n");
12096 }
12097 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
12098
12099 #define PANIC_TIMEOUT 5 /* 5 seconds */
12100
12101 -static atomic_t mce_paniced;
12102 +static atomic_unchecked_t mce_paniced;
12103
12104 static int fake_panic;
12105 -static atomic_t mce_fake_paniced;
12106 +static atomic_unchecked_t mce_fake_paniced;
12107
12108 /* Panic in progress. Enable interrupts and wait for final IPI */
12109 static void wait_for_panic(void)
12110 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12111 /*
12112 * Make sure only one CPU runs in machine check panic
12113 */
12114 - if (atomic_inc_return(&mce_paniced) > 1)
12115 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12116 wait_for_panic();
12117 barrier();
12118
12119 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12120 console_verbose();
12121 } else {
12122 /* Don't log too much for fake panic */
12123 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12124 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12125 return;
12126 }
12127 print_mce_head();
12128 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12129 * might have been modified by someone else.
12130 */
12131 rmb();
12132 - if (atomic_read(&mce_paniced))
12133 + if (atomic_read_unchecked(&mce_paniced))
12134 wait_for_panic();
12135 if (!monarch_timeout)
12136 goto out;
12137 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12138 */
12139
12140 static DEFINE_SPINLOCK(mce_state_lock);
12141 -static int open_count; /* #times opened */
12142 +static local_t open_count; /* #times opened */
12143 static int open_exclu; /* already open exclusive? */
12144
12145 static int mce_open(struct inode *inode, struct file *file)
12146 {
12147 spin_lock(&mce_state_lock);
12148
12149 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12150 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12151 spin_unlock(&mce_state_lock);
12152
12153 return -EBUSY;
12154 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12155
12156 if (file->f_flags & O_EXCL)
12157 open_exclu = 1;
12158 - open_count++;
12159 + local_inc(&open_count);
12160
12161 spin_unlock(&mce_state_lock);
12162
12163 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12164 {
12165 spin_lock(&mce_state_lock);
12166
12167 - open_count--;
12168 + local_dec(&open_count);
12169 open_exclu = 0;
12170
12171 spin_unlock(&mce_state_lock);
12172 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12173 static void mce_reset(void)
12174 {
12175 cpu_missing = 0;
12176 - atomic_set(&mce_fake_paniced, 0);
12177 + atomic_set_unchecked(&mce_fake_paniced, 0);
12178 atomic_set(&mce_executing, 0);
12179 atomic_set(&mce_callin, 0);
12180 atomic_set(&global_nwo, 0);
12181 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c
12182 --- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12183 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12184 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12185 return 0;
12186 }
12187
12188 -static struct mtrr_ops amd_mtrr_ops = {
12189 +static const struct mtrr_ops amd_mtrr_ops = {
12190 .vendor = X86_VENDOR_AMD,
12191 .set = amd_set_mtrr,
12192 .get = amd_get_mtrr,
12193 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c
12194 --- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12195 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12196 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12197 return 0;
12198 }
12199
12200 -static struct mtrr_ops centaur_mtrr_ops = {
12201 +static const struct mtrr_ops centaur_mtrr_ops = {
12202 .vendor = X86_VENDOR_CENTAUR,
12203 .set = centaur_set_mcr,
12204 .get = centaur_get_mcr,
12205 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c
12206 --- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12207 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12208 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12209 post_set();
12210 }
12211
12212 -static struct mtrr_ops cyrix_mtrr_ops = {
12213 +static const struct mtrr_ops cyrix_mtrr_ops = {
12214 .vendor = X86_VENDOR_CYRIX,
12215 .set_all = cyrix_set_all,
12216 .set = cyrix_set_arr,
12217 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c
12218 --- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12219 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12220 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12221 /*
12222 * Generic structure...
12223 */
12224 -struct mtrr_ops generic_mtrr_ops = {
12225 +const struct mtrr_ops generic_mtrr_ops = {
12226 .use_intel_if = 1,
12227 .set_all = generic_set_all,
12228 .get = generic_get_mtrr,
12229 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c
12230 --- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12231 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12232 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12233 u64 size_or_mask, size_and_mask;
12234 static bool mtrr_aps_delayed_init;
12235
12236 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12237 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12238
12239 -struct mtrr_ops *mtrr_if;
12240 +const struct mtrr_ops *mtrr_if;
12241
12242 static void set_mtrr(unsigned int reg, unsigned long base,
12243 unsigned long size, mtrr_type type);
12244
12245 -void set_mtrr_ops(struct mtrr_ops *ops)
12246 +void set_mtrr_ops(const struct mtrr_ops *ops)
12247 {
12248 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12249 mtrr_ops[ops->vendor] = ops;
12250 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h
12251 --- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12252 +++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12253 @@ -12,19 +12,19 @@
12254 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12255
12256 struct mtrr_ops {
12257 - u32 vendor;
12258 - u32 use_intel_if;
12259 - void (*set)(unsigned int reg, unsigned long base,
12260 + const u32 vendor;
12261 + const u32 use_intel_if;
12262 + void (* const set)(unsigned int reg, unsigned long base,
12263 unsigned long size, mtrr_type type);
12264 - void (*set_all)(void);
12265 + void (* const set_all)(void);
12266
12267 - void (*get)(unsigned int reg, unsigned long *base,
12268 + void (* const get)(unsigned int reg, unsigned long *base,
12269 unsigned long *size, mtrr_type *type);
12270 - int (*get_free_region)(unsigned long base, unsigned long size,
12271 + int (* const get_free_region)(unsigned long base, unsigned long size,
12272 int replace_reg);
12273 - int (*validate_add_page)(unsigned long base, unsigned long size,
12274 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12275 unsigned int type);
12276 - int (*have_wrcomb)(void);
12277 + int (* const have_wrcomb)(void);
12278 };
12279
12280 extern int generic_get_free_region(unsigned long base, unsigned long size,
12281 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12282 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12283 unsigned int type);
12284
12285 -extern struct mtrr_ops generic_mtrr_ops;
12286 +extern const struct mtrr_ops generic_mtrr_ops;
12287
12288 extern int positive_have_wrcomb(void);
12289
12290 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12291 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12292 void get_mtrr_state(void);
12293
12294 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12295 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12296
12297 extern u64 size_or_mask, size_and_mask;
12298 -extern struct mtrr_ops *mtrr_if;
12299 +extern const struct mtrr_ops *mtrr_if;
12300
12301 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12302 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12303 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c
12304 --- linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12305 +++ linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12306 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12307
12308 /* Interface defining a CPU specific perfctr watchdog */
12309 struct wd_ops {
12310 - int (*reserve)(void);
12311 - void (*unreserve)(void);
12312 - int (*setup)(unsigned nmi_hz);
12313 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12314 - void (*stop)(void);
12315 + int (* const reserve)(void);
12316 + void (* const unreserve)(void);
12317 + int (* const setup)(unsigned nmi_hz);
12318 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12319 + void (* const stop)(void);
12320 unsigned perfctr;
12321 unsigned evntsel;
12322 u64 checkbit;
12323 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12324 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12325 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12326
12327 +/* cannot be const */
12328 static struct wd_ops intel_arch_wd_ops;
12329
12330 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12331 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12332 return 1;
12333 }
12334
12335 +/* cannot be const */
12336 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12337 .reserve = single_msr_reserve,
12338 .unreserve = single_msr_unreserve,
12339 diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c
12340 --- linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12341 +++ linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12342 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12343 * count to the generic event atomically:
12344 */
12345 again:
12346 - prev_raw_count = atomic64_read(&hwc->prev_count);
12347 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12348 rdmsrl(hwc->event_base + idx, new_raw_count);
12349
12350 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12351 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12352 new_raw_count) != prev_raw_count)
12353 goto again;
12354
12355 @@ -741,7 +741,7 @@ again:
12356 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12357 delta >>= shift;
12358
12359 - atomic64_add(delta, &event->count);
12360 + atomic64_add_unchecked(delta, &event->count);
12361 atomic64_sub(delta, &hwc->period_left);
12362
12363 return new_raw_count;
12364 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12365 * The hw event starts counting from this event offset,
12366 * mark it to be able to extra future deltas:
12367 */
12368 - atomic64_set(&hwc->prev_count, (u64)-left);
12369 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12370
12371 err = checking_wrmsrl(hwc->event_base + idx,
12372 (u64)(-left) & x86_pmu.event_mask);
12373 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12374 break;
12375
12376 callchain_store(entry, frame.return_address);
12377 - fp = frame.next_frame;
12378 + fp = (__force const void __user *)frame.next_frame;
12379 }
12380 }
12381
12382 diff -urNp linux-2.6.32.43/arch/x86/kernel/crash.c linux-2.6.32.43/arch/x86/kernel/crash.c
12383 --- linux-2.6.32.43/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12384 +++ linux-2.6.32.43/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12385 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12386 regs = args->regs;
12387
12388 #ifdef CONFIG_X86_32
12389 - if (!user_mode_vm(regs)) {
12390 + if (!user_mode(regs)) {
12391 crash_fixup_ss_esp(&fixed_regs, regs);
12392 regs = &fixed_regs;
12393 }
12394 diff -urNp linux-2.6.32.43/arch/x86/kernel/doublefault_32.c linux-2.6.32.43/arch/x86/kernel/doublefault_32.c
12395 --- linux-2.6.32.43/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12396 +++ linux-2.6.32.43/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12397 @@ -11,7 +11,7 @@
12398
12399 #define DOUBLEFAULT_STACKSIZE (1024)
12400 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12401 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12402 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12403
12404 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12405
12406 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12407 unsigned long gdt, tss;
12408
12409 store_gdt(&gdt_desc);
12410 - gdt = gdt_desc.address;
12411 + gdt = (unsigned long)gdt_desc.address;
12412
12413 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12414
12415 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12416 /* 0x2 bit is always set */
12417 .flags = X86_EFLAGS_SF | 0x2,
12418 .sp = STACK_START,
12419 - .es = __USER_DS,
12420 + .es = __KERNEL_DS,
12421 .cs = __KERNEL_CS,
12422 .ss = __KERNEL_DS,
12423 - .ds = __USER_DS,
12424 + .ds = __KERNEL_DS,
12425 .fs = __KERNEL_PERCPU,
12426
12427 .__cr3 = __pa_nodebug(swapper_pg_dir),
12428 diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c
12429 --- linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12430 +++ linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12431 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12432 #endif
12433
12434 for (;;) {
12435 - struct thread_info *context;
12436 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12437 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12438
12439 - context = (struct thread_info *)
12440 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12441 - bp = print_context_stack(context, stack, bp, ops,
12442 - data, NULL, &graph);
12443 -
12444 - stack = (unsigned long *)context->previous_esp;
12445 - if (!stack)
12446 + if (stack_start == task_stack_page(task))
12447 break;
12448 + stack = *(unsigned long **)stack_start;
12449 if (ops->stack(data, "IRQ") < 0)
12450 break;
12451 touch_nmi_watchdog();
12452 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12453 * When in-kernel, we also print out the stack and code at the
12454 * time of the fault..
12455 */
12456 - if (!user_mode_vm(regs)) {
12457 + if (!user_mode(regs)) {
12458 unsigned int code_prologue = code_bytes * 43 / 64;
12459 unsigned int code_len = code_bytes;
12460 unsigned char c;
12461 u8 *ip;
12462 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12463
12464 printk(KERN_EMERG "Stack:\n");
12465 show_stack_log_lvl(NULL, regs, &regs->sp,
12466 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12467
12468 printk(KERN_EMERG "Code: ");
12469
12470 - ip = (u8 *)regs->ip - code_prologue;
12471 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12472 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12473 /* try starting at IP */
12474 - ip = (u8 *)regs->ip;
12475 + ip = (u8 *)regs->ip + cs_base;
12476 code_len = code_len - code_prologue + 1;
12477 }
12478 for (i = 0; i < code_len; i++, ip++) {
12479 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12480 printk(" Bad EIP value.");
12481 break;
12482 }
12483 - if (ip == (u8 *)regs->ip)
12484 + if (ip == (u8 *)regs->ip + cs_base)
12485 printk("<%02x> ", c);
12486 else
12487 printk("%02x ", c);
12488 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12489 {
12490 unsigned short ud2;
12491
12492 + ip = ktla_ktva(ip);
12493 if (ip < PAGE_OFFSET)
12494 return 0;
12495 if (probe_kernel_address((unsigned short *)ip, ud2))
12496 diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c
12497 --- linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12498 +++ linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12499 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12500 unsigned long *irq_stack_end =
12501 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12502 unsigned used = 0;
12503 - struct thread_info *tinfo;
12504 int graph = 0;
12505 + void *stack_start;
12506
12507 if (!task)
12508 task = current;
12509 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12510 * current stack address. If the stacks consist of nested
12511 * exceptions
12512 */
12513 - tinfo = task_thread_info(task);
12514 for (;;) {
12515 char *id;
12516 unsigned long *estack_end;
12517 +
12518 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12519 &used, &id);
12520
12521 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12522 if (ops->stack(data, id) < 0)
12523 break;
12524
12525 - bp = print_context_stack(tinfo, stack, bp, ops,
12526 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12527 data, estack_end, &graph);
12528 ops->stack(data, "<EOE>");
12529 /*
12530 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12531 if (stack >= irq_stack && stack < irq_stack_end) {
12532 if (ops->stack(data, "IRQ") < 0)
12533 break;
12534 - bp = print_context_stack(tinfo, stack, bp,
12535 + bp = print_context_stack(task, irq_stack, stack, bp,
12536 ops, data, irq_stack_end, &graph);
12537 /*
12538 * We link to the next stack (which would be
12539 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12540 /*
12541 * This handles the process stack:
12542 */
12543 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12544 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12545 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12546 put_cpu();
12547 }
12548 EXPORT_SYMBOL(dump_trace);
12549 diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack.c linux-2.6.32.43/arch/x86/kernel/dumpstack.c
12550 --- linux-2.6.32.43/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12551 +++ linux-2.6.32.43/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12552 @@ -2,6 +2,9 @@
12553 * Copyright (C) 1991, 1992 Linus Torvalds
12554 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12555 */
12556 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12557 +#define __INCLUDED_BY_HIDESYM 1
12558 +#endif
12559 #include <linux/kallsyms.h>
12560 #include <linux/kprobes.h>
12561 #include <linux/uaccess.h>
12562 @@ -28,7 +31,7 @@ static int die_counter;
12563
12564 void printk_address(unsigned long address, int reliable)
12565 {
12566 - printk(" [<%p>] %s%pS\n", (void *) address,
12567 + printk(" [<%p>] %s%pA\n", (void *) address,
12568 reliable ? "" : "? ", (void *) address);
12569 }
12570
12571 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12572 static void
12573 print_ftrace_graph_addr(unsigned long addr, void *data,
12574 const struct stacktrace_ops *ops,
12575 - struct thread_info *tinfo, int *graph)
12576 + struct task_struct *task, int *graph)
12577 {
12578 - struct task_struct *task = tinfo->task;
12579 unsigned long ret_addr;
12580 int index = task->curr_ret_stack;
12581
12582 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12583 static inline void
12584 print_ftrace_graph_addr(unsigned long addr, void *data,
12585 const struct stacktrace_ops *ops,
12586 - struct thread_info *tinfo, int *graph)
12587 + struct task_struct *task, int *graph)
12588 { }
12589 #endif
12590
12591 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12592 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12593 */
12594
12595 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12596 - void *p, unsigned int size, void *end)
12597 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12598 {
12599 - void *t = tinfo;
12600 if (end) {
12601 if (p < end && p >= (end-THREAD_SIZE))
12602 return 1;
12603 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12604 }
12605
12606 unsigned long
12607 -print_context_stack(struct thread_info *tinfo,
12608 +print_context_stack(struct task_struct *task, void *stack_start,
12609 unsigned long *stack, unsigned long bp,
12610 const struct stacktrace_ops *ops, void *data,
12611 unsigned long *end, int *graph)
12612 {
12613 struct stack_frame *frame = (struct stack_frame *)bp;
12614
12615 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12616 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12617 unsigned long addr;
12618
12619 addr = *stack;
12620 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12621 } else {
12622 ops->address(data, addr, 0);
12623 }
12624 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12625 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12626 }
12627 stack++;
12628 }
12629 @@ -180,7 +180,7 @@ void dump_stack(void)
12630 #endif
12631
12632 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12633 - current->pid, current->comm, print_tainted(),
12634 + task_pid_nr(current), current->comm, print_tainted(),
12635 init_utsname()->release,
12636 (int)strcspn(init_utsname()->version, " "),
12637 init_utsname()->version);
12638 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12639 return flags;
12640 }
12641
12642 +extern void gr_handle_kernel_exploit(void);
12643 +
12644 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12645 {
12646 if (regs && kexec_should_crash(current))
12647 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12648 panic("Fatal exception in interrupt");
12649 if (panic_on_oops)
12650 panic("Fatal exception");
12651 - do_exit(signr);
12652 +
12653 + gr_handle_kernel_exploit();
12654 +
12655 + do_group_exit(signr);
12656 }
12657
12658 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12659 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12660 unsigned long flags = oops_begin();
12661 int sig = SIGSEGV;
12662
12663 - if (!user_mode_vm(regs))
12664 + if (!user_mode(regs))
12665 report_bug(regs->ip, regs);
12666
12667 if (__die(str, regs, err))
12668 diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack.h linux-2.6.32.43/arch/x86/kernel/dumpstack.h
12669 --- linux-2.6.32.43/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12670 +++ linux-2.6.32.43/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12671 @@ -15,7 +15,7 @@
12672 #endif
12673
12674 extern unsigned long
12675 -print_context_stack(struct thread_info *tinfo,
12676 +print_context_stack(struct task_struct *task, void *stack_start,
12677 unsigned long *stack, unsigned long bp,
12678 const struct stacktrace_ops *ops, void *data,
12679 unsigned long *end, int *graph);
12680 diff -urNp linux-2.6.32.43/arch/x86/kernel/e820.c linux-2.6.32.43/arch/x86/kernel/e820.c
12681 --- linux-2.6.32.43/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12682 +++ linux-2.6.32.43/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12683 @@ -733,7 +733,7 @@ struct early_res {
12684 };
12685 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12686 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12687 - {}
12688 + { 0, 0, {0}, 0 }
12689 };
12690
12691 static int __init find_overlapped_early(u64 start, u64 end)
12692 diff -urNp linux-2.6.32.43/arch/x86/kernel/early_printk.c linux-2.6.32.43/arch/x86/kernel/early_printk.c
12693 --- linux-2.6.32.43/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12694 +++ linux-2.6.32.43/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12695 @@ -7,6 +7,7 @@
12696 #include <linux/pci_regs.h>
12697 #include <linux/pci_ids.h>
12698 #include <linux/errno.h>
12699 +#include <linux/sched.h>
12700 #include <asm/io.h>
12701 #include <asm/processor.h>
12702 #include <asm/fcntl.h>
12703 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12704 int n;
12705 va_list ap;
12706
12707 + pax_track_stack();
12708 +
12709 va_start(ap, fmt);
12710 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12711 early_console->write(early_console, buf, n);
12712 diff -urNp linux-2.6.32.43/arch/x86/kernel/efi_32.c linux-2.6.32.43/arch/x86/kernel/efi_32.c
12713 --- linux-2.6.32.43/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12714 +++ linux-2.6.32.43/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12715 @@ -38,70 +38,38 @@
12716 */
12717
12718 static unsigned long efi_rt_eflags;
12719 -static pgd_t efi_bak_pg_dir_pointer[2];
12720 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12721
12722 -void efi_call_phys_prelog(void)
12723 +void __init efi_call_phys_prelog(void)
12724 {
12725 - unsigned long cr4;
12726 - unsigned long temp;
12727 struct desc_ptr gdt_descr;
12728
12729 local_irq_save(efi_rt_eflags);
12730
12731 - /*
12732 - * If I don't have PAE, I should just duplicate two entries in page
12733 - * directory. If I have PAE, I just need to duplicate one entry in
12734 - * page directory.
12735 - */
12736 - cr4 = read_cr4_safe();
12737
12738 - if (cr4 & X86_CR4_PAE) {
12739 - efi_bak_pg_dir_pointer[0].pgd =
12740 - swapper_pg_dir[pgd_index(0)].pgd;
12741 - swapper_pg_dir[0].pgd =
12742 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12743 - } else {
12744 - efi_bak_pg_dir_pointer[0].pgd =
12745 - swapper_pg_dir[pgd_index(0)].pgd;
12746 - efi_bak_pg_dir_pointer[1].pgd =
12747 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12748 - swapper_pg_dir[pgd_index(0)].pgd =
12749 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12750 - temp = PAGE_OFFSET + 0x400000;
12751 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12752 - swapper_pg_dir[pgd_index(temp)].pgd;
12753 - }
12754 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12755 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12756 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12757
12758 /*
12759 * After the lock is released, the original page table is restored.
12760 */
12761 __flush_tlb_all();
12762
12763 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12764 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12765 gdt_descr.size = GDT_SIZE - 1;
12766 load_gdt(&gdt_descr);
12767 }
12768
12769 -void efi_call_phys_epilog(void)
12770 +void __init efi_call_phys_epilog(void)
12771 {
12772 - unsigned long cr4;
12773 struct desc_ptr gdt_descr;
12774
12775 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12776 + gdt_descr.address = get_cpu_gdt_table(0);
12777 gdt_descr.size = GDT_SIZE - 1;
12778 load_gdt(&gdt_descr);
12779
12780 - cr4 = read_cr4_safe();
12781 -
12782 - if (cr4 & X86_CR4_PAE) {
12783 - swapper_pg_dir[pgd_index(0)].pgd =
12784 - efi_bak_pg_dir_pointer[0].pgd;
12785 - } else {
12786 - swapper_pg_dir[pgd_index(0)].pgd =
12787 - efi_bak_pg_dir_pointer[0].pgd;
12788 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12789 - efi_bak_pg_dir_pointer[1].pgd;
12790 - }
12791 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12792
12793 /*
12794 * After the lock is released, the original page table is restored.
12795 diff -urNp linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S
12796 --- linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12797 +++ linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12798 @@ -6,6 +6,7 @@
12799 */
12800
12801 #include <linux/linkage.h>
12802 +#include <linux/init.h>
12803 #include <asm/page_types.h>
12804
12805 /*
12806 @@ -20,7 +21,7 @@
12807 * service functions will comply with gcc calling convention, too.
12808 */
12809
12810 -.text
12811 +__INIT
12812 ENTRY(efi_call_phys)
12813 /*
12814 * 0. The function can only be called in Linux kernel. So CS has been
12815 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12816 * The mapping of lower virtual memory has been created in prelog and
12817 * epilog.
12818 */
12819 - movl $1f, %edx
12820 - subl $__PAGE_OFFSET, %edx
12821 - jmp *%edx
12822 + jmp 1f-__PAGE_OFFSET
12823 1:
12824
12825 /*
12826 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12827 * parameter 2, ..., param n. To make things easy, we save the return
12828 * address of efi_call_phys in a global variable.
12829 */
12830 - popl %edx
12831 - movl %edx, saved_return_addr
12832 - /* get the function pointer into ECX*/
12833 - popl %ecx
12834 - movl %ecx, efi_rt_function_ptr
12835 - movl $2f, %edx
12836 - subl $__PAGE_OFFSET, %edx
12837 - pushl %edx
12838 + popl (saved_return_addr)
12839 + popl (efi_rt_function_ptr)
12840
12841 /*
12842 * 3. Clear PG bit in %CR0.
12843 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12844 /*
12845 * 5. Call the physical function.
12846 */
12847 - jmp *%ecx
12848 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12849
12850 -2:
12851 /*
12852 * 6. After EFI runtime service returns, control will return to
12853 * following instruction. We'd better readjust stack pointer first.
12854 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12855 movl %cr0, %edx
12856 orl $0x80000000, %edx
12857 movl %edx, %cr0
12858 - jmp 1f
12859 -1:
12860 +
12861 /*
12862 * 8. Now restore the virtual mode from flat mode by
12863 * adding EIP with PAGE_OFFSET.
12864 */
12865 - movl $1f, %edx
12866 - jmp *%edx
12867 + jmp 1f+__PAGE_OFFSET
12868 1:
12869
12870 /*
12871 * 9. Balance the stack. And because EAX contain the return value,
12872 * we'd better not clobber it.
12873 */
12874 - leal efi_rt_function_ptr, %edx
12875 - movl (%edx), %ecx
12876 - pushl %ecx
12877 + pushl (efi_rt_function_ptr)
12878
12879 /*
12880 - * 10. Push the saved return address onto the stack and return.
12881 + * 10. Return to the saved return address.
12882 */
12883 - leal saved_return_addr, %edx
12884 - movl (%edx), %ecx
12885 - pushl %ecx
12886 - ret
12887 + jmpl *(saved_return_addr)
12888 ENDPROC(efi_call_phys)
12889 .previous
12890
12891 -.data
12892 +__INITDATA
12893 saved_return_addr:
12894 .long 0
12895 efi_rt_function_ptr:
12896 diff -urNp linux-2.6.32.43/arch/x86/kernel/entry_32.S linux-2.6.32.43/arch/x86/kernel/entry_32.S
12897 --- linux-2.6.32.43/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12898 +++ linux-2.6.32.43/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12899 @@ -185,13 +185,146 @@
12900 /*CFI_REL_OFFSET gs, PT_GS*/
12901 .endm
12902 .macro SET_KERNEL_GS reg
12903 +
12904 +#ifdef CONFIG_CC_STACKPROTECTOR
12905 movl $(__KERNEL_STACK_CANARY), \reg
12906 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12907 + movl $(__USER_DS), \reg
12908 +#else
12909 + xorl \reg, \reg
12910 +#endif
12911 +
12912 movl \reg, %gs
12913 .endm
12914
12915 #endif /* CONFIG_X86_32_LAZY_GS */
12916
12917 -.macro SAVE_ALL
12918 +.macro pax_enter_kernel
12919 +#ifdef CONFIG_PAX_KERNEXEC
12920 + call pax_enter_kernel
12921 +#endif
12922 +.endm
12923 +
12924 +.macro pax_exit_kernel
12925 +#ifdef CONFIG_PAX_KERNEXEC
12926 + call pax_exit_kernel
12927 +#endif
12928 +.endm
12929 +
12930 +#ifdef CONFIG_PAX_KERNEXEC
12931 +ENTRY(pax_enter_kernel)
12932 +#ifdef CONFIG_PARAVIRT
12933 + pushl %eax
12934 + pushl %ecx
12935 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12936 + mov %eax, %esi
12937 +#else
12938 + mov %cr0, %esi
12939 +#endif
12940 + bts $16, %esi
12941 + jnc 1f
12942 + mov %cs, %esi
12943 + cmp $__KERNEL_CS, %esi
12944 + jz 3f
12945 + ljmp $__KERNEL_CS, $3f
12946 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12947 +2:
12948 +#ifdef CONFIG_PARAVIRT
12949 + mov %esi, %eax
12950 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12951 +#else
12952 + mov %esi, %cr0
12953 +#endif
12954 +3:
12955 +#ifdef CONFIG_PARAVIRT
12956 + popl %ecx
12957 + popl %eax
12958 +#endif
12959 + ret
12960 +ENDPROC(pax_enter_kernel)
12961 +
12962 +ENTRY(pax_exit_kernel)
12963 +#ifdef CONFIG_PARAVIRT
12964 + pushl %eax
12965 + pushl %ecx
12966 +#endif
12967 + mov %cs, %esi
12968 + cmp $__KERNEXEC_KERNEL_CS, %esi
12969 + jnz 2f
12970 +#ifdef CONFIG_PARAVIRT
12971 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12972 + mov %eax, %esi
12973 +#else
12974 + mov %cr0, %esi
12975 +#endif
12976 + btr $16, %esi
12977 + ljmp $__KERNEL_CS, $1f
12978 +1:
12979 +#ifdef CONFIG_PARAVIRT
12980 + mov %esi, %eax
12981 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12982 +#else
12983 + mov %esi, %cr0
12984 +#endif
12985 +2:
12986 +#ifdef CONFIG_PARAVIRT
12987 + popl %ecx
12988 + popl %eax
12989 +#endif
12990 + ret
12991 +ENDPROC(pax_exit_kernel)
12992 +#endif
12993 +
12994 +.macro pax_erase_kstack
12995 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12996 + call pax_erase_kstack
12997 +#endif
12998 +.endm
12999 +
13000 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13001 +/*
13002 + * ebp: thread_info
13003 + * ecx, edx: can be clobbered
13004 + */
13005 +ENTRY(pax_erase_kstack)
13006 + pushl %edi
13007 + pushl %eax
13008 +
13009 + mov TI_lowest_stack(%ebp), %edi
13010 + mov $-0xBEEF, %eax
13011 + std
13012 +
13013 +1: mov %edi, %ecx
13014 + and $THREAD_SIZE_asm - 1, %ecx
13015 + shr $2, %ecx
13016 + repne scasl
13017 + jecxz 2f
13018 +
13019 + cmp $2*16, %ecx
13020 + jc 2f
13021 +
13022 + mov $2*16, %ecx
13023 + repe scasl
13024 + jecxz 2f
13025 + jne 1b
13026 +
13027 +2: cld
13028 + mov %esp, %ecx
13029 + sub %edi, %ecx
13030 + shr $2, %ecx
13031 + rep stosl
13032 +
13033 + mov TI_task_thread_sp0(%ebp), %edi
13034 + sub $128, %edi
13035 + mov %edi, TI_lowest_stack(%ebp)
13036 +
13037 + popl %eax
13038 + popl %edi
13039 + ret
13040 +ENDPROC(pax_erase_kstack)
13041 +#endif
13042 +
13043 +.macro __SAVE_ALL _DS
13044 cld
13045 PUSH_GS
13046 pushl %fs
13047 @@ -224,7 +357,7 @@
13048 pushl %ebx
13049 CFI_ADJUST_CFA_OFFSET 4
13050 CFI_REL_OFFSET ebx, 0
13051 - movl $(__USER_DS), %edx
13052 + movl $\_DS, %edx
13053 movl %edx, %ds
13054 movl %edx, %es
13055 movl $(__KERNEL_PERCPU), %edx
13056 @@ -232,6 +365,15 @@
13057 SET_KERNEL_GS %edx
13058 .endm
13059
13060 +.macro SAVE_ALL
13061 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13062 + __SAVE_ALL __KERNEL_DS
13063 + pax_enter_kernel
13064 +#else
13065 + __SAVE_ALL __USER_DS
13066 +#endif
13067 +.endm
13068 +
13069 .macro RESTORE_INT_REGS
13070 popl %ebx
13071 CFI_ADJUST_CFA_OFFSET -4
13072 @@ -352,7 +494,15 @@ check_userspace:
13073 movb PT_CS(%esp), %al
13074 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13075 cmpl $USER_RPL, %eax
13076 +
13077 +#ifdef CONFIG_PAX_KERNEXEC
13078 + jae resume_userspace
13079 +
13080 + PAX_EXIT_KERNEL
13081 + jmp resume_kernel
13082 +#else
13083 jb resume_kernel # not returning to v8086 or userspace
13084 +#endif
13085
13086 ENTRY(resume_userspace)
13087 LOCKDEP_SYS_EXIT
13088 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13089 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13090 # int/exception return?
13091 jne work_pending
13092 - jmp restore_all
13093 + jmp restore_all_pax
13094 END(ret_from_exception)
13095
13096 #ifdef CONFIG_PREEMPT
13097 @@ -414,25 +564,36 @@ sysenter_past_esp:
13098 /*CFI_REL_OFFSET cs, 0*/
13099 /*
13100 * Push current_thread_info()->sysenter_return to the stack.
13101 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13102 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
13103 */
13104 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13105 + pushl $0
13106 CFI_ADJUST_CFA_OFFSET 4
13107 CFI_REL_OFFSET eip, 0
13108
13109 pushl %eax
13110 CFI_ADJUST_CFA_OFFSET 4
13111 SAVE_ALL
13112 + GET_THREAD_INFO(%ebp)
13113 + movl TI_sysenter_return(%ebp),%ebp
13114 + movl %ebp,PT_EIP(%esp)
13115 ENABLE_INTERRUPTS(CLBR_NONE)
13116
13117 /*
13118 * Load the potential sixth argument from user stack.
13119 * Careful about security.
13120 */
13121 + movl PT_OLDESP(%esp),%ebp
13122 +
13123 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13124 + mov PT_OLDSS(%esp),%ds
13125 +1: movl %ds:(%ebp),%ebp
13126 + push %ss
13127 + pop %ds
13128 +#else
13129 cmpl $__PAGE_OFFSET-3,%ebp
13130 jae syscall_fault
13131 1: movl (%ebp),%ebp
13132 +#endif
13133 +
13134 movl %ebp,PT_EBP(%esp)
13135 .section __ex_table,"a"
13136 .align 4
13137 @@ -455,12 +616,23 @@ sysenter_do_call:
13138 testl $_TIF_ALLWORK_MASK, %ecx
13139 jne sysexit_audit
13140 sysenter_exit:
13141 +
13142 +#ifdef CONFIG_PAX_RANDKSTACK
13143 + pushl_cfi %eax
13144 + call pax_randomize_kstack
13145 + popl_cfi %eax
13146 +#endif
13147 +
13148 + pax_erase_kstack
13149 +
13150 /* if something modifies registers it must also disable sysexit */
13151 movl PT_EIP(%esp), %edx
13152 movl PT_OLDESP(%esp), %ecx
13153 xorl %ebp,%ebp
13154 TRACE_IRQS_ON
13155 1: mov PT_FS(%esp), %fs
13156 +2: mov PT_DS(%esp), %ds
13157 +3: mov PT_ES(%esp), %es
13158 PTGS_TO_GS
13159 ENABLE_INTERRUPTS_SYSEXIT
13160
13161 @@ -477,6 +649,9 @@ sysenter_audit:
13162 movl %eax,%edx /* 2nd arg: syscall number */
13163 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13164 call audit_syscall_entry
13165 +
13166 + pax_erase_kstack
13167 +
13168 pushl %ebx
13169 CFI_ADJUST_CFA_OFFSET 4
13170 movl PT_EAX(%esp),%eax /* reload syscall number */
13171 @@ -504,11 +679,17 @@ sysexit_audit:
13172
13173 CFI_ENDPROC
13174 .pushsection .fixup,"ax"
13175 -2: movl $0,PT_FS(%esp)
13176 +4: movl $0,PT_FS(%esp)
13177 + jmp 1b
13178 +5: movl $0,PT_DS(%esp)
13179 + jmp 1b
13180 +6: movl $0,PT_ES(%esp)
13181 jmp 1b
13182 .section __ex_table,"a"
13183 .align 4
13184 - .long 1b,2b
13185 + .long 1b,4b
13186 + .long 2b,5b
13187 + .long 3b,6b
13188 .popsection
13189 PTGS_TO_GS_EX
13190 ENDPROC(ia32_sysenter_target)
13191 @@ -538,6 +719,14 @@ syscall_exit:
13192 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13193 jne syscall_exit_work
13194
13195 +restore_all_pax:
13196 +
13197 +#ifdef CONFIG_PAX_RANDKSTACK
13198 + call pax_randomize_kstack
13199 +#endif
13200 +
13201 + pax_erase_kstack
13202 +
13203 restore_all:
13204 TRACE_IRQS_IRET
13205 restore_all_notrace:
13206 @@ -602,7 +791,13 @@ ldt_ss:
13207 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13208 mov %dx, %ax /* eax: new kernel esp */
13209 sub %eax, %edx /* offset (low word is 0) */
13210 - PER_CPU(gdt_page, %ebx)
13211 +#ifdef CONFIG_SMP
13212 + movl PER_CPU_VAR(cpu_number), %ebx
13213 + shll $PAGE_SHIFT_asm, %ebx
13214 + addl $cpu_gdt_table, %ebx
13215 +#else
13216 + movl $cpu_gdt_table, %ebx
13217 +#endif
13218 shr $16, %edx
13219 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13220 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13221 @@ -636,31 +831,25 @@ work_resched:
13222 movl TI_flags(%ebp), %ecx
13223 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13224 # than syscall tracing?
13225 - jz restore_all
13226 + jz restore_all_pax
13227 testb $_TIF_NEED_RESCHED, %cl
13228 jnz work_resched
13229
13230 work_notifysig: # deal with pending signals and
13231 # notify-resume requests
13232 + movl %esp, %eax
13233 #ifdef CONFIG_VM86
13234 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13235 - movl %esp, %eax
13236 - jne work_notifysig_v86 # returning to kernel-space or
13237 + jz 1f # returning to kernel-space or
13238 # vm86-space
13239 - xorl %edx, %edx
13240 - call do_notify_resume
13241 - jmp resume_userspace_sig
13242
13243 - ALIGN
13244 -work_notifysig_v86:
13245 pushl %ecx # save ti_flags for do_notify_resume
13246 CFI_ADJUST_CFA_OFFSET 4
13247 call save_v86_state # %eax contains pt_regs pointer
13248 popl %ecx
13249 CFI_ADJUST_CFA_OFFSET -4
13250 movl %eax, %esp
13251 -#else
13252 - movl %esp, %eax
13253 +1:
13254 #endif
13255 xorl %edx, %edx
13256 call do_notify_resume
13257 @@ -673,6 +862,9 @@ syscall_trace_entry:
13258 movl $-ENOSYS,PT_EAX(%esp)
13259 movl %esp, %eax
13260 call syscall_trace_enter
13261 +
13262 + pax_erase_kstack
13263 +
13264 /* What it returned is what we'll actually use. */
13265 cmpl $(nr_syscalls), %eax
13266 jnae syscall_call
13267 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13268
13269 RING0_INT_FRAME # can't unwind into user space anyway
13270 syscall_fault:
13271 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13272 + push %ss
13273 + pop %ds
13274 +#endif
13275 GET_THREAD_INFO(%ebp)
13276 movl $-EFAULT,PT_EAX(%esp)
13277 jmp resume_userspace
13278 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13279 PTREGSCALL(vm86)
13280 PTREGSCALL(vm86old)
13281
13282 + ALIGN;
13283 +ENTRY(kernel_execve)
13284 + push %ebp
13285 + sub $PT_OLDSS+4,%esp
13286 + push %edi
13287 + push %ecx
13288 + push %eax
13289 + lea 3*4(%esp),%edi
13290 + mov $PT_OLDSS/4+1,%ecx
13291 + xorl %eax,%eax
13292 + rep stosl
13293 + pop %eax
13294 + pop %ecx
13295 + pop %edi
13296 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13297 + mov %eax,PT_EBX(%esp)
13298 + mov %edx,PT_ECX(%esp)
13299 + mov %ecx,PT_EDX(%esp)
13300 + mov %esp,%eax
13301 + call sys_execve
13302 + GET_THREAD_INFO(%ebp)
13303 + test %eax,%eax
13304 + jz syscall_exit
13305 + add $PT_OLDSS+4,%esp
13306 + pop %ebp
13307 + ret
13308 +
13309 .macro FIXUP_ESPFIX_STACK
13310 /*
13311 * Switch back for ESPFIX stack to the normal zerobased stack
13312 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13313 * normal stack and adjusts ESP with the matching offset.
13314 */
13315 /* fixup the stack */
13316 - PER_CPU(gdt_page, %ebx)
13317 +#ifdef CONFIG_SMP
13318 + movl PER_CPU_VAR(cpu_number), %ebx
13319 + shll $PAGE_SHIFT_asm, %ebx
13320 + addl $cpu_gdt_table, %ebx
13321 +#else
13322 + movl $cpu_gdt_table, %ebx
13323 +#endif
13324 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13325 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13326 shl $16, %eax
13327 @@ -1198,7 +1427,6 @@ return_to_handler:
13328 ret
13329 #endif
13330
13331 -.section .rodata,"a"
13332 #include "syscall_table_32.S"
13333
13334 syscall_table_size=(.-sys_call_table)
13335 @@ -1255,9 +1483,12 @@ error_code:
13336 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13337 REG_TO_PTGS %ecx
13338 SET_KERNEL_GS %ecx
13339 - movl $(__USER_DS), %ecx
13340 + movl $(__KERNEL_DS), %ecx
13341 movl %ecx, %ds
13342 movl %ecx, %es
13343 +
13344 + pax_enter_kernel
13345 +
13346 TRACE_IRQS_OFF
13347 movl %esp,%eax # pt_regs pointer
13348 call *%edi
13349 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13350 xorl %edx,%edx # zero error code
13351 movl %esp,%eax # pt_regs pointer
13352 call do_nmi
13353 +
13354 + pax_exit_kernel
13355 +
13356 jmp restore_all_notrace
13357 CFI_ENDPROC
13358
13359 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13360 FIXUP_ESPFIX_STACK # %eax == %esp
13361 xorl %edx,%edx # zero error code
13362 call do_nmi
13363 +
13364 + pax_exit_kernel
13365 +
13366 RESTORE_REGS
13367 lss 12+4(%esp), %esp # back to espfix stack
13368 CFI_ADJUST_CFA_OFFSET -24
13369 diff -urNp linux-2.6.32.43/arch/x86/kernel/entry_64.S linux-2.6.32.43/arch/x86/kernel/entry_64.S
13370 --- linux-2.6.32.43/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13371 +++ linux-2.6.32.43/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13372 @@ -53,6 +53,7 @@
13373 #include <asm/paravirt.h>
13374 #include <asm/ftrace.h>
13375 #include <asm/percpu.h>
13376 +#include <asm/pgtable.h>
13377
13378 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13379 #include <linux/elf-em.h>
13380 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13381 ENDPROC(native_usergs_sysret64)
13382 #endif /* CONFIG_PARAVIRT */
13383
13384 + .macro ljmpq sel, off
13385 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13386 + .byte 0x48; ljmp *1234f(%rip)
13387 + .pushsection .rodata
13388 + .align 16
13389 + 1234: .quad \off; .word \sel
13390 + .popsection
13391 +#else
13392 + pushq $\sel
13393 + pushq $\off
13394 + lretq
13395 +#endif
13396 + .endm
13397 +
13398 + .macro pax_enter_kernel
13399 +#ifdef CONFIG_PAX_KERNEXEC
13400 + call pax_enter_kernel
13401 +#endif
13402 + .endm
13403 +
13404 + .macro pax_exit_kernel
13405 +#ifdef CONFIG_PAX_KERNEXEC
13406 + call pax_exit_kernel
13407 +#endif
13408 + .endm
13409 +
13410 +#ifdef CONFIG_PAX_KERNEXEC
13411 +ENTRY(pax_enter_kernel)
13412 + pushq %rdi
13413 +
13414 +#ifdef CONFIG_PARAVIRT
13415 + PV_SAVE_REGS(CLBR_RDI)
13416 +#endif
13417 +
13418 + GET_CR0_INTO_RDI
13419 + bts $16,%rdi
13420 + jnc 1f
13421 + mov %cs,%edi
13422 + cmp $__KERNEL_CS,%edi
13423 + jz 3f
13424 + ljmpq __KERNEL_CS,3f
13425 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13426 +2: SET_RDI_INTO_CR0
13427 +3:
13428 +
13429 +#ifdef CONFIG_PARAVIRT
13430 + PV_RESTORE_REGS(CLBR_RDI)
13431 +#endif
13432 +
13433 + popq %rdi
13434 + retq
13435 +ENDPROC(pax_enter_kernel)
13436 +
13437 +ENTRY(pax_exit_kernel)
13438 + pushq %rdi
13439 +
13440 +#ifdef CONFIG_PARAVIRT
13441 + PV_SAVE_REGS(CLBR_RDI)
13442 +#endif
13443 +
13444 + mov %cs,%rdi
13445 + cmp $__KERNEXEC_KERNEL_CS,%edi
13446 + jnz 2f
13447 + GET_CR0_INTO_RDI
13448 + btr $16,%rdi
13449 + ljmpq __KERNEL_CS,1f
13450 +1: SET_RDI_INTO_CR0
13451 +2:
13452 +
13453 +#ifdef CONFIG_PARAVIRT
13454 + PV_RESTORE_REGS(CLBR_RDI);
13455 +#endif
13456 +
13457 + popq %rdi
13458 + retq
13459 +ENDPROC(pax_exit_kernel)
13460 +#endif
13461 +
13462 + .macro pax_enter_kernel_user
13463 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13464 + call pax_enter_kernel_user
13465 +#endif
13466 + .endm
13467 +
13468 + .macro pax_exit_kernel_user
13469 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13470 + call pax_exit_kernel_user
13471 +#endif
13472 +#ifdef CONFIG_PAX_RANDKSTACK
13473 + push %rax
13474 + call pax_randomize_kstack
13475 + pop %rax
13476 +#endif
13477 + pax_erase_kstack
13478 + .endm
13479 +
13480 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13481 +ENTRY(pax_enter_kernel_user)
13482 + pushq %rdi
13483 + pushq %rbx
13484 +
13485 +#ifdef CONFIG_PARAVIRT
13486 + PV_SAVE_REGS(CLBR_RDI)
13487 +#endif
13488 +
13489 + GET_CR3_INTO_RDI
13490 + mov %rdi,%rbx
13491 + add $__START_KERNEL_map,%rbx
13492 + sub phys_base(%rip),%rbx
13493 +
13494 +#ifdef CONFIG_PARAVIRT
13495 + pushq %rdi
13496 + cmpl $0, pv_info+PARAVIRT_enabled
13497 + jz 1f
13498 + i = 0
13499 + .rept USER_PGD_PTRS
13500 + mov i*8(%rbx),%rsi
13501 + mov $0,%sil
13502 + lea i*8(%rbx),%rdi
13503 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13504 + i = i + 1
13505 + .endr
13506 + jmp 2f
13507 +1:
13508 +#endif
13509 +
13510 + i = 0
13511 + .rept USER_PGD_PTRS
13512 + movb $0,i*8(%rbx)
13513 + i = i + 1
13514 + .endr
13515 +
13516 +#ifdef CONFIG_PARAVIRT
13517 +2: popq %rdi
13518 +#endif
13519 + SET_RDI_INTO_CR3
13520 +
13521 +#ifdef CONFIG_PAX_KERNEXEC
13522 + GET_CR0_INTO_RDI
13523 + bts $16,%rdi
13524 + SET_RDI_INTO_CR0
13525 +#endif
13526 +
13527 +#ifdef CONFIG_PARAVIRT
13528 + PV_RESTORE_REGS(CLBR_RDI)
13529 +#endif
13530 +
13531 + popq %rbx
13532 + popq %rdi
13533 + retq
13534 +ENDPROC(pax_enter_kernel_user)
13535 +
13536 +ENTRY(pax_exit_kernel_user)
13537 + push %rdi
13538 +
13539 +#ifdef CONFIG_PARAVIRT
13540 + pushq %rbx
13541 + PV_SAVE_REGS(CLBR_RDI)
13542 +#endif
13543 +
13544 +#ifdef CONFIG_PAX_KERNEXEC
13545 + GET_CR0_INTO_RDI
13546 + btr $16,%rdi
13547 + SET_RDI_INTO_CR0
13548 +#endif
13549 +
13550 + GET_CR3_INTO_RDI
13551 + add $__START_KERNEL_map,%rdi
13552 + sub phys_base(%rip),%rdi
13553 +
13554 +#ifdef CONFIG_PARAVIRT
13555 + cmpl $0, pv_info+PARAVIRT_enabled
13556 + jz 1f
13557 + mov %rdi,%rbx
13558 + i = 0
13559 + .rept USER_PGD_PTRS
13560 + mov i*8(%rbx),%rsi
13561 + mov $0x67,%sil
13562 + lea i*8(%rbx),%rdi
13563 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13564 + i = i + 1
13565 + .endr
13566 + jmp 2f
13567 +1:
13568 +#endif
13569 +
13570 + i = 0
13571 + .rept USER_PGD_PTRS
13572 + movb $0x67,i*8(%rdi)
13573 + i = i + 1
13574 + .endr
13575 +
13576 +#ifdef CONFIG_PARAVIRT
13577 +2: PV_RESTORE_REGS(CLBR_RDI)
13578 + popq %rbx
13579 +#endif
13580 +
13581 + popq %rdi
13582 + retq
13583 +ENDPROC(pax_exit_kernel_user)
13584 +#endif
13585 +
13586 +.macro pax_erase_kstack
13587 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13588 + call pax_erase_kstack
13589 +#endif
13590 +.endm
13591 +
13592 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13593 +/*
13594 + * r10: thread_info
13595 + * rcx, rdx: can be clobbered
13596 + */
13597 +ENTRY(pax_erase_kstack)
13598 + pushq %rdi
13599 + pushq %rax
13600 +
13601 + GET_THREAD_INFO(%r10)
13602 + mov TI_lowest_stack(%r10), %rdi
13603 + mov $-0xBEEF, %rax
13604 + std
13605 +
13606 +1: mov %edi, %ecx
13607 + and $THREAD_SIZE_asm - 1, %ecx
13608 + shr $3, %ecx
13609 + repne scasq
13610 + jecxz 2f
13611 +
13612 + cmp $2*8, %ecx
13613 + jc 2f
13614 +
13615 + mov $2*8, %ecx
13616 + repe scasq
13617 + jecxz 2f
13618 + jne 1b
13619 +
13620 +2: cld
13621 + mov %esp, %ecx
13622 + sub %edi, %ecx
13623 + shr $3, %ecx
13624 + rep stosq
13625 +
13626 + mov TI_task_thread_sp0(%r10), %rdi
13627 + sub $256, %rdi
13628 + mov %rdi, TI_lowest_stack(%r10)
13629 +
13630 + popq %rax
13631 + popq %rdi
13632 + ret
13633 +ENDPROC(pax_erase_kstack)
13634 +#endif
13635
13636 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13637 #ifdef CONFIG_TRACE_IRQFLAGS
13638 @@ -317,7 +569,7 @@ ENTRY(save_args)
13639 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13640 movq_cfi rbp, 8 /* push %rbp */
13641 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13642 - testl $3, CS(%rdi)
13643 + testb $3, CS(%rdi)
13644 je 1f
13645 SWAPGS
13646 /*
13647 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13648
13649 RESTORE_REST
13650
13651 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13652 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13653 je int_ret_from_sys_call
13654
13655 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13656 @@ -455,7 +707,7 @@ END(ret_from_fork)
13657 ENTRY(system_call)
13658 CFI_STARTPROC simple
13659 CFI_SIGNAL_FRAME
13660 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13661 + CFI_DEF_CFA rsp,0
13662 CFI_REGISTER rip,rcx
13663 /*CFI_REGISTER rflags,r11*/
13664 SWAPGS_UNSAFE_STACK
13665 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13666
13667 movq %rsp,PER_CPU_VAR(old_rsp)
13668 movq PER_CPU_VAR(kernel_stack),%rsp
13669 + pax_enter_kernel_user
13670 /*
13671 * No need to follow this irqs off/on section - it's straight
13672 * and short:
13673 */
13674 ENABLE_INTERRUPTS(CLBR_NONE)
13675 - SAVE_ARGS 8,1
13676 + SAVE_ARGS 8*6,1
13677 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13678 movq %rcx,RIP-ARGOFFSET(%rsp)
13679 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13680 @@ -502,6 +755,7 @@ sysret_check:
13681 andl %edi,%edx
13682 jnz sysret_careful
13683 CFI_REMEMBER_STATE
13684 + pax_exit_kernel_user
13685 /*
13686 * sysretq will re-enable interrupts:
13687 */
13688 @@ -562,6 +816,9 @@ auditsys:
13689 movq %rax,%rsi /* 2nd arg: syscall number */
13690 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13691 call audit_syscall_entry
13692 +
13693 + pax_erase_kstack
13694 +
13695 LOAD_ARGS 0 /* reload call-clobbered registers */
13696 jmp system_call_fastpath
13697
13698 @@ -592,6 +849,9 @@ tracesys:
13699 FIXUP_TOP_OF_STACK %rdi
13700 movq %rsp,%rdi
13701 call syscall_trace_enter
13702 +
13703 + pax_erase_kstack
13704 +
13705 /*
13706 * Reload arg registers from stack in case ptrace changed them.
13707 * We don't reload %rax because syscall_trace_enter() returned
13708 @@ -613,7 +873,7 @@ tracesys:
13709 GLOBAL(int_ret_from_sys_call)
13710 DISABLE_INTERRUPTS(CLBR_NONE)
13711 TRACE_IRQS_OFF
13712 - testl $3,CS-ARGOFFSET(%rsp)
13713 + testb $3,CS-ARGOFFSET(%rsp)
13714 je retint_restore_args
13715 movl $_TIF_ALLWORK_MASK,%edi
13716 /* edi: mask to check */
13717 @@ -800,6 +1060,16 @@ END(interrupt)
13718 CFI_ADJUST_CFA_OFFSET 10*8
13719 call save_args
13720 PARTIAL_FRAME 0
13721 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13722 + testb $3, CS(%rdi)
13723 + jnz 1f
13724 + pax_enter_kernel
13725 + jmp 2f
13726 +1: pax_enter_kernel_user
13727 +2:
13728 +#else
13729 + pax_enter_kernel
13730 +#endif
13731 call \func
13732 .endm
13733
13734 @@ -822,7 +1092,7 @@ ret_from_intr:
13735 CFI_ADJUST_CFA_OFFSET -8
13736 exit_intr:
13737 GET_THREAD_INFO(%rcx)
13738 - testl $3,CS-ARGOFFSET(%rsp)
13739 + testb $3,CS-ARGOFFSET(%rsp)
13740 je retint_kernel
13741
13742 /* Interrupt came from user space */
13743 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13744 * The iretq could re-enable interrupts:
13745 */
13746 DISABLE_INTERRUPTS(CLBR_ANY)
13747 + pax_exit_kernel_user
13748 TRACE_IRQS_IRETQ
13749 SWAPGS
13750 jmp restore_args
13751
13752 retint_restore_args: /* return to kernel space */
13753 DISABLE_INTERRUPTS(CLBR_ANY)
13754 + pax_exit_kernel
13755 /*
13756 * The iretq could re-enable interrupts:
13757 */
13758 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13759 CFI_ADJUST_CFA_OFFSET 15*8
13760 call error_entry
13761 DEFAULT_FRAME 0
13762 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13763 + testb $3, CS(%rsp)
13764 + jnz 1f
13765 + pax_enter_kernel
13766 + jmp 2f
13767 +1: pax_enter_kernel_user
13768 +2:
13769 +#else
13770 + pax_enter_kernel
13771 +#endif
13772 movq %rsp,%rdi /* pt_regs pointer */
13773 xorl %esi,%esi /* no error code */
13774 call \do_sym
13775 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13776 subq $15*8, %rsp
13777 call save_paranoid
13778 TRACE_IRQS_OFF
13779 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13780 + testb $3, CS(%rsp)
13781 + jnz 1f
13782 + pax_enter_kernel
13783 + jmp 2f
13784 +1: pax_enter_kernel_user
13785 +2:
13786 +#else
13787 + pax_enter_kernel
13788 +#endif
13789 movq %rsp,%rdi /* pt_regs pointer */
13790 xorl %esi,%esi /* no error code */
13791 call \do_sym
13792 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13793 subq $15*8, %rsp
13794 call save_paranoid
13795 TRACE_IRQS_OFF
13796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13797 + testb $3, CS(%rsp)
13798 + jnz 1f
13799 + pax_enter_kernel
13800 + jmp 2f
13801 +1: pax_enter_kernel_user
13802 +2:
13803 +#else
13804 + pax_enter_kernel
13805 +#endif
13806 movq %rsp,%rdi /* pt_regs pointer */
13807 xorl %esi,%esi /* no error code */
13808 - PER_CPU(init_tss, %rbp)
13809 +#ifdef CONFIG_SMP
13810 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13811 + lea init_tss(%rbp), %rbp
13812 +#else
13813 + lea init_tss(%rip), %rbp
13814 +#endif
13815 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13816 call \do_sym
13817 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13818 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13819 CFI_ADJUST_CFA_OFFSET 15*8
13820 call error_entry
13821 DEFAULT_FRAME 0
13822 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13823 + testb $3, CS(%rsp)
13824 + jnz 1f
13825 + pax_enter_kernel
13826 + jmp 2f
13827 +1: pax_enter_kernel_user
13828 +2:
13829 +#else
13830 + pax_enter_kernel
13831 +#endif
13832 movq %rsp,%rdi /* pt_regs pointer */
13833 movq ORIG_RAX(%rsp),%rsi /* get error code */
13834 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13835 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13836 call save_paranoid
13837 DEFAULT_FRAME 0
13838 TRACE_IRQS_OFF
13839 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13840 + testb $3, CS(%rsp)
13841 + jnz 1f
13842 + pax_enter_kernel
13843 + jmp 2f
13844 +1: pax_enter_kernel_user
13845 +2:
13846 +#else
13847 + pax_enter_kernel
13848 +#endif
13849 movq %rsp,%rdi /* pt_regs pointer */
13850 movq ORIG_RAX(%rsp),%rsi /* get error code */
13851 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13852 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13853 TRACE_IRQS_OFF
13854 testl %ebx,%ebx /* swapgs needed? */
13855 jnz paranoid_restore
13856 - testl $3,CS(%rsp)
13857 + testb $3,CS(%rsp)
13858 jnz paranoid_userspace
13859 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13860 + pax_exit_kernel
13861 + TRACE_IRQS_IRETQ 0
13862 + SWAPGS_UNSAFE_STACK
13863 + RESTORE_ALL 8
13864 + jmp irq_return
13865 +#endif
13866 paranoid_swapgs:
13867 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13868 + pax_exit_kernel_user
13869 +#else
13870 + pax_exit_kernel
13871 +#endif
13872 TRACE_IRQS_IRETQ 0
13873 SWAPGS_UNSAFE_STACK
13874 RESTORE_ALL 8
13875 jmp irq_return
13876 paranoid_restore:
13877 + pax_exit_kernel
13878 TRACE_IRQS_IRETQ 0
13879 RESTORE_ALL 8
13880 jmp irq_return
13881 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13882 movq_cfi r14, R14+8
13883 movq_cfi r15, R15+8
13884 xorl %ebx,%ebx
13885 - testl $3,CS+8(%rsp)
13886 + testb $3,CS+8(%rsp)
13887 je error_kernelspace
13888 error_swapgs:
13889 SWAPGS
13890 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13891 CFI_ADJUST_CFA_OFFSET 15*8
13892 call save_paranoid
13893 DEFAULT_FRAME 0
13894 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13895 + testb $3, CS(%rsp)
13896 + jnz 1f
13897 + pax_enter_kernel
13898 + jmp 2f
13899 +1: pax_enter_kernel_user
13900 +2:
13901 +#else
13902 + pax_enter_kernel
13903 +#endif
13904 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13905 movq %rsp,%rdi
13906 movq $-1,%rsi
13907 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13908 DISABLE_INTERRUPTS(CLBR_NONE)
13909 testl %ebx,%ebx /* swapgs needed? */
13910 jnz nmi_restore
13911 - testl $3,CS(%rsp)
13912 + testb $3,CS(%rsp)
13913 jnz nmi_userspace
13914 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13915 + pax_exit_kernel
13916 + SWAPGS_UNSAFE_STACK
13917 + RESTORE_ALL 8
13918 + jmp irq_return
13919 +#endif
13920 nmi_swapgs:
13921 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13922 + pax_exit_kernel_user
13923 +#else
13924 + pax_exit_kernel
13925 +#endif
13926 SWAPGS_UNSAFE_STACK
13927 + RESTORE_ALL 8
13928 + jmp irq_return
13929 nmi_restore:
13930 + pax_exit_kernel
13931 RESTORE_ALL 8
13932 jmp irq_return
13933 nmi_userspace:
13934 diff -urNp linux-2.6.32.43/arch/x86/kernel/ftrace.c linux-2.6.32.43/arch/x86/kernel/ftrace.c
13935 --- linux-2.6.32.43/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13936 +++ linux-2.6.32.43/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13937 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13938 static void *mod_code_newcode; /* holds the text to write to the IP */
13939
13940 static unsigned nmi_wait_count;
13941 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13942 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13943
13944 int ftrace_arch_read_dyn_info(char *buf, int size)
13945 {
13946 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13947
13948 r = snprintf(buf, size, "%u %u",
13949 nmi_wait_count,
13950 - atomic_read(&nmi_update_count));
13951 + atomic_read_unchecked(&nmi_update_count));
13952 return r;
13953 }
13954
13955 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13956 {
13957 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13958 smp_rmb();
13959 + pax_open_kernel();
13960 ftrace_mod_code();
13961 - atomic_inc(&nmi_update_count);
13962 + pax_close_kernel();
13963 + atomic_inc_unchecked(&nmi_update_count);
13964 }
13965 /* Must have previous changes seen before executions */
13966 smp_mb();
13967 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13968
13969
13970
13971 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13972 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13973
13974 static unsigned char *ftrace_nop_replace(void)
13975 {
13976 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13977 {
13978 unsigned char replaced[MCOUNT_INSN_SIZE];
13979
13980 + ip = ktla_ktva(ip);
13981 +
13982 /*
13983 * Note: Due to modules and __init, code can
13984 * disappear and change, we need to protect against faulting
13985 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13986 unsigned char old[MCOUNT_INSN_SIZE], *new;
13987 int ret;
13988
13989 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13990 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13991 new = ftrace_call_replace(ip, (unsigned long)func);
13992 ret = ftrace_modify_code(ip, old, new);
13993
13994 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13995 switch (faulted) {
13996 case 0:
13997 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13998 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13999 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14000 break;
14001 case 1:
14002 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14003 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14004 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14005 break;
14006 case 2:
14007 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14008 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14009 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14010 break;
14011 }
14012
14013 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14014 {
14015 unsigned char code[MCOUNT_INSN_SIZE];
14016
14017 + ip = ktla_ktva(ip);
14018 +
14019 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14020 return -EFAULT;
14021
14022 diff -urNp linux-2.6.32.43/arch/x86/kernel/head32.c linux-2.6.32.43/arch/x86/kernel/head32.c
14023 --- linux-2.6.32.43/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14024 +++ linux-2.6.32.43/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14025 @@ -16,6 +16,7 @@
14026 #include <asm/apic.h>
14027 #include <asm/io_apic.h>
14028 #include <asm/bios_ebda.h>
14029 +#include <asm/boot.h>
14030
14031 static void __init i386_default_early_setup(void)
14032 {
14033 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14034 {
14035 reserve_trampoline_memory();
14036
14037 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14038 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14039
14040 #ifdef CONFIG_BLK_DEV_INITRD
14041 /* Reserve INITRD */
14042 diff -urNp linux-2.6.32.43/arch/x86/kernel/head_32.S linux-2.6.32.43/arch/x86/kernel/head_32.S
14043 --- linux-2.6.32.43/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14044 +++ linux-2.6.32.43/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14045 @@ -19,10 +19,17 @@
14046 #include <asm/setup.h>
14047 #include <asm/processor-flags.h>
14048 #include <asm/percpu.h>
14049 +#include <asm/msr-index.h>
14050
14051 /* Physical address */
14052 #define pa(X) ((X) - __PAGE_OFFSET)
14053
14054 +#ifdef CONFIG_PAX_KERNEXEC
14055 +#define ta(X) (X)
14056 +#else
14057 +#define ta(X) ((X) - __PAGE_OFFSET)
14058 +#endif
14059 +
14060 /*
14061 * References to members of the new_cpu_data structure.
14062 */
14063 @@ -52,11 +59,7 @@
14064 * and small than max_low_pfn, otherwise will waste some page table entries
14065 */
14066
14067 -#if PTRS_PER_PMD > 1
14068 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14069 -#else
14070 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14071 -#endif
14072 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14073
14074 /* Enough space to fit pagetables for the low memory linear map */
14075 MAPPING_BEYOND_END = \
14076 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14077 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14078
14079 /*
14080 + * Real beginning of normal "text" segment
14081 + */
14082 +ENTRY(stext)
14083 +ENTRY(_stext)
14084 +
14085 +/*
14086 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14087 * %esi points to the real-mode code as a 32-bit pointer.
14088 * CS and DS must be 4 GB flat segments, but we don't depend on
14089 @@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14090 * can.
14091 */
14092 __HEAD
14093 +
14094 +#ifdef CONFIG_PAX_KERNEXEC
14095 + jmp startup_32
14096 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14097 +.fill PAGE_SIZE-5,1,0xcc
14098 +#endif
14099 +
14100 ENTRY(startup_32)
14101 + movl pa(stack_start),%ecx
14102 +
14103 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14104 us to not reload segments */
14105 testb $(1<<6), BP_loadflags(%esi)
14106 @@ -95,7 +113,60 @@ ENTRY(startup_32)
14107 movl %eax,%es
14108 movl %eax,%fs
14109 movl %eax,%gs
14110 + movl %eax,%ss
14111 2:
14112 + leal -__PAGE_OFFSET(%ecx),%esp
14113 +
14114 +#ifdef CONFIG_SMP
14115 + movl $pa(cpu_gdt_table),%edi
14116 + movl $__per_cpu_load,%eax
14117 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14118 + rorl $16,%eax
14119 + movb %al,__KERNEL_PERCPU + 4(%edi)
14120 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14121 + movl $__per_cpu_end - 1,%eax
14122 + subl $__per_cpu_start,%eax
14123 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14124 +#endif
14125 +
14126 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14127 + movl $NR_CPUS,%ecx
14128 + movl $pa(cpu_gdt_table),%edi
14129 +1:
14130 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14131 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14132 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14133 + addl $PAGE_SIZE_asm,%edi
14134 + loop 1b
14135 +#endif
14136 +
14137 +#ifdef CONFIG_PAX_KERNEXEC
14138 + movl $pa(boot_gdt),%edi
14139 + movl $__LOAD_PHYSICAL_ADDR,%eax
14140 + movw %ax,__BOOT_CS + 2(%edi)
14141 + rorl $16,%eax
14142 + movb %al,__BOOT_CS + 4(%edi)
14143 + movb %ah,__BOOT_CS + 7(%edi)
14144 + rorl $16,%eax
14145 +
14146 + ljmp $(__BOOT_CS),$1f
14147 +1:
14148 +
14149 + movl $NR_CPUS,%ecx
14150 + movl $pa(cpu_gdt_table),%edi
14151 + addl $__PAGE_OFFSET,%eax
14152 +1:
14153 + movw %ax,__KERNEL_CS + 2(%edi)
14154 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14155 + rorl $16,%eax
14156 + movb %al,__KERNEL_CS + 4(%edi)
14157 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14158 + movb %ah,__KERNEL_CS + 7(%edi)
14159 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14160 + rorl $16,%eax
14161 + addl $PAGE_SIZE_asm,%edi
14162 + loop 1b
14163 +#endif
14164
14165 /*
14166 * Clear BSS first so that there are no surprises...
14167 @@ -140,9 +211,7 @@ ENTRY(startup_32)
14168 cmpl $num_subarch_entries, %eax
14169 jae bad_subarch
14170
14171 - movl pa(subarch_entries)(,%eax,4), %eax
14172 - subl $__PAGE_OFFSET, %eax
14173 - jmp *%eax
14174 + jmp *pa(subarch_entries)(,%eax,4)
14175
14176 bad_subarch:
14177 WEAK(lguest_entry)
14178 @@ -154,10 +223,10 @@ WEAK(xen_entry)
14179 __INITDATA
14180
14181 subarch_entries:
14182 - .long default_entry /* normal x86/PC */
14183 - .long lguest_entry /* lguest hypervisor */
14184 - .long xen_entry /* Xen hypervisor */
14185 - .long default_entry /* Moorestown MID */
14186 + .long ta(default_entry) /* normal x86/PC */
14187 + .long ta(lguest_entry) /* lguest hypervisor */
14188 + .long ta(xen_entry) /* Xen hypervisor */
14189 + .long ta(default_entry) /* Moorestown MID */
14190 num_subarch_entries = (. - subarch_entries) / 4
14191 .previous
14192 #endif /* CONFIG_PARAVIRT */
14193 @@ -218,8 +287,11 @@ default_entry:
14194 movl %eax, pa(max_pfn_mapped)
14195
14196 /* Do early initialization of the fixmap area */
14197 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14198 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14199 +#ifdef CONFIG_COMPAT_VDSO
14200 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14201 +#else
14202 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14203 +#endif
14204 #else /* Not PAE */
14205
14206 page_pde_offset = (__PAGE_OFFSET >> 20);
14207 @@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14208 movl %eax, pa(max_pfn_mapped)
14209
14210 /* Do early initialization of the fixmap area */
14211 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14212 - movl %eax,pa(swapper_pg_dir+0xffc)
14213 +#ifdef CONFIG_COMPAT_VDSO
14214 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14215 +#else
14216 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14217 +#endif
14218 #endif
14219 jmp 3f
14220 /*
14221 @@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14222 movl %eax,%es
14223 movl %eax,%fs
14224 movl %eax,%gs
14225 + movl pa(stack_start),%ecx
14226 + movl %eax,%ss
14227 + leal -__PAGE_OFFSET(%ecx),%esp
14228 #endif /* CONFIG_SMP */
14229 3:
14230
14231 @@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14232 orl %edx,%eax
14233 movl %eax,%cr4
14234
14235 +#ifdef CONFIG_X86_PAE
14236 btl $5, %eax # check if PAE is enabled
14237 jnc 6f
14238
14239 @@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14240 cpuid
14241 cmpl $0x80000000, %eax
14242 jbe 6f
14243 +
14244 + /* Clear bogus XD_DISABLE bits */
14245 + call verify_cpu
14246 +
14247 mov $0x80000001, %eax
14248 cpuid
14249 /* Execute Disable bit supported? */
14250 @@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14251 jnc 6f
14252
14253 /* Setup EFER (Extended Feature Enable Register) */
14254 - movl $0xc0000080, %ecx
14255 + movl $MSR_EFER, %ecx
14256 rdmsr
14257
14258 btsl $11, %eax
14259 /* Make changes effective */
14260 wrmsr
14261
14262 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14263 + movl $1,pa(nx_enabled)
14264 +#endif
14265 +
14266 6:
14267
14268 /*
14269 @@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14270 movl %eax,%cr0 /* ..and set paging (PG) bit */
14271 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14272 1:
14273 - /* Set up the stack pointer */
14274 - lss stack_start,%esp
14275 + /* Shift the stack pointer to a virtual address */
14276 + addl $__PAGE_OFFSET, %esp
14277
14278 /*
14279 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14280 @@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14281
14282 #ifdef CONFIG_SMP
14283 cmpb $0, ready
14284 - jz 1f /* Initial CPU cleans BSS */
14285 - jmp checkCPUtype
14286 -1:
14287 + jnz checkCPUtype
14288 #endif /* CONFIG_SMP */
14289
14290 /*
14291 @@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14292 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14293 movl %eax,%ss # after changing gdt.
14294
14295 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14296 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14297 movl %eax,%ds
14298 movl %eax,%es
14299
14300 @@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14301 */
14302 cmpb $0,ready
14303 jne 1f
14304 - movl $per_cpu__gdt_page,%eax
14305 + movl $cpu_gdt_table,%eax
14306 movl $per_cpu__stack_canary,%ecx
14307 +#ifdef CONFIG_SMP
14308 + addl $__per_cpu_load,%ecx
14309 +#endif
14310 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14311 shrl $16, %ecx
14312 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14313 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14314 1:
14315 -#endif
14316 movl $(__KERNEL_STACK_CANARY),%eax
14317 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14318 + movl $(__USER_DS),%eax
14319 +#else
14320 + xorl %eax,%eax
14321 +#endif
14322 movl %eax,%gs
14323
14324 xorl %eax,%eax # Clear LDT
14325 @@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14326
14327 cld # gcc2 wants the direction flag cleared at all times
14328 pushl $0 # fake return address for unwinder
14329 -#ifdef CONFIG_SMP
14330 - movb ready, %cl
14331 movb $1, ready
14332 - cmpb $0,%cl # the first CPU calls start_kernel
14333 - je 1f
14334 - movl (stack_start), %esp
14335 -1:
14336 -#endif /* CONFIG_SMP */
14337 jmp *(initial_code)
14338
14339 /*
14340 @@ -546,22 +631,22 @@ early_page_fault:
14341 jmp early_fault
14342
14343 early_fault:
14344 - cld
14345 #ifdef CONFIG_PRINTK
14346 + cmpl $1,%ss:early_recursion_flag
14347 + je hlt_loop
14348 + incl %ss:early_recursion_flag
14349 + cld
14350 pusha
14351 movl $(__KERNEL_DS),%eax
14352 movl %eax,%ds
14353 movl %eax,%es
14354 - cmpl $2,early_recursion_flag
14355 - je hlt_loop
14356 - incl early_recursion_flag
14357 movl %cr2,%eax
14358 pushl %eax
14359 pushl %edx /* trapno */
14360 pushl $fault_msg
14361 call printk
14362 +; call dump_stack
14363 #endif
14364 - call dump_stack
14365 hlt_loop:
14366 hlt
14367 jmp hlt_loop
14368 @@ -569,8 +654,11 @@ hlt_loop:
14369 /* This is the default interrupt "handler" :-) */
14370 ALIGN
14371 ignore_int:
14372 - cld
14373 #ifdef CONFIG_PRINTK
14374 + cmpl $2,%ss:early_recursion_flag
14375 + je hlt_loop
14376 + incl %ss:early_recursion_flag
14377 + cld
14378 pushl %eax
14379 pushl %ecx
14380 pushl %edx
14381 @@ -579,9 +667,6 @@ ignore_int:
14382 movl $(__KERNEL_DS),%eax
14383 movl %eax,%ds
14384 movl %eax,%es
14385 - cmpl $2,early_recursion_flag
14386 - je hlt_loop
14387 - incl early_recursion_flag
14388 pushl 16(%esp)
14389 pushl 24(%esp)
14390 pushl 32(%esp)
14391 @@ -600,6 +685,8 @@ ignore_int:
14392 #endif
14393 iret
14394
14395 +#include "verify_cpu.S"
14396 +
14397 __REFDATA
14398 .align 4
14399 ENTRY(initial_code)
14400 @@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14401 /*
14402 * BSS section
14403 */
14404 -__PAGE_ALIGNED_BSS
14405 - .align PAGE_SIZE_asm
14406 #ifdef CONFIG_X86_PAE
14407 +.section .swapper_pg_pmd,"a",@progbits
14408 swapper_pg_pmd:
14409 .fill 1024*KPMDS,4,0
14410 #else
14411 +.section .swapper_pg_dir,"a",@progbits
14412 ENTRY(swapper_pg_dir)
14413 .fill 1024,4,0
14414 #endif
14415 +.section .swapper_pg_fixmap,"a",@progbits
14416 swapper_pg_fixmap:
14417 .fill 1024,4,0
14418 #ifdef CONFIG_X86_TRAMPOLINE
14419 +.section .trampoline_pg_dir,"a",@progbits
14420 ENTRY(trampoline_pg_dir)
14421 +#ifdef CONFIG_X86_PAE
14422 + .fill 4,8,0
14423 +#else
14424 .fill 1024,4,0
14425 #endif
14426 +#endif
14427 +
14428 +.section .empty_zero_page,"a",@progbits
14429 ENTRY(empty_zero_page)
14430 .fill 4096,1,0
14431
14432 /*
14433 + * The IDT has to be page-aligned to simplify the Pentium
14434 + * F0 0F bug workaround.. We have a special link segment
14435 + * for this.
14436 + */
14437 +.section .idt,"a",@progbits
14438 +ENTRY(idt_table)
14439 + .fill 256,8,0
14440 +
14441 +/*
14442 * This starts the data section.
14443 */
14444 #ifdef CONFIG_X86_PAE
14445 -__PAGE_ALIGNED_DATA
14446 - /* Page-aligned for the benefit of paravirt? */
14447 - .align PAGE_SIZE_asm
14448 +.section .swapper_pg_dir,"a",@progbits
14449 +
14450 ENTRY(swapper_pg_dir)
14451 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14452 # if KPMDS == 3
14453 @@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14454 # error "Kernel PMDs should be 1, 2 or 3"
14455 # endif
14456 .align PAGE_SIZE_asm /* needs to be page-sized too */
14457 +
14458 +#ifdef CONFIG_PAX_PER_CPU_PGD
14459 +ENTRY(cpu_pgd)
14460 + .rept NR_CPUS
14461 + .fill 4,8,0
14462 + .endr
14463 +#endif
14464 +
14465 #endif
14466
14467 .data
14468 +.balign 4
14469 ENTRY(stack_start)
14470 - .long init_thread_union+THREAD_SIZE
14471 - .long __BOOT_DS
14472 + .long init_thread_union+THREAD_SIZE-8
14473
14474 ready: .byte 0
14475
14476 +.section .rodata,"a",@progbits
14477 early_recursion_flag:
14478 .long 0
14479
14480 @@ -697,7 +809,7 @@ fault_msg:
14481 .word 0 # 32 bit align gdt_desc.address
14482 boot_gdt_descr:
14483 .word __BOOT_DS+7
14484 - .long boot_gdt - __PAGE_OFFSET
14485 + .long pa(boot_gdt)
14486
14487 .word 0 # 32-bit align idt_desc.address
14488 idt_descr:
14489 @@ -708,7 +820,7 @@ idt_descr:
14490 .word 0 # 32 bit align gdt_desc.address
14491 ENTRY(early_gdt_descr)
14492 .word GDT_ENTRIES*8-1
14493 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14494 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14495
14496 /*
14497 * The boot_gdt must mirror the equivalent in setup.S and is
14498 @@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14499 .align L1_CACHE_BYTES
14500 ENTRY(boot_gdt)
14501 .fill GDT_ENTRY_BOOT_CS,8,0
14502 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14503 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14504 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14505 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14506 +
14507 + .align PAGE_SIZE_asm
14508 +ENTRY(cpu_gdt_table)
14509 + .rept NR_CPUS
14510 + .quad 0x0000000000000000 /* NULL descriptor */
14511 + .quad 0x0000000000000000 /* 0x0b reserved */
14512 + .quad 0x0000000000000000 /* 0x13 reserved */
14513 + .quad 0x0000000000000000 /* 0x1b reserved */
14514 +
14515 +#ifdef CONFIG_PAX_KERNEXEC
14516 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14517 +#else
14518 + .quad 0x0000000000000000 /* 0x20 unused */
14519 +#endif
14520 +
14521 + .quad 0x0000000000000000 /* 0x28 unused */
14522 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14523 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14524 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14525 + .quad 0x0000000000000000 /* 0x4b reserved */
14526 + .quad 0x0000000000000000 /* 0x53 reserved */
14527 + .quad 0x0000000000000000 /* 0x5b reserved */
14528 +
14529 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14530 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14531 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14532 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14533 +
14534 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14535 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14536 +
14537 + /*
14538 + * Segments used for calling PnP BIOS have byte granularity.
14539 + * The code segments and data segments have fixed 64k limits,
14540 + * the transfer segment sizes are set at run time.
14541 + */
14542 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14543 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14544 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14545 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14546 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14547 +
14548 + /*
14549 + * The APM segments have byte granularity and their bases
14550 + * are set at run time. All have 64k limits.
14551 + */
14552 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14553 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14554 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14555 +
14556 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14557 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14558 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14559 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14560 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14561 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14562 +
14563 + /* Be sure this is zeroed to avoid false validations in Xen */
14564 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14565 + .endr
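Several of the head_32.S additions above patch segment descriptor base fields byte by byte, writing offsets +2, +3, +4 and +7 of the 8-byte GDT entries for __KERNEL_PERCPU, __BOOT_CS, __KERNEL_CS and __KERNEXEC_KERNEL_CS. That byte layout follows from the legacy descriptor format, where the 32-bit base is split across bits 16..39 and 56..63. A small C sketch that packs a base into a descriptor the same way (little-endian host assumed for the final print; the sample base is hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Write a 32-bit base into an 8-byte GDT entry, mirroring the byte
 * offsets the assembly patches: bytes 2-4 hold base[0:23] and byte 7
 * holds base[24:31]. */
static void set_desc_base(uint8_t desc[8], uint32_t base)
{
        desc[2] = base & 0xff;
        desc[3] = (base >> 8) & 0xff;
        desc[4] = (base >> 16) & 0xff;
        desc[7] = (base >> 24) & 0xff;
}

int main(void)
{
        /* 0x00cf9b000000ffff: 4 GB flat code segment with base 0 */
        uint8_t desc[8] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x9b, 0xcf, 0x00 };
        uint64_t raw;

        set_desc_base(desc, 0x12345678u);       /* hypothetical base */
        memcpy(&raw, desc, sizeof(raw));
        printf("descriptor: %#llx\n", (unsigned long long)raw);
        return 0;
}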
14566 diff -urNp linux-2.6.32.43/arch/x86/kernel/head_64.S linux-2.6.32.43/arch/x86/kernel/head_64.S
14567 --- linux-2.6.32.43/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14568 +++ linux-2.6.32.43/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14569 @@ -19,6 +19,7 @@
14570 #include <asm/cache.h>
14571 #include <asm/processor-flags.h>
14572 #include <asm/percpu.h>
14573 +#include <asm/cpufeature.h>
14574
14575 #ifdef CONFIG_PARAVIRT
14576 #include <asm/asm-offsets.h>
14577 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14578 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14579 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14580 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14581 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14582 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14583 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14584 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14585
14586 .text
14587 __HEAD
14588 @@ -85,35 +90,22 @@ startup_64:
14589 */
14590 addq %rbp, init_level4_pgt + 0(%rip)
14591 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14592 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14593 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14594 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14595
14596 addq %rbp, level3_ident_pgt + 0(%rip)
14597 +#ifndef CONFIG_XEN
14598 + addq %rbp, level3_ident_pgt + 8(%rip)
14599 +#endif
14600
14601 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14602 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14603 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14604
14605 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14606 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14607 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14608
14609 - /* Add an Identity mapping if I am above 1G */
14610 - leaq _text(%rip), %rdi
14611 - andq $PMD_PAGE_MASK, %rdi
14612 -
14613 - movq %rdi, %rax
14614 - shrq $PUD_SHIFT, %rax
14615 - andq $(PTRS_PER_PUD - 1), %rax
14616 - jz ident_complete
14617 -
14618 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14619 - leaq level3_ident_pgt(%rip), %rbx
14620 - movq %rdx, 0(%rbx, %rax, 8)
14621 -
14622 - movq %rdi, %rax
14623 - shrq $PMD_SHIFT, %rax
14624 - andq $(PTRS_PER_PMD - 1), %rax
14625 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14626 - leaq level2_spare_pgt(%rip), %rbx
14627 - movq %rdx, 0(%rbx, %rax, 8)
14628 -ident_complete:
14629 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14630 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14631
14632 /*
14633 * Fixup the kernel text+data virtual addresses. Note that
14634 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14635 * after the boot processor executes this code.
14636 */
14637
14638 - /* Enable PAE mode and PGE */
14639 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14640 + /* Enable PAE mode and PSE/PGE */
14641 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14642 movq %rax, %cr4
14643
14644 /* Setup early boot stage 4 level pagetables. */
14645 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14646 movl $MSR_EFER, %ecx
14647 rdmsr
14648 btsl $_EFER_SCE, %eax /* Enable System Call */
14649 - btl $20,%edi /* No Execute supported? */
14650 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14651 jnc 1f
14652 btsl $_EFER_NX, %eax
14653 + leaq init_level4_pgt(%rip), %rdi
14654 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14655 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14656 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14657 1: wrmsr /* Make changes effective */
14658
14659 /* Setup cr0 */
14660 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14661 .quad x86_64_start_kernel
14662 ENTRY(initial_gs)
14663 .quad INIT_PER_CPU_VAR(irq_stack_union)
14664 - __FINITDATA
14665
14666 ENTRY(stack_start)
14667 .quad init_thread_union+THREAD_SIZE-8
14668 .word 0
14669 + __FINITDATA
14670
14671 bad_address:
14672 jmp bad_address
14673
14674 - .section ".init.text","ax"
14675 + __INIT
14676 #ifdef CONFIG_EARLY_PRINTK
14677 .globl early_idt_handlers
14678 early_idt_handlers:
14679 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14680 #endif /* EARLY_PRINTK */
14681 1: hlt
14682 jmp 1b
14683 + .previous
14684
14685 #ifdef CONFIG_EARLY_PRINTK
14686 + __INITDATA
14687 early_recursion_flag:
14688 .long 0
14689 + .previous
14690
14691 + .section .rodata,"a",@progbits
14692 early_idt_msg:
14693 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14694 early_idt_ripmsg:
14695 .asciz "RIP %s\n"
14696 -#endif /* CONFIG_EARLY_PRINTK */
14697 .previous
14698 +#endif /* CONFIG_EARLY_PRINTK */
14699
14700 + .section .rodata,"a",@progbits
14701 #define NEXT_PAGE(name) \
14702 .balign PAGE_SIZE; \
14703 ENTRY(name)
14704 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14705 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14706 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14707 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14708 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14709 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14710 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14711 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14712 .org init_level4_pgt + L4_START_KERNEL*8, 0
14713 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14714 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14715
14716 +#ifdef CONFIG_PAX_PER_CPU_PGD
14717 +NEXT_PAGE(cpu_pgd)
14718 + .rept NR_CPUS
14719 + .fill 512,8,0
14720 + .endr
14721 +#endif
14722 +
14723 NEXT_PAGE(level3_ident_pgt)
14724 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14725 +#ifdef CONFIG_XEN
14726 .fill 511,8,0
14727 +#else
14728 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14729 + .fill 510,8,0
14730 +#endif
14731 +
14732 +NEXT_PAGE(level3_vmalloc_pgt)
14733 + .fill 512,8,0
14734 +
14735 +NEXT_PAGE(level3_vmemmap_pgt)
14736 + .fill L3_VMEMMAP_START,8,0
14737 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14738
14739 NEXT_PAGE(level3_kernel_pgt)
14740 .fill L3_START_KERNEL,8,0
14741 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14742 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14743 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14744
14745 +NEXT_PAGE(level2_vmemmap_pgt)
14746 + .fill 512,8,0
14747 +
14748 NEXT_PAGE(level2_fixmap_pgt)
14749 - .fill 506,8,0
14750 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14751 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14752 - .fill 5,8,0
14753 + .fill 507,8,0
14754 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14755 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14756 + .fill 4,8,0
14757
14758 -NEXT_PAGE(level1_fixmap_pgt)
14759 +NEXT_PAGE(level1_vsyscall_pgt)
14760 .fill 512,8,0
14761
14762 -NEXT_PAGE(level2_ident_pgt)
14763 - /* Since I easily can, map the first 1G.
14764 + /* Since I easily can, map the first 2G.
14765 * Don't set NX because code runs from these pages.
14766 */
14767 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14768 +NEXT_PAGE(level2_ident_pgt)
14769 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14770
14771 NEXT_PAGE(level2_kernel_pgt)
14772 /*
14773 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14774 * If you want to increase this then increase MODULES_VADDR
14775 * too.)
14776 */
14777 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14778 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14779 -
14780 -NEXT_PAGE(level2_spare_pgt)
14781 - .fill 512, 8, 0
14782 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14783
14784 #undef PMDS
14785 #undef NEXT_PAGE
14786
14787 - .data
14788 + .align PAGE_SIZE
14789 +ENTRY(cpu_gdt_table)
14790 + .rept NR_CPUS
14791 + .quad 0x0000000000000000 /* NULL descriptor */
14792 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14793 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14794 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14795 + .quad 0x00cffb000000ffff /* __USER32_CS */
14796 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14797 + .quad 0x00affb000000ffff /* __USER_CS */
14798 +
14799 +#ifdef CONFIG_PAX_KERNEXEC
14800 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14801 +#else
14802 + .quad 0x0 /* unused */
14803 +#endif
14804 +
14805 + .quad 0,0 /* TSS */
14806 + .quad 0,0 /* LDT */
14807 + .quad 0,0,0 /* three TLS descriptors */
14808 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14809 + /* asm/segment.h:GDT_ENTRIES must match this */
14810 +
14811 + /* zero the remaining page */
14812 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14813 + .endr
14814 +
14815 .align 16
14816 .globl early_gdt_descr
14817 early_gdt_descr:
14818 .word GDT_ENTRIES*8-1
14819 early_gdt_descr_base:
14820 - .quad INIT_PER_CPU_VAR(gdt_page)
14821 + .quad cpu_gdt_table
14822
14823 ENTRY(phys_base)
14824 /* This must match the first entry in level2_kernel_pgt */
14825 .quad 0x0000000000000000
14826
14827 #include "../../x86/xen/xen-head.S"
14828 -
14829 - .section .bss, "aw", @nobits
14830 +
14831 + .section .rodata,"a",@progbits
14832 .align L1_CACHE_BYTES
14833 ENTRY(idt_table)
14834 - .skip IDT_ENTRIES * 16
14835 + .fill 512,8,0
14836
14837 __PAGE_ALIGNED_BSS
14838 .align PAGE_SIZE
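In the head_64.S hunk above, the magic bit number 20 is replaced by the symbolic X86_FEATURE_NX test, and when the CPU advertises NX the code sets EFER.NXE and marks the direct-map, vmalloc and vmemmap top-level page-table entries no-execute. The EFER part, rendered as a hedged C sketch with the architectural MSR number and bit positions; the rdmsr/wrmsr wrappers are stubs so it compiles in userspace, and the page-table writes are omitted:

#include <stdint.h>

#define MSR_EFER        0xc0000080u     /* extended feature enable register */
#define _EFER_SCE       0               /* syscall/sysret enable bit */
#define _EFER_NX        11              /* no-execute enable bit */

/* Stubs standing in for the rdmsr/wrmsr instructions. */
static uint64_t rdmsr_demo(uint32_t msr) { (void)msr; return 0; }
static void wrmsr_demo(uint32_t msr, uint64_t val) { (void)msr; (void)val; }

static void enable_efer_bits(int cpu_has_nx)
{
        uint64_t efer = rdmsr_demo(MSR_EFER);

        efer |= 1ull << _EFER_SCE;              /* always enable syscall/sysret */
        if (cpu_has_nx)
                efer |= 1ull << _EFER_NX;       /* enable no-execute pages */

        wrmsr_demo(MSR_EFER, efer);
}

int main(void)
{
        enable_efer_bits(1);
        return 0;
}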
14839 diff -urNp linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c
14840 --- linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14841 +++ linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14842 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14843 EXPORT_SYMBOL(cmpxchg8b_emu);
14844 #endif
14845
14846 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14847 +
14848 /* Networking helper routines. */
14849 EXPORT_SYMBOL(csum_partial_copy_generic);
14850 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14851 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14852
14853 EXPORT_SYMBOL(__get_user_1);
14854 EXPORT_SYMBOL(__get_user_2);
14855 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14856
14857 EXPORT_SYMBOL(csum_partial);
14858 EXPORT_SYMBOL(empty_zero_page);
14859 +
14860 +#ifdef CONFIG_PAX_KERNEXEC
14861 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14862 +#endif
14863 diff -urNp linux-2.6.32.43/arch/x86/kernel/i8259.c linux-2.6.32.43/arch/x86/kernel/i8259.c
14864 --- linux-2.6.32.43/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14865 +++ linux-2.6.32.43/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14866 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14867 "spurious 8259A interrupt: IRQ%d.\n", irq);
14868 spurious_irq_mask |= irqmask;
14869 }
14870 - atomic_inc(&irq_err_count);
14871 + atomic_inc_unchecked(&irq_err_count);
14872 /*
14873 * Theoretically we do not have to handle this IRQ,
14874 * but in Linux this does not cause problems and is
14875 diff -urNp linux-2.6.32.43/arch/x86/kernel/init_task.c linux-2.6.32.43/arch/x86/kernel/init_task.c
14876 --- linux-2.6.32.43/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14877 +++ linux-2.6.32.43/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14878 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14879 * way process stacks are handled. This is done by having a special
14880 * "init_task" linker map entry..
14881 */
14882 -union thread_union init_thread_union __init_task_data =
14883 - { INIT_THREAD_INFO(init_task) };
14884 +union thread_union init_thread_union __init_task_data;
14885
14886 /*
14887 * Initial task structure.
14888 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14889 * section. Since TSS's are completely CPU-local, we want them
14890 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14891 */
14892 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14893 -
14894 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14895 +EXPORT_SYMBOL(init_tss);
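The init_task.c hunk above drops the DEFINE_PER_CPU_SHARED_ALIGNED tss in favour of a plain, cacheline-aligned NR_CPUS array, and exports it; the ioport.c hunk further down then indexes it directly as init_tss + get_cpu(). A tiny sketch of the two access patterns, with stand-in types (struct tss_struct and NR_CPUS here are placeholders for the real kernel definitions):

/* Stand-ins for the kernel definitions. */
#define NR_CPUS 8
struct tss_struct { unsigned long sp0; /* ... */ };

static struct tss_struct init_tss_demo[NR_CPUS];

static struct tss_struct *tss_of(int cpu)
{
        /* was: &per_cpu(init_tss, cpu); now plain array indexing */
        return init_tss_demo + cpu;
}

int main(void)
{
        return tss_of(0) == init_tss_demo ? 0 : 1;
}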
14896 diff -urNp linux-2.6.32.43/arch/x86/kernel/ioport.c linux-2.6.32.43/arch/x86/kernel/ioport.c
14897 --- linux-2.6.32.43/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14898 +++ linux-2.6.32.43/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14899 @@ -6,6 +6,7 @@
14900 #include <linux/sched.h>
14901 #include <linux/kernel.h>
14902 #include <linux/capability.h>
14903 +#include <linux/security.h>
14904 #include <linux/errno.h>
14905 #include <linux/types.h>
14906 #include <linux/ioport.h>
14907 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14908
14909 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14910 return -EINVAL;
14911 +#ifdef CONFIG_GRKERNSEC_IO
14912 + if (turn_on && grsec_disable_privio) {
14913 + gr_handle_ioperm();
14914 + return -EPERM;
14915 + }
14916 +#endif
14917 if (turn_on && !capable(CAP_SYS_RAWIO))
14918 return -EPERM;
14919
14920 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14921 * because the ->io_bitmap_max value must match the bitmap
14922 * contents:
14923 */
14924 - tss = &per_cpu(init_tss, get_cpu());
14925 + tss = init_tss + get_cpu();
14926
14927 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14928
14929 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14930 return -EINVAL;
14931 /* Trying to gain more privileges? */
14932 if (level > old) {
14933 +#ifdef CONFIG_GRKERNSEC_IO
14934 + if (grsec_disable_privio) {
14935 + gr_handle_iopl();
14936 + return -EPERM;
14937 + }
14938 +#endif
14939 if (!capable(CAP_SYS_RAWIO))
14940 return -EPERM;
14941 }
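The ioport.c hunk above places CONFIG_GRKERNSEC_IO checks ahead of the usual CAP_SYS_RAWIO tests, so that with grsec_disable_privio set, ioperm()/iopl() are refused with -EPERM and audited through gr_handle_ioperm()/gr_handle_iopl(). The control flow reduced to a hedged userspace sketch; grsec_disable_privio and gr_handle_ioperm() are the patch's own names, stubbed here, and capable_raw_io() stands in for capable(CAP_SYS_RAWIO):

#include <errno.h>
#include <stdio.h>

static int grsec_disable_privio = 1;            /* set by grsecurity at boot */

static void gr_handle_ioperm(void) { fprintf(stderr, "denied ioperm\n"); }
static int capable_raw_io(void) { return 0; }   /* stand-in for capable(CAP_SYS_RAWIO) */

static long sys_ioperm_demo(int turn_on)
{
        if (turn_on && grsec_disable_privio) {
                gr_handle_ioperm();             /* audit the refusal */
                return -EPERM;
        }
        if (turn_on && !capable_raw_io())
                return -EPERM;
        return 0;                               /* would update the I/O bitmap here */
}

int main(void)
{
        printf("ioperm -> %ld\n", sys_ioperm_demo(1));
        return 0;
}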
14942 diff -urNp linux-2.6.32.43/arch/x86/kernel/irq_32.c linux-2.6.32.43/arch/x86/kernel/irq_32.c
14943 --- linux-2.6.32.43/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14944 +++ linux-2.6.32.43/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
14945 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14946 __asm__ __volatile__("andl %%esp,%0" :
14947 "=r" (sp) : "0" (THREAD_SIZE - 1));
14948
14949 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14950 + return sp < STACK_WARN;
14951 }
14952
14953 static void print_stack_overflow(void)
14954 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14955 * per-CPU IRQ handling contexts (thread information and stack)
14956 */
14957 union irq_ctx {
14958 - struct thread_info tinfo;
14959 - u32 stack[THREAD_SIZE/sizeof(u32)];
14960 -} __attribute__((aligned(PAGE_SIZE)));
14961 + unsigned long previous_esp;
14962 + u32 stack[THREAD_SIZE/sizeof(u32)];
14963 +} __attribute__((aligned(THREAD_SIZE)));
14964
14965 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14966 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14967 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14968 static inline int
14969 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14970 {
14971 - union irq_ctx *curctx, *irqctx;
14972 + union irq_ctx *irqctx;
14973 u32 *isp, arg1, arg2;
14974
14975 - curctx = (union irq_ctx *) current_thread_info();
14976 irqctx = __get_cpu_var(hardirq_ctx);
14977
14978 /*
14979 @@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
14980 * handler) we can't do that and just have to keep using the
14981 * current stack (which is the irq stack already after all)
14982 */
14983 - if (unlikely(curctx == irqctx))
14984 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14985 return 0;
14986
14987 /* build the stack frame on the IRQ stack */
14988 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14989 - irqctx->tinfo.task = curctx->tinfo.task;
14990 - irqctx->tinfo.previous_esp = current_stack_pointer;
14991 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14992 + irqctx->previous_esp = current_stack_pointer;
14993
14994 - /*
14995 - * Copy the softirq bits in preempt_count so that the
14996 - * softirq checks work in the hardirq context.
14997 - */
14998 - irqctx->tinfo.preempt_count =
14999 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15000 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15001 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15002 + __set_fs(MAKE_MM_SEG(0));
15003 +#endif
15004
15005 if (unlikely(overflow))
15006 call_on_stack(print_stack_overflow, isp);
15007 @@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15008 : "0" (irq), "1" (desc), "2" (isp),
15009 "D" (desc->handle_irq)
15010 : "memory", "cc", "ecx");
15011 +
15012 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15013 + __set_fs(current_thread_info()->addr_limit);
15014 +#endif
15015 +
15016 return 1;
15017 }
15018
15019 @@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15020 */
15021 void __cpuinit irq_ctx_init(int cpu)
15022 {
15023 - union irq_ctx *irqctx;
15024 -
15025 if (per_cpu(hardirq_ctx, cpu))
15026 return;
15027
15028 - irqctx = &per_cpu(hardirq_stack, cpu);
15029 - irqctx->tinfo.task = NULL;
15030 - irqctx->tinfo.exec_domain = NULL;
15031 - irqctx->tinfo.cpu = cpu;
15032 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15033 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15034 -
15035 - per_cpu(hardirq_ctx, cpu) = irqctx;
15036 -
15037 - irqctx = &per_cpu(softirq_stack, cpu);
15038 - irqctx->tinfo.task = NULL;
15039 - irqctx->tinfo.exec_domain = NULL;
15040 - irqctx->tinfo.cpu = cpu;
15041 - irqctx->tinfo.preempt_count = 0;
15042 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15043 -
15044 - per_cpu(softirq_ctx, cpu) = irqctx;
15045 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15046 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15047
15048 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15049 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15050 @@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15051 asmlinkage void do_softirq(void)
15052 {
15053 unsigned long flags;
15054 - struct thread_info *curctx;
15055 union irq_ctx *irqctx;
15056 u32 *isp;
15057
15058 @@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15059 local_irq_save(flags);
15060
15061 if (local_softirq_pending()) {
15062 - curctx = current_thread_info();
15063 irqctx = __get_cpu_var(softirq_ctx);
15064 - irqctx->tinfo.task = curctx->task;
15065 - irqctx->tinfo.previous_esp = current_stack_pointer;
15066 + irqctx->previous_esp = current_stack_pointer;
15067
15068 /* build the stack frame on the softirq stack */
15069 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15070 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15071 +
15072 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15073 + __set_fs(MAKE_MM_SEG(0));
15074 +#endif
15075
15076 call_on_stack(__do_softirq, isp);
15077 +
15078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15079 + __set_fs(current_thread_info()->addr_limit);
15080 +#endif
15081 +
15082 /*
15083 * Shouldnt happen, we returned above if in_interrupt():
15084 */
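The irq_32.c hunk above strips the thread_info copy out of the per-CPU IRQ stacks: the union keeps only previous_esp, is aligned to THREAD_SIZE, and "already on the IRQ stack" is detected by pointer distance instead of comparing thread_info pointers. A compact sketch of the new layout and check; the THREAD_SIZE value is illustrative:

#include <stdint.h>

#define THREAD_SIZE 8192        /* illustrative; typical 2.6.32 x86_32 value */

union irq_ctx_demo {
        unsigned long previous_esp;
        uint32_t stack[THREAD_SIZE / sizeof(uint32_t)];
} __attribute__((aligned(THREAD_SIZE)));

/* Nonzero if the stack pointer already lies inside this IRQ context,
 * i.e. the handler must not switch stacks again. */
static int on_irq_stack(unsigned long sp, union irq_ctx_demo *irqctx)
{
        return sp - (unsigned long)irqctx < THREAD_SIZE;
}

int main(void)
{
        static union irq_ctx_demo ctx;
        return on_irq_stack((unsigned long)&ctx + 128, &ctx) ? 0 : 1;
}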
15085 diff -urNp linux-2.6.32.43/arch/x86/kernel/irq.c linux-2.6.32.43/arch/x86/kernel/irq.c
15086 --- linux-2.6.32.43/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15087 +++ linux-2.6.32.43/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15088 @@ -15,7 +15,7 @@
15089 #include <asm/mce.h>
15090 #include <asm/hw_irq.h>
15091
15092 -atomic_t irq_err_count;
15093 +atomic_unchecked_t irq_err_count;
15094
15095 /* Function pointer for generic interrupt vector handling */
15096 void (*generic_interrupt_extension)(void) = NULL;
15097 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15098 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15099 seq_printf(p, " Machine check polls\n");
15100 #endif
15101 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15102 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15103 #if defined(CONFIG_X86_IO_APIC)
15104 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15105 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15106 #endif
15107 return 0;
15108 }
15109 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15110
15111 u64 arch_irq_stat(void)
15112 {
15113 - u64 sum = atomic_read(&irq_err_count);
15114 + u64 sum = atomic_read_unchecked(&irq_err_count);
15115
15116 #ifdef CONFIG_X86_IO_APIC
15117 - sum += atomic_read(&irq_mis_count);
15118 + sum += atomic_read_unchecked(&irq_mis_count);
15119 #endif
15120 return sum;
15121 }
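The i8259.c and irq.c hunks above move irq_err_count and irq_mis_count to atomic_unchecked_t with atomic_*_unchecked accessors. Under the PaX REFCOUNT feature, plain atomic_t gets overflow detection; pure statistics counters that may legitimately wrap are marked "unchecked" to opt out. A minimal userspace analogue of the split, assuming it is simply a second type with the same operations and no overflow trap (the _demo names are not the kernel's):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_demo_t;

/* Statistics counter: wrapping is harmless, so no overflow detection. */
static inline void atomic_inc_unchecked_demo(atomic_unchecked_demo_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_demo(const atomic_unchecked_demo_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
        atomic_unchecked_demo_t irq_err_count = { 0 };

        atomic_inc_unchecked_demo(&irq_err_count);
        printf("ERR: %d\n", atomic_read_unchecked_demo(&irq_err_count));
        return 0;
}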
15122 diff -urNp linux-2.6.32.43/arch/x86/kernel/kgdb.c linux-2.6.32.43/arch/x86/kernel/kgdb.c
15123 --- linux-2.6.32.43/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15124 +++ linux-2.6.32.43/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15125 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15126
15127 /* clear the trace bit */
15128 linux_regs->flags &= ~X86_EFLAGS_TF;
15129 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15130 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15131
15132 /* set the trace bit if we're stepping */
15133 if (remcomInBuffer[0] == 's') {
15134 linux_regs->flags |= X86_EFLAGS_TF;
15135 kgdb_single_step = 1;
15136 - atomic_set(&kgdb_cpu_doing_single_step,
15137 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15138 raw_smp_processor_id());
15139 }
15140
15141 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15142 break;
15143
15144 case DIE_DEBUG:
15145 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
15146 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15147 raw_smp_processor_id()) {
15148 if (user_mode(regs))
15149 return single_step_cont(regs, args);
15150 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15151 return instruction_pointer(regs);
15152 }
15153
15154 -struct kgdb_arch arch_kgdb_ops = {
15155 +const struct kgdb_arch arch_kgdb_ops = {
15156 /* Breakpoint instruction: */
15157 .gdb_bpt_instr = { 0xcc },
15158 .flags = KGDB_HW_BREAKPOINT,
15159 diff -urNp linux-2.6.32.43/arch/x86/kernel/kprobes.c linux-2.6.32.43/arch/x86/kernel/kprobes.c
15160 --- linux-2.6.32.43/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15161 +++ linux-2.6.32.43/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15162 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15163 char op;
15164 s32 raddr;
15165 } __attribute__((packed)) * jop;
15166 - jop = (struct __arch_jmp_op *)from;
15167 +
15168 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15169 +
15170 + pax_open_kernel();
15171 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15172 jop->op = RELATIVEJUMP_INSTRUCTION;
15173 + pax_close_kernel();
15174 }
15175
15176 /*
15177 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15178 kprobe_opcode_t opcode;
15179 kprobe_opcode_t *orig_opcodes = opcodes;
15180
15181 - if (search_exception_tables((unsigned long)opcodes))
15182 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15183 return 0; /* Page fault may occur on this address. */
15184
15185 retry:
15186 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15187 disp = (u8 *) p->addr + *((s32 *) insn) -
15188 (u8 *) p->ainsn.insn;
15189 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15190 + pax_open_kernel();
15191 *(s32 *)insn = (s32) disp;
15192 + pax_close_kernel();
15193 }
15194 }
15195 #endif
15196 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15197
15198 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15199 {
15200 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15201 + pax_open_kernel();
15202 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15203 + pax_close_kernel();
15204
15205 fix_riprel(p);
15206
15207 - if (can_boost(p->addr))
15208 + if (can_boost(ktla_ktva(p->addr)))
15209 p->ainsn.boostable = 0;
15210 else
15211 p->ainsn.boostable = -1;
15212
15213 - p->opcode = *p->addr;
15214 + p->opcode = *(ktla_ktva(p->addr));
15215 }
15216
15217 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15218 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15219 if (p->opcode == BREAKPOINT_INSTRUCTION)
15220 regs->ip = (unsigned long)p->addr;
15221 else
15222 - regs->ip = (unsigned long)p->ainsn.insn;
15223 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15224 }
15225
15226 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15227 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15228 if (p->ainsn.boostable == 1 && !p->post_handler) {
15229 /* Boost up -- we can execute copied instructions directly */
15230 reset_current_kprobe();
15231 - regs->ip = (unsigned long)p->ainsn.insn;
15232 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15233 preempt_enable_no_resched();
15234 return;
15235 }
15236 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15237 struct kprobe_ctlblk *kcb;
15238
15239 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15240 - if (*addr != BREAKPOINT_INSTRUCTION) {
15241 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15242 /*
15243 * The breakpoint instruction was removed right
15244 * after we hit it. Another cpu has removed
15245 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15246 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15247 {
15248 unsigned long *tos = stack_addr(regs);
15249 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15250 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15251 unsigned long orig_ip = (unsigned long)p->addr;
15252 kprobe_opcode_t *insn = p->ainsn.insn;
15253
15254 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15255 struct die_args *args = data;
15256 int ret = NOTIFY_DONE;
15257
15258 - if (args->regs && user_mode_vm(args->regs))
15259 + if (args->regs && user_mode(args->regs))
15260 return ret;
15261
15262 switch (val) {
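The kprobes.c hunk above brackets every write into kernel text (copying the probed instruction, fixing RIP-relative displacements, planting the jump) with pax_open_kernel()/pax_close_kernel(), and reads the original bytes through ktla_ktva(). With KERNEXEC the text mapping is normally read-only, so modifications happen in a short open/write/close window. A hedged sketch of the pattern; the open/close bodies are placeholders for the real mechanism (e.g. temporarily allowing writes to read-only text):

#include <string.h>

/* Placeholders: the real implementations briefly permit writes to
 * otherwise read-only kernel text. */
static void pax_open_kernel_demo(void)  { }
static void pax_close_kernel_demo(void) { }

#define MAX_INSN_SIZE 16

struct kprobe_demo {
        unsigned char *addr;                    /* probed instruction */
        unsigned char insn[MAX_INSN_SIZE];      /* out-of-line copy */
};

static void arch_copy_kprobe_demo(struct kprobe_demo *p)
{
        pax_open_kernel_demo();
        /* the patch additionally maps p->addr via ktla_ktva() */
        memcpy(p->insn, p->addr, MAX_INSN_SIZE);
        pax_close_kernel_demo();
}

int main(void)
{
        unsigned char text[MAX_INSN_SIZE] = { 0x90 };   /* a nop to "probe" */
        struct kprobe_demo p = { .addr = text };

        arch_copy_kprobe_demo(&p);
        return p.insn[0] == 0x90 ? 0 : 1;
}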
15263 diff -urNp linux-2.6.32.43/arch/x86/kernel/ldt.c linux-2.6.32.43/arch/x86/kernel/ldt.c
15264 --- linux-2.6.32.43/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15265 +++ linux-2.6.32.43/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15266 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15267 if (reload) {
15268 #ifdef CONFIG_SMP
15269 preempt_disable();
15270 - load_LDT(pc);
15271 + load_LDT_nolock(pc);
15272 if (!cpumask_equal(mm_cpumask(current->mm),
15273 cpumask_of(smp_processor_id())))
15274 smp_call_function(flush_ldt, current->mm, 1);
15275 preempt_enable();
15276 #else
15277 - load_LDT(pc);
15278 + load_LDT_nolock(pc);
15279 #endif
15280 }
15281 if (oldsize) {
15282 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15283 return err;
15284
15285 for (i = 0; i < old->size; i++)
15286 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15287 + write_ldt_entry(new->ldt, i, old->ldt + i);
15288 return 0;
15289 }
15290
15291 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15292 retval = copy_ldt(&mm->context, &old_mm->context);
15293 mutex_unlock(&old_mm->context.lock);
15294 }
15295 +
15296 + if (tsk == current) {
15297 + mm->context.vdso = 0;
15298 +
15299 +#ifdef CONFIG_X86_32
15300 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15301 + mm->context.user_cs_base = 0UL;
15302 + mm->context.user_cs_limit = ~0UL;
15303 +
15304 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15305 + cpus_clear(mm->context.cpu_user_cs_mask);
15306 +#endif
15307 +
15308 +#endif
15309 +#endif
15310 +
15311 + }
15312 +
15313 return retval;
15314 }
15315
15316 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15317 }
15318 }
15319
15320 +#ifdef CONFIG_PAX_SEGMEXEC
15321 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15322 + error = -EINVAL;
15323 + goto out_unlock;
15324 + }
15325 +#endif
15326 +
15327 fill_ldt(&ldt, &ldt_info);
15328 if (oldmode)
15329 ldt.avl = 0;
15330 diff -urNp linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c
15331 --- linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15332 +++ linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15333 @@ -26,7 +26,7 @@
15334 #include <asm/system.h>
15335 #include <asm/cacheflush.h>
15336
15337 -static void set_idt(void *newidt, __u16 limit)
15338 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15339 {
15340 struct desc_ptr curidt;
15341
15342 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15343 }
15344
15345
15346 -static void set_gdt(void *newgdt, __u16 limit)
15347 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15348 {
15349 struct desc_ptr curgdt;
15350
15351 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15352 }
15353
15354 control_page = page_address(image->control_code_page);
15355 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15356 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15357
15358 relocate_kernel_ptr = control_page;
15359 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15360 diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_amd.c linux-2.6.32.43/arch/x86/kernel/microcode_amd.c
15361 --- linux-2.6.32.43/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15362 +++ linux-2.6.32.43/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15363 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15364 uci->mc = NULL;
15365 }
15366
15367 -static struct microcode_ops microcode_amd_ops = {
15368 +static const struct microcode_ops microcode_amd_ops = {
15369 .request_microcode_user = request_microcode_user,
15370 .request_microcode_fw = request_microcode_fw,
15371 .collect_cpu_info = collect_cpu_info_amd,
15372 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15373 .microcode_fini_cpu = microcode_fini_cpu_amd,
15374 };
15375
15376 -struct microcode_ops * __init init_amd_microcode(void)
15377 +const struct microcode_ops * __init init_amd_microcode(void)
15378 {
15379 return &microcode_amd_ops;
15380 }
15381 diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_core.c linux-2.6.32.43/arch/x86/kernel/microcode_core.c
15382 --- linux-2.6.32.43/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15383 +++ linux-2.6.32.43/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15384 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15385
15386 #define MICROCODE_VERSION "2.00"
15387
15388 -static struct microcode_ops *microcode_ops;
15389 +static const struct microcode_ops *microcode_ops;
15390
15391 /*
15392 * Synchronization.
15393 diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_intel.c linux-2.6.32.43/arch/x86/kernel/microcode_intel.c
15394 --- linux-2.6.32.43/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15395 +++ linux-2.6.32.43/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15396 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15397
15398 static int get_ucode_user(void *to, const void *from, size_t n)
15399 {
15400 - return copy_from_user(to, from, n);
15401 + return copy_from_user(to, (__force const void __user *)from, n);
15402 }
15403
15404 static enum ucode_state
15405 request_microcode_user(int cpu, const void __user *buf, size_t size)
15406 {
15407 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15408 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15409 }
15410
15411 static void microcode_fini_cpu(int cpu)
15412 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15413 uci->mc = NULL;
15414 }
15415
15416 -static struct microcode_ops microcode_intel_ops = {
15417 +static const struct microcode_ops microcode_intel_ops = {
15418 .request_microcode_user = request_microcode_user,
15419 .request_microcode_fw = request_microcode_fw,
15420 .collect_cpu_info = collect_cpu_info,
15421 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15422 .microcode_fini_cpu = microcode_fini_cpu,
15423 };
15424
15425 -struct microcode_ops * __init init_intel_microcode(void)
15426 +const struct microcode_ops * __init init_intel_microcode(void)
15427 {
15428 return &microcode_intel_ops;
15429 }
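The kgdb.c and microcode_*.c hunks above constify method tables (struct kgdb_arch, struct microcode_ops) and the pointers that hold them, so these function-pointer tables end up in read-only data and cannot be silently rewritten at run time. The same idiom in a few lines of standalone C (names are illustrative):

#include <stdio.h>

struct ops_demo {
        int (*collect_cpu_info)(int cpu);
};

static int collect_demo(int cpu) { return cpu; }

/* const table of function pointers: placed in .rodata by the compiler */
static const struct ops_demo microcode_demo_ops = {
        .collect_cpu_info = collect_demo,
};

static const struct ops_demo *ops = &microcode_demo_ops;

int main(void)
{
        printf("cpu info: %d\n", ops->collect_cpu_info(0));
        return 0;
}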
15430 diff -urNp linux-2.6.32.43/arch/x86/kernel/module.c linux-2.6.32.43/arch/x86/kernel/module.c
15431 --- linux-2.6.32.43/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15432 +++ linux-2.6.32.43/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15433 @@ -34,7 +34,7 @@
15434 #define DEBUGP(fmt...)
15435 #endif
15436
15437 -void *module_alloc(unsigned long size)
15438 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15439 {
15440 struct vm_struct *area;
15441
15442 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15443 if (!area)
15444 return NULL;
15445
15446 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15447 - PAGE_KERNEL_EXEC);
15448 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15449 +}
15450 +
15451 +void *module_alloc(unsigned long size)
15452 +{
15453 +
15454 +#ifdef CONFIG_PAX_KERNEXEC
15455 + return __module_alloc(size, PAGE_KERNEL);
15456 +#else
15457 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15458 +#endif
15459 +
15460 }
15461
15462 /* Free memory returned from module_alloc */
15463 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15464 vfree(module_region);
15465 }
15466
15467 +#ifdef CONFIG_PAX_KERNEXEC
15468 +#ifdef CONFIG_X86_32
15469 +void *module_alloc_exec(unsigned long size)
15470 +{
15471 + struct vm_struct *area;
15472 +
15473 + if (size == 0)
15474 + return NULL;
15475 +
15476 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15477 + return area ? area->addr : NULL;
15478 +}
15479 +EXPORT_SYMBOL(module_alloc_exec);
15480 +
15481 +void module_free_exec(struct module *mod, void *module_region)
15482 +{
15483 + vunmap(module_region);
15484 +}
15485 +EXPORT_SYMBOL(module_free_exec);
15486 +#else
15487 +void module_free_exec(struct module *mod, void *module_region)
15488 +{
15489 + module_free(mod, module_region);
15490 +}
15491 +EXPORT_SYMBOL(module_free_exec);
15492 +
15493 +void *module_alloc_exec(unsigned long size)
15494 +{
15495 + return __module_alloc(size, PAGE_KERNEL_RX);
15496 +}
15497 +EXPORT_SYMBOL(module_alloc_exec);
15498 +#endif
15499 +#endif
15500 +
15501 /* We don't need anything special. */
15502 int module_frob_arch_sections(Elf_Ehdr *hdr,
15503 Elf_Shdr *sechdrs,
15504 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15505 unsigned int i;
15506 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15507 Elf32_Sym *sym;
15508 - uint32_t *location;
15509 + uint32_t *plocation, location;
15510
15511 DEBUGP("Applying relocate section %u to %u\n", relsec,
15512 sechdrs[relsec].sh_info);
15513 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15514 /* This is where to make the change */
15515 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15516 - + rel[i].r_offset;
15517 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15518 + location = (uint32_t)plocation;
15519 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15520 + plocation = ktla_ktva((void *)plocation);
15521 /* This is the symbol it is referring to. Note that all
15522 undefined symbols have been resolved. */
15523 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15524 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15525 switch (ELF32_R_TYPE(rel[i].r_info)) {
15526 case R_386_32:
15527 /* We add the value into the location given */
15528 - *location += sym->st_value;
15529 + pax_open_kernel();
15530 + *plocation += sym->st_value;
15531 + pax_close_kernel();
15532 break;
15533 case R_386_PC32:
15534 /* Add the value, subtract its postition */
15535 - *location += sym->st_value - (uint32_t)location;
15536 + pax_open_kernel();
15537 + *plocation += sym->st_value - location;
15538 + pax_close_kernel();
15539 break;
15540 default:
15541 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15542 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15543 case R_X86_64_NONE:
15544 break;
15545 case R_X86_64_64:
15546 + pax_open_kernel();
15547 *(u64 *)loc = val;
15548 + pax_close_kernel();
15549 break;
15550 case R_X86_64_32:
15551 + pax_open_kernel();
15552 *(u32 *)loc = val;
15553 + pax_close_kernel();
15554 if (val != *(u32 *)loc)
15555 goto overflow;
15556 break;
15557 case R_X86_64_32S:
15558 + pax_open_kernel();
15559 *(s32 *)loc = val;
15560 + pax_close_kernel();
15561 if ((s64)val != *(s32 *)loc)
15562 goto overflow;
15563 break;
15564 case R_X86_64_PC32:
15565 val -= (u64)loc;
15566 + pax_open_kernel();
15567 *(u32 *)loc = val;
15568 + pax_close_kernel();
15569 +
15570 #if 0
15571 if ((s64)val != *(s32 *)loc)
15572 goto overflow;
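The module.c hunk above does two things: module_alloc() stops handing out executable mappings (plain PAGE_KERNEL under KERNEXEC, with a separate module_alloc_exec()/module_free_exec() pair for module text), and relocation writes go through pax_open_kernel()/pax_close_kernel() and a ktla_ktva()-translated address when the target section is executable. A sketch of the allocation split only, with stand-in protection flags and malloc standing in for the kernel allocator:

#include <stdlib.h>

/* Stand-ins for pgprot values; the real ones are PAGE_KERNEL (writable,
 * non-executable) and an executable mapping for text. */
enum prot_demo { PROT_RW_NX, PROT_RX };

static void *module_alloc_prot(unsigned long size, enum prot_demo prot)
{
        (void)prot;                     /* a real kernel maps with this prot */
        return size ? malloc(size) : NULL;
}

/* Module data: writable, never executable. */
static void *module_alloc_demo(unsigned long size)
{
        return module_alloc_prot(size, PROT_RW_NX);
}

/* Module text: executable, not writable once relocated. */
static void *module_alloc_exec_demo(unsigned long size)
{
        return module_alloc_prot(size, PROT_RX);
}

int main(void)
{
        void *data = module_alloc_demo(128);
        void *text = module_alloc_exec_demo(128);

        free(data);
        free(text);
        return 0;
}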
15573 diff -urNp linux-2.6.32.43/arch/x86/kernel/paravirt.c linux-2.6.32.43/arch/x86/kernel/paravirt.c
15574 --- linux-2.6.32.43/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15575 +++ linux-2.6.32.43/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15576 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15577 * corresponding structure. */
15578 static void *get_call_destination(u8 type)
15579 {
15580 - struct paravirt_patch_template tmpl = {
15581 + const struct paravirt_patch_template tmpl = {
15582 .pv_init_ops = pv_init_ops,
15583 .pv_time_ops = pv_time_ops,
15584 .pv_cpu_ops = pv_cpu_ops,
15585 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15586 .pv_lock_ops = pv_lock_ops,
15587 #endif
15588 };
15589 +
15590 + pax_track_stack();
15591 +
15592 return *((void **)&tmpl + type);
15593 }
15594
15595 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15596 if (opfunc == NULL)
15597 /* If there's no function, patch it with a ud2a (BUG) */
15598 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15599 - else if (opfunc == _paravirt_nop)
15600 + else if (opfunc == (void *)_paravirt_nop)
15601 /* If the operation is a nop, then nop the callsite */
15602 ret = paravirt_patch_nop();
15603
15604 /* identity functions just return their single argument */
15605 - else if (opfunc == _paravirt_ident_32)
15606 + else if (opfunc == (void *)_paravirt_ident_32)
15607 ret = paravirt_patch_ident_32(insnbuf, len);
15608 - else if (opfunc == _paravirt_ident_64)
15609 + else if (opfunc == (void *)_paravirt_ident_64)
15610 ret = paravirt_patch_ident_64(insnbuf, len);
15611
15612 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15613 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15614 if (insn_len > len || start == NULL)
15615 insn_len = len;
15616 else
15617 - memcpy(insnbuf, start, insn_len);
15618 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15619
15620 return insn_len;
15621 }
15622 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15623 preempt_enable();
15624 }
15625
15626 -struct pv_info pv_info = {
15627 +struct pv_info pv_info __read_only = {
15628 .name = "bare hardware",
15629 .paravirt_enabled = 0,
15630 .kernel_rpl = 0,
15631 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15632 };
15633
15634 -struct pv_init_ops pv_init_ops = {
15635 +struct pv_init_ops pv_init_ops __read_only = {
15636 .patch = native_patch,
15637 };
15638
15639 -struct pv_time_ops pv_time_ops = {
15640 +struct pv_time_ops pv_time_ops __read_only = {
15641 .sched_clock = native_sched_clock,
15642 };
15643
15644 -struct pv_irq_ops pv_irq_ops = {
15645 +struct pv_irq_ops pv_irq_ops __read_only = {
15646 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15647 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15648 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15649 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15650 #endif
15651 };
15652
15653 -struct pv_cpu_ops pv_cpu_ops = {
15654 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15655 .cpuid = native_cpuid,
15656 .get_debugreg = native_get_debugreg,
15657 .set_debugreg = native_set_debugreg,
15658 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15659 .end_context_switch = paravirt_nop,
15660 };
15661
15662 -struct pv_apic_ops pv_apic_ops = {
15663 +struct pv_apic_ops pv_apic_ops __read_only = {
15664 #ifdef CONFIG_X86_LOCAL_APIC
15665 .startup_ipi_hook = paravirt_nop,
15666 #endif
15667 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15668 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15669 #endif
15670
15671 -struct pv_mmu_ops pv_mmu_ops = {
15672 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15673
15674 .read_cr2 = native_read_cr2,
15675 .write_cr2 = native_write_cr2,
15676 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15677 },
15678
15679 .set_fixmap = native_set_fixmap,
15680 +
15681 +#ifdef CONFIG_PAX_KERNEXEC
15682 + .pax_open_kernel = native_pax_open_kernel,
15683 + .pax_close_kernel = native_pax_close_kernel,
15684 +#endif
15685 +
15686 };
15687
15688 EXPORT_SYMBOL_GPL(pv_time_ops);
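The paravirt.c hunk above tags pv_info, pv_init_ops, pv_time_ops, pv_irq_ops, pv_cpu_ops, pv_apic_ops and pv_mmu_ops with __read_only, and the pv_lock_ops change immediately below does the same, moving these heavily targeted function-pointer tables into a write-protected data section after boot. A hedged sketch of what such an attribute can expand to; the macro body and section name here are assumptions for illustration, not the patch's actual definition:

/* Assumed definition: place the object in a dedicated section that the
 * kernel write-protects once initialization is done. */
#define __read_only_demo __attribute__((__section__(".data..read_only")))

struct pv_irq_ops_demo {
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long flags);
};

static unsigned long native_save_fl_demo(void) { return 0; }
static void native_restore_fl_demo(unsigned long flags) { (void)flags; }

struct pv_irq_ops_demo pv_irq_ops_demo __read_only_demo = {
        .save_fl    = native_save_fl_demo,
        .restore_fl = native_restore_fl_demo,
};

int main(void)
{
        return pv_irq_ops_demo.save_fl() == 0 ? 0 : 1;
}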
15689 diff -urNp linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c
15690 --- linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15691 +++ linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15692 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15693 __raw_spin_lock(lock);
15694 }
15695
15696 -struct pv_lock_ops pv_lock_ops = {
15697 +struct pv_lock_ops pv_lock_ops __read_only = {
15698 #ifdef CONFIG_SMP
15699 .spin_is_locked = __ticket_spin_is_locked,
15700 .spin_is_contended = __ticket_spin_is_contended,
15701 diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c
15702 --- linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15703 +++ linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15704 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15705 free_pages((unsigned long)vaddr, get_order(size));
15706 }
15707
15708 -static struct dma_map_ops calgary_dma_ops = {
15709 +static const struct dma_map_ops calgary_dma_ops = {
15710 .alloc_coherent = calgary_alloc_coherent,
15711 .free_coherent = calgary_free_coherent,
15712 .map_sg = calgary_map_sg,
15713 diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-dma.c linux-2.6.32.43/arch/x86/kernel/pci-dma.c
15714 --- linux-2.6.32.43/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15715 +++ linux-2.6.32.43/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15716 @@ -14,7 +14,7 @@
15717
15718 static int forbid_dac __read_mostly;
15719
15720 -struct dma_map_ops *dma_ops;
15721 +const struct dma_map_ops *dma_ops;
15722 EXPORT_SYMBOL(dma_ops);
15723
15724 static int iommu_sac_force __read_mostly;
15725 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15726
15727 int dma_supported(struct device *dev, u64 mask)
15728 {
15729 - struct dma_map_ops *ops = get_dma_ops(dev);
15730 + const struct dma_map_ops *ops = get_dma_ops(dev);
15731
15732 #ifdef CONFIG_PCI
15733 if (mask > 0xffffffff && forbid_dac > 0) {
15734 diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c
15735 --- linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15736 +++ linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15737 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15738 return -1;
15739 }
15740
15741 -static struct dma_map_ops gart_dma_ops = {
15742 +static const struct dma_map_ops gart_dma_ops = {
15743 .map_sg = gart_map_sg,
15744 .unmap_sg = gart_unmap_sg,
15745 .map_page = gart_map_page,
15746 diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-nommu.c linux-2.6.32.43/arch/x86/kernel/pci-nommu.c
15747 --- linux-2.6.32.43/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15748 +++ linux-2.6.32.43/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15749 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15750 flush_write_buffers();
15751 }
15752
15753 -struct dma_map_ops nommu_dma_ops = {
15754 +const struct dma_map_ops nommu_dma_ops = {
15755 .alloc_coherent = dma_generic_alloc_coherent,
15756 .free_coherent = nommu_free_coherent,
15757 .map_sg = nommu_map_sg,
15758 diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c
15759 --- linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15760 +++ linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15761 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15762 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15763 }
15764
15765 -static struct dma_map_ops swiotlb_dma_ops = {
15766 +static const struct dma_map_ops swiotlb_dma_ops = {
15767 .mapping_error = swiotlb_dma_mapping_error,
15768 .alloc_coherent = x86_swiotlb_alloc_coherent,
15769 .free_coherent = swiotlb_free_coherent,
15770 diff -urNp linux-2.6.32.43/arch/x86/kernel/process_32.c linux-2.6.32.43/arch/x86/kernel/process_32.c
15771 --- linux-2.6.32.43/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15772 +++ linux-2.6.32.43/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15773 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15774 unsigned long thread_saved_pc(struct task_struct *tsk)
15775 {
15776 return ((unsigned long *)tsk->thread.sp)[3];
15777 +//XXX return tsk->thread.eip;
15778 }
15779
15780 #ifndef CONFIG_SMP
15781 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15782 unsigned short ss, gs;
15783 const char *board;
15784
15785 - if (user_mode_vm(regs)) {
15786 + if (user_mode(regs)) {
15787 sp = regs->sp;
15788 ss = regs->ss & 0xffff;
15789 - gs = get_user_gs(regs);
15790 } else {
15791 sp = (unsigned long) (&regs->sp);
15792 savesegment(ss, ss);
15793 - savesegment(gs, gs);
15794 }
15795 + gs = get_user_gs(regs);
15796
15797 printk("\n");
15798
15799 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15800 regs.bx = (unsigned long) fn;
15801 regs.dx = (unsigned long) arg;
15802
15803 - regs.ds = __USER_DS;
15804 - regs.es = __USER_DS;
15805 + regs.ds = __KERNEL_DS;
15806 + regs.es = __KERNEL_DS;
15807 regs.fs = __KERNEL_PERCPU;
15808 - regs.gs = __KERNEL_STACK_CANARY;
15809 + savesegment(gs, regs.gs);
15810 regs.orig_ax = -1;
15811 regs.ip = (unsigned long) kernel_thread_helper;
15812 regs.cs = __KERNEL_CS | get_kernel_rpl();
15813 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15814 struct task_struct *tsk;
15815 int err;
15816
15817 - childregs = task_pt_regs(p);
15818 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15819 *childregs = *regs;
15820 childregs->ax = 0;
15821 childregs->sp = sp;
15822
15823 p->thread.sp = (unsigned long) childregs;
15824 p->thread.sp0 = (unsigned long) (childregs+1);
15825 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15826
15827 p->thread.ip = (unsigned long) ret_from_fork;
15828
15829 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
15830 struct thread_struct *prev = &prev_p->thread,
15831 *next = &next_p->thread;
15832 int cpu = smp_processor_id();
15833 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15834 + struct tss_struct *tss = init_tss + cpu;
15835 bool preload_fpu;
15836
15837 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15838 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
15839 */
15840 lazy_save_gs(prev->gs);
15841
15842 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15843 + __set_fs(task_thread_info(next_p)->addr_limit);
15844 +#endif
15845 +
15846 /*
15847 * Load the per-thread Thread-Local Storage descriptor.
15848 */
15849 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
15850 */
15851 arch_end_context_switch(next_p);
15852
15853 + percpu_write(current_task, next_p);
15854 + percpu_write(current_tinfo, &next_p->tinfo);
15855 +
15856 if (preload_fpu)
15857 __math_state_restore();
15858
15859 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
15860 if (prev->gs | next->gs)
15861 lazy_load_gs(next->gs);
15862
15863 - percpu_write(current_task, next_p);
15864 -
15865 return prev_p;
15866 }
15867
15868 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
15869 } while (count++ < 16);
15870 return 0;
15871 }
15872 -
15873 diff -urNp linux-2.6.32.43/arch/x86/kernel/process_64.c linux-2.6.32.43/arch/x86/kernel/process_64.c
15874 --- linux-2.6.32.43/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
15875 +++ linux-2.6.32.43/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
15876 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15877 void exit_idle(void)
15878 {
15879 /* idle loop has pid 0 */
15880 - if (current->pid)
15881 + if (task_pid_nr(current))
15882 return;
15883 __exit_idle();
15884 }
15885 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15886 if (!board)
15887 board = "";
15888 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15889 - current->pid, current->comm, print_tainted(),
15890 + task_pid_nr(current), current->comm, print_tainted(),
15891 init_utsname()->release,
15892 (int)strcspn(init_utsname()->version, " "),
15893 init_utsname()->version, board);
15894 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15895 struct pt_regs *childregs;
15896 struct task_struct *me = current;
15897
15898 - childregs = ((struct pt_regs *)
15899 - (THREAD_SIZE + task_stack_page(p))) - 1;
15900 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15901 *childregs = *regs;
15902
15903 childregs->ax = 0;
15904 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15905 p->thread.sp = (unsigned long) childregs;
15906 p->thread.sp0 = (unsigned long) (childregs+1);
15907 p->thread.usersp = me->thread.usersp;
15908 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15909
15910 set_tsk_thread_flag(p, TIF_FORK);
15911
15912 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
15913 struct thread_struct *prev = &prev_p->thread;
15914 struct thread_struct *next = &next_p->thread;
15915 int cpu = smp_processor_id();
15916 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15917 + struct tss_struct *tss = init_tss + cpu;
15918 unsigned fsindex, gsindex;
15919 bool preload_fpu;
15920
15921 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
15922 prev->usersp = percpu_read(old_rsp);
15923 percpu_write(old_rsp, next->usersp);
15924 percpu_write(current_task, next_p);
15925 + percpu_write(current_tinfo, &next_p->tinfo);
15926
15927 - percpu_write(kernel_stack,
15928 - (unsigned long)task_stack_page(next_p) +
15929 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15930 + percpu_write(kernel_stack, next->sp0);
15931
15932 /*
15933 * Now maybe reload the debug registers and handle I/O bitmaps
15934 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
15935 if (!p || p == current || p->state == TASK_RUNNING)
15936 return 0;
15937 stack = (unsigned long)task_stack_page(p);
15938 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15939 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15940 return 0;
15941 fp = *(u64 *)(p->thread.sp);
15942 do {
15943 - if (fp < (unsigned long)stack ||
15944 - fp >= (unsigned long)stack+THREAD_SIZE)
15945 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15946 return 0;
15947 ip = *(u64 *)(fp+8);
15948 if (!in_sched_functions(ip))
15949 diff -urNp linux-2.6.32.43/arch/x86/kernel/process.c linux-2.6.32.43/arch/x86/kernel/process.c
15950 --- linux-2.6.32.43/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15951 +++ linux-2.6.32.43/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15952 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15953
15954 void free_thread_info(struct thread_info *ti)
15955 {
15956 - free_thread_xstate(ti->task);
15957 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15958 }
15959
15960 +static struct kmem_cache *task_struct_cachep;
15961 +
15962 void arch_task_cache_init(void)
15963 {
15964 - task_xstate_cachep =
15965 - kmem_cache_create("task_xstate", xstate_size,
15966 + /* create a slab on which task_structs can be allocated */
15967 + task_struct_cachep =
15968 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15969 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15970 +
15971 + task_xstate_cachep =
15972 + kmem_cache_create("task_xstate", xstate_size,
15973 __alignof__(union thread_xstate),
15974 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15975 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15976 +}
15977 +
15978 +struct task_struct *alloc_task_struct(void)
15979 +{
15980 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15981 +}
15982 +
15983 +void free_task_struct(struct task_struct *task)
15984 +{
15985 + free_thread_xstate(task);
15986 + kmem_cache_free(task_struct_cachep, task);
15987 }
15988
15989 /*
15990 @@ -73,7 +90,7 @@ void exit_thread(void)
15991 unsigned long *bp = t->io_bitmap_ptr;
15992
15993 if (bp) {
15994 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15995 + struct tss_struct *tss = init_tss + get_cpu();
15996
15997 t->io_bitmap_ptr = NULL;
15998 clear_thread_flag(TIF_IO_BITMAP);
15999 @@ -93,6 +110,9 @@ void flush_thread(void)
16000
16001 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16002
16003 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16004 + loadsegment(gs, 0);
16005 +#endif
16006 tsk->thread.debugreg0 = 0;
16007 tsk->thread.debugreg1 = 0;
16008 tsk->thread.debugreg2 = 0;
16009 @@ -307,7 +327,7 @@ void default_idle(void)
16010 EXPORT_SYMBOL(default_idle);
16011 #endif
16012
16013 -void stop_this_cpu(void *dummy)
16014 +__noreturn void stop_this_cpu(void *dummy)
16015 {
16016 local_irq_disable();
16017 /*
16018 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16019 }
16020 early_param("idle", idle_setup);
16021
16022 -unsigned long arch_align_stack(unsigned long sp)
16023 +#ifdef CONFIG_PAX_RANDKSTACK
16024 +asmlinkage void pax_randomize_kstack(void)
16025 {
16026 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16027 - sp -= get_random_int() % 8192;
16028 - return sp & ~0xf;
16029 -}
16030 + struct thread_struct *thread = &current->thread;
16031 + unsigned long time;
16032
16033 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16034 -{
16035 - unsigned long range_end = mm->brk + 0x02000000;
16036 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16037 + if (!randomize_va_space)
16038 + return;
16039 +
16040 + rdtscl(time);
16041 +
16042 + /* P4 seems to return a 0 LSB, ignore it */
16043 +#ifdef CONFIG_MPENTIUM4
16044 + time &= 0x3EUL;
16045 + time <<= 2;
16046 +#elif defined(CONFIG_X86_64)
16047 + time &= 0xFUL;
16048 + time <<= 4;
16049 +#else
16050 + time &= 0x1FUL;
16051 + time <<= 3;
16052 +#endif
16053 +
16054 + thread->sp0 ^= time;
16055 + load_sp0(init_tss + smp_processor_id(), thread);
16056 +
16057 +#ifdef CONFIG_X86_64
16058 + percpu_write(kernel_stack, thread->sp0);
16059 +#endif
16060 }
16061 +#endif
16062
16063 diff -urNp linux-2.6.32.43/arch/x86/kernel/ptrace.c linux-2.6.32.43/arch/x86/kernel/ptrace.c
16064 --- linux-2.6.32.43/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16065 +++ linux-2.6.32.43/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16066 @@ -925,7 +925,7 @@ static const struct user_regset_view use
16067 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16068 {
16069 int ret;
16070 - unsigned long __user *datap = (unsigned long __user *)data;
16071 + unsigned long __user *datap = (__force unsigned long __user *)data;
16072
16073 switch (request) {
16074 /* read the word at location addr in the USER area. */
16075 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16076 if (addr < 0)
16077 return -EIO;
16078 ret = do_get_thread_area(child, addr,
16079 - (struct user_desc __user *) data);
16080 + (__force struct user_desc __user *) data);
16081 break;
16082
16083 case PTRACE_SET_THREAD_AREA:
16084 if (addr < 0)
16085 return -EIO;
16086 ret = do_set_thread_area(child, addr,
16087 - (struct user_desc __user *) data, 0);
16088 + (__force struct user_desc __user *) data, 0);
16089 break;
16090 #endif
16091
16092 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16093 #ifdef CONFIG_X86_PTRACE_BTS
16094 case PTRACE_BTS_CONFIG:
16095 ret = ptrace_bts_config
16096 - (child, data, (struct ptrace_bts_config __user *)addr);
16097 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16098 break;
16099
16100 case PTRACE_BTS_STATUS:
16101 ret = ptrace_bts_status
16102 - (child, data, (struct ptrace_bts_config __user *)addr);
16103 + (child, data, (__force struct ptrace_bts_config __user *)addr);
16104 break;
16105
16106 case PTRACE_BTS_SIZE:
16107 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16108
16109 case PTRACE_BTS_GET:
16110 ret = ptrace_bts_read_record
16111 - (child, data, (struct bts_struct __user *) addr);
16112 + (child, data, (__force struct bts_struct __user *) addr);
16113 break;
16114
16115 case PTRACE_BTS_CLEAR:
16116 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16117
16118 case PTRACE_BTS_DRAIN:
16119 ret = ptrace_bts_drain
16120 - (child, data, (struct bts_struct __user *) addr);
16121 + (child, data, (__force struct bts_struct __user *) addr);
16122 break;
16123 #endif /* CONFIG_X86_PTRACE_BTS */
16124
16125 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16126 info.si_code = si_code;
16127
16128 /* User-mode ip? */
16129 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16130 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16131
16132 /* Send us the fake SIGTRAP */
16133 force_sig_info(SIGTRAP, &info, tsk);
16134 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16135 * We must return the syscall number to actually look up in the table.
16136 * This can be -1L to skip running any syscall at all.
16137 */
16138 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
16139 +long syscall_trace_enter(struct pt_regs *regs)
16140 {
16141 long ret = 0;
16142
16143 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16144 return ret ?: regs->orig_ax;
16145 }
16146
16147 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
16148 +void syscall_trace_leave(struct pt_regs *regs)
16149 {
16150 if (unlikely(current->audit_context))
16151 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16152 diff -urNp linux-2.6.32.43/arch/x86/kernel/reboot.c linux-2.6.32.43/arch/x86/kernel/reboot.c
16153 --- linux-2.6.32.43/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
16154 +++ linux-2.6.32.43/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
16155 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16156 EXPORT_SYMBOL(pm_power_off);
16157
16158 static const struct desc_ptr no_idt = {};
16159 -static int reboot_mode;
16160 +static unsigned short reboot_mode;
16161 enum reboot_type reboot_type = BOOT_KBD;
16162 int reboot_force;
16163
16164 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
16165 controller to pulse the CPU reset line, which is more thorough, but
16166 doesn't work with at least one type of 486 motherboard. It is easy
16167 to stop this code working; hence the copious comments. */
16168 -static const unsigned long long
16169 -real_mode_gdt_entries [3] =
16170 +static struct desc_struct
16171 +real_mode_gdt_entries [3] __read_only =
16172 {
16173 - 0x0000000000000000ULL, /* Null descriptor */
16174 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16175 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16176 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16177 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16178 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16179 };
16180
16181 static const struct desc_ptr
16182 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16183 * specified by the code and length parameters.
16184 * We assume that length will aways be less that 100!
16185 */
16186 -void machine_real_restart(const unsigned char *code, int length)
16187 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16188 {
16189 local_irq_disable();
16190
16191 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16192 /* Remap the kernel at virtual address zero, as well as offset zero
16193 from the kernel segment. This assumes the kernel segment starts at
16194 virtual address PAGE_OFFSET. */
16195 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16196 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16197 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16198 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16199
16200 /*
16201 * Use `swapper_pg_dir' as our page directory.
16202 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16203 boot)". This seems like a fairly standard thing that gets set by
16204 REBOOT.COM programs, and the previous reset routine did this
16205 too. */
16206 - *((unsigned short *)0x472) = reboot_mode;
16207 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16208
16209 /* For the switch to real mode, copy some code to low memory. It has
16210 to be in the first 64k because it is running in 16-bit mode, and it
16211 has to have the same physical and virtual address, because it turns
16212 off paging. Copy it near the end of the first page, out of the way
16213 of BIOS variables. */
16214 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16215 - real_mode_switch, sizeof (real_mode_switch));
16216 - memcpy((void *)(0x1000 - 100), code, length);
16217 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16218 + memcpy(__va(0x1000 - 100), code, length);
16219
16220 /* Set up the IDT for real mode. */
16221 load_idt(&real_mode_idt);
16222 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16223 __asm__ __volatile__ ("ljmp $0x0008,%0"
16224 :
16225 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16226 + do { } while (1);
16227 }
16228 #ifdef CONFIG_APM_MODULE
16229 EXPORT_SYMBOL(machine_real_restart);
16230 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
16231 {
16232 }
16233
16234 -static void native_machine_emergency_restart(void)
16235 +__noreturn static void native_machine_emergency_restart(void)
16236 {
16237 int i;
16238
16239 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
16240 #endif
16241 }
16242
16243 -static void __machine_emergency_restart(int emergency)
16244 +static __noreturn void __machine_emergency_restart(int emergency)
16245 {
16246 reboot_emergency = emergency;
16247 machine_ops.emergency_restart();
16248 }
16249
16250 -static void native_machine_restart(char *__unused)
16251 +static __noreturn void native_machine_restart(char *__unused)
16252 {
16253 printk("machine restart\n");
16254
16255 @@ -666,7 +666,7 @@ static void native_machine_restart(char
16256 __machine_emergency_restart(0);
16257 }
16258
16259 -static void native_machine_halt(void)
16260 +static __noreturn void native_machine_halt(void)
16261 {
16262 /* stop other cpus and apics */
16263 machine_shutdown();
16264 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
16265 stop_this_cpu(NULL);
16266 }
16267
16268 -static void native_machine_power_off(void)
16269 +__noreturn static void native_machine_power_off(void)
16270 {
16271 if (pm_power_off) {
16272 if (!reboot_force)
16273 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
16274 }
16275 /* a fallback in case there is no PM info available */
16276 tboot_shutdown(TB_SHUTDOWN_HALT);
16277 + do { } while (1);
16278 }
16279
16280 struct machine_ops machine_ops = {
16281 diff -urNp linux-2.6.32.43/arch/x86/kernel/setup.c linux-2.6.32.43/arch/x86/kernel/setup.c
16282 --- linux-2.6.32.43/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16283 +++ linux-2.6.32.43/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16284 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16285
16286 if (!boot_params.hdr.root_flags)
16287 root_mountflags &= ~MS_RDONLY;
16288 - init_mm.start_code = (unsigned long) _text;
16289 - init_mm.end_code = (unsigned long) _etext;
16290 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16291 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16292 init_mm.end_data = (unsigned long) _edata;
16293 init_mm.brk = _brk_end;
16294
16295 - code_resource.start = virt_to_phys(_text);
16296 - code_resource.end = virt_to_phys(_etext)-1;
16297 - data_resource.start = virt_to_phys(_etext);
16298 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16299 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16300 + data_resource.start = virt_to_phys(_sdata);
16301 data_resource.end = virt_to_phys(_edata)-1;
16302 bss_resource.start = virt_to_phys(&__bss_start);
16303 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16304 diff -urNp linux-2.6.32.43/arch/x86/kernel/setup_percpu.c linux-2.6.32.43/arch/x86/kernel/setup_percpu.c
16305 --- linux-2.6.32.43/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16306 +++ linux-2.6.32.43/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16307 @@ -25,19 +25,17 @@
16308 # define DBG(x...)
16309 #endif
16310
16311 -DEFINE_PER_CPU(int, cpu_number);
16312 +#ifdef CONFIG_SMP
16313 +DEFINE_PER_CPU(unsigned int, cpu_number);
16314 EXPORT_PER_CPU_SYMBOL(cpu_number);
16315 +#endif
16316
16317 -#ifdef CONFIG_X86_64
16318 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16319 -#else
16320 -#define BOOT_PERCPU_OFFSET 0
16321 -#endif
16322
16323 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16324 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16325
16326 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16327 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16328 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16329 };
16330 EXPORT_SYMBOL(__per_cpu_offset);
16331 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16332 {
16333 #ifdef CONFIG_X86_32
16334 struct desc_struct gdt;
16335 + unsigned long base = per_cpu_offset(cpu);
16336
16337 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16338 - 0x2 | DESCTYPE_S, 0x8);
16339 - gdt.s = 1;
16340 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16341 + 0x83 | DESCTYPE_S, 0xC);
16342 write_gdt_entry(get_cpu_gdt_table(cpu),
16343 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16344 #endif
16345 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16346 /* alrighty, percpu areas up and running */
16347 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16348 for_each_possible_cpu(cpu) {
16349 +#ifdef CONFIG_CC_STACKPROTECTOR
16350 +#ifdef CONFIG_X86_32
16351 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16352 +#endif
16353 +#endif
16354 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16355 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16356 per_cpu(cpu_number, cpu) = cpu;
16357 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16358 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16359 #endif
16360 #endif
16361 +#ifdef CONFIG_CC_STACKPROTECTOR
16362 +#ifdef CONFIG_X86_32
16363 + if (!cpu)
16364 + per_cpu(stack_canary.canary, cpu) = canary;
16365 +#endif
16366 +#endif
16367 /*
16368 * Up to this point, the boot CPU has been using .data.init
16369 * area. Reload any changed state for the boot CPU.
16370 diff -urNp linux-2.6.32.43/arch/x86/kernel/signal.c linux-2.6.32.43/arch/x86/kernel/signal.c
16371 --- linux-2.6.32.43/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16372 +++ linux-2.6.32.43/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16373 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16374 * Align the stack pointer according to the i386 ABI,
16375 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16376 */
16377 - sp = ((sp + 4) & -16ul) - 4;
16378 + sp = ((sp - 12) & -16ul) - 4;
16379 #else /* !CONFIG_X86_32 */
16380 sp = round_down(sp, 16) - 8;
16381 #endif
16382 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16383 * Return an always-bogus address instead so we will die with SIGSEGV.
16384 */
16385 if (onsigstack && !likely(on_sig_stack(sp)))
16386 - return (void __user *)-1L;
16387 + return (__force void __user *)-1L;
16388
16389 /* save i387 state */
16390 if (used_math() && save_i387_xstate(*fpstate) < 0)
16391 - return (void __user *)-1L;
16392 + return (__force void __user *)-1L;
16393
16394 return (void __user *)sp;
16395 }
16396 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16397 }
16398
16399 if (current->mm->context.vdso)
16400 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16401 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16402 else
16403 - restorer = &frame->retcode;
16404 + restorer = (void __user *)&frame->retcode;
16405 if (ka->sa.sa_flags & SA_RESTORER)
16406 restorer = ka->sa.sa_restorer;
16407
16408 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16409 * reasons and because gdb uses it as a signature to notice
16410 * signal handler stack frames.
16411 */
16412 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16413 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16414
16415 if (err)
16416 return -EFAULT;
16417 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16418 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16419
16420 /* Set up to return from userspace. */
16421 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16422 + if (current->mm->context.vdso)
16423 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16424 + else
16425 + restorer = (void __user *)&frame->retcode;
16426 if (ka->sa.sa_flags & SA_RESTORER)
16427 restorer = ka->sa.sa_restorer;
16428 put_user_ex(restorer, &frame->pretcode);
16429 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16430 * reasons and because gdb uses it as a signature to notice
16431 * signal handler stack frames.
16432 */
16433 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16434 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16435 } put_user_catch(err);
16436
16437 if (err)
16438 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16439 int signr;
16440 sigset_t *oldset;
16441
16442 + pax_track_stack();
16443 +
16444 /*
16445 * We want the common case to go fast, which is why we may in certain
16446 * cases get here from kernel mode. Just return without doing anything
16447 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16448 * X86_32: vm86 regs switched out by assembly code before reaching
16449 * here, so testing against kernel CS suffices.
16450 */
16451 - if (!user_mode(regs))
16452 + if (!user_mode_novm(regs))
16453 return;
16454
16455 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16456 diff -urNp linux-2.6.32.43/arch/x86/kernel/smpboot.c linux-2.6.32.43/arch/x86/kernel/smpboot.c
16457 --- linux-2.6.32.43/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16458 +++ linux-2.6.32.43/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16459 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16460 */
16461 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16462
16463 -void cpu_hotplug_driver_lock()
16464 +void cpu_hotplug_driver_lock(void)
16465 {
16466 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16467 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16468 }
16469
16470 -void cpu_hotplug_driver_unlock()
16471 +void cpu_hotplug_driver_unlock(void)
16472 {
16473 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16474 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16475 }
16476
16477 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16478 @@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16479 * target processor state.
16480 */
16481 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16482 - (unsigned long)stack_start.sp);
16483 + stack_start);
16484
16485 /*
16486 * Run STARTUP IPI loop.
16487 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16488 set_idle_for_cpu(cpu, c_idle.idle);
16489 do_rest:
16490 per_cpu(current_task, cpu) = c_idle.idle;
16491 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16492 #ifdef CONFIG_X86_32
16493 /* Stack for startup_32 can be just as for start_secondary onwards */
16494 irq_ctx_init(cpu);
16495 @@ -750,13 +751,15 @@ do_rest:
16496 #else
16497 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16498 initial_gs = per_cpu_offset(cpu);
16499 - per_cpu(kernel_stack, cpu) =
16500 - (unsigned long)task_stack_page(c_idle.idle) -
16501 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16502 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16503 #endif
16504 +
16505 + pax_open_kernel();
16506 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16507 + pax_close_kernel();
16508 +
16509 initial_code = (unsigned long)start_secondary;
16510 - stack_start.sp = (void *) c_idle.idle->thread.sp;
16511 + stack_start = c_idle.idle->thread.sp;
16512
16513 /* start_ip had better be page-aligned! */
16514 start_ip = setup_trampoline();
16515 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16516
16517 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16518
16519 +#ifdef CONFIG_PAX_PER_CPU_PGD
16520 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16521 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16522 + KERNEL_PGD_PTRS);
16523 +#endif
16524 +
16525 err = do_boot_cpu(apicid, cpu);
16526
16527 if (err) {
16528 diff -urNp linux-2.6.32.43/arch/x86/kernel/step.c linux-2.6.32.43/arch/x86/kernel/step.c
16529 --- linux-2.6.32.43/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16530 +++ linux-2.6.32.43/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16531 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16532 struct desc_struct *desc;
16533 unsigned long base;
16534
16535 - seg &= ~7UL;
16536 + seg >>= 3;
16537
16538 mutex_lock(&child->mm->context.lock);
16539 - if (unlikely((seg >> 3) >= child->mm->context.size))
16540 + if (unlikely(seg >= child->mm->context.size))
16541 addr = -1L; /* bogus selector, access would fault */
16542 else {
16543 desc = child->mm->context.ldt + seg;
16544 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16545 addr += base;
16546 }
16547 mutex_unlock(&child->mm->context.lock);
16548 - }
16549 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16550 + addr = ktla_ktva(addr);
16551
16552 return addr;
16553 }
16554 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16555 unsigned char opcode[15];
16556 unsigned long addr = convert_ip_to_linear(child, regs);
16557
16558 + if (addr == -EINVAL)
16559 + return 0;
16560 +
16561 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16562 for (i = 0; i < copied; i++) {
16563 switch (opcode[i]) {
16564 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16565
16566 #ifdef CONFIG_X86_64
16567 case 0x40 ... 0x4f:
16568 - if (regs->cs != __USER_CS)
16569 + if ((regs->cs & 0xffff) != __USER_CS)
16570 /* 32-bit mode: register increment */
16571 return 0;
16572 /* 64-bit mode: REX prefix */
16573 diff -urNp linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S
16574 --- linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16575 +++ linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16576 @@ -1,3 +1,4 @@
16577 +.section .rodata,"a",@progbits
16578 ENTRY(sys_call_table)
16579 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16580 .long sys_exit
16581 diff -urNp linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c
16582 --- linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16583 +++ linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16584 @@ -24,6 +24,21 @@
16585
16586 #include <asm/syscalls.h>
16587
16588 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16589 +{
16590 + unsigned long pax_task_size = TASK_SIZE;
16591 +
16592 +#ifdef CONFIG_PAX_SEGMEXEC
16593 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16594 + pax_task_size = SEGMEXEC_TASK_SIZE;
16595 +#endif
16596 +
16597 + if (len > pax_task_size || addr > pax_task_size - len)
16598 + return -EINVAL;
16599 +
16600 + return 0;
16601 +}
16602 +
16603 /*
16604 * Perform the select(nd, in, out, ex, tv) and mmap() system
16605 * calls. Linux/i386 didn't use to be able to handle more than
16606 @@ -58,6 +73,212 @@ out:
16607 return err;
16608 }
16609
16610 +unsigned long
16611 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16612 + unsigned long len, unsigned long pgoff, unsigned long flags)
16613 +{
16614 + struct mm_struct *mm = current->mm;
16615 + struct vm_area_struct *vma;
16616 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16617 +
16618 +#ifdef CONFIG_PAX_SEGMEXEC
16619 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16620 + pax_task_size = SEGMEXEC_TASK_SIZE;
16621 +#endif
16622 +
16623 + pax_task_size -= PAGE_SIZE;
16624 +
16625 + if (len > pax_task_size)
16626 + return -ENOMEM;
16627 +
16628 + if (flags & MAP_FIXED)
16629 + return addr;
16630 +
16631 +#ifdef CONFIG_PAX_RANDMMAP
16632 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16633 +#endif
16634 +
16635 + if (addr) {
16636 + addr = PAGE_ALIGN(addr);
16637 + if (pax_task_size - len >= addr) {
16638 + vma = find_vma(mm, addr);
16639 + if (check_heap_stack_gap(vma, addr, len))
16640 + return addr;
16641 + }
16642 + }
16643 + if (len > mm->cached_hole_size) {
16644 + start_addr = addr = mm->free_area_cache;
16645 + } else {
16646 + start_addr = addr = mm->mmap_base;
16647 + mm->cached_hole_size = 0;
16648 + }
16649 +
16650 +#ifdef CONFIG_PAX_PAGEEXEC
16651 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16652 + start_addr = 0x00110000UL;
16653 +
16654 +#ifdef CONFIG_PAX_RANDMMAP
16655 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16656 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16657 +#endif
16658 +
16659 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16660 + start_addr = addr = mm->mmap_base;
16661 + else
16662 + addr = start_addr;
16663 + }
16664 +#endif
16665 +
16666 +full_search:
16667 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16668 + /* At this point: (!vma || addr < vma->vm_end). */
16669 + if (pax_task_size - len < addr) {
16670 + /*
16671 + * Start a new search - just in case we missed
16672 + * some holes.
16673 + */
16674 + if (start_addr != mm->mmap_base) {
16675 + start_addr = addr = mm->mmap_base;
16676 + mm->cached_hole_size = 0;
16677 + goto full_search;
16678 + }
16679 + return -ENOMEM;
16680 + }
16681 + if (check_heap_stack_gap(vma, addr, len))
16682 + break;
16683 + if (addr + mm->cached_hole_size < vma->vm_start)
16684 + mm->cached_hole_size = vma->vm_start - addr;
16685 + addr = vma->vm_end;
16686 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16687 + start_addr = addr = mm->mmap_base;
16688 + mm->cached_hole_size = 0;
16689 + goto full_search;
16690 + }
16691 + }
16692 +
16693 + /*
16694 + * Remember the place where we stopped the search:
16695 + */
16696 + mm->free_area_cache = addr + len;
16697 + return addr;
16698 +}
16699 +
16700 +unsigned long
16701 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16702 + const unsigned long len, const unsigned long pgoff,
16703 + const unsigned long flags)
16704 +{
16705 + struct vm_area_struct *vma;
16706 + struct mm_struct *mm = current->mm;
16707 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16708 +
16709 +#ifdef CONFIG_PAX_SEGMEXEC
16710 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16711 + pax_task_size = SEGMEXEC_TASK_SIZE;
16712 +#endif
16713 +
16714 + pax_task_size -= PAGE_SIZE;
16715 +
16716 + /* requested length too big for entire address space */
16717 + if (len > pax_task_size)
16718 + return -ENOMEM;
16719 +
16720 + if (flags & MAP_FIXED)
16721 + return addr;
16722 +
16723 +#ifdef CONFIG_PAX_PAGEEXEC
16724 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16725 + goto bottomup;
16726 +#endif
16727 +
16728 +#ifdef CONFIG_PAX_RANDMMAP
16729 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16730 +#endif
16731 +
16732 + /* requesting a specific address */
16733 + if (addr) {
16734 + addr = PAGE_ALIGN(addr);
16735 + if (pax_task_size - len >= addr) {
16736 + vma = find_vma(mm, addr);
16737 + if (check_heap_stack_gap(vma, addr, len))
16738 + return addr;
16739 + }
16740 + }
16741 +
16742 + /* check if free_area_cache is useful for us */
16743 + if (len <= mm->cached_hole_size) {
16744 + mm->cached_hole_size = 0;
16745 + mm->free_area_cache = mm->mmap_base;
16746 + }
16747 +
16748 + /* either no address requested or can't fit in requested address hole */
16749 + addr = mm->free_area_cache;
16750 +
16751 + /* make sure it can fit in the remaining address space */
16752 + if (addr > len) {
16753 + vma = find_vma(mm, addr-len);
16754 + if (check_heap_stack_gap(vma, addr - len, len))
16755 + /* remember the address as a hint for next time */
16756 + return (mm->free_area_cache = addr-len);
16757 + }
16758 +
16759 + if (mm->mmap_base < len)
16760 + goto bottomup;
16761 +
16762 + addr = mm->mmap_base-len;
16763 +
16764 + do {
16765 + /*
16766 + * Lookup failure means no vma is above this address,
16767 + * else if new region fits below vma->vm_start,
16768 + * return with success:
16769 + */
16770 + vma = find_vma(mm, addr);
16771 + if (check_heap_stack_gap(vma, addr, len))
16772 + /* remember the address as a hint for next time */
16773 + return (mm->free_area_cache = addr);
16774 +
16775 + /* remember the largest hole we saw so far */
16776 + if (addr + mm->cached_hole_size < vma->vm_start)
16777 + mm->cached_hole_size = vma->vm_start - addr;
16778 +
16779 + /* try just below the current vma->vm_start */
16780 + addr = skip_heap_stack_gap(vma, len);
16781 + } while (!IS_ERR_VALUE(addr));
16782 +
16783 +bottomup:
16784 + /*
16785 + * A failed mmap() very likely causes application failure,
16786 + * so fall back to the bottom-up function here. This scenario
16787 + * can happen with large stack limits and large mmap()
16788 + * allocations.
16789 + */
16790 +
16791 +#ifdef CONFIG_PAX_SEGMEXEC
16792 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16793 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16794 + else
16795 +#endif
16796 +
16797 + mm->mmap_base = TASK_UNMAPPED_BASE;
16798 +
16799 +#ifdef CONFIG_PAX_RANDMMAP
16800 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16801 + mm->mmap_base += mm->delta_mmap;
16802 +#endif
16803 +
16804 + mm->free_area_cache = mm->mmap_base;
16805 + mm->cached_hole_size = ~0UL;
16806 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16807 + /*
16808 + * Restore the topdown base:
16809 + */
16810 + mm->mmap_base = base;
16811 + mm->free_area_cache = base;
16812 + mm->cached_hole_size = ~0UL;
16813 +
16814 + return addr;
16815 +}
16816
16817 struct sel_arg_struct {
16818 unsigned long n;
16819 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16820 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16821 case SEMTIMEDOP:
16822 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16823 - (const struct timespec __user *)fifth);
16824 + (__force const struct timespec __user *)fifth);
16825
16826 case SEMGET:
16827 return sys_semget(first, second, third);
16828 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16829 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16830 if (ret)
16831 return ret;
16832 - return put_user(raddr, (ulong __user *) third);
16833 + return put_user(raddr, (__force ulong __user *) third);
16834 }
16835 case 1: /* iBCS2 emulator entry point */
16836 if (!segment_eq(get_fs(), get_ds()))
16837 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16838
16839 return error;
16840 }
16841 -
16842 -
16843 -/*
16844 - * Do a system call from kernel instead of calling sys_execve so we
16845 - * end up with proper pt_regs.
16846 - */
16847 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16848 -{
16849 - long __res;
16850 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16851 - : "=a" (__res)
16852 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16853 - return __res;
16854 -}
16855 diff -urNp linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c
16856 --- linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16857 +++ linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16858 @@ -32,8 +32,8 @@ out:
16859 return error;
16860 }
16861
16862 -static void find_start_end(unsigned long flags, unsigned long *begin,
16863 - unsigned long *end)
16864 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16865 + unsigned long *begin, unsigned long *end)
16866 {
16867 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16868 unsigned long new_begin;
16869 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16870 *begin = new_begin;
16871 }
16872 } else {
16873 - *begin = TASK_UNMAPPED_BASE;
16874 + *begin = mm->mmap_base;
16875 *end = TASK_SIZE;
16876 }
16877 }
16878 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16879 if (flags & MAP_FIXED)
16880 return addr;
16881
16882 - find_start_end(flags, &begin, &end);
16883 + find_start_end(mm, flags, &begin, &end);
16884
16885 if (len > end)
16886 return -ENOMEM;
16887
16888 +#ifdef CONFIG_PAX_RANDMMAP
16889 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16890 +#endif
16891 +
16892 if (addr) {
16893 addr = PAGE_ALIGN(addr);
16894 vma = find_vma(mm, addr);
16895 - if (end - len >= addr &&
16896 - (!vma || addr + len <= vma->vm_start))
16897 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16898 return addr;
16899 }
16900 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16901 @@ -106,7 +109,7 @@ full_search:
16902 }
16903 return -ENOMEM;
16904 }
16905 - if (!vma || addr + len <= vma->vm_start) {
16906 + if (check_heap_stack_gap(vma, addr, len)) {
16907 /*
16908 * Remember the place where we stopped the search:
16909 */
16910 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16911 {
16912 struct vm_area_struct *vma;
16913 struct mm_struct *mm = current->mm;
16914 - unsigned long addr = addr0;
16915 + unsigned long base = mm->mmap_base, addr = addr0;
16916
16917 /* requested length too big for entire address space */
16918 if (len > TASK_SIZE)
16919 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16920 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16921 goto bottomup;
16922
16923 +#ifdef CONFIG_PAX_RANDMMAP
16924 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16925 +#endif
16926 +
16927 /* requesting a specific address */
16928 if (addr) {
16929 addr = PAGE_ALIGN(addr);
16930 - vma = find_vma(mm, addr);
16931 - if (TASK_SIZE - len >= addr &&
16932 - (!vma || addr + len <= vma->vm_start))
16933 - return addr;
16934 + if (TASK_SIZE - len >= addr) {
16935 + vma = find_vma(mm, addr);
16936 + if (check_heap_stack_gap(vma, addr, len))
16937 + return addr;
16938 + }
16939 }
16940
16941 /* check if free_area_cache is useful for us */
16942 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16943 /* make sure it can fit in the remaining address space */
16944 if (addr > len) {
16945 vma = find_vma(mm, addr-len);
16946 - if (!vma || addr <= vma->vm_start)
16947 + if (check_heap_stack_gap(vma, addr - len, len))
16948 /* remember the address as a hint for next time */
16949 return mm->free_area_cache = addr-len;
16950 }
16951 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16952 * return with success:
16953 */
16954 vma = find_vma(mm, addr);
16955 - if (!vma || addr+len <= vma->vm_start)
16956 + if (check_heap_stack_gap(vma, addr, len))
16957 /* remember the address as a hint for next time */
16958 return mm->free_area_cache = addr;
16959
16960 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16961 mm->cached_hole_size = vma->vm_start - addr;
16962
16963 /* try just below the current vma->vm_start */
16964 - addr = vma->vm_start-len;
16965 - } while (len < vma->vm_start);
16966 + addr = skip_heap_stack_gap(vma, len);
16967 + } while (!IS_ERR_VALUE(addr));
16968
16969 bottomup:
16970 /*
16971 @@ -198,13 +206,21 @@ bottomup:
16972 * can happen with large stack limits and large mmap()
16973 * allocations.
16974 */
16975 + mm->mmap_base = TASK_UNMAPPED_BASE;
16976 +
16977 +#ifdef CONFIG_PAX_RANDMMAP
16978 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16979 + mm->mmap_base += mm->delta_mmap;
16980 +#endif
16981 +
16982 + mm->free_area_cache = mm->mmap_base;
16983 mm->cached_hole_size = ~0UL;
16984 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16985 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16986 /*
16987 * Restore the topdown base:
16988 */
16989 - mm->free_area_cache = mm->mmap_base;
16990 + mm->mmap_base = base;
16991 + mm->free_area_cache = base;
16992 mm->cached_hole_size = ~0UL;
16993
16994 return addr;
16995 diff -urNp linux-2.6.32.43/arch/x86/kernel/tboot.c linux-2.6.32.43/arch/x86/kernel/tboot.c
16996 --- linux-2.6.32.43/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16997 +++ linux-2.6.32.43/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16998 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16999
17000 void tboot_shutdown(u32 shutdown_type)
17001 {
17002 - void (*shutdown)(void);
17003 + void (* __noreturn shutdown)(void);
17004
17005 if (!tboot_enabled())
17006 return;
17007 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17008
17009 switch_to_tboot_pt();
17010
17011 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17012 + shutdown = (void *)tboot->shutdown_entry;
17013 shutdown();
17014
17015 /* should not reach here */
17016 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17017 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17018 }
17019
17020 -static atomic_t ap_wfs_count;
17021 +static atomic_unchecked_t ap_wfs_count;
17022
17023 static int tboot_wait_for_aps(int num_aps)
17024 {
17025 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17026 {
17027 switch (action) {
17028 case CPU_DYING:
17029 - atomic_inc(&ap_wfs_count);
17030 + atomic_inc_unchecked(&ap_wfs_count);
17031 if (num_online_cpus() == 1)
17032 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17033 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17034 return NOTIFY_BAD;
17035 break;
17036 }
17037 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17038
17039 tboot_create_trampoline();
17040
17041 - atomic_set(&ap_wfs_count, 0);
17042 + atomic_set_unchecked(&ap_wfs_count, 0);
17043 register_hotcpu_notifier(&tboot_cpu_notifier);
17044 return 0;
17045 }
17046 diff -urNp linux-2.6.32.43/arch/x86/kernel/time.c linux-2.6.32.43/arch/x86/kernel/time.c
17047 --- linux-2.6.32.43/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17048 +++ linux-2.6.32.43/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17049 @@ -26,17 +26,13 @@
17050 int timer_ack;
17051 #endif
17052
17053 -#ifdef CONFIG_X86_64
17054 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17055 -#endif
17056 -
17057 unsigned long profile_pc(struct pt_regs *regs)
17058 {
17059 unsigned long pc = instruction_pointer(regs);
17060
17061 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17062 + if (!user_mode(regs) && in_lock_functions(pc)) {
17063 #ifdef CONFIG_FRAME_POINTER
17064 - return *(unsigned long *)(regs->bp + sizeof(long));
17065 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17066 #else
17067 unsigned long *sp =
17068 (unsigned long *)kernel_stack_pointer(regs);
17069 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17070 * or above a saved flags. Eflags has bits 22-31 zero,
17071 * kernel addresses don't.
17072 */
17073 +
17074 +#ifdef CONFIG_PAX_KERNEXEC
17075 + return ktla_ktva(sp[0]);
17076 +#else
17077 if (sp[0] >> 22)
17078 return sp[0];
17079 if (sp[1] >> 22)
17080 return sp[1];
17081 #endif
17082 +
17083 +#endif
17084 }
17085 return pc;
17086 }
17087 diff -urNp linux-2.6.32.43/arch/x86/kernel/tls.c linux-2.6.32.43/arch/x86/kernel/tls.c
17088 --- linux-2.6.32.43/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17089 +++ linux-2.6.32.43/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17090 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17091 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17092 return -EINVAL;
17093
17094 +#ifdef CONFIG_PAX_SEGMEXEC
17095 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17096 + return -EINVAL;
17097 +#endif
17098 +
17099 set_tls_desc(p, idx, &info, 1);
17100
17101 return 0;
17102 diff -urNp linux-2.6.32.43/arch/x86/kernel/trampoline_32.S linux-2.6.32.43/arch/x86/kernel/trampoline_32.S
17103 --- linux-2.6.32.43/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17104 +++ linux-2.6.32.43/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17105 @@ -32,6 +32,12 @@
17106 #include <asm/segment.h>
17107 #include <asm/page_types.h>
17108
17109 +#ifdef CONFIG_PAX_KERNEXEC
17110 +#define ta(X) (X)
17111 +#else
17112 +#define ta(X) ((X) - __PAGE_OFFSET)
17113 +#endif
17114 +
17115 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17116 __CPUINITRODATA
17117 .code16
17118 @@ -60,7 +66,7 @@ r_base = .
17119 inc %ax # protected mode (PE) bit
17120 lmsw %ax # into protected mode
17121 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17122 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17123 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17124
17125 # These need to be in the same 64K segment as the above;
17126 # hence we don't use the boot_gdt_descr defined in head.S
17127 diff -urNp linux-2.6.32.43/arch/x86/kernel/trampoline_64.S linux-2.6.32.43/arch/x86/kernel/trampoline_64.S
17128 --- linux-2.6.32.43/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17129 +++ linux-2.6.32.43/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17130 @@ -91,7 +91,7 @@ startup_32:
17131 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17132 movl %eax, %ds
17133
17134 - movl $X86_CR4_PAE, %eax
17135 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17136 movl %eax, %cr4 # Enable PAE mode
17137
17138 # Setup trampoline 4 level pagetables
17139 @@ -127,7 +127,7 @@ startup_64:
17140 no_longmode:
17141 hlt
17142 jmp no_longmode
17143 -#include "verify_cpu_64.S"
17144 +#include "verify_cpu.S"
17145
17146 # Careful these need to be in the same 64K segment as the above;
17147 tidt:
17148 @@ -138,7 +138,7 @@ tidt:
17149 # so the kernel can live anywhere
17150 .balign 4
17151 tgdt:
17152 - .short tgdt_end - tgdt # gdt limit
17153 + .short tgdt_end - tgdt - 1 # gdt limit
17154 .long tgdt - r_base
17155 .short 0
17156 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17157 diff -urNp linux-2.6.32.43/arch/x86/kernel/traps.c linux-2.6.32.43/arch/x86/kernel/traps.c
17158 --- linux-2.6.32.43/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17159 +++ linux-2.6.32.43/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17160 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17161
17162 /* Do we ignore FPU interrupts ? */
17163 char ignore_fpu_irq;
17164 -
17165 -/*
17166 - * The IDT has to be page-aligned to simplify the Pentium
17167 - * F0 0F bug workaround.
17168 - */
17169 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17170 #endif
17171
17172 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17173 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17174 static inline void
17175 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17176 {
17177 - if (!user_mode_vm(regs))
17178 + if (!user_mode(regs))
17179 die(str, regs, err);
17180 }
17181 #endif
17182
17183 static void __kprobes
17184 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17185 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17186 long error_code, siginfo_t *info)
17187 {
17188 struct task_struct *tsk = current;
17189
17190 #ifdef CONFIG_X86_32
17191 - if (regs->flags & X86_VM_MASK) {
17192 + if (v8086_mode(regs)) {
17193 /*
17194 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17195 * On nmi (interrupt 2), do_trap should not be called.
17196 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17197 }
17198 #endif
17199
17200 - if (!user_mode(regs))
17201 + if (!user_mode_novm(regs))
17202 goto kernel_trap;
17203
17204 #ifdef CONFIG_X86_32
17205 @@ -158,7 +152,7 @@ trap_signal:
17206 printk_ratelimit()) {
17207 printk(KERN_INFO
17208 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17209 - tsk->comm, tsk->pid, str,
17210 + tsk->comm, task_pid_nr(tsk), str,
17211 regs->ip, regs->sp, error_code);
17212 print_vma_addr(" in ", regs->ip);
17213 printk("\n");
17214 @@ -175,8 +169,20 @@ kernel_trap:
17215 if (!fixup_exception(regs)) {
17216 tsk->thread.error_code = error_code;
17217 tsk->thread.trap_no = trapnr;
17218 +
17219 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17220 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17221 + str = "PAX: suspicious stack segment fault";
17222 +#endif
17223 +
17224 die(str, regs, error_code);
17225 }
17226 +
17227 +#ifdef CONFIG_PAX_REFCOUNT
17228 + if (trapnr == 4)
17229 + pax_report_refcount_overflow(regs);
17230 +#endif
17231 +
17232 return;
17233
17234 #ifdef CONFIG_X86_32
17235 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17236 conditional_sti(regs);
17237
17238 #ifdef CONFIG_X86_32
17239 - if (regs->flags & X86_VM_MASK)
17240 + if (v8086_mode(regs))
17241 goto gp_in_vm86;
17242 #endif
17243
17244 tsk = current;
17245 - if (!user_mode(regs))
17246 + if (!user_mode_novm(regs))
17247 goto gp_in_kernel;
17248
17249 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17250 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17251 + struct mm_struct *mm = tsk->mm;
17252 + unsigned long limit;
17253 +
17254 + down_write(&mm->mmap_sem);
17255 + limit = mm->context.user_cs_limit;
17256 + if (limit < TASK_SIZE) {
17257 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17258 + up_write(&mm->mmap_sem);
17259 + return;
17260 + }
17261 + up_write(&mm->mmap_sem);
17262 + }
17263 +#endif
17264 +
17265 tsk->thread.error_code = error_code;
17266 tsk->thread.trap_no = 13;
17267
17268 @@ -305,6 +327,13 @@ gp_in_kernel:
17269 if (notify_die(DIE_GPF, "general protection fault", regs,
17270 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17271 return;
17272 +
17273 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17274 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17275 + die("PAX: suspicious general protection fault", regs, error_code);
17276 + else
17277 +#endif
17278 +
17279 die("general protection fault", regs, error_code);
17280 }
17281
17282 @@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17283 dotraplinkage notrace __kprobes void
17284 do_nmi(struct pt_regs *regs, long error_code)
17285 {
17286 +
17287 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17288 + if (!user_mode(regs)) {
17289 + unsigned long cs = regs->cs & 0xFFFF;
17290 + unsigned long ip = ktva_ktla(regs->ip);
17291 +
17292 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17293 + regs->ip = ip;
17294 + }
17295 +#endif
17296 +
17297 nmi_enter();
17298
17299 inc_irq_stat(__nmi_count);
17300 @@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17301 }
17302
17303 #ifdef CONFIG_X86_32
17304 - if (regs->flags & X86_VM_MASK)
17305 + if (v8086_mode(regs))
17306 goto debug_vm86;
17307 #endif
17308
17309 @@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17310 * kernel space (but re-enable TF when returning to user mode).
17311 */
17312 if (condition & DR_STEP) {
17313 - if (!user_mode(regs))
17314 + if (!user_mode_novm(regs))
17315 goto clear_TF_reenable;
17316 }
17317
17318 @@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17319 * Handle strange cache flush from user space exception
17320 * in all other cases. This is undocumented behaviour.
17321 */
17322 - if (regs->flags & X86_VM_MASK) {
17323 + if (v8086_mode(regs)) {
17324 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17325 return;
17326 }
17327 @@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17328 void __math_state_restore(void)
17329 {
17330 struct thread_info *thread = current_thread_info();
17331 - struct task_struct *tsk = thread->task;
17332 + struct task_struct *tsk = current;
17333
17334 /*
17335 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17336 @@ -825,8 +865,7 @@ void __math_state_restore(void)
17337 */
17338 asmlinkage void math_state_restore(void)
17339 {
17340 - struct thread_info *thread = current_thread_info();
17341 - struct task_struct *tsk = thread->task;
17342 + struct task_struct *tsk = current;
17343
17344 if (!tsk_used_math(tsk)) {
17345 local_irq_enable();
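Trap number 4 in the hook added above is the x86 overflow exception (#OF), so the handler can report a reference-counter overflow when instrumented kernel arithmetic raises that trap instead of letting the counter silently wrap. A hedged userspace sketch of the same detect-and-report idea, using a compiler builtin rather than the kernel's #OF-based instrumentation (names are hypothetical):

#include <stdio.h>
#include <limits.h>

/* Illustration only: refuse to let a signed reference count wrap around. */
static int refcount_inc_checked(int *counter)
{
	int new_value;

	if (__builtin_add_overflow(*counter, 1, &new_value)) {
		fprintf(stderr, "refcount overflow detected, counter left saturated\n");
		return -1;                    /* report instead of wrapping */
	}
	*counter = new_value;
	return 0;
}

int main(void)
{
	int refs = INT_MAX;

	refcount_inc_checked(&refs);          /* takes the overflow branch */
	return 0;
}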
17346 diff -urNp linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S
17347 --- linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17348 +++ linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17349 @@ -1,105 +0,0 @@
17350 -/*
17351 - *
17352 - * verify_cpu.S - Code for cpu long mode and SSE verification. This
17353 - * code has been borrowed from boot/setup.S and was introduced by
17354 - * Andi Kleen.
17355 - *
17356 - * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17357 - * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17358 - * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17359 - *
17360 - * This source code is licensed under the GNU General Public License,
17361 - * Version 2. See the file COPYING for more details.
17362 - *
17363 - * This is a common code for verification whether CPU supports
17364 - * long mode and SSE or not. It is not called directly instead this
17365 - * file is included at various places and compiled in that context.
17366 - * Following are the current usage.
17367 - *
17368 - * This file is included by both 16bit and 32bit code.
17369 - *
17370 - * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17371 - * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17372 - * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17373 - * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17374 - *
17375 - * verify_cpu, returns the status of cpu check in register %eax.
17376 - * 0: Success 1: Failure
17377 - *
17378 - * The caller needs to check for the error code and take the action
17379 - * appropriately. Either display a message or halt.
17380 - */
17381 -
17382 -#include <asm/cpufeature.h>
17383 -
17384 -verify_cpu:
17385 - pushfl # Save caller passed flags
17386 - pushl $0 # Kill any dangerous flags
17387 - popfl
17388 -
17389 - pushfl # standard way to check for cpuid
17390 - popl %eax
17391 - movl %eax,%ebx
17392 - xorl $0x200000,%eax
17393 - pushl %eax
17394 - popfl
17395 - pushfl
17396 - popl %eax
17397 - cmpl %eax,%ebx
17398 - jz verify_cpu_no_longmode # cpu has no cpuid
17399 -
17400 - movl $0x0,%eax # See if cpuid 1 is implemented
17401 - cpuid
17402 - cmpl $0x1,%eax
17403 - jb verify_cpu_no_longmode # no cpuid 1
17404 -
17405 - xor %di,%di
17406 - cmpl $0x68747541,%ebx # AuthenticAMD
17407 - jnz verify_cpu_noamd
17408 - cmpl $0x69746e65,%edx
17409 - jnz verify_cpu_noamd
17410 - cmpl $0x444d4163,%ecx
17411 - jnz verify_cpu_noamd
17412 - mov $1,%di # cpu is from AMD
17413 -
17414 -verify_cpu_noamd:
17415 - movl $0x1,%eax # Does the cpu have what it takes
17416 - cpuid
17417 - andl $REQUIRED_MASK0,%edx
17418 - xorl $REQUIRED_MASK0,%edx
17419 - jnz verify_cpu_no_longmode
17420 -
17421 - movl $0x80000000,%eax # See if extended cpuid is implemented
17422 - cpuid
17423 - cmpl $0x80000001,%eax
17424 - jb verify_cpu_no_longmode # no extended cpuid
17425 -
17426 - movl $0x80000001,%eax # Does the cpu have what it takes
17427 - cpuid
17428 - andl $REQUIRED_MASK1,%edx
17429 - xorl $REQUIRED_MASK1,%edx
17430 - jnz verify_cpu_no_longmode
17431 -
17432 -verify_cpu_sse_test:
17433 - movl $1,%eax
17434 - cpuid
17435 - andl $SSE_MASK,%edx
17436 - cmpl $SSE_MASK,%edx
17437 - je verify_cpu_sse_ok
17438 - test %di,%di
17439 - jz verify_cpu_no_longmode # only try to force SSE on AMD
17440 - movl $0xc0010015,%ecx # HWCR
17441 - rdmsr
17442 - btr $15,%eax # enable SSE
17443 - wrmsr
17444 - xor %di,%di # don't loop
17445 - jmp verify_cpu_sse_test # try again
17446 -
17447 -verify_cpu_no_longmode:
17448 - popfl # Restore caller passed flags
17449 - movl $1,%eax
17450 - ret
17451 -verify_cpu_sse_ok:
17452 - popfl # Restore caller passed flags
17453 - xorl %eax, %eax
17454 - ret
17455 diff -urNp linux-2.6.32.43/arch/x86/kernel/verify_cpu.S linux-2.6.32.43/arch/x86/kernel/verify_cpu.S
17456 --- linux-2.6.32.43/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17457 +++ linux-2.6.32.43/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17458 @@ -0,0 +1,140 @@
17459 +/*
17460 + *
17461 + * verify_cpu.S - Code for cpu long mode and SSE verification. This
17462 + * code has been borrowed from boot/setup.S and was introduced by
17463 + * Andi Kleen.
17464 + *
17465 + * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17466 + * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17467 + * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17468 + * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17469 + *
17470 + * This source code is licensed under the GNU General Public License,
17471 + * Version 2. See the file COPYING for more details.
17472 + *
17473 + * This is a common code for verification whether CPU supports
17474 + * long mode and SSE or not. It is not called directly instead this
17475 + * file is included at various places and compiled in that context.
17476 + * This file is expected to run in 32bit code. Currently:
17477 + *
17478 + * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17479 + * arch/x86/kernel/trampoline_64.S: secondary processor verification
17480 + * arch/x86/kernel/head_32.S: processor startup
17481 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17482 + *
17483 + * verify_cpu, returns the status of longmode and SSE in register %eax.
17484 + * 0: Success 1: Failure
17485 + *
17486 + * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17487 + *
17488 + * The caller needs to check for the error code and take the action
17489 + * appropriately. Either display a message or halt.
17490 + */
17491 +
17492 +#include <asm/cpufeature.h>
17493 +#include <asm/msr-index.h>
17494 +
17495 +verify_cpu:
17496 + pushfl # Save caller passed flags
17497 + pushl $0 # Kill any dangerous flags
17498 + popfl
17499 +
17500 + pushfl # standard way to check for cpuid
17501 + popl %eax
17502 + movl %eax,%ebx
17503 + xorl $0x200000,%eax
17504 + pushl %eax
17505 + popfl
17506 + pushfl
17507 + popl %eax
17508 + cmpl %eax,%ebx
17509 + jz verify_cpu_no_longmode # cpu has no cpuid
17510 +
17511 + movl $0x0,%eax # See if cpuid 1 is implemented
17512 + cpuid
17513 + cmpl $0x1,%eax
17514 + jb verify_cpu_no_longmode # no cpuid 1
17515 +
17516 + xor %di,%di
17517 + cmpl $0x68747541,%ebx # AuthenticAMD
17518 + jnz verify_cpu_noamd
17519 + cmpl $0x69746e65,%edx
17520 + jnz verify_cpu_noamd
17521 + cmpl $0x444d4163,%ecx
17522 + jnz verify_cpu_noamd
17523 + mov $1,%di # cpu is from AMD
17524 + jmp verify_cpu_check
17525 +
17526 +verify_cpu_noamd:
17527 + cmpl $0x756e6547,%ebx # GenuineIntel?
17528 + jnz verify_cpu_check
17529 + cmpl $0x49656e69,%edx
17530 + jnz verify_cpu_check
17531 + cmpl $0x6c65746e,%ecx
17532 + jnz verify_cpu_check
17533 +
17534 + # only call IA32_MISC_ENABLE when:
17535 + # family > 6 || (family == 6 && model >= 0xd)
17536 + movl $0x1, %eax # check CPU family and model
17537 + cpuid
17538 + movl %eax, %ecx
17539 +
17540 + andl $0x0ff00f00, %eax # mask family and extended family
17541 + shrl $8, %eax
17542 + cmpl $6, %eax
17543 + ja verify_cpu_clear_xd # family > 6, ok
17544 + jb verify_cpu_check # family < 6, skip
17545 +
17546 + andl $0x000f00f0, %ecx # mask model and extended model
17547 + shrl $4, %ecx
17548 + cmpl $0xd, %ecx
17549 + jb verify_cpu_check # family == 6, model < 0xd, skip
17550 +
17551 +verify_cpu_clear_xd:
17552 + movl $MSR_IA32_MISC_ENABLE, %ecx
17553 + rdmsr
17554 + btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17555 + jnc verify_cpu_check # only write MSR if bit was changed
17556 + wrmsr
17557 +
17558 +verify_cpu_check:
17559 + movl $0x1,%eax # Does the cpu have what it takes
17560 + cpuid
17561 + andl $REQUIRED_MASK0,%edx
17562 + xorl $REQUIRED_MASK0,%edx
17563 + jnz verify_cpu_no_longmode
17564 +
17565 + movl $0x80000000,%eax # See if extended cpuid is implemented
17566 + cpuid
17567 + cmpl $0x80000001,%eax
17568 + jb verify_cpu_no_longmode # no extended cpuid
17569 +
17570 + movl $0x80000001,%eax # Does the cpu have what it takes
17571 + cpuid
17572 + andl $REQUIRED_MASK1,%edx
17573 + xorl $REQUIRED_MASK1,%edx
17574 + jnz verify_cpu_no_longmode
17575 +
17576 +verify_cpu_sse_test:
17577 + movl $1,%eax
17578 + cpuid
17579 + andl $SSE_MASK,%edx
17580 + cmpl $SSE_MASK,%edx
17581 + je verify_cpu_sse_ok
17582 + test %di,%di
17583 + jz verify_cpu_no_longmode # only try to force SSE on AMD
17584 + movl $MSR_K7_HWCR,%ecx
17585 + rdmsr
17586 + btr $15,%eax # enable SSE
17587 + wrmsr
17588 + xor %di,%di # don't loop
17589 + jmp verify_cpu_sse_test # try again
17590 +
17591 +verify_cpu_no_longmode:
17592 + popfl # Restore caller passed flags
17593 + movl $1,%eax
17594 + ret
17595 +verify_cpu_sse_ok:
17596 + popfl # Restore caller passed flags
17597 + xorl %eax, %eax
17598 + ret
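The family/model gate in the new verify_cpu.S above ("family > 6 || (family == 6 && model >= 0xd)") is computed from the CPUID leaf 1 signature before deciding whether to touch IA32_MISC_ENABLE at all. A simplified, hedged C decode of the same signature fields (the assembly folds extended family into its compare; this sketch uses the conventional decode):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;                              /* leaf 1 not available */

	unsigned int family     = (eax >> 8) & 0xf;    /* bits 11:8  */
	unsigned int ext_family = (eax >> 20) & 0xff;  /* bits 27:20 */
	unsigned int model      = (eax >> 4) & 0xf;    /* bits 7:4   */
	unsigned int ext_model  = (eax >> 16) & 0xf;   /* bits 19:16 */

	if (family == 0xf)
		family += ext_family;
	if (family == 0x6 || family == 0xf)
		model += ext_model << 4;

	/* Same condition as the hunk's comment before the MSR access. */
	int clear_xd = (family > 6) || (family == 6 && model >= 0xd);

	printf("family 0x%x model 0x%x: %s\n", family, model,
	       clear_xd ? "would clear XD_DISABLE" : "would skip the MSR write");
	return 0;
}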
17599 diff -urNp linux-2.6.32.43/arch/x86/kernel/vm86_32.c linux-2.6.32.43/arch/x86/kernel/vm86_32.c
17600 --- linux-2.6.32.43/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17601 +++ linux-2.6.32.43/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17602 @@ -41,6 +41,7 @@
17603 #include <linux/ptrace.h>
17604 #include <linux/audit.h>
17605 #include <linux/stddef.h>
17606 +#include <linux/grsecurity.h>
17607
17608 #include <asm/uaccess.h>
17609 #include <asm/io.h>
17610 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17611 do_exit(SIGSEGV);
17612 }
17613
17614 - tss = &per_cpu(init_tss, get_cpu());
17615 + tss = init_tss + get_cpu();
17616 current->thread.sp0 = current->thread.saved_sp0;
17617 current->thread.sysenter_cs = __KERNEL_CS;
17618 load_sp0(tss, &current->thread);
17619 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17620 struct task_struct *tsk;
17621 int tmp, ret = -EPERM;
17622
17623 +#ifdef CONFIG_GRKERNSEC_VM86
17624 + if (!capable(CAP_SYS_RAWIO)) {
17625 + gr_handle_vm86();
17626 + goto out;
17627 + }
17628 +#endif
17629 +
17630 tsk = current;
17631 if (tsk->thread.saved_sp0)
17632 goto out;
17633 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17634 int tmp, ret;
17635 struct vm86plus_struct __user *v86;
17636
17637 +#ifdef CONFIG_GRKERNSEC_VM86
17638 + if (!capable(CAP_SYS_RAWIO)) {
17639 + gr_handle_vm86();
17640 + ret = -EPERM;
17641 + goto out;
17642 + }
17643 +#endif
17644 +
17645 tsk = current;
17646 switch (regs->bx) {
17647 case VM86_REQUEST_IRQ:
17648 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17649 tsk->thread.saved_fs = info->regs32->fs;
17650 tsk->thread.saved_gs = get_user_gs(info->regs32);
17651
17652 - tss = &per_cpu(init_tss, get_cpu());
17653 + tss = init_tss + get_cpu();
17654 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17655 if (cpu_has_sep)
17656 tsk->thread.sysenter_cs = 0;
17657 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17658 goto cannot_handle;
17659 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17660 goto cannot_handle;
17661 - intr_ptr = (unsigned long __user *) (i << 2);
17662 + intr_ptr = (__force unsigned long __user *) (i << 2);
17663 if (get_user(segoffs, intr_ptr))
17664 goto cannot_handle;
17665 if ((segoffs >> 16) == BIOSSEG)
17666 diff -urNp linux-2.6.32.43/arch/x86/kernel/vmi_32.c linux-2.6.32.43/arch/x86/kernel/vmi_32.c
17667 --- linux-2.6.32.43/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17668 +++ linux-2.6.32.43/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
17669 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17670 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17671
17672 #define call_vrom_func(rom,func) \
17673 - (((VROMFUNC *)(rom->func))())
17674 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17675
17676 #define call_vrom_long_func(rom,func,arg) \
17677 - (((VROMLONGFUNC *)(rom->func)) (arg))
17678 +({\
17679 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17680 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17681 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17682 + __reloc;\
17683 +})
17684
17685 -static struct vrom_header *vmi_rom;
17686 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17687 static int disable_pge;
17688 static int disable_pse;
17689 static int disable_sep;
17690 @@ -76,10 +81,10 @@ static struct {
17691 void (*set_initial_ap_state)(int, int);
17692 void (*halt)(void);
17693 void (*set_lazy_mode)(int mode);
17694 -} vmi_ops;
17695 +} vmi_ops __read_only;
17696
17697 /* Cached VMI operations */
17698 -struct vmi_timer_ops vmi_timer_ops;
17699 +struct vmi_timer_ops vmi_timer_ops __read_only;
17700
17701 /*
17702 * VMI patching routines.
17703 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17704 static inline void patch_offset(void *insnbuf,
17705 unsigned long ip, unsigned long dest)
17706 {
17707 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17708 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17709 }
17710
17711 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17712 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17713 {
17714 u64 reloc;
17715 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17716 +
17717 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17718 switch(rel->type) {
17719 case VMI_RELOCATION_CALL_REL:
17720 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17721
17722 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17723 {
17724 - const pte_t pte = { .pte = 0 };
17725 + const pte_t pte = __pte(0ULL);
17726 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17727 }
17728
17729 static void vmi_pmd_clear(pmd_t *pmd)
17730 {
17731 - const pte_t pte = { .pte = 0 };
17732 + const pte_t pte = __pte(0ULL);
17733 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17734 }
17735 #endif
17736 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17737 ap.ss = __KERNEL_DS;
17738 ap.esp = (unsigned long) start_esp;
17739
17740 - ap.ds = __USER_DS;
17741 - ap.es = __USER_DS;
17742 + ap.ds = __KERNEL_DS;
17743 + ap.es = __KERNEL_DS;
17744 ap.fs = __KERNEL_PERCPU;
17745 - ap.gs = __KERNEL_STACK_CANARY;
17746 + savesegment(gs, ap.gs);
17747
17748 ap.eflags = 0;
17749
17750 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17751 paravirt_leave_lazy_mmu();
17752 }
17753
17754 +#ifdef CONFIG_PAX_KERNEXEC
17755 +static unsigned long vmi_pax_open_kernel(void)
17756 +{
17757 + return 0;
17758 +}
17759 +
17760 +static unsigned long vmi_pax_close_kernel(void)
17761 +{
17762 + return 0;
17763 +}
17764 +#endif
17765 +
17766 static inline int __init check_vmi_rom(struct vrom_header *rom)
17767 {
17768 struct pci_header *pci;
17769 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17770 return 0;
17771 if (rom->vrom_signature != VMI_SIGNATURE)
17772 return 0;
17773 + if (rom->rom_length * 512 > sizeof(*rom)) {
17774 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17775 + return 0;
17776 + }
17777 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17778 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17779 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17780 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17781 struct vrom_header *romstart;
17782 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17783 if (check_vmi_rom(romstart)) {
17784 - vmi_rom = romstart;
17785 + vmi_rom = *romstart;
17786 return 1;
17787 }
17788 }
17789 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17790
17791 para_fill(pv_irq_ops.safe_halt, Halt);
17792
17793 +#ifdef CONFIG_PAX_KERNEXEC
17794 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17795 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17796 +#endif
17797 +
17798 /*
17799 * Alternative instruction rewriting doesn't happen soon enough
17800 * to convert VMI_IRET to a call instead of a jump; so we have
17801 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17802
17803 void __init vmi_init(void)
17804 {
17805 - if (!vmi_rom)
17806 + if (!vmi_rom.rom_signature)
17807 probe_vmi_rom();
17808 else
17809 - check_vmi_rom(vmi_rom);
17810 + check_vmi_rom(&vmi_rom);
17811
17812 /* In case probing for or validating the ROM failed, basil */
17813 - if (!vmi_rom)
17814 + if (!vmi_rom.rom_signature)
17815 return;
17816
17817 - reserve_top_address(-vmi_rom->virtual_top);
17818 + reserve_top_address(-vmi_rom.virtual_top);
17819
17820 #ifdef CONFIG_X86_IO_APIC
17821 /* This is virtual hardware; timer routing is wired correctly */
17822 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17823 {
17824 unsigned long flags;
17825
17826 - if (!vmi_rom)
17827 + if (!vmi_rom.rom_signature)
17828 return;
17829
17830 local_irq_save(flags);
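Because vmi_rom is now a fixed struct rather than a pointer into the ISA window, "vmi_rom = *romstart" can only ever copy sizeof(struct vrom_header) bytes, and the added rom_length check above rejects images whose declared size does not fit that destination. A generic, hedged sketch of the same validate-then-copy pattern, with made-up field names:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical header for illustration; only the length check matters. */
struct rom_header_example {
	uint16_t signature;
	uint8_t  length_units;                 /* declared size in 512-byte units */
	uint8_t  reserved[13];
};

static int copy_header_checked(struct rom_header_example *dst,
                               const struct rom_header_example *src)
{
	size_t declared = (size_t)src->length_units * 512;

	if (declared > sizeof(*dst)) {
		fprintf(stderr, "image claims %zu bytes, destination holds %zu\n",
		        declared, sizeof(*dst));
		return -1;                     /* reject before copying anything */
	}
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

int main(void)
{
	struct rom_header_example src = { .signature = 0xaa55, .length_units = 4 };
	struct rom_header_example dst;

	return copy_header_checked(&dst, &src) ? 1 : 0;   /* rejected: 2048 > 16 */
}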
17831 diff -urNp linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S
17832 --- linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17833 +++ linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17834 @@ -26,6 +26,13 @@
17835 #include <asm/page_types.h>
17836 #include <asm/cache.h>
17837 #include <asm/boot.h>
17838 +#include <asm/segment.h>
17839 +
17840 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17841 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17842 +#else
17843 +#define __KERNEL_TEXT_OFFSET 0
17844 +#endif
17845
17846 #undef i386 /* in case the preprocessor is a 32bit one */
17847
17848 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17849 #ifdef CONFIG_X86_32
17850 OUTPUT_ARCH(i386)
17851 ENTRY(phys_startup_32)
17852 -jiffies = jiffies_64;
17853 #else
17854 OUTPUT_ARCH(i386:x86-64)
17855 ENTRY(phys_startup_64)
17856 -jiffies_64 = jiffies;
17857 #endif
17858
17859 PHDRS {
17860 text PT_LOAD FLAGS(5); /* R_E */
17861 - data PT_LOAD FLAGS(7); /* RWE */
17862 +#ifdef CONFIG_X86_32
17863 + module PT_LOAD FLAGS(5); /* R_E */
17864 +#endif
17865 +#ifdef CONFIG_XEN
17866 + rodata PT_LOAD FLAGS(5); /* R_E */
17867 +#else
17868 + rodata PT_LOAD FLAGS(4); /* R__ */
17869 +#endif
17870 + data PT_LOAD FLAGS(6); /* RW_ */
17871 #ifdef CONFIG_X86_64
17872 user PT_LOAD FLAGS(5); /* R_E */
17873 +#endif
17874 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17875 #ifdef CONFIG_SMP
17876 percpu PT_LOAD FLAGS(6); /* RW_ */
17877 #endif
17878 + text.init PT_LOAD FLAGS(5); /* R_E */
17879 + text.exit PT_LOAD FLAGS(5); /* R_E */
17880 init PT_LOAD FLAGS(7); /* RWE */
17881 -#endif
17882 note PT_NOTE FLAGS(0); /* ___ */
17883 }
17884
17885 SECTIONS
17886 {
17887 #ifdef CONFIG_X86_32
17888 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17889 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17890 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17891 #else
17892 - . = __START_KERNEL;
17893 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17894 + . = __START_KERNEL;
17895 #endif
17896
17897 /* Text and read-only data */
17898 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17899 - _text = .;
17900 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17901 /* bootstrapping code */
17902 +#ifdef CONFIG_X86_32
17903 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17904 +#else
17905 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17906 +#endif
17907 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17908 + _text = .;
17909 HEAD_TEXT
17910 #ifdef CONFIG_X86_32
17911 . = ALIGN(PAGE_SIZE);
17912 @@ -82,28 +102,71 @@ SECTIONS
17913 IRQENTRY_TEXT
17914 *(.fixup)
17915 *(.gnu.warning)
17916 - /* End of text section */
17917 - _etext = .;
17918 } :text = 0x9090
17919
17920 - NOTES :text :note
17921 + . += __KERNEL_TEXT_OFFSET;
17922 +
17923 +#ifdef CONFIG_X86_32
17924 + . = ALIGN(PAGE_SIZE);
17925 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17926 + *(.vmi.rom)
17927 + } :module
17928 +
17929 + . = ALIGN(PAGE_SIZE);
17930 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17931 +
17932 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17933 + MODULES_EXEC_VADDR = .;
17934 + BYTE(0)
17935 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17936 + . = ALIGN(HPAGE_SIZE);
17937 + MODULES_EXEC_END = . - 1;
17938 +#endif
17939 +
17940 + } :module
17941 +#endif
17942
17943 - EXCEPTION_TABLE(16) :text = 0x9090
17944 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17945 + /* End of text section */
17946 + _etext = . - __KERNEL_TEXT_OFFSET;
17947 + }
17948 +
17949 +#ifdef CONFIG_X86_32
17950 + . = ALIGN(PAGE_SIZE);
17951 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17952 + *(.idt)
17953 + . = ALIGN(PAGE_SIZE);
17954 + *(.empty_zero_page)
17955 + *(.swapper_pg_fixmap)
17956 + *(.swapper_pg_pmd)
17957 + *(.swapper_pg_dir)
17958 + *(.trampoline_pg_dir)
17959 + } :rodata
17960 +#endif
17961 +
17962 + . = ALIGN(PAGE_SIZE);
17963 + NOTES :rodata :note
17964 +
17965 + EXCEPTION_TABLE(16) :rodata
17966
17967 RO_DATA(PAGE_SIZE)
17968
17969 /* Data */
17970 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17971 +
17972 +#ifdef CONFIG_PAX_KERNEXEC
17973 + . = ALIGN(HPAGE_SIZE);
17974 +#else
17975 + . = ALIGN(PAGE_SIZE);
17976 +#endif
17977 +
17978 /* Start of data section */
17979 _sdata = .;
17980
17981 /* init_task */
17982 INIT_TASK_DATA(THREAD_SIZE)
17983
17984 -#ifdef CONFIG_X86_32
17985 - /* 32 bit has nosave before _edata */
17986 NOSAVE_DATA
17987 -#endif
17988
17989 PAGE_ALIGNED_DATA(PAGE_SIZE)
17990
17991 @@ -112,6 +175,8 @@ SECTIONS
17992 DATA_DATA
17993 CONSTRUCTORS
17994
17995 + jiffies = jiffies_64;
17996 +
17997 /* rarely changed data like cpu maps */
17998 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17999
18000 @@ -166,12 +231,6 @@ SECTIONS
18001 }
18002 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18003
18004 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18005 - .jiffies : AT(VLOAD(.jiffies)) {
18006 - *(.jiffies)
18007 - }
18008 - jiffies = VVIRT(.jiffies);
18009 -
18010 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18011 *(.vsyscall_3)
18012 }
18013 @@ -187,12 +246,19 @@ SECTIONS
18014 #endif /* CONFIG_X86_64 */
18015
18016 /* Init code and data - will be freed after init */
18017 - . = ALIGN(PAGE_SIZE);
18018 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18019 + BYTE(0)
18020 +
18021 +#ifdef CONFIG_PAX_KERNEXEC
18022 + . = ALIGN(HPAGE_SIZE);
18023 +#else
18024 + . = ALIGN(PAGE_SIZE);
18025 +#endif
18026 +
18027 __init_begin = .; /* paired with __init_end */
18028 - }
18029 + } :init.begin
18030
18031 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18032 +#ifdef CONFIG_SMP
18033 /*
18034 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18035 * output PHDR, so the next output section - .init.text - should
18036 @@ -201,12 +267,27 @@ SECTIONS
18037 PERCPU_VADDR(0, :percpu)
18038 #endif
18039
18040 - INIT_TEXT_SECTION(PAGE_SIZE)
18041 -#ifdef CONFIG_X86_64
18042 - :init
18043 -#endif
18044 + . = ALIGN(PAGE_SIZE);
18045 + init_begin = .;
18046 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18047 + VMLINUX_SYMBOL(_sinittext) = .;
18048 + INIT_TEXT
18049 + VMLINUX_SYMBOL(_einittext) = .;
18050 + . = ALIGN(PAGE_SIZE);
18051 + } :text.init
18052
18053 - INIT_DATA_SECTION(16)
18054 + /*
18055 + * .exit.text is discard at runtime, not link time, to deal with
18056 + * references from .altinstructions and .eh_frame
18057 + */
18058 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18059 + EXIT_TEXT
18060 + . = ALIGN(16);
18061 + } :text.exit
18062 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18063 +
18064 + . = ALIGN(PAGE_SIZE);
18065 + INIT_DATA_SECTION(16) :init
18066
18067 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18068 __x86_cpu_dev_start = .;
18069 @@ -232,19 +313,11 @@ SECTIONS
18070 *(.altinstr_replacement)
18071 }
18072
18073 - /*
18074 - * .exit.text is discard at runtime, not link time, to deal with
18075 - * references from .altinstructions and .eh_frame
18076 - */
18077 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18078 - EXIT_TEXT
18079 - }
18080 -
18081 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18082 EXIT_DATA
18083 }
18084
18085 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18086 +#ifndef CONFIG_SMP
18087 PERCPU(PAGE_SIZE)
18088 #endif
18089
18090 @@ -267,12 +340,6 @@ SECTIONS
18091 . = ALIGN(PAGE_SIZE);
18092 }
18093
18094 -#ifdef CONFIG_X86_64
18095 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18096 - NOSAVE_DATA
18097 - }
18098 -#endif
18099 -
18100 /* BSS */
18101 . = ALIGN(PAGE_SIZE);
18102 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18103 @@ -288,6 +355,7 @@ SECTIONS
18104 __brk_base = .;
18105 . += 64 * 1024; /* 64k alignment slop space */
18106 *(.brk_reservation) /* areas brk users have reserved */
18107 + . = ALIGN(HPAGE_SIZE);
18108 __brk_limit = .;
18109 }
18110
18111 @@ -316,13 +384,12 @@ SECTIONS
18112 * for the boot processor.
18113 */
18114 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18115 -INIT_PER_CPU(gdt_page);
18116 INIT_PER_CPU(irq_stack_union);
18117
18118 /*
18119 * Build-time check on the image size:
18120 */
18121 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18122 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18123 "kernel image bigger than KERNEL_IMAGE_SIZE");
18124
18125 #ifdef CONFIG_SMP
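The FLAGS() values in the rewritten PHDRS block of the linker script above are plain ELF p_flags bit masks (PF_X = 1, PF_W = 2, PF_R = 4), so 5 = R_E, 6 = RW_, 4 = R__ and 7 = RWE; the hunk's point is that only the throwaway init segment keeps all three bits, while data loses execute and rodata loses write. A tiny decoder, for illustration:

#include <stdio.h>
#include <elf.h>                 /* PF_R, PF_W, PF_X */

static void print_segment_flags(unsigned int p_flags)
{
	printf("FLAGS(%u) = %c%c%c\n", p_flags,
	       (p_flags & PF_R) ? 'R' : '_',
	       (p_flags & PF_W) ? 'W' : '_',
	       (p_flags & PF_X) ? 'E' : '_');
}

int main(void)
{
	print_segment_flags(5);      /* text:   R_E */
	print_segment_flags(4);      /* rodata: R__ (non-Xen case) */
	print_segment_flags(6);      /* data:   RW_ */
	print_segment_flags(7);      /* init:   RWE */
	return 0;
}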
18126 diff -urNp linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c
18127 --- linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18128 +++ linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18129 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18130
18131 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18132 /* copy vsyscall data */
18133 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18134 vsyscall_gtod_data.clock.vread = clock->vread;
18135 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18136 vsyscall_gtod_data.clock.mask = clock->mask;
18137 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18138 We do this here because otherwise user space would do it on
18139 its own in a likely inferior way (no access to jiffies).
18140 If you don't like it pass NULL. */
18141 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
18142 + if (tcache && tcache->blob[0] == (j = jiffies)) {
18143 p = tcache->blob[1];
18144 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18145 /* Load per CPU data from RDTSCP */
18146 diff -urNp linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c
18147 --- linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18148 +++ linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18149 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18150
18151 EXPORT_SYMBOL(copy_user_generic);
18152 EXPORT_SYMBOL(__copy_user_nocache);
18153 -EXPORT_SYMBOL(copy_from_user);
18154 -EXPORT_SYMBOL(copy_to_user);
18155 EXPORT_SYMBOL(__copy_from_user_inatomic);
18156
18157 EXPORT_SYMBOL(copy_page);
18158 diff -urNp linux-2.6.32.43/arch/x86/kernel/xsave.c linux-2.6.32.43/arch/x86/kernel/xsave.c
18159 --- linux-2.6.32.43/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18160 +++ linux-2.6.32.43/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18161 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18162 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18163 return -1;
18164
18165 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18166 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18167 fx_sw_user->extended_size -
18168 FP_XSTATE_MAGIC2_SIZE));
18169 /*
18170 @@ -196,7 +196,7 @@ fx_only:
18171 * the other extended state.
18172 */
18173 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18174 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18175 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18176 }
18177
18178 /*
18179 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18180 if (task_thread_info(tsk)->status & TS_XSAVE)
18181 err = restore_user_xstate(buf);
18182 else
18183 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18184 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
18185 buf);
18186 if (unlikely(err)) {
18187 /*
18188 diff -urNp linux-2.6.32.43/arch/x86/kvm/emulate.c linux-2.6.32.43/arch/x86/kvm/emulate.c
18189 --- linux-2.6.32.43/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18190 +++ linux-2.6.32.43/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18191 @@ -81,8 +81,8 @@
18192 #define Src2CL (1<<29)
18193 #define Src2ImmByte (2<<29)
18194 #define Src2One (3<<29)
18195 -#define Src2Imm16 (4<<29)
18196 -#define Src2Mask (7<<29)
18197 +#define Src2Imm16 (4U<<29)
18198 +#define Src2Mask (7U<<29)
18199
18200 enum {
18201 Group1_80, Group1_81, Group1_82, Group1_83,
18202 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
18203
18204 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18205 do { \
18206 + unsigned long _tmp; \
18207 __asm__ __volatile__ ( \
18208 _PRE_EFLAGS("0", "4", "2") \
18209 _op _suffix " %"_x"3,%1; " \
18210 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
18211 /* Raw emulation: instruction has two explicit operands. */
18212 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18213 do { \
18214 - unsigned long _tmp; \
18215 - \
18216 switch ((_dst).bytes) { \
18217 case 2: \
18218 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18219 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
18220
18221 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18222 do { \
18223 - unsigned long _tmp; \
18224 switch ((_dst).bytes) { \
18225 case 1: \
18226 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
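The U suffixes added to Src2Imm16 and Src2Mask in the first emulate.c hunk above matter because 4<<29 and 7<<29 set bit 31: shifting into the sign bit of a plain int is undefined for signed arithmetic and, in practice, yields a negative value where an unsigned 32-bit flag mask was intended. A small demonstration of the difference, kept in well-defined territory:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask      = 7u << 29;          /* well defined: 0xe0000000 */
	int32_t  as_signed = (int32_t)mask;     /* same bits viewed as signed */

	printf("7u << 29 = 0x%08x\n", mask);
	printf("as int32 = %d\n", as_signed);   /* negative on two's complement */

	/* Plain 7 << 29 would shift into the sign bit of int, which is
	 * undefined behaviour; 4U<<29 and 7U<<29 keep the flag arithmetic
	 * entirely in unsigned 32-bit values. */
	return 0;
}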
18227 diff -urNp linux-2.6.32.43/arch/x86/kvm/lapic.c linux-2.6.32.43/arch/x86/kvm/lapic.c
18228 --- linux-2.6.32.43/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18229 +++ linux-2.6.32.43/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18230 @@ -52,7 +52,7 @@
18231 #define APIC_BUS_CYCLE_NS 1
18232
18233 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18234 -#define apic_debug(fmt, arg...)
18235 +#define apic_debug(fmt, arg...) do {} while (0)
18236
18237 #define APIC_LVT_NUM 6
18238 /* 14 is the version for Xeon and Pentium 8.4.8*/
18239 diff -urNp linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h
18240 --- linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18241 +++ linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18242 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18243 int level = PT_PAGE_TABLE_LEVEL;
18244 unsigned long mmu_seq;
18245
18246 + pax_track_stack();
18247 +
18248 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18249 kvm_mmu_audit(vcpu, "pre page fault");
18250
18251 diff -urNp linux-2.6.32.43/arch/x86/kvm/svm.c linux-2.6.32.43/arch/x86/kvm/svm.c
18252 --- linux-2.6.32.43/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18253 +++ linux-2.6.32.43/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
18254 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
18255 static void reload_tss(struct kvm_vcpu *vcpu)
18256 {
18257 int cpu = raw_smp_processor_id();
18258 -
18259 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18260 +
18261 + pax_open_kernel();
18262 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18263 + pax_close_kernel();
18264 +
18265 load_TR_desc();
18266 }
18267
18268 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
18269 return true;
18270 }
18271
18272 -static struct kvm_x86_ops svm_x86_ops = {
18273 +static const struct kvm_x86_ops svm_x86_ops = {
18274 .cpu_has_kvm_support = has_svm,
18275 .disabled_by_bios = is_disabled,
18276 .hardware_setup = svm_hardware_setup,
18277 diff -urNp linux-2.6.32.43/arch/x86/kvm/vmx.c linux-2.6.32.43/arch/x86/kvm/vmx.c
18278 --- linux-2.6.32.43/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18279 +++ linux-2.6.32.43/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18280 @@ -570,7 +570,11 @@ static void reload_tss(void)
18281
18282 kvm_get_gdt(&gdt);
18283 descs = (void *)gdt.base;
18284 +
18285 + pax_open_kernel();
18286 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18287 + pax_close_kernel();
18288 +
18289 load_TR_desc();
18290 }
18291
18292 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18293 if (!cpu_has_vmx_flexpriority())
18294 flexpriority_enabled = 0;
18295
18296 - if (!cpu_has_vmx_tpr_shadow())
18297 - kvm_x86_ops->update_cr8_intercept = NULL;
18298 + if (!cpu_has_vmx_tpr_shadow()) {
18299 + pax_open_kernel();
18300 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18301 + pax_close_kernel();
18302 + }
18303
18304 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18305 kvm_disable_largepages();
18306 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18307 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18308
18309 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18310 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18311 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18312 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18313 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18314 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18315 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18316 "jmp .Lkvm_vmx_return \n\t"
18317 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18318 ".Lkvm_vmx_return: "
18319 +
18320 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18321 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18322 + ".Lkvm_vmx_return2: "
18323 +#endif
18324 +
18325 /* Save guest registers, load host registers, keep flags */
18326 "xchg %0, (%%"R"sp) \n\t"
18327 "mov %%"R"ax, %c[rax](%0) \n\t"
18328 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18329 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18330 #endif
18331 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18332 +
18333 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18334 + ,[cs]"i"(__KERNEL_CS)
18335 +#endif
18336 +
18337 : "cc", "memory"
18338 - , R"bx", R"di", R"si"
18339 + , R"ax", R"bx", R"di", R"si"
18340 #ifdef CONFIG_X86_64
18341 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18342 #endif
18343 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18344 if (vmx->rmode.irq.pending)
18345 fixup_rmode_irq(vmx);
18346
18347 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18348 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18349 +
18350 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18351 + loadsegment(fs, __KERNEL_PERCPU);
18352 +#endif
18353 +
18354 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18355 + __set_fs(current_thread_info()->addr_limit);
18356 +#endif
18357 +
18358 vmx->launched = 1;
18359
18360 vmx_complete_interrupts(vmx);
18361 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18362 return false;
18363 }
18364
18365 -static struct kvm_x86_ops vmx_x86_ops = {
18366 +static const struct kvm_x86_ops vmx_x86_ops = {
18367 .cpu_has_kvm_support = cpu_has_kvm_support,
18368 .disabled_by_bios = vmx_disabled_by_bios,
18369 .hardware_setup = hardware_setup,
18370 diff -urNp linux-2.6.32.43/arch/x86/kvm/x86.c linux-2.6.32.43/arch/x86/kvm/x86.c
18371 --- linux-2.6.32.43/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18372 +++ linux-2.6.32.43/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18373 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18374 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18375 struct kvm_cpuid_entry2 __user *entries);
18376
18377 -struct kvm_x86_ops *kvm_x86_ops;
18378 +const struct kvm_x86_ops *kvm_x86_ops;
18379 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18380
18381 int ignore_msrs = 0;
18382 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18383 struct kvm_cpuid2 *cpuid,
18384 struct kvm_cpuid_entry2 __user *entries)
18385 {
18386 - int r;
18387 + int r, i;
18388
18389 r = -E2BIG;
18390 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18391 goto out;
18392 r = -EFAULT;
18393 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18394 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18395 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18396 goto out;
18397 + for (i = 0; i < cpuid->nent; ++i) {
18398 + struct kvm_cpuid_entry2 cpuid_entry;
18399 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18400 + goto out;
18401 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18402 + }
18403 vcpu->arch.cpuid_nent = cpuid->nent;
18404 kvm_apic_set_version(vcpu);
18405 return 0;
18406 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18407 struct kvm_cpuid2 *cpuid,
18408 struct kvm_cpuid_entry2 __user *entries)
18409 {
18410 - int r;
18411 + int r, i;
18412
18413 vcpu_load(vcpu);
18414 r = -E2BIG;
18415 if (cpuid->nent < vcpu->arch.cpuid_nent)
18416 goto out;
18417 r = -EFAULT;
18418 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18419 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18420 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18421 goto out;
18422 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18423 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18424 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18425 + goto out;
18426 + }
18427 return 0;
18428
18429 out:
18430 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18431 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18432 struct kvm_interrupt *irq)
18433 {
18434 - if (irq->irq < 0 || irq->irq >= 256)
18435 + if (irq->irq >= 256)
18436 return -EINVAL;
18437 if (irqchip_in_kernel(vcpu->kvm))
18438 return -ENXIO;
18439 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18440 .notifier_call = kvmclock_cpufreq_notifier
18441 };
18442
18443 -int kvm_arch_init(void *opaque)
18444 +int kvm_arch_init(const void *opaque)
18445 {
18446 int r, cpu;
18447 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18448 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18449
18450 if (kvm_x86_ops) {
18451 printk(KERN_ERR "kvm: already loaded the other module\n");
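Dropping "irq->irq < 0" in kvm_vcpu_ioctl_interrupt() above is safe because the irq field of struct kvm_interrupt is an unsigned __u32 in the KVM ABI: the comparison can never be true, it only earns a compiler warning, and the single ">= 256" bound is the whole range check. A short illustration of why the lower-bound test is dead code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t irq = (uint32_t)-1;   /* what passing "-1" turns into */

	/* For an unsigned type, irq < 0 is always false (gcc -Wtype-limits
	 * flags it), so only the upper-bound comparison can ever reject. */
	if (irq < 0 || irq >= 256)
		printf("rejected by the >= 256 test\n");
	return 0;
}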
18452 diff -urNp linux-2.6.32.43/arch/x86/lib/atomic64_32.c linux-2.6.32.43/arch/x86/lib/atomic64_32.c
18453 --- linux-2.6.32.43/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18454 +++ linux-2.6.32.43/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18455 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18456 }
18457 EXPORT_SYMBOL(atomic64_cmpxchg);
18458
18459 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18460 +{
18461 + return cmpxchg8b(&ptr->counter, old_val, new_val);
18462 +}
18463 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18464 +
18465 /**
18466 * atomic64_xchg - xchg atomic64 variable
18467 * @ptr: pointer to type atomic64_t
18468 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18469 EXPORT_SYMBOL(atomic64_xchg);
18470
18471 /**
18472 + * atomic64_xchg_unchecked - xchg atomic64 variable
18473 + * @ptr: pointer to type atomic64_unchecked_t
18474 + * @new_val: value to assign
18475 + *
18476 + * Atomically xchgs the value of @ptr to @new_val and returns
18477 + * the old value.
18478 + */
18479 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18480 +{
18481 + /*
18482 + * Try first with a (possibly incorrect) assumption about
18483 + * what we have there. We'll do two loops most likely,
18484 + * but we'll get an ownership MESI transaction straight away
18485 + * instead of a read transaction followed by a
18486 + * flush-for-ownership transaction:
18487 + */
18488 + u64 old_val, real_val = 0;
18489 +
18490 + do {
18491 + old_val = real_val;
18492 +
18493 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18494 +
18495 + } while (real_val != old_val);
18496 +
18497 + return old_val;
18498 +}
18499 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
18500 +
18501 +/**
18502 * atomic64_set - set atomic64 variable
18503 * @ptr: pointer to type atomic64_t
18504 * @new_val: value to assign
18505 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18506 EXPORT_SYMBOL(atomic64_set);
18507
18508 /**
18509 -EXPORT_SYMBOL(atomic64_read);
18510 + * atomic64_unchecked_set - set atomic64 variable
18511 + * @ptr: pointer to type atomic64_unchecked_t
18512 + * @new_val: value to assign
18513 + *
18514 + * Atomically sets the value of @ptr to @new_val.
18515 + */
18516 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18517 +{
18518 + atomic64_xchg_unchecked(ptr, new_val);
18519 +}
18520 +EXPORT_SYMBOL(atomic64_set_unchecked);
18521 +
18522 +/**
18523 * atomic64_add_return - add and return
18524 * @delta: integer value to add
18525 * @ptr: pointer to type atomic64_t
18526 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18527 }
18528 EXPORT_SYMBOL(atomic64_add_return);
18529
18530 +/**
18531 + * atomic64_add_return_unchecked - add and return
18532 + * @delta: integer value to add
18533 + * @ptr: pointer to type atomic64_unchecked_t
18534 + *
18535 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
18536 + */
18537 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18538 +{
18539 + /*
18540 + * Try first with a (possibly incorrect) assumption about
18541 + * what we have there. We'll do two loops most likely,
18542 + * but we'll get an ownership MESI transaction straight away
18543 + * instead of a read transaction followed by a
18544 + * flush-for-ownership transaction:
18545 + */
18546 + u64 old_val, new_val, real_val = 0;
18547 +
18548 + do {
18549 + old_val = real_val;
18550 + new_val = old_val + delta;
18551 +
18552 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18553 +
18554 + } while (real_val != old_val);
18555 +
18556 + return new_val;
18557 +}
18558 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
18559 +
18560 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18561 {
18562 return atomic64_add_return(-delta, ptr);
18563 }
18564 EXPORT_SYMBOL(atomic64_sub_return);
18565
18566 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18567 +{
18568 + return atomic64_add_return_unchecked(-delta, ptr);
18569 +}
18570 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18571 +
18572 u64 atomic64_inc_return(atomic64_t *ptr)
18573 {
18574 return atomic64_add_return(1, ptr);
18575 }
18576 EXPORT_SYMBOL(atomic64_inc_return);
18577
18578 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18579 +{
18580 + return atomic64_add_return_unchecked(1, ptr);
18581 +}
18582 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18583 +
18584 u64 atomic64_dec_return(atomic64_t *ptr)
18585 {
18586 return atomic64_sub_return(1, ptr);
18587 }
18588 EXPORT_SYMBOL(atomic64_dec_return);
18589
18590 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18591 +{
18592 + return atomic64_sub_return_unchecked(1, ptr);
18593 +}
18594 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18595 +
18596 /**
18597 * atomic64_add - add integer to atomic64 variable
18598 * @delta: integer value to add
18599 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18600 EXPORT_SYMBOL(atomic64_add);
18601
18602 /**
18603 + * atomic64_add_unchecked - add integer to atomic64 variable
18604 + * @delta: integer value to add
18605 + * @ptr: pointer to type atomic64_unchecked_t
18606 + *
18607 + * Atomically adds @delta to @ptr.
18608 + */
18609 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18610 +{
18611 + atomic64_add_return_unchecked(delta, ptr);
18612 +}
18613 +EXPORT_SYMBOL(atomic64_add_unchecked);
18614 +
18615 +/**
18616 * atomic64_sub - subtract the atomic64 variable
18617 * @delta: integer value to subtract
18618 * @ptr: pointer to type atomic64_t
18619 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18620 EXPORT_SYMBOL(atomic64_sub);
18621
18622 /**
18623 + * atomic64_sub_unchecked - subtract the atomic64 variable
18624 + * @delta: integer value to subtract
18625 + * @ptr: pointer to type atomic64_unchecked_t
18626 + *
18627 + * Atomically subtracts @delta from @ptr.
18628 + */
18629 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18630 +{
18631 + atomic64_add_unchecked(-delta, ptr);
18632 +}
18633 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18634 +
18635 +/**
18636 * atomic64_sub_and_test - subtract value from variable and test result
18637 * @delta: integer value to subtract
18638 * @ptr: pointer to type atomic64_t
18639 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18640 EXPORT_SYMBOL(atomic64_inc);
18641
18642 /**
18643 + * atomic64_inc_unchecked - increment atomic64 variable
18644 + * @ptr: pointer to type atomic64_unchecked_t
18645 + *
18646 + * Atomically increments @ptr by 1.
18647 + */
18648 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18649 +{
18650 + atomic64_add_unchecked(1, ptr);
18651 +}
18652 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18653 +
18654 +/**
18655 * atomic64_dec - decrement atomic64 variable
18656 * @ptr: pointer to type atomic64_t
18657 *
18658 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18659 EXPORT_SYMBOL(atomic64_dec);
18660
18661 /**
18662 + * atomic64_dec_unchecked - decrement atomic64 variable
18663 + * @ptr: pointer to type atomic64_unchecked_t
18664 + *
18665 + * Atomically decrements @ptr by 1.
18666 + */
18667 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18668 +{
18669 + atomic64_sub_unchecked(1, ptr);
18670 +}
18671 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18672 +
18673 +/**
18674 * atomic64_dec_and_test - decrement and test
18675 * @ptr: pointer to type atomic64_t
18676 *
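Every *_unchecked helper added above reduces to the same retry loop: take a snapshot of the 64-bit counter, compute the new value, and let the compare-and-swap both detect interference and hand back the value that was actually there, repeating until the swap sticks. A portable userspace sketch of that loop built on the GCC/Clang atomic builtins rather than the kernel's cmpxchg8b wrapper:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: 64-bit add-and-return from compare-and-swap, mirroring
 * the old_val/real_val loop in atomic64_add_return_unchecked(). */
static uint64_t add_return_cas(uint64_t *ptr, uint64_t delta)
{
	uint64_t old_val = __atomic_load_n(ptr, __ATOMIC_RELAXED);
	uint64_t new_val;

	do {
		new_val = old_val + delta;
		/* On failure old_val is refreshed with the current *ptr,
		 * so the next pass recomputes new_val from live data. */
	} while (!__atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
	                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return new_val;
}

int main(void)
{
	uint64_t counter = 41;

	printf("%llu\n", (unsigned long long)add_return_cas(&counter, 1));
	return 0;
}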
18677 diff -urNp linux-2.6.32.43/arch/x86/lib/checksum_32.S linux-2.6.32.43/arch/x86/lib/checksum_32.S
18678 --- linux-2.6.32.43/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18679 +++ linux-2.6.32.43/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18680 @@ -28,7 +28,8 @@
18681 #include <linux/linkage.h>
18682 #include <asm/dwarf2.h>
18683 #include <asm/errno.h>
18684 -
18685 +#include <asm/segment.h>
18686 +
18687 /*
18688 * computes a partial checksum, e.g. for TCP/UDP fragments
18689 */
18690 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18691
18692 #define ARGBASE 16
18693 #define FP 12
18694 -
18695 -ENTRY(csum_partial_copy_generic)
18696 +
18697 +ENTRY(csum_partial_copy_generic_to_user)
18698 CFI_STARTPROC
18699 +
18700 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18701 + pushl %gs
18702 + CFI_ADJUST_CFA_OFFSET 4
18703 + popl %es
18704 + CFI_ADJUST_CFA_OFFSET -4
18705 + jmp csum_partial_copy_generic
18706 +#endif
18707 +
18708 +ENTRY(csum_partial_copy_generic_from_user)
18709 +
18710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18711 + pushl %gs
18712 + CFI_ADJUST_CFA_OFFSET 4
18713 + popl %ds
18714 + CFI_ADJUST_CFA_OFFSET -4
18715 +#endif
18716 +
18717 +ENTRY(csum_partial_copy_generic)
18718 subl $4,%esp
18719 CFI_ADJUST_CFA_OFFSET 4
18720 pushl %edi
18721 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18722 jmp 4f
18723 SRC(1: movw (%esi), %bx )
18724 addl $2, %esi
18725 -DST( movw %bx, (%edi) )
18726 +DST( movw %bx, %es:(%edi) )
18727 addl $2, %edi
18728 addw %bx, %ax
18729 adcl $0, %eax
18730 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18731 SRC(1: movl (%esi), %ebx )
18732 SRC( movl 4(%esi), %edx )
18733 adcl %ebx, %eax
18734 -DST( movl %ebx, (%edi) )
18735 +DST( movl %ebx, %es:(%edi) )
18736 adcl %edx, %eax
18737 -DST( movl %edx, 4(%edi) )
18738 +DST( movl %edx, %es:4(%edi) )
18739
18740 SRC( movl 8(%esi), %ebx )
18741 SRC( movl 12(%esi), %edx )
18742 adcl %ebx, %eax
18743 -DST( movl %ebx, 8(%edi) )
18744 +DST( movl %ebx, %es:8(%edi) )
18745 adcl %edx, %eax
18746 -DST( movl %edx, 12(%edi) )
18747 +DST( movl %edx, %es:12(%edi) )
18748
18749 SRC( movl 16(%esi), %ebx )
18750 SRC( movl 20(%esi), %edx )
18751 adcl %ebx, %eax
18752 -DST( movl %ebx, 16(%edi) )
18753 +DST( movl %ebx, %es:16(%edi) )
18754 adcl %edx, %eax
18755 -DST( movl %edx, 20(%edi) )
18756 +DST( movl %edx, %es:20(%edi) )
18757
18758 SRC( movl 24(%esi), %ebx )
18759 SRC( movl 28(%esi), %edx )
18760 adcl %ebx, %eax
18761 -DST( movl %ebx, 24(%edi) )
18762 +DST( movl %ebx, %es:24(%edi) )
18763 adcl %edx, %eax
18764 -DST( movl %edx, 28(%edi) )
18765 +DST( movl %edx, %es:28(%edi) )
18766
18767 lea 32(%esi), %esi
18768 lea 32(%edi), %edi
18769 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18770 shrl $2, %edx # This clears CF
18771 SRC(3: movl (%esi), %ebx )
18772 adcl %ebx, %eax
18773 -DST( movl %ebx, (%edi) )
18774 +DST( movl %ebx, %es:(%edi) )
18775 lea 4(%esi), %esi
18776 lea 4(%edi), %edi
18777 dec %edx
18778 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18779 jb 5f
18780 SRC( movw (%esi), %cx )
18781 leal 2(%esi), %esi
18782 -DST( movw %cx, (%edi) )
18783 +DST( movw %cx, %es:(%edi) )
18784 leal 2(%edi), %edi
18785 je 6f
18786 shll $16,%ecx
18787 SRC(5: movb (%esi), %cl )
18788 -DST( movb %cl, (%edi) )
18789 +DST( movb %cl, %es:(%edi) )
18790 6: addl %ecx, %eax
18791 adcl $0, %eax
18792 7:
18793 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18794
18795 6001:
18796 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18797 - movl $-EFAULT, (%ebx)
18798 + movl $-EFAULT, %ss:(%ebx)
18799
18800 # zero the complete destination - computing the rest
18801 # is too much work
18802 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18803
18804 6002:
18805 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18806 - movl $-EFAULT,(%ebx)
18807 + movl $-EFAULT,%ss:(%ebx)
18808 jmp 5000b
18809
18810 .previous
18811
18812 + pushl %ss
18813 + CFI_ADJUST_CFA_OFFSET 4
18814 + popl %ds
18815 + CFI_ADJUST_CFA_OFFSET -4
18816 + pushl %ss
18817 + CFI_ADJUST_CFA_OFFSET 4
18818 + popl %es
18819 + CFI_ADJUST_CFA_OFFSET -4
18820 popl %ebx
18821 CFI_ADJUST_CFA_OFFSET -4
18822 CFI_RESTORE ebx
18823 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18824 CFI_ADJUST_CFA_OFFSET -4
18825 ret
18826 CFI_ENDPROC
18827 -ENDPROC(csum_partial_copy_generic)
18828 +ENDPROC(csum_partial_copy_generic_to_user)
18829
18830 #else
18831
18832 /* Version for PentiumII/PPro */
18833
18834 #define ROUND1(x) \
18835 + nop; nop; nop; \
18836 SRC(movl x(%esi), %ebx ) ; \
18837 addl %ebx, %eax ; \
18838 - DST(movl %ebx, x(%edi) ) ;
18839 + DST(movl %ebx, %es:x(%edi)) ;
18840
18841 #define ROUND(x) \
18842 + nop; nop; nop; \
18843 SRC(movl x(%esi), %ebx ) ; \
18844 adcl %ebx, %eax ; \
18845 - DST(movl %ebx, x(%edi) ) ;
18846 + DST(movl %ebx, %es:x(%edi)) ;
18847
18848 #define ARGBASE 12
18849 -
18850 -ENTRY(csum_partial_copy_generic)
18851 +
18852 +ENTRY(csum_partial_copy_generic_to_user)
18853 CFI_STARTPROC
18854 +
18855 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18856 + pushl %gs
18857 + CFI_ADJUST_CFA_OFFSET 4
18858 + popl %es
18859 + CFI_ADJUST_CFA_OFFSET -4
18860 + jmp csum_partial_copy_generic
18861 +#endif
18862 +
18863 +ENTRY(csum_partial_copy_generic_from_user)
18864 +
18865 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18866 + pushl %gs
18867 + CFI_ADJUST_CFA_OFFSET 4
18868 + popl %ds
18869 + CFI_ADJUST_CFA_OFFSET -4
18870 +#endif
18871 +
18872 +ENTRY(csum_partial_copy_generic)
18873 pushl %ebx
18874 CFI_ADJUST_CFA_OFFSET 4
18875 CFI_REL_OFFSET ebx, 0
18876 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18877 subl %ebx, %edi
18878 lea -1(%esi),%edx
18879 andl $-32,%edx
18880 - lea 3f(%ebx,%ebx), %ebx
18881 + lea 3f(%ebx,%ebx,2), %ebx
18882 testl %esi, %esi
18883 jmp *%ebx
18884 1: addl $64,%esi
18885 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18886 jb 5f
18887 SRC( movw (%esi), %dx )
18888 leal 2(%esi), %esi
18889 -DST( movw %dx, (%edi) )
18890 +DST( movw %dx, %es:(%edi) )
18891 leal 2(%edi), %edi
18892 je 6f
18893 shll $16,%edx
18894 5:
18895 SRC( movb (%esi), %dl )
18896 -DST( movb %dl, (%edi) )
18897 +DST( movb %dl, %es:(%edi) )
18898 6: addl %edx, %eax
18899 adcl $0, %eax
18900 7:
18901 .section .fixup, "ax"
18902 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18903 - movl $-EFAULT, (%ebx)
18904 + movl $-EFAULT, %ss:(%ebx)
18905 # zero the complete destination (computing the rest is too much work)
18906 movl ARGBASE+8(%esp),%edi # dst
18907 movl ARGBASE+12(%esp),%ecx # len
18908 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18909 rep; stosb
18910 jmp 7b
18911 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18912 - movl $-EFAULT, (%ebx)
18913 + movl $-EFAULT, %ss:(%ebx)
18914 jmp 7b
18915 .previous
18916
18917 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18918 + pushl %ss
18919 + CFI_ADJUST_CFA_OFFSET 4
18920 + popl %ds
18921 + CFI_ADJUST_CFA_OFFSET -4
18922 + pushl %ss
18923 + CFI_ADJUST_CFA_OFFSET 4
18924 + popl %es
18925 + CFI_ADJUST_CFA_OFFSET -4
18926 +#endif
18927 +
18928 popl %esi
18929 CFI_ADJUST_CFA_OFFSET -4
18930 CFI_RESTORE esi
18931 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18932 CFI_RESTORE ebx
18933 ret
18934 CFI_ENDPROC
18935 -ENDPROC(csum_partial_copy_generic)
18936 +ENDPROC(csum_partial_copy_generic_to_user)
18937
18938 #undef ROUND
18939 #undef ROUND1
18940 diff -urNp linux-2.6.32.43/arch/x86/lib/clear_page_64.S linux-2.6.32.43/arch/x86/lib/clear_page_64.S
18941 --- linux-2.6.32.43/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18942 +++ linux-2.6.32.43/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18943 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18944
18945 #include <asm/cpufeature.h>
18946
18947 - .section .altinstr_replacement,"ax"
18948 + .section .altinstr_replacement,"a"
18949 1: .byte 0xeb /* jmp <disp8> */
18950 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18951 2:
18952 diff -urNp linux-2.6.32.43/arch/x86/lib/copy_page_64.S linux-2.6.32.43/arch/x86/lib/copy_page_64.S
18953 --- linux-2.6.32.43/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18954 +++ linux-2.6.32.43/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18955 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18956
18957 #include <asm/cpufeature.h>
18958
18959 - .section .altinstr_replacement,"ax"
18960 + .section .altinstr_replacement,"a"
18961 1: .byte 0xeb /* jmp <disp8> */
18962 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18963 2:
18964 diff -urNp linux-2.6.32.43/arch/x86/lib/copy_user_64.S linux-2.6.32.43/arch/x86/lib/copy_user_64.S
18965 --- linux-2.6.32.43/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
18966 +++ linux-2.6.32.43/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
18967 @@ -15,13 +15,14 @@
18968 #include <asm/asm-offsets.h>
18969 #include <asm/thread_info.h>
18970 #include <asm/cpufeature.h>
18971 +#include <asm/pgtable.h>
18972
18973 .macro ALTERNATIVE_JUMP feature,orig,alt
18974 0:
18975 .byte 0xe9 /* 32bit jump */
18976 .long \orig-1f /* by default jump to orig */
18977 1:
18978 - .section .altinstr_replacement,"ax"
18979 + .section .altinstr_replacement,"a"
18980 2: .byte 0xe9 /* near jump with 32bit immediate */
18981 .long \alt-1b /* offset */ /* or alternatively to alt */
18982 .previous
18983 @@ -64,49 +65,19 @@
18984 #endif
18985 .endm
18986
18987 -/* Standard copy_to_user with segment limit checking */
18988 -ENTRY(copy_to_user)
18989 - CFI_STARTPROC
18990 - GET_THREAD_INFO(%rax)
18991 - movq %rdi,%rcx
18992 - addq %rdx,%rcx
18993 - jc bad_to_user
18994 - cmpq TI_addr_limit(%rax),%rcx
18995 - ja bad_to_user
18996 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18997 - CFI_ENDPROC
18998 -ENDPROC(copy_to_user)
18999 -
19000 -/* Standard copy_from_user with segment limit checking */
19001 -ENTRY(copy_from_user)
19002 - CFI_STARTPROC
19003 - GET_THREAD_INFO(%rax)
19004 - movq %rsi,%rcx
19005 - addq %rdx,%rcx
19006 - jc bad_from_user
19007 - cmpq TI_addr_limit(%rax),%rcx
19008 - ja bad_from_user
19009 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19010 - CFI_ENDPROC
19011 -ENDPROC(copy_from_user)
19012 -
19013 ENTRY(copy_user_generic)
19014 CFI_STARTPROC
19015 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19016 CFI_ENDPROC
19017 ENDPROC(copy_user_generic)
19018
19019 -ENTRY(__copy_from_user_inatomic)
19020 - CFI_STARTPROC
19021 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19022 - CFI_ENDPROC
19023 -ENDPROC(__copy_from_user_inatomic)
19024 -
19025 .section .fixup,"ax"
19026 /* must zero dest */
19027 ENTRY(bad_from_user)
19028 bad_from_user:
19029 CFI_STARTPROC
19030 + testl %edx,%edx
19031 + js bad_to_user
19032 movl %edx,%ecx
19033 xorl %eax,%eax
19034 rep
19035 diff -urNp linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S
19036 --- linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19037 +++ linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19038 @@ -14,6 +14,7 @@
19039 #include <asm/current.h>
19040 #include <asm/asm-offsets.h>
19041 #include <asm/thread_info.h>
19042 +#include <asm/pgtable.h>
19043
19044 .macro ALIGN_DESTINATION
19045 #ifdef FIX_ALIGNMENT
19046 @@ -50,6 +51,15 @@
19047 */
19048 ENTRY(__copy_user_nocache)
19049 CFI_STARTPROC
19050 +
19051 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19052 + mov $PAX_USER_SHADOW_BASE,%rcx
19053 + cmp %rcx,%rsi
19054 + jae 1f
19055 + add %rcx,%rsi
19056 +1:
19057 +#endif
19058 +
19059 cmpl $8,%edx
19060 jb 20f /* less then 8 bytes, go to byte copy loop */
19061 ALIGN_DESTINATION
19062 diff -urNp linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c
19063 --- linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19064 +++ linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19065 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19066 len -= 2;
19067 }
19068 }
19069 +
19070 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19071 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19072 + src += PAX_USER_SHADOW_BASE;
19073 +#endif
19074 +
19075 isum = csum_partial_copy_generic((__force const void *)src,
19076 dst, len, isum, errp, NULL);
19077 if (unlikely(*errp))
19078 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19079 }
19080
19081 *errp = 0;
19082 +
19083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19084 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19085 + dst += PAX_USER_SHADOW_BASE;
19086 +#endif
19087 +
19088 return csum_partial_copy_generic(src, (void __force *)dst,
19089 len, isum, NULL, errp);
19090 }
19091 diff -urNp linux-2.6.32.43/arch/x86/lib/getuser.S linux-2.6.32.43/arch/x86/lib/getuser.S
19092 --- linux-2.6.32.43/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19093 +++ linux-2.6.32.43/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19094 @@ -33,14 +33,35 @@
19095 #include <asm/asm-offsets.h>
19096 #include <asm/thread_info.h>
19097 #include <asm/asm.h>
19098 +#include <asm/segment.h>
19099 +#include <asm/pgtable.h>
19100 +
19101 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19102 +#define __copyuser_seg gs;
19103 +#else
19104 +#define __copyuser_seg
19105 +#endif
19106
19107 .text
19108 ENTRY(__get_user_1)
19109 CFI_STARTPROC
19110 +
19111 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19112 GET_THREAD_INFO(%_ASM_DX)
19113 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19114 jae bad_get_user
19115 -1: movzb (%_ASM_AX),%edx
19116 +
19117 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19118 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19119 + cmp %_ASM_DX,%_ASM_AX
19120 + jae 1234f
19121 + add %_ASM_DX,%_ASM_AX
19122 +1234:
19123 +#endif
19124 +
19125 +#endif
19126 +
19127 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19128 xor %eax,%eax
19129 ret
19130 CFI_ENDPROC
19131 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19132 ENTRY(__get_user_2)
19133 CFI_STARTPROC
19134 add $1,%_ASM_AX
19135 +
19136 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19137 jc bad_get_user
19138 GET_THREAD_INFO(%_ASM_DX)
19139 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19140 jae bad_get_user
19141 -2: movzwl -1(%_ASM_AX),%edx
19142 +
19143 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19144 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19145 + cmp %_ASM_DX,%_ASM_AX
19146 + jae 1234f
19147 + add %_ASM_DX,%_ASM_AX
19148 +1234:
19149 +#endif
19150 +
19151 +#endif
19152 +
19153 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19154 xor %eax,%eax
19155 ret
19156 CFI_ENDPROC
19157 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19158 ENTRY(__get_user_4)
19159 CFI_STARTPROC
19160 add $3,%_ASM_AX
19161 +
19162 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19163 jc bad_get_user
19164 GET_THREAD_INFO(%_ASM_DX)
19165 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19166 jae bad_get_user
19167 -3: mov -3(%_ASM_AX),%edx
19168 +
19169 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19170 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19171 + cmp %_ASM_DX,%_ASM_AX
19172 + jae 1234f
19173 + add %_ASM_DX,%_ASM_AX
19174 +1234:
19175 +#endif
19176 +
19177 +#endif
19178 +
19179 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19180 xor %eax,%eax
19181 ret
19182 CFI_ENDPROC
19183 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19184 GET_THREAD_INFO(%_ASM_DX)
19185 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19186 jae bad_get_user
19187 +
19188 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19189 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19190 + cmp %_ASM_DX,%_ASM_AX
19191 + jae 1234f
19192 + add %_ASM_DX,%_ASM_AX
19193 +1234:
19194 +#endif
19195 +
19196 4: movq -7(%_ASM_AX),%_ASM_DX
19197 xor %eax,%eax
19198 ret
19199 diff -urNp linux-2.6.32.43/arch/x86/lib/memcpy_64.S linux-2.6.32.43/arch/x86/lib/memcpy_64.S
19200 --- linux-2.6.32.43/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19201 +++ linux-2.6.32.43/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19202 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19203 * It is also a lot simpler. Use this when possible:
19204 */
19205
19206 - .section .altinstr_replacement, "ax"
19207 + .section .altinstr_replacement, "a"
19208 1: .byte 0xeb /* jmp <disp8> */
19209 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19210 2:
19211 diff -urNp linux-2.6.32.43/arch/x86/lib/memset_64.S linux-2.6.32.43/arch/x86/lib/memset_64.S
19212 --- linux-2.6.32.43/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19213 +++ linux-2.6.32.43/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19214 @@ -118,7 +118,7 @@ ENDPROC(__memset)
19215
19216 #include <asm/cpufeature.h>
19217
19218 - .section .altinstr_replacement,"ax"
19219 + .section .altinstr_replacement,"a"
19220 1: .byte 0xeb /* jmp <disp8> */
19221 .byte (memset_c - memset) - (2f - 1b) /* offset */
19222 2:
19223 diff -urNp linux-2.6.32.43/arch/x86/lib/mmx_32.c linux-2.6.32.43/arch/x86/lib/mmx_32.c
19224 --- linux-2.6.32.43/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19225 +++ linux-2.6.32.43/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19226 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19227 {
19228 void *p;
19229 int i;
19230 + unsigned long cr0;
19231
19232 if (unlikely(in_interrupt()))
19233 return __memcpy(to, from, len);
19234 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19235 kernel_fpu_begin();
19236
19237 __asm__ __volatile__ (
19238 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19239 - " prefetch 64(%0)\n"
19240 - " prefetch 128(%0)\n"
19241 - " prefetch 192(%0)\n"
19242 - " prefetch 256(%0)\n"
19243 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19244 + " prefetch 64(%1)\n"
19245 + " prefetch 128(%1)\n"
19246 + " prefetch 192(%1)\n"
19247 + " prefetch 256(%1)\n"
19248 "2: \n"
19249 ".section .fixup, \"ax\"\n"
19250 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19251 + "3: \n"
19252 +
19253 +#ifdef CONFIG_PAX_KERNEXEC
19254 + " movl %%cr0, %0\n"
19255 + " movl %0, %%eax\n"
19256 + " andl $0xFFFEFFFF, %%eax\n"
19257 + " movl %%eax, %%cr0\n"
19258 +#endif
19259 +
19260 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19261 +
19262 +#ifdef CONFIG_PAX_KERNEXEC
19263 + " movl %0, %%cr0\n"
19264 +#endif
19265 +
19266 " jmp 2b\n"
19267 ".previous\n"
19268 _ASM_EXTABLE(1b, 3b)
19269 - : : "r" (from));
19270 + : "=&r" (cr0) : "r" (from) : "ax");
19271
19272 for ( ; i > 5; i--) {
19273 __asm__ __volatile__ (
19274 - "1: prefetch 320(%0)\n"
19275 - "2: movq (%0), %%mm0\n"
19276 - " movq 8(%0), %%mm1\n"
19277 - " movq 16(%0), %%mm2\n"
19278 - " movq 24(%0), %%mm3\n"
19279 - " movq %%mm0, (%1)\n"
19280 - " movq %%mm1, 8(%1)\n"
19281 - " movq %%mm2, 16(%1)\n"
19282 - " movq %%mm3, 24(%1)\n"
19283 - " movq 32(%0), %%mm0\n"
19284 - " movq 40(%0), %%mm1\n"
19285 - " movq 48(%0), %%mm2\n"
19286 - " movq 56(%0), %%mm3\n"
19287 - " movq %%mm0, 32(%1)\n"
19288 - " movq %%mm1, 40(%1)\n"
19289 - " movq %%mm2, 48(%1)\n"
19290 - " movq %%mm3, 56(%1)\n"
19291 + "1: prefetch 320(%1)\n"
19292 + "2: movq (%1), %%mm0\n"
19293 + " movq 8(%1), %%mm1\n"
19294 + " movq 16(%1), %%mm2\n"
19295 + " movq 24(%1), %%mm3\n"
19296 + " movq %%mm0, (%2)\n"
19297 + " movq %%mm1, 8(%2)\n"
19298 + " movq %%mm2, 16(%2)\n"
19299 + " movq %%mm3, 24(%2)\n"
19300 + " movq 32(%1), %%mm0\n"
19301 + " movq 40(%1), %%mm1\n"
19302 + " movq 48(%1), %%mm2\n"
19303 + " movq 56(%1), %%mm3\n"
19304 + " movq %%mm0, 32(%2)\n"
19305 + " movq %%mm1, 40(%2)\n"
19306 + " movq %%mm2, 48(%2)\n"
19307 + " movq %%mm3, 56(%2)\n"
19308 ".section .fixup, \"ax\"\n"
19309 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19310 + "3:\n"
19311 +
19312 +#ifdef CONFIG_PAX_KERNEXEC
19313 + " movl %%cr0, %0\n"
19314 + " movl %0, %%eax\n"
19315 + " andl $0xFFFEFFFF, %%eax\n"
19316 + " movl %%eax, %%cr0\n"
19317 +#endif
19318 +
19319 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19320 +
19321 +#ifdef CONFIG_PAX_KERNEXEC
19322 + " movl %0, %%cr0\n"
19323 +#endif
19324 +
19325 " jmp 2b\n"
19326 ".previous\n"
19327 _ASM_EXTABLE(1b, 3b)
19328 - : : "r" (from), "r" (to) : "memory");
19329 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19330
19331 from += 64;
19332 to += 64;
19333 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19334 static void fast_copy_page(void *to, void *from)
19335 {
19336 int i;
19337 + unsigned long cr0;
19338
19339 kernel_fpu_begin();
19340
19341 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19342 * but that is for later. -AV
19343 */
19344 __asm__ __volatile__(
19345 - "1: prefetch (%0)\n"
19346 - " prefetch 64(%0)\n"
19347 - " prefetch 128(%0)\n"
19348 - " prefetch 192(%0)\n"
19349 - " prefetch 256(%0)\n"
19350 + "1: prefetch (%1)\n"
19351 + " prefetch 64(%1)\n"
19352 + " prefetch 128(%1)\n"
19353 + " prefetch 192(%1)\n"
19354 + " prefetch 256(%1)\n"
19355 "2: \n"
19356 ".section .fixup, \"ax\"\n"
19357 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19358 + "3: \n"
19359 +
19360 +#ifdef CONFIG_PAX_KERNEXEC
19361 + " movl %%cr0, %0\n"
19362 + " movl %0, %%eax\n"
19363 + " andl $0xFFFEFFFF, %%eax\n"
19364 + " movl %%eax, %%cr0\n"
19365 +#endif
19366 +
19367 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19368 +
19369 +#ifdef CONFIG_PAX_KERNEXEC
19370 + " movl %0, %%cr0\n"
19371 +#endif
19372 +
19373 " jmp 2b\n"
19374 ".previous\n"
19375 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19376 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19377
19378 for (i = 0; i < (4096-320)/64; i++) {
19379 __asm__ __volatile__ (
19380 - "1: prefetch 320(%0)\n"
19381 - "2: movq (%0), %%mm0\n"
19382 - " movntq %%mm0, (%1)\n"
19383 - " movq 8(%0), %%mm1\n"
19384 - " movntq %%mm1, 8(%1)\n"
19385 - " movq 16(%0), %%mm2\n"
19386 - " movntq %%mm2, 16(%1)\n"
19387 - " movq 24(%0), %%mm3\n"
19388 - " movntq %%mm3, 24(%1)\n"
19389 - " movq 32(%0), %%mm4\n"
19390 - " movntq %%mm4, 32(%1)\n"
19391 - " movq 40(%0), %%mm5\n"
19392 - " movntq %%mm5, 40(%1)\n"
19393 - " movq 48(%0), %%mm6\n"
19394 - " movntq %%mm6, 48(%1)\n"
19395 - " movq 56(%0), %%mm7\n"
19396 - " movntq %%mm7, 56(%1)\n"
19397 + "1: prefetch 320(%1)\n"
19398 + "2: movq (%1), %%mm0\n"
19399 + " movntq %%mm0, (%2)\n"
19400 + " movq 8(%1), %%mm1\n"
19401 + " movntq %%mm1, 8(%2)\n"
19402 + " movq 16(%1), %%mm2\n"
19403 + " movntq %%mm2, 16(%2)\n"
19404 + " movq 24(%1), %%mm3\n"
19405 + " movntq %%mm3, 24(%2)\n"
19406 + " movq 32(%1), %%mm4\n"
19407 + " movntq %%mm4, 32(%2)\n"
19408 + " movq 40(%1), %%mm5\n"
19409 + " movntq %%mm5, 40(%2)\n"
19410 + " movq 48(%1), %%mm6\n"
19411 + " movntq %%mm6, 48(%2)\n"
19412 + " movq 56(%1), %%mm7\n"
19413 + " movntq %%mm7, 56(%2)\n"
19414 ".section .fixup, \"ax\"\n"
19415 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19416 + "3:\n"
19417 +
19418 +#ifdef CONFIG_PAX_KERNEXEC
19419 + " movl %%cr0, %0\n"
19420 + " movl %0, %%eax\n"
19421 + " andl $0xFFFEFFFF, %%eax\n"
19422 + " movl %%eax, %%cr0\n"
19423 +#endif
19424 +
19425 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19426 +
19427 +#ifdef CONFIG_PAX_KERNEXEC
19428 + " movl %0, %%cr0\n"
19429 +#endif
19430 +
19431 " jmp 2b\n"
19432 ".previous\n"
19433 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19434 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19435
19436 from += 64;
19437 to += 64;
19438 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19439 static void fast_copy_page(void *to, void *from)
19440 {
19441 int i;
19442 + unsigned long cr0;
19443
19444 kernel_fpu_begin();
19445
19446 __asm__ __volatile__ (
19447 - "1: prefetch (%0)\n"
19448 - " prefetch 64(%0)\n"
19449 - " prefetch 128(%0)\n"
19450 - " prefetch 192(%0)\n"
19451 - " prefetch 256(%0)\n"
19452 + "1: prefetch (%1)\n"
19453 + " prefetch 64(%1)\n"
19454 + " prefetch 128(%1)\n"
19455 + " prefetch 192(%1)\n"
19456 + " prefetch 256(%1)\n"
19457 "2: \n"
19458 ".section .fixup, \"ax\"\n"
19459 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19460 + "3: \n"
19461 +
19462 +#ifdef CONFIG_PAX_KERNEXEC
19463 + " movl %%cr0, %0\n"
19464 + " movl %0, %%eax\n"
19465 + " andl $0xFFFEFFFF, %%eax\n"
19466 + " movl %%eax, %%cr0\n"
19467 +#endif
19468 +
19469 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19470 +
19471 +#ifdef CONFIG_PAX_KERNEXEC
19472 + " movl %0, %%cr0\n"
19473 +#endif
19474 +
19475 " jmp 2b\n"
19476 ".previous\n"
19477 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
19478 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19479
19480 for (i = 0; i < 4096/64; i++) {
19481 __asm__ __volatile__ (
19482 - "1: prefetch 320(%0)\n"
19483 - "2: movq (%0), %%mm0\n"
19484 - " movq 8(%0), %%mm1\n"
19485 - " movq 16(%0), %%mm2\n"
19486 - " movq 24(%0), %%mm3\n"
19487 - " movq %%mm0, (%1)\n"
19488 - " movq %%mm1, 8(%1)\n"
19489 - " movq %%mm2, 16(%1)\n"
19490 - " movq %%mm3, 24(%1)\n"
19491 - " movq 32(%0), %%mm0\n"
19492 - " movq 40(%0), %%mm1\n"
19493 - " movq 48(%0), %%mm2\n"
19494 - " movq 56(%0), %%mm3\n"
19495 - " movq %%mm0, 32(%1)\n"
19496 - " movq %%mm1, 40(%1)\n"
19497 - " movq %%mm2, 48(%1)\n"
19498 - " movq %%mm3, 56(%1)\n"
19499 + "1: prefetch 320(%1)\n"
19500 + "2: movq (%1), %%mm0\n"
19501 + " movq 8(%1), %%mm1\n"
19502 + " movq 16(%1), %%mm2\n"
19503 + " movq 24(%1), %%mm3\n"
19504 + " movq %%mm0, (%2)\n"
19505 + " movq %%mm1, 8(%2)\n"
19506 + " movq %%mm2, 16(%2)\n"
19507 + " movq %%mm3, 24(%2)\n"
19508 + " movq 32(%1), %%mm0\n"
19509 + " movq 40(%1), %%mm1\n"
19510 + " movq 48(%1), %%mm2\n"
19511 + " movq 56(%1), %%mm3\n"
19512 + " movq %%mm0, 32(%2)\n"
19513 + " movq %%mm1, 40(%2)\n"
19514 + " movq %%mm2, 48(%2)\n"
19515 + " movq %%mm3, 56(%2)\n"
19516 ".section .fixup, \"ax\"\n"
19517 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19518 + "3:\n"
19519 +
19520 +#ifdef CONFIG_PAX_KERNEXEC
19521 + " movl %%cr0, %0\n"
19522 + " movl %0, %%eax\n"
19523 + " andl $0xFFFEFFFF, %%eax\n"
19524 + " movl %%eax, %%cr0\n"
19525 +#endif
19526 +
19527 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19528 +
19529 +#ifdef CONFIG_PAX_KERNEXEC
19530 + " movl %0, %%cr0\n"
19531 +#endif
19532 +
19533 " jmp 2b\n"
19534 ".previous\n"
19535 _ASM_EXTABLE(1b, 3b)
19536 - : : "r" (from), "r" (to) : "memory");
19537 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19538
19539 from += 64;
19540 to += 64;
19541 diff -urNp linux-2.6.32.43/arch/x86/lib/putuser.S linux-2.6.32.43/arch/x86/lib/putuser.S
19542 --- linux-2.6.32.43/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19543 +++ linux-2.6.32.43/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19544 @@ -15,7 +15,8 @@
19545 #include <asm/thread_info.h>
19546 #include <asm/errno.h>
19547 #include <asm/asm.h>
19548 -
19549 +#include <asm/segment.h>
19550 +#include <asm/pgtable.h>
19551
19552 /*
19553 * __put_user_X
19554 @@ -29,52 +30,119 @@
19555 * as they get called from within inline assembly.
19556 */
19557
19558 -#define ENTER CFI_STARTPROC ; \
19559 - GET_THREAD_INFO(%_ASM_BX)
19560 +#define ENTER CFI_STARTPROC
19561 #define EXIT ret ; \
19562 CFI_ENDPROC
19563
19564 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19565 +#define _DEST %_ASM_CX,%_ASM_BX
19566 +#else
19567 +#define _DEST %_ASM_CX
19568 +#endif
19569 +
19570 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19571 +#define __copyuser_seg gs;
19572 +#else
19573 +#define __copyuser_seg
19574 +#endif
19575 +
19576 .text
19577 ENTRY(__put_user_1)
19578 ENTER
19579 +
19580 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19581 + GET_THREAD_INFO(%_ASM_BX)
19582 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19583 jae bad_put_user
19584 -1: movb %al,(%_ASM_CX)
19585 +
19586 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19587 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19588 + cmp %_ASM_BX,%_ASM_CX
19589 + jb 1234f
19590 + xor %ebx,%ebx
19591 +1234:
19592 +#endif
19593 +
19594 +#endif
19595 +
19596 +1: __copyuser_seg movb %al,(_DEST)
19597 xor %eax,%eax
19598 EXIT
19599 ENDPROC(__put_user_1)
19600
19601 ENTRY(__put_user_2)
19602 ENTER
19603 +
19604 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19605 + GET_THREAD_INFO(%_ASM_BX)
19606 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19607 sub $1,%_ASM_BX
19608 cmp %_ASM_BX,%_ASM_CX
19609 jae bad_put_user
19610 -2: movw %ax,(%_ASM_CX)
19611 +
19612 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19613 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19614 + cmp %_ASM_BX,%_ASM_CX
19615 + jb 1234f
19616 + xor %ebx,%ebx
19617 +1234:
19618 +#endif
19619 +
19620 +#endif
19621 +
19622 +2: __copyuser_seg movw %ax,(_DEST)
19623 xor %eax,%eax
19624 EXIT
19625 ENDPROC(__put_user_2)
19626
19627 ENTRY(__put_user_4)
19628 ENTER
19629 +
19630 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19631 + GET_THREAD_INFO(%_ASM_BX)
19632 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19633 sub $3,%_ASM_BX
19634 cmp %_ASM_BX,%_ASM_CX
19635 jae bad_put_user
19636 -3: movl %eax,(%_ASM_CX)
19637 +
19638 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19639 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19640 + cmp %_ASM_BX,%_ASM_CX
19641 + jb 1234f
19642 + xor %ebx,%ebx
19643 +1234:
19644 +#endif
19645 +
19646 +#endif
19647 +
19648 +3: __copyuser_seg movl %eax,(_DEST)
19649 xor %eax,%eax
19650 EXIT
19651 ENDPROC(__put_user_4)
19652
19653 ENTRY(__put_user_8)
19654 ENTER
19655 +
19656 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19657 + GET_THREAD_INFO(%_ASM_BX)
19658 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19659 sub $7,%_ASM_BX
19660 cmp %_ASM_BX,%_ASM_CX
19661 jae bad_put_user
19662 -4: mov %_ASM_AX,(%_ASM_CX)
19663 +
19664 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19665 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19666 + cmp %_ASM_BX,%_ASM_CX
19667 + jb 1234f
19668 + xor %ebx,%ebx
19669 +1234:
19670 +#endif
19671 +
19672 +#endif
19673 +
19674 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19675 #ifdef CONFIG_X86_32
19676 -5: movl %edx,4(%_ASM_CX)
19677 +5: __copyuser_seg movl %edx,4(_DEST)
19678 #endif
19679 xor %eax,%eax
19680 EXIT
19681 diff -urNp linux-2.6.32.43/arch/x86/lib/usercopy_32.c linux-2.6.32.43/arch/x86/lib/usercopy_32.c
19682 --- linux-2.6.32.43/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19683 +++ linux-2.6.32.43/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19684 @@ -43,7 +43,7 @@ do { \
19685 __asm__ __volatile__( \
19686 " testl %1,%1\n" \
19687 " jz 2f\n" \
19688 - "0: lodsb\n" \
19689 + "0: "__copyuser_seg"lodsb\n" \
19690 " stosb\n" \
19691 " testb %%al,%%al\n" \
19692 " jz 1f\n" \
19693 @@ -128,10 +128,12 @@ do { \
19694 int __d0; \
19695 might_fault(); \
19696 __asm__ __volatile__( \
19697 + __COPYUSER_SET_ES \
19698 "0: rep; stosl\n" \
19699 " movl %2,%0\n" \
19700 "1: rep; stosb\n" \
19701 "2:\n" \
19702 + __COPYUSER_RESTORE_ES \
19703 ".section .fixup,\"ax\"\n" \
19704 "3: lea 0(%2,%0,4),%0\n" \
19705 " jmp 2b\n" \
19706 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19707 might_fault();
19708
19709 __asm__ __volatile__(
19710 + __COPYUSER_SET_ES
19711 " testl %0, %0\n"
19712 " jz 3f\n"
19713 " andl %0,%%ecx\n"
19714 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19715 " subl %%ecx,%0\n"
19716 " addl %0,%%eax\n"
19717 "1:\n"
19718 + __COPYUSER_RESTORE_ES
19719 ".section .fixup,\"ax\"\n"
19720 "2: xorl %%eax,%%eax\n"
19721 " jmp 1b\n"
19722 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19723
19724 #ifdef CONFIG_X86_INTEL_USERCOPY
19725 static unsigned long
19726 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19727 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19728 {
19729 int d0, d1;
19730 __asm__ __volatile__(
19731 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19732 " .align 2,0x90\n"
19733 "3: movl 0(%4), %%eax\n"
19734 "4: movl 4(%4), %%edx\n"
19735 - "5: movl %%eax, 0(%3)\n"
19736 - "6: movl %%edx, 4(%3)\n"
19737 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19738 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19739 "7: movl 8(%4), %%eax\n"
19740 "8: movl 12(%4),%%edx\n"
19741 - "9: movl %%eax, 8(%3)\n"
19742 - "10: movl %%edx, 12(%3)\n"
19743 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19744 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19745 "11: movl 16(%4), %%eax\n"
19746 "12: movl 20(%4), %%edx\n"
19747 - "13: movl %%eax, 16(%3)\n"
19748 - "14: movl %%edx, 20(%3)\n"
19749 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19750 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19751 "15: movl 24(%4), %%eax\n"
19752 "16: movl 28(%4), %%edx\n"
19753 - "17: movl %%eax, 24(%3)\n"
19754 - "18: movl %%edx, 28(%3)\n"
19755 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19756 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19757 "19: movl 32(%4), %%eax\n"
19758 "20: movl 36(%4), %%edx\n"
19759 - "21: movl %%eax, 32(%3)\n"
19760 - "22: movl %%edx, 36(%3)\n"
19761 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19762 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19763 "23: movl 40(%4), %%eax\n"
19764 "24: movl 44(%4), %%edx\n"
19765 - "25: movl %%eax, 40(%3)\n"
19766 - "26: movl %%edx, 44(%3)\n"
19767 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19768 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19769 "27: movl 48(%4), %%eax\n"
19770 "28: movl 52(%4), %%edx\n"
19771 - "29: movl %%eax, 48(%3)\n"
19772 - "30: movl %%edx, 52(%3)\n"
19773 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19774 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19775 "31: movl 56(%4), %%eax\n"
19776 "32: movl 60(%4), %%edx\n"
19777 - "33: movl %%eax, 56(%3)\n"
19778 - "34: movl %%edx, 60(%3)\n"
19779 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19780 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19781 " addl $-64, %0\n"
19782 " addl $64, %4\n"
19783 " addl $64, %3\n"
19784 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19785 " shrl $2, %0\n"
19786 " andl $3, %%eax\n"
19787 " cld\n"
19788 + __COPYUSER_SET_ES
19789 "99: rep; movsl\n"
19790 "36: movl %%eax, %0\n"
19791 "37: rep; movsb\n"
19792 "100:\n"
19793 + __COPYUSER_RESTORE_ES
19794 + ".section .fixup,\"ax\"\n"
19795 + "101: lea 0(%%eax,%0,4),%0\n"
19796 + " jmp 100b\n"
19797 + ".previous\n"
19798 + ".section __ex_table,\"a\"\n"
19799 + " .align 4\n"
19800 + " .long 1b,100b\n"
19801 + " .long 2b,100b\n"
19802 + " .long 3b,100b\n"
19803 + " .long 4b,100b\n"
19804 + " .long 5b,100b\n"
19805 + " .long 6b,100b\n"
19806 + " .long 7b,100b\n"
19807 + " .long 8b,100b\n"
19808 + " .long 9b,100b\n"
19809 + " .long 10b,100b\n"
19810 + " .long 11b,100b\n"
19811 + " .long 12b,100b\n"
19812 + " .long 13b,100b\n"
19813 + " .long 14b,100b\n"
19814 + " .long 15b,100b\n"
19815 + " .long 16b,100b\n"
19816 + " .long 17b,100b\n"
19817 + " .long 18b,100b\n"
19818 + " .long 19b,100b\n"
19819 + " .long 20b,100b\n"
19820 + " .long 21b,100b\n"
19821 + " .long 22b,100b\n"
19822 + " .long 23b,100b\n"
19823 + " .long 24b,100b\n"
19824 + " .long 25b,100b\n"
19825 + " .long 26b,100b\n"
19826 + " .long 27b,100b\n"
19827 + " .long 28b,100b\n"
19828 + " .long 29b,100b\n"
19829 + " .long 30b,100b\n"
19830 + " .long 31b,100b\n"
19831 + " .long 32b,100b\n"
19832 + " .long 33b,100b\n"
19833 + " .long 34b,100b\n"
19834 + " .long 35b,100b\n"
19835 + " .long 36b,100b\n"
19836 + " .long 37b,100b\n"
19837 + " .long 99b,101b\n"
19838 + ".previous"
19839 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19840 + : "1"(to), "2"(from), "0"(size)
19841 + : "eax", "edx", "memory");
19842 + return size;
19843 +}
19844 +
19845 +static unsigned long
19846 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19847 +{
19848 + int d0, d1;
19849 + __asm__ __volatile__(
19850 + " .align 2,0x90\n"
19851 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19852 + " cmpl $67, %0\n"
19853 + " jbe 3f\n"
19854 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19855 + " .align 2,0x90\n"
19856 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19857 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19858 + "5: movl %%eax, 0(%3)\n"
19859 + "6: movl %%edx, 4(%3)\n"
19860 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19861 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19862 + "9: movl %%eax, 8(%3)\n"
19863 + "10: movl %%edx, 12(%3)\n"
19864 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19865 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19866 + "13: movl %%eax, 16(%3)\n"
19867 + "14: movl %%edx, 20(%3)\n"
19868 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19869 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19870 + "17: movl %%eax, 24(%3)\n"
19871 + "18: movl %%edx, 28(%3)\n"
19872 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19873 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19874 + "21: movl %%eax, 32(%3)\n"
19875 + "22: movl %%edx, 36(%3)\n"
19876 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19877 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19878 + "25: movl %%eax, 40(%3)\n"
19879 + "26: movl %%edx, 44(%3)\n"
19880 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19881 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19882 + "29: movl %%eax, 48(%3)\n"
19883 + "30: movl %%edx, 52(%3)\n"
19884 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19885 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19886 + "33: movl %%eax, 56(%3)\n"
19887 + "34: movl %%edx, 60(%3)\n"
19888 + " addl $-64, %0\n"
19889 + " addl $64, %4\n"
19890 + " addl $64, %3\n"
19891 + " cmpl $63, %0\n"
19892 + " ja 1b\n"
19893 + "35: movl %0, %%eax\n"
19894 + " shrl $2, %0\n"
19895 + " andl $3, %%eax\n"
19896 + " cld\n"
19897 + "99: rep; "__copyuser_seg" movsl\n"
19898 + "36: movl %%eax, %0\n"
19899 + "37: rep; "__copyuser_seg" movsb\n"
19900 + "100:\n"
19901 ".section .fixup,\"ax\"\n"
19902 "101: lea 0(%%eax,%0,4),%0\n"
19903 " jmp 100b\n"
19904 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19905 int d0, d1;
19906 __asm__ __volatile__(
19907 " .align 2,0x90\n"
19908 - "0: movl 32(%4), %%eax\n"
19909 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19910 " cmpl $67, %0\n"
19911 " jbe 2f\n"
19912 - "1: movl 64(%4), %%eax\n"
19913 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19914 " .align 2,0x90\n"
19915 - "2: movl 0(%4), %%eax\n"
19916 - "21: movl 4(%4), %%edx\n"
19917 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19918 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19919 " movl %%eax, 0(%3)\n"
19920 " movl %%edx, 4(%3)\n"
19921 - "3: movl 8(%4), %%eax\n"
19922 - "31: movl 12(%4),%%edx\n"
19923 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19924 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19925 " movl %%eax, 8(%3)\n"
19926 " movl %%edx, 12(%3)\n"
19927 - "4: movl 16(%4), %%eax\n"
19928 - "41: movl 20(%4), %%edx\n"
19929 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19930 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19931 " movl %%eax, 16(%3)\n"
19932 " movl %%edx, 20(%3)\n"
19933 - "10: movl 24(%4), %%eax\n"
19934 - "51: movl 28(%4), %%edx\n"
19935 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19936 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19937 " movl %%eax, 24(%3)\n"
19938 " movl %%edx, 28(%3)\n"
19939 - "11: movl 32(%4), %%eax\n"
19940 - "61: movl 36(%4), %%edx\n"
19941 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19942 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19943 " movl %%eax, 32(%3)\n"
19944 " movl %%edx, 36(%3)\n"
19945 - "12: movl 40(%4), %%eax\n"
19946 - "71: movl 44(%4), %%edx\n"
19947 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19948 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19949 " movl %%eax, 40(%3)\n"
19950 " movl %%edx, 44(%3)\n"
19951 - "13: movl 48(%4), %%eax\n"
19952 - "81: movl 52(%4), %%edx\n"
19953 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19954 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19955 " movl %%eax, 48(%3)\n"
19956 " movl %%edx, 52(%3)\n"
19957 - "14: movl 56(%4), %%eax\n"
19958 - "91: movl 60(%4), %%edx\n"
19959 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19960 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19961 " movl %%eax, 56(%3)\n"
19962 " movl %%edx, 60(%3)\n"
19963 " addl $-64, %0\n"
19964 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19965 " shrl $2, %0\n"
19966 " andl $3, %%eax\n"
19967 " cld\n"
19968 - "6: rep; movsl\n"
19969 + "6: rep; "__copyuser_seg" movsl\n"
19970 " movl %%eax,%0\n"
19971 - "7: rep; movsb\n"
19972 + "7: rep; "__copyuser_seg" movsb\n"
19973 "8:\n"
19974 ".section .fixup,\"ax\"\n"
19975 "9: lea 0(%%eax,%0,4),%0\n"
19976 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19977
19978 __asm__ __volatile__(
19979 " .align 2,0x90\n"
19980 - "0: movl 32(%4), %%eax\n"
19981 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19982 " cmpl $67, %0\n"
19983 " jbe 2f\n"
19984 - "1: movl 64(%4), %%eax\n"
19985 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19986 " .align 2,0x90\n"
19987 - "2: movl 0(%4), %%eax\n"
19988 - "21: movl 4(%4), %%edx\n"
19989 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19990 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19991 " movnti %%eax, 0(%3)\n"
19992 " movnti %%edx, 4(%3)\n"
19993 - "3: movl 8(%4), %%eax\n"
19994 - "31: movl 12(%4),%%edx\n"
19995 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19996 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19997 " movnti %%eax, 8(%3)\n"
19998 " movnti %%edx, 12(%3)\n"
19999 - "4: movl 16(%4), %%eax\n"
20000 - "41: movl 20(%4), %%edx\n"
20001 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20002 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20003 " movnti %%eax, 16(%3)\n"
20004 " movnti %%edx, 20(%3)\n"
20005 - "10: movl 24(%4), %%eax\n"
20006 - "51: movl 28(%4), %%edx\n"
20007 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20008 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20009 " movnti %%eax, 24(%3)\n"
20010 " movnti %%edx, 28(%3)\n"
20011 - "11: movl 32(%4), %%eax\n"
20012 - "61: movl 36(%4), %%edx\n"
20013 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20014 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20015 " movnti %%eax, 32(%3)\n"
20016 " movnti %%edx, 36(%3)\n"
20017 - "12: movl 40(%4), %%eax\n"
20018 - "71: movl 44(%4), %%edx\n"
20019 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20020 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20021 " movnti %%eax, 40(%3)\n"
20022 " movnti %%edx, 44(%3)\n"
20023 - "13: movl 48(%4), %%eax\n"
20024 - "81: movl 52(%4), %%edx\n"
20025 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20026 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20027 " movnti %%eax, 48(%3)\n"
20028 " movnti %%edx, 52(%3)\n"
20029 - "14: movl 56(%4), %%eax\n"
20030 - "91: movl 60(%4), %%edx\n"
20031 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20032 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20033 " movnti %%eax, 56(%3)\n"
20034 " movnti %%edx, 60(%3)\n"
20035 " addl $-64, %0\n"
20036 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20037 " shrl $2, %0\n"
20038 " andl $3, %%eax\n"
20039 " cld\n"
20040 - "6: rep; movsl\n"
20041 + "6: rep; "__copyuser_seg" movsl\n"
20042 " movl %%eax,%0\n"
20043 - "7: rep; movsb\n"
20044 + "7: rep; "__copyuser_seg" movsb\n"
20045 "8:\n"
20046 ".section .fixup,\"ax\"\n"
20047 "9: lea 0(%%eax,%0,4),%0\n"
20048 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20049
20050 __asm__ __volatile__(
20051 " .align 2,0x90\n"
20052 - "0: movl 32(%4), %%eax\n"
20053 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20054 " cmpl $67, %0\n"
20055 " jbe 2f\n"
20056 - "1: movl 64(%4), %%eax\n"
20057 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20058 " .align 2,0x90\n"
20059 - "2: movl 0(%4), %%eax\n"
20060 - "21: movl 4(%4), %%edx\n"
20061 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20062 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20063 " movnti %%eax, 0(%3)\n"
20064 " movnti %%edx, 4(%3)\n"
20065 - "3: movl 8(%4), %%eax\n"
20066 - "31: movl 12(%4),%%edx\n"
20067 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20068 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20069 " movnti %%eax, 8(%3)\n"
20070 " movnti %%edx, 12(%3)\n"
20071 - "4: movl 16(%4), %%eax\n"
20072 - "41: movl 20(%4), %%edx\n"
20073 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20074 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20075 " movnti %%eax, 16(%3)\n"
20076 " movnti %%edx, 20(%3)\n"
20077 - "10: movl 24(%4), %%eax\n"
20078 - "51: movl 28(%4), %%edx\n"
20079 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20080 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20081 " movnti %%eax, 24(%3)\n"
20082 " movnti %%edx, 28(%3)\n"
20083 - "11: movl 32(%4), %%eax\n"
20084 - "61: movl 36(%4), %%edx\n"
20085 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20086 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20087 " movnti %%eax, 32(%3)\n"
20088 " movnti %%edx, 36(%3)\n"
20089 - "12: movl 40(%4), %%eax\n"
20090 - "71: movl 44(%4), %%edx\n"
20091 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20092 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20093 " movnti %%eax, 40(%3)\n"
20094 " movnti %%edx, 44(%3)\n"
20095 - "13: movl 48(%4), %%eax\n"
20096 - "81: movl 52(%4), %%edx\n"
20097 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20098 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20099 " movnti %%eax, 48(%3)\n"
20100 " movnti %%edx, 52(%3)\n"
20101 - "14: movl 56(%4), %%eax\n"
20102 - "91: movl 60(%4), %%edx\n"
20103 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20104 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20105 " movnti %%eax, 56(%3)\n"
20106 " movnti %%edx, 60(%3)\n"
20107 " addl $-64, %0\n"
20108 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20109 " shrl $2, %0\n"
20110 " andl $3, %%eax\n"
20111 " cld\n"
20112 - "6: rep; movsl\n"
20113 + "6: rep; "__copyuser_seg" movsl\n"
20114 " movl %%eax,%0\n"
20115 - "7: rep; movsb\n"
20116 + "7: rep; "__copyuser_seg" movsb\n"
20117 "8:\n"
20118 ".section .fixup,\"ax\"\n"
20119 "9: lea 0(%%eax,%0,4),%0\n"
20120 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20121 */
20122 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20123 unsigned long size);
20124 -unsigned long __copy_user_intel(void __user *to, const void *from,
20125 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20126 + unsigned long size);
20127 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20128 unsigned long size);
20129 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20130 const void __user *from, unsigned long size);
20131 #endif /* CONFIG_X86_INTEL_USERCOPY */
20132
20133 /* Generic arbitrary sized copy. */
20134 -#define __copy_user(to, from, size) \
20135 +#define __copy_user(to, from, size, prefix, set, restore) \
20136 do { \
20137 int __d0, __d1, __d2; \
20138 __asm__ __volatile__( \
20139 + set \
20140 " cmp $7,%0\n" \
20141 " jbe 1f\n" \
20142 " movl %1,%0\n" \
20143 " negl %0\n" \
20144 " andl $7,%0\n" \
20145 " subl %0,%3\n" \
20146 - "4: rep; movsb\n" \
20147 + "4: rep; "prefix"movsb\n" \
20148 " movl %3,%0\n" \
20149 " shrl $2,%0\n" \
20150 " andl $3,%3\n" \
20151 " .align 2,0x90\n" \
20152 - "0: rep; movsl\n" \
20153 + "0: rep; "prefix"movsl\n" \
20154 " movl %3,%0\n" \
20155 - "1: rep; movsb\n" \
20156 + "1: rep; "prefix"movsb\n" \
20157 "2:\n" \
20158 + restore \
20159 ".section .fixup,\"ax\"\n" \
20160 "5: addl %3,%0\n" \
20161 " jmp 2b\n" \
20162 @@ -682,14 +799,14 @@ do { \
20163 " negl %0\n" \
20164 " andl $7,%0\n" \
20165 " subl %0,%3\n" \
20166 - "4: rep; movsb\n" \
20167 + "4: rep; "__copyuser_seg"movsb\n" \
20168 " movl %3,%0\n" \
20169 " shrl $2,%0\n" \
20170 " andl $3,%3\n" \
20171 " .align 2,0x90\n" \
20172 - "0: rep; movsl\n" \
20173 + "0: rep; "__copyuser_seg"movsl\n" \
20174 " movl %3,%0\n" \
20175 - "1: rep; movsb\n" \
20176 + "1: rep; "__copyuser_seg"movsb\n" \
20177 "2:\n" \
20178 ".section .fixup,\"ax\"\n" \
20179 "5: addl %3,%0\n" \
20180 @@ -775,9 +892,9 @@ survive:
20181 }
20182 #endif
20183 if (movsl_is_ok(to, from, n))
20184 - __copy_user(to, from, n);
20185 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20186 else
20187 - n = __copy_user_intel(to, from, n);
20188 + n = __generic_copy_to_user_intel(to, from, n);
20189 return n;
20190 }
20191 EXPORT_SYMBOL(__copy_to_user_ll);
20192 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20193 unsigned long n)
20194 {
20195 if (movsl_is_ok(to, from, n))
20196 - __copy_user(to, from, n);
20197 + __copy_user(to, from, n, __copyuser_seg, "", "");
20198 else
20199 - n = __copy_user_intel((void __user *)to,
20200 - (const void *)from, n);
20201 + n = __generic_copy_from_user_intel(to, from, n);
20202 return n;
20203 }
20204 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20205 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20206 if (n > 64 && cpu_has_xmm2)
20207 n = __copy_user_intel_nocache(to, from, n);
20208 else
20209 - __copy_user(to, from, n);
20210 + __copy_user(to, from, n, __copyuser_seg, "", "");
20211 #else
20212 - __copy_user(to, from, n);
20213 + __copy_user(to, from, n, __copyuser_seg, "", "");
20214 #endif
20215 return n;
20216 }
20217 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20218
20219 -/**
20220 - * copy_to_user: - Copy a block of data into user space.
20221 - * @to: Destination address, in user space.
20222 - * @from: Source address, in kernel space.
20223 - * @n: Number of bytes to copy.
20224 - *
20225 - * Context: User context only. This function may sleep.
20226 - *
20227 - * Copy data from kernel space to user space.
20228 - *
20229 - * Returns number of bytes that could not be copied.
20230 - * On success, this will be zero.
20231 - */
20232 -unsigned long
20233 -copy_to_user(void __user *to, const void *from, unsigned long n)
20234 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20235 +void __set_fs(mm_segment_t x)
20236 {
20237 - if (access_ok(VERIFY_WRITE, to, n))
20238 - n = __copy_to_user(to, from, n);
20239 - return n;
20240 + switch (x.seg) {
20241 + case 0:
20242 + loadsegment(gs, 0);
20243 + break;
20244 + case TASK_SIZE_MAX:
20245 + loadsegment(gs, __USER_DS);
20246 + break;
20247 + case -1UL:
20248 + loadsegment(gs, __KERNEL_DS);
20249 + break;
20250 + default:
20251 + BUG();
20252 + }
20253 + return;
20254 }
20255 -EXPORT_SYMBOL(copy_to_user);
20256 +EXPORT_SYMBOL(__set_fs);
20257
20258 -/**
20259 - * copy_from_user: - Copy a block of data from user space.
20260 - * @to: Destination address, in kernel space.
20261 - * @from: Source address, in user space.
20262 - * @n: Number of bytes to copy.
20263 - *
20264 - * Context: User context only. This function may sleep.
20265 - *
20266 - * Copy data from user space to kernel space.
20267 - *
20268 - * Returns number of bytes that could not be copied.
20269 - * On success, this will be zero.
20270 - *
20271 - * If some data could not be copied, this function will pad the copied
20272 - * data to the requested size using zero bytes.
20273 - */
20274 -unsigned long
20275 -copy_from_user(void *to, const void __user *from, unsigned long n)
20276 +void set_fs(mm_segment_t x)
20277 {
20278 - if (access_ok(VERIFY_READ, from, n))
20279 - n = __copy_from_user(to, from, n);
20280 - else
20281 - memset(to, 0, n);
20282 - return n;
20283 + current_thread_info()->addr_limit = x;
20284 + __set_fs(x);
20285 }
20286 -EXPORT_SYMBOL(copy_from_user);
20287 +EXPORT_SYMBOL(set_fs);
20288 +#endif
20289 diff -urNp linux-2.6.32.43/arch/x86/lib/usercopy_64.c linux-2.6.32.43/arch/x86/lib/usercopy_64.c
20290 --- linux-2.6.32.43/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20291 +++ linux-2.6.32.43/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20292 @@ -42,6 +42,12 @@ long
20293 __strncpy_from_user(char *dst, const char __user *src, long count)
20294 {
20295 long res;
20296 +
20297 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20298 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20299 + src += PAX_USER_SHADOW_BASE;
20300 +#endif
20301 +
20302 __do_strncpy_from_user(dst, src, count, res);
20303 return res;
20304 }
20305 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20306 {
20307 long __d0;
20308 might_fault();
20309 +
20310 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20311 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20312 + addr += PAX_USER_SHADOW_BASE;
20313 +#endif
20314 +
20315 /* no memory constraint because it doesn't change any memory gcc knows
20316 about */
20317 asm volatile(
20318 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20319
20320 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20321 {
20322 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20323 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20324 +
20325 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20326 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20327 + to += PAX_USER_SHADOW_BASE;
20328 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20329 + from += PAX_USER_SHADOW_BASE;
20330 +#endif
20331 +
20332 return copy_user_generic((__force void *)to, (__force void *)from, len);
20333 - }
20334 - return len;
20335 + }
20336 + return len;
20337 }
20338 EXPORT_SYMBOL(copy_in_user);
20339
20340 diff -urNp linux-2.6.32.43/arch/x86/Makefile linux-2.6.32.43/arch/x86/Makefile
20341 --- linux-2.6.32.43/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20342 +++ linux-2.6.32.43/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
20343 @@ -189,3 +189,12 @@ define archhelp
20344 echo ' FDARGS="..." arguments for the booted kernel'
20345 echo ' FDINITRD=file initrd for the booted kernel'
20346 endef
20347 +
20348 +define OLD_LD
20349 +
20350 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20351 +*** Please upgrade your binutils to 2.18 or newer
20352 +endef
20353 +
20354 +archprepare:
20355 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20356 diff -urNp linux-2.6.32.43/arch/x86/mm/extable.c linux-2.6.32.43/arch/x86/mm/extable.c
20357 --- linux-2.6.32.43/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20358 +++ linux-2.6.32.43/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20359 @@ -1,14 +1,71 @@
20360 #include <linux/module.h>
20361 #include <linux/spinlock.h>
20362 +#include <linux/sort.h>
20363 #include <asm/uaccess.h>
20364 +#include <asm/pgtable.h>
20365
20366 +/*
20367 + * The exception table needs to be sorted so that the binary
20368 + * search that we use to find entries in it works properly.
20369 + * This is used both for the kernel exception table and for
20370 + * the exception tables of modules that get loaded.
20371 + */
20372 +static int cmp_ex(const void *a, const void *b)
20373 +{
20374 + const struct exception_table_entry *x = a, *y = b;
20375 +
20376 + /* avoid overflow */
20377 + if (x->insn > y->insn)
20378 + return 1;
20379 + if (x->insn < y->insn)
20380 + return -1;
20381 + return 0;
20382 +}
20383 +
20384 +static void swap_ex(void *a, void *b, int size)
20385 +{
20386 + struct exception_table_entry t, *x = a, *y = b;
20387 +
20388 + t = *x;
20389 +
20390 + pax_open_kernel();
20391 + *x = *y;
20392 + *y = t;
20393 + pax_close_kernel();
20394 +}
20395 +
20396 +void sort_extable(struct exception_table_entry *start,
20397 + struct exception_table_entry *finish)
20398 +{
20399 + sort(start, finish - start, sizeof(struct exception_table_entry),
20400 + cmp_ex, swap_ex);
20401 +}
20402 +
20403 +#ifdef CONFIG_MODULES
20404 +/*
20405 + * If the exception table is sorted, any referring to the module init
20406 + * will be at the beginning or the end.
20407 + */
20408 +void trim_init_extable(struct module *m)
20409 +{
20410 + /*trim the beginning*/
20411 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20412 + m->extable++;
20413 + m->num_exentries--;
20414 + }
20415 + /*trim the end*/
20416 + while (m->num_exentries &&
20417 + within_module_init(m->extable[m->num_exentries-1].insn, m))
20418 + m->num_exentries--;
20419 +}
20420 +#endif /* CONFIG_MODULES */
20421
20422 int fixup_exception(struct pt_regs *regs)
20423 {
20424 const struct exception_table_entry *fixup;
20425
20426 #ifdef CONFIG_PNPBIOS
20427 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20428 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20429 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20430 extern u32 pnp_bios_is_utter_crap;
20431 pnp_bios_is_utter_crap = 1;
20432 diff -urNp linux-2.6.32.43/arch/x86/mm/fault.c linux-2.6.32.43/arch/x86/mm/fault.c
20433 --- linux-2.6.32.43/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20434 +++ linux-2.6.32.43/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20435 @@ -11,10 +11,19 @@
20436 #include <linux/kprobes.h> /* __kprobes, ... */
20437 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20438 #include <linux/perf_event.h> /* perf_sw_event */
20439 +#include <linux/unistd.h>
20440 +#include <linux/compiler.h>
20441
20442 #include <asm/traps.h> /* dotraplinkage, ... */
20443 #include <asm/pgalloc.h> /* pgd_*(), ... */
20444 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20445 +#include <asm/vsyscall.h>
20446 +#include <asm/tlbflush.h>
20447 +
20448 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20449 +#include <asm/stacktrace.h>
20450 +#include "../kernel/dumpstack.h"
20451 +#endif
20452
20453 /*
20454 * Page fault error code bits:
20455 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20456 int ret = 0;
20457
20458 /* kprobe_running() needs smp_processor_id() */
20459 - if (kprobes_built_in() && !user_mode_vm(regs)) {
20460 + if (kprobes_built_in() && !user_mode(regs)) {
20461 preempt_disable();
20462 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20463 ret = 1;
20464 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20465 return !instr_lo || (instr_lo>>1) == 1;
20466 case 0x00:
20467 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20468 - if (probe_kernel_address(instr, opcode))
20469 + if (user_mode(regs)) {
20470 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20471 + return 0;
20472 + } else if (probe_kernel_address(instr, opcode))
20473 return 0;
20474
20475 *prefetch = (instr_lo == 0xF) &&
20476 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20477 while (instr < max_instr) {
20478 unsigned char opcode;
20479
20480 - if (probe_kernel_address(instr, opcode))
20481 + if (user_mode(regs)) {
20482 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20483 + break;
20484 + } else if (probe_kernel_address(instr, opcode))
20485 break;
20486
20487 instr++;
20488 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20489 force_sig_info(si_signo, &info, tsk);
20490 }
20491
20492 +#ifdef CONFIG_PAX_EMUTRAMP
20493 +static int pax_handle_fetch_fault(struct pt_regs *regs);
20494 +#endif
20495 +
20496 +#ifdef CONFIG_PAX_PAGEEXEC
20497 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20498 +{
20499 + pgd_t *pgd;
20500 + pud_t *pud;
20501 + pmd_t *pmd;
20502 +
20503 + pgd = pgd_offset(mm, address);
20504 + if (!pgd_present(*pgd))
20505 + return NULL;
20506 + pud = pud_offset(pgd, address);
20507 + if (!pud_present(*pud))
20508 + return NULL;
20509 + pmd = pmd_offset(pud, address);
20510 + if (!pmd_present(*pmd))
20511 + return NULL;
20512 + return pmd;
20513 +}
20514 +#endif
20515 +
20516 DEFINE_SPINLOCK(pgd_lock);
20517 LIST_HEAD(pgd_list);
20518
20519 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20520 address += PMD_SIZE) {
20521
20522 unsigned long flags;
20523 +
20524 +#ifdef CONFIG_PAX_PER_CPU_PGD
20525 + unsigned long cpu;
20526 +#else
20527 struct page *page;
20528 +#endif
20529
20530 spin_lock_irqsave(&pgd_lock, flags);
20531 +
20532 +#ifdef CONFIG_PAX_PER_CPU_PGD
20533 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20534 + pgd_t *pgd = get_cpu_pgd(cpu);
20535 +#else
20536 list_for_each_entry(page, &pgd_list, lru) {
20537 - if (!vmalloc_sync_one(page_address(page), address))
20538 + pgd_t *pgd = page_address(page);
20539 +#endif
20540 +
20541 + if (!vmalloc_sync_one(pgd, address))
20542 break;
20543 }
20544 spin_unlock_irqrestore(&pgd_lock, flags);
20545 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20546 * an interrupt in the middle of a task switch..
20547 */
20548 pgd_paddr = read_cr3();
20549 +
20550 +#ifdef CONFIG_PAX_PER_CPU_PGD
20551 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20552 +#endif
20553 +
20554 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20555 if (!pmd_k)
20556 return -1;
20557 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20558
20559 const pgd_t *pgd_ref = pgd_offset_k(address);
20560 unsigned long flags;
20561 +
20562 +#ifdef CONFIG_PAX_PER_CPU_PGD
20563 + unsigned long cpu;
20564 +#else
20565 struct page *page;
20566 +#endif
20567
20568 if (pgd_none(*pgd_ref))
20569 continue;
20570
20571 spin_lock_irqsave(&pgd_lock, flags);
20572 +
20573 +#ifdef CONFIG_PAX_PER_CPU_PGD
20574 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20575 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
20576 +#else
20577 list_for_each_entry(page, &pgd_list, lru) {
20578 pgd_t *pgd;
20579 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20580 +#endif
20581 +
20582 if (pgd_none(*pgd))
20583 set_pgd(pgd, *pgd_ref);
20584 else
20585 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20586 * happen within a race in page table update. In the later
20587 * case just flush:
20588 */
20589 +
20590 +#ifdef CONFIG_PAX_PER_CPU_PGD
20591 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20592 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20593 +#else
20594 pgd = pgd_offset(current->active_mm, address);
20595 +#endif
20596 +
20597 pgd_ref = pgd_offset_k(address);
20598 if (pgd_none(*pgd_ref))
20599 return -1;
20600 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20601 static int is_errata100(struct pt_regs *regs, unsigned long address)
20602 {
20603 #ifdef CONFIG_X86_64
20604 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20605 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20606 return 1;
20607 #endif
20608 return 0;
20609 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20610 }
20611
20612 static const char nx_warning[] = KERN_CRIT
20613 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20614 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20615
20616 static void
20617 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20618 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20619 if (!oops_may_print())
20620 return;
20621
20622 - if (error_code & PF_INSTR) {
20623 + if (nx_enabled && (error_code & PF_INSTR)) {
20624 unsigned int level;
20625
20626 pte_t *pte = lookup_address(address, &level);
20627
20628 if (pte && pte_present(*pte) && !pte_exec(*pte))
20629 - printk(nx_warning, current_uid());
20630 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20631 }
20632
20633 +#ifdef CONFIG_PAX_KERNEXEC
20634 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20635 + if (current->signal->curr_ip)
20636 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20637 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20638 + else
20639 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20640 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20641 + }
20642 +#endif
20643 +
20644 printk(KERN_ALERT "BUG: unable to handle kernel ");
20645 if (address < PAGE_SIZE)
20646 printk(KERN_CONT "NULL pointer dereference");
20647 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20648 unsigned long address, int si_code)
20649 {
20650 struct task_struct *tsk = current;
20651 + struct mm_struct *mm = tsk->mm;
20652 +
20653 +#ifdef CONFIG_X86_64
20654 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20655 + if (regs->ip == (unsigned long)vgettimeofday) {
20656 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20657 + return;
20658 + } else if (regs->ip == (unsigned long)vtime) {
20659 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20660 + return;
20661 + } else if (regs->ip == (unsigned long)vgetcpu) {
20662 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20663 + return;
20664 + }
20665 + }
20666 +#endif
20667 +
20668 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20669 + if (mm && (error_code & PF_USER)) {
20670 + unsigned long ip = regs->ip;
20671 +
20672 + if (v8086_mode(regs))
20673 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20674 +
20675 + /*
20676 + * It's possible to have interrupts off here:
20677 + */
20678 + local_irq_enable();
20679 +
20680 +#ifdef CONFIG_PAX_PAGEEXEC
20681 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20682 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20683 +
20684 +#ifdef CONFIG_PAX_EMUTRAMP
20685 + switch (pax_handle_fetch_fault(regs)) {
20686 + case 2:
20687 + return;
20688 + }
20689 +#endif
20690 +
20691 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20692 + do_group_exit(SIGKILL);
20693 + }
20694 +#endif
20695 +
20696 +#ifdef CONFIG_PAX_SEGMEXEC
20697 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20698 +
20699 +#ifdef CONFIG_PAX_EMUTRAMP
20700 + switch (pax_handle_fetch_fault(regs)) {
20701 + case 2:
20702 + return;
20703 + }
20704 +#endif
20705 +
20706 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20707 + do_group_exit(SIGKILL);
20708 + }
20709 +#endif
20710 +
20711 + }
20712 +#endif
20713
20714 /* User mode accesses just cause a SIGSEGV */
20715 if (error_code & PF_USER) {
20716 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20717 return 1;
20718 }
20719
20720 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20721 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20722 +{
20723 + pte_t *pte;
20724 + pmd_t *pmd;
20725 + spinlock_t *ptl;
20726 + unsigned char pte_mask;
20727 +
20728 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20729 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20730 + return 0;
20731 +
20732 + /* PaX: it's our fault, let's handle it if we can */
20733 +
20734 + /* PaX: take a look at read faults before acquiring any locks */
20735 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20736 + /* instruction fetch attempt from a protected page in user mode */
20737 + up_read(&mm->mmap_sem);
20738 +
20739 +#ifdef CONFIG_PAX_EMUTRAMP
20740 + switch (pax_handle_fetch_fault(regs)) {
20741 + case 2:
20742 + return 1;
20743 + }
20744 +#endif
20745 +
20746 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20747 + do_group_exit(SIGKILL);
20748 + }
20749 +
20750 + pmd = pax_get_pmd(mm, address);
20751 + if (unlikely(!pmd))
20752 + return 0;
20753 +
20754 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20755 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20756 + pte_unmap_unlock(pte, ptl);
20757 + return 0;
20758 + }
20759 +
20760 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20761 + /* write attempt to a protected page in user mode */
20762 + pte_unmap_unlock(pte, ptl);
20763 + return 0;
20764 + }
20765 +
20766 +#ifdef CONFIG_SMP
20767 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20768 +#else
20769 + if (likely(address > get_limit(regs->cs)))
20770 +#endif
20771 + {
20772 + set_pte(pte, pte_mkread(*pte));
20773 + __flush_tlb_one(address);
20774 + pte_unmap_unlock(pte, ptl);
20775 + up_read(&mm->mmap_sem);
20776 + return 1;
20777 + }
20778 +
20779 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20780 +
20781 + /*
20782 + * PaX: fill DTLB with user rights and retry
20783 + */
20784 + __asm__ __volatile__ (
20785 + "orb %2,(%1)\n"
20786 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20787 +/*
20788 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20789 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20790 + * page fault when examined during a TLB load attempt. this is true not only
20791 + * for PTEs holding a non-present entry but also present entries that will
20792 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20793 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20794 + * for our target pages since their PTEs are simply not in the TLBs at all.
20795 +
20796 + * the best thing in omitting it is that we gain around 15-20% speed in the
20797 + * fast path of the page fault handler and can get rid of tracing since we
20798 + * can no longer flush unintended entries.
20799 + */
20800 + "invlpg (%0)\n"
20801 +#endif
20802 + __copyuser_seg"testb $0,(%0)\n"
20803 + "xorb %3,(%1)\n"
20804 + :
20805 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20806 + : "memory", "cc");
20807 + pte_unmap_unlock(pte, ptl);
20808 + up_read(&mm->mmap_sem);
20809 + return 1;
20810 +}
20811 +#endif
20812 +
20813 /*
20814 * Handle a spurious fault caused by a stale TLB entry.
20815 *
20816 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20817 static inline int
20818 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20819 {
20820 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20821 + return 1;
20822 +
20823 if (write) {
20824 /* write, present and write, not present: */
20825 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20826 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20827 {
20828 struct vm_area_struct *vma;
20829 struct task_struct *tsk;
20830 - unsigned long address;
20831 struct mm_struct *mm;
20832 int write;
20833 int fault;
20834
20835 + /* Get the faulting address: */
20836 + unsigned long address = read_cr2();
20837 +
20838 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20839 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20840 + if (!search_exception_tables(regs->ip)) {
20841 + bad_area_nosemaphore(regs, error_code, address);
20842 + return;
20843 + }
20844 + if (address < PAX_USER_SHADOW_BASE) {
20845 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20846 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20847 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20848 + } else
20849 + address -= PAX_USER_SHADOW_BASE;
20850 + }
20851 +#endif
20852 +
20853 tsk = current;
20854 mm = tsk->mm;
20855
20856 - /* Get the faulting address: */
20857 - address = read_cr2();
20858 -
20859 /*
20860 * Detect and handle instructions that would cause a page fault for
20861 * both a tracked kernel page and a userspace page.
20862 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20863 * User-mode registers count as a user access even for any
20864 * potential system fault or CPU buglet:
20865 */
20866 - if (user_mode_vm(regs)) {
20867 + if (user_mode(regs)) {
20868 local_irq_enable();
20869 error_code |= PF_USER;
20870 } else {
20871 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20872 might_sleep();
20873 }
20874
20875 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20876 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20877 + return;
20878 +#endif
20879 +
20880 vma = find_vma(mm, address);
20881 if (unlikely(!vma)) {
20882 bad_area(regs, error_code, address);
20883 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20884 bad_area(regs, error_code, address);
20885 return;
20886 }
20887 - if (error_code & PF_USER) {
20888 - /*
20889 - * Accessing the stack below %sp is always a bug.
20890 - * The large cushion allows instructions like enter
20891 - * and pusha to work. ("enter $65535, $31" pushes
20892 - * 32 pointers and then decrements %sp by 65535.)
20893 - */
20894 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20895 - bad_area(regs, error_code, address);
20896 - return;
20897 - }
20898 + /*
20899 + * Accessing the stack below %sp is always a bug.
20900 + * The large cushion allows instructions like enter
20901 + * and pusha to work. ("enter $65535, $31" pushes
20902 + * 32 pointers and then decrements %sp by 65535.)
20903 + */
20904 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20905 + bad_area(regs, error_code, address);
20906 + return;
20907 + }
20908 +
20909 +#ifdef CONFIG_PAX_SEGMEXEC
20910 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20911 + bad_area(regs, error_code, address);
20912 + return;
20913 }
20914 +#endif
20915 +
20916 if (unlikely(expand_stack(vma, address))) {
20917 bad_area(regs, error_code, address);
20918 return;
20919 @@ -1146,3 +1416,199 @@ good_area:
20920
20921 up_read(&mm->mmap_sem);
20922 }
20923 +
20924 +#ifdef CONFIG_PAX_EMUTRAMP
20925 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20926 +{
20927 + int err;
20928 +
20929 + do { /* PaX: gcc trampoline emulation #1 */
20930 + unsigned char mov1, mov2;
20931 + unsigned short jmp;
20932 + unsigned int addr1, addr2;
20933 +
20934 +#ifdef CONFIG_X86_64
20935 + if ((regs->ip + 11) >> 32)
20936 + break;
20937 +#endif
20938 +
20939 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20940 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20941 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20942 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20943 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20944 +
20945 + if (err)
20946 + break;
20947 +
20948 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20949 + regs->cx = addr1;
20950 + regs->ax = addr2;
20951 + regs->ip = addr2;
20952 + return 2;
20953 + }
20954 + } while (0);
20955 +
20956 + do { /* PaX: gcc trampoline emulation #2 */
20957 + unsigned char mov, jmp;
20958 + unsigned int addr1, addr2;
20959 +
20960 +#ifdef CONFIG_X86_64
20961 + if ((regs->ip + 9) >> 32)
20962 + break;
20963 +#endif
20964 +
20965 + err = get_user(mov, (unsigned char __user *)regs->ip);
20966 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20967 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20968 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20969 +
20970 + if (err)
20971 + break;
20972 +
20973 + if (mov == 0xB9 && jmp == 0xE9) {
20974 + regs->cx = addr1;
20975 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20976 + return 2;
20977 + }
20978 + } while (0);
20979 +
20980 + return 1; /* PaX in action */
20981 +}
20982 +
20983 +#ifdef CONFIG_X86_64
20984 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20985 +{
20986 + int err;
20987 +
20988 + do { /* PaX: gcc trampoline emulation #1 */
20989 + unsigned short mov1, mov2, jmp1;
20990 + unsigned char jmp2;
20991 + unsigned int addr1;
20992 + unsigned long addr2;
20993 +
20994 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20995 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20996 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20997 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20998 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20999 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21000 +
21001 + if (err)
21002 + break;
21003 +
21004 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21005 + regs->r11 = addr1;
21006 + regs->r10 = addr2;
21007 + regs->ip = addr1;
21008 + return 2;
21009 + }
21010 + } while (0);
21011 +
21012 + do { /* PaX: gcc trampoline emulation #2 */
21013 + unsigned short mov1, mov2, jmp1;
21014 + unsigned char jmp2;
21015 + unsigned long addr1, addr2;
21016 +
21017 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21018 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21019 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21020 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21021 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21022 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21023 +
21024 + if (err)
21025 + break;
21026 +
21027 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21028 + regs->r11 = addr1;
21029 + regs->r10 = addr2;
21030 + regs->ip = addr1;
21031 + return 2;
21032 + }
21033 + } while (0);
21034 +
21035 + return 1; /* PaX in action */
21036 +}
21037 +#endif
21038 +
21039 +/*
21040 + * PaX: decide what to do with offenders (regs->ip = fault address)
21041 + *
21042 + * returns 1 when task should be killed
21043 + * 2 when gcc trampoline was detected
21044 + */
21045 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21046 +{
21047 + if (v8086_mode(regs))
21048 + return 1;
21049 +
21050 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21051 + return 1;
21052 +
21053 +#ifdef CONFIG_X86_32
21054 + return pax_handle_fetch_fault_32(regs);
21055 +#else
21056 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21057 + return pax_handle_fetch_fault_32(regs);
21058 + else
21059 + return pax_handle_fetch_fault_64(regs);
21060 +#endif
21061 +}
21062 +#endif
21063 +
21064 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21065 +void pax_report_insns(void *pc, void *sp)
21066 +{
21067 + long i;
21068 +
21069 + printk(KERN_ERR "PAX: bytes at PC: ");
21070 + for (i = 0; i < 20; i++) {
21071 + unsigned char c;
21072 + if (get_user(c, (__force unsigned char __user *)pc+i))
21073 + printk(KERN_CONT "?? ");
21074 + else
21075 + printk(KERN_CONT "%02x ", c);
21076 + }
21077 + printk("\n");
21078 +
21079 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21080 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21081 + unsigned long c;
21082 + if (get_user(c, (__force unsigned long __user *)sp+i))
21083 +#ifdef CONFIG_X86_32
21084 + printk(KERN_CONT "???????? ");
21085 +#else
21086 + printk(KERN_CONT "???????????????? ");
21087 +#endif
21088 + else
21089 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21090 + }
21091 + printk("\n");
21092 +}
21093 +#endif
21094 +
21095 +/**
21096 + * probe_kernel_write(): safely attempt to write to a location
21097 + * @dst: address to write to
21098 + * @src: pointer to the data that shall be written
21099 + * @size: size of the data chunk
21100 + *
21101 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21102 + * happens, handle that and return -EFAULT.
21103 + */
21104 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21105 +{
21106 + long ret;
21107 + mm_segment_t old_fs = get_fs();
21108 +
21109 + set_fs(KERNEL_DS);
21110 + pagefault_disable();
21111 + pax_open_kernel();
21112 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21113 + pax_close_kernel();
21114 + pagefault_enable();
21115 + set_fs(old_fs);
21116 +
21117 + return ret ? -EFAULT : 0;
21118 +}
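The magic byte sequences checked by pax_handle_fetch_fault_32()/pax_handle_fetch_fault_64() above are the stack trampolines gcc generates when the address of a nested function that uses its enclosing frame escapes; with EMUTRAMP enabled, PaX emulates those few instructions instead of delivering the PAGEEXEC kill. A minimal user-space sketch of code that makes gcc emit such a trampoline, assuming the GNU C nested-function extension (the function names here are illustrative only, not taken from the patch):

/*
 * Taking helper's address forces gcc to build a small executable
 * trampoline on the stack that loads the static chain (so helper can
 * reach "captured") and jumps to the real function body; executing that
 * trampoline from a non-executable stack is the instruction-fetch fault
 * the EMUTRAMP handlers above recognize and emulate.
 */
#include <stdio.h>

static void invoke(void (*cb)(int), int arg)
{
	cb(arg);
}

int main(void)
{
	int captured = 42;

	void helper(int arg)	/* GNU C nested function */
	{
		printf("%d %d\n", arg, captured);
	}

	invoke(helper, 7);	/* the passed pointer points at the stack trampoline */
	return 0;
}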
21119 diff -urNp linux-2.6.32.43/arch/x86/mm/gup.c linux-2.6.32.43/arch/x86/mm/gup.c
21120 --- linux-2.6.32.43/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21121 +++ linux-2.6.32.43/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21122 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21123 addr = start;
21124 len = (unsigned long) nr_pages << PAGE_SHIFT;
21125 end = start + len;
21126 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21127 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21128 (void __user *)start, len)))
21129 return 0;
21130
21131 diff -urNp linux-2.6.32.43/arch/x86/mm/highmem_32.c linux-2.6.32.43/arch/x86/mm/highmem_32.c
21132 --- linux-2.6.32.43/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21133 +++ linux-2.6.32.43/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21134 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21135 idx = type + KM_TYPE_NR*smp_processor_id();
21136 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21137 BUG_ON(!pte_none(*(kmap_pte-idx)));
21138 +
21139 + pax_open_kernel();
21140 set_pte(kmap_pte-idx, mk_pte(page, prot));
21141 + pax_close_kernel();
21142
21143 return (void *)vaddr;
21144 }
21145 diff -urNp linux-2.6.32.43/arch/x86/mm/hugetlbpage.c linux-2.6.32.43/arch/x86/mm/hugetlbpage.c
21146 --- linux-2.6.32.43/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21147 +++ linux-2.6.32.43/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21148 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21149 struct hstate *h = hstate_file(file);
21150 struct mm_struct *mm = current->mm;
21151 struct vm_area_struct *vma;
21152 - unsigned long start_addr;
21153 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21154 +
21155 +#ifdef CONFIG_PAX_SEGMEXEC
21156 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21157 + pax_task_size = SEGMEXEC_TASK_SIZE;
21158 +#endif
21159 +
21160 + pax_task_size -= PAGE_SIZE;
21161
21162 if (len > mm->cached_hole_size) {
21163 - start_addr = mm->free_area_cache;
21164 + start_addr = mm->free_area_cache;
21165 } else {
21166 - start_addr = TASK_UNMAPPED_BASE;
21167 - mm->cached_hole_size = 0;
21168 + start_addr = mm->mmap_base;
21169 + mm->cached_hole_size = 0;
21170 }
21171
21172 full_search:
21173 @@ -281,26 +288,27 @@ full_search:
21174
21175 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21176 /* At this point: (!vma || addr < vma->vm_end). */
21177 - if (TASK_SIZE - len < addr) {
21178 + if (pax_task_size - len < addr) {
21179 /*
21180 * Start a new search - just in case we missed
21181 * some holes.
21182 */
21183 - if (start_addr != TASK_UNMAPPED_BASE) {
21184 - start_addr = TASK_UNMAPPED_BASE;
21185 + if (start_addr != mm->mmap_base) {
21186 + start_addr = mm->mmap_base;
21187 mm->cached_hole_size = 0;
21188 goto full_search;
21189 }
21190 return -ENOMEM;
21191 }
21192 - if (!vma || addr + len <= vma->vm_start) {
21193 - mm->free_area_cache = addr + len;
21194 - return addr;
21195 - }
21196 + if (check_heap_stack_gap(vma, addr, len))
21197 + break;
21198 if (addr + mm->cached_hole_size < vma->vm_start)
21199 mm->cached_hole_size = vma->vm_start - addr;
21200 addr = ALIGN(vma->vm_end, huge_page_size(h));
21201 }
21202 +
21203 + mm->free_area_cache = addr + len;
21204 + return addr;
21205 }
21206
21207 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21208 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21209 {
21210 struct hstate *h = hstate_file(file);
21211 struct mm_struct *mm = current->mm;
21212 - struct vm_area_struct *vma, *prev_vma;
21213 - unsigned long base = mm->mmap_base, addr = addr0;
21214 + struct vm_area_struct *vma;
21215 + unsigned long base = mm->mmap_base, addr;
21216 unsigned long largest_hole = mm->cached_hole_size;
21217 - int first_time = 1;
21218
21219 /* don't allow allocations above current base */
21220 if (mm->free_area_cache > base)
21221 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21222 largest_hole = 0;
21223 mm->free_area_cache = base;
21224 }
21225 -try_again:
21226 +
21227 /* make sure it can fit in the remaining address space */
21228 if (mm->free_area_cache < len)
21229 goto fail;
21230
21231 /* either no address requested or cant fit in requested address hole */
21232 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
21233 + addr = (mm->free_area_cache - len);
21234 do {
21235 + addr &= huge_page_mask(h);
21236 + vma = find_vma(mm, addr);
21237 /*
21238 * Lookup failure means no vma is above this address,
21239 * i.e. return with success:
21240 - */
21241 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21242 - return addr;
21243 -
21244 - /*
21245 * new region fits between prev_vma->vm_end and
21246 * vma->vm_start, use it:
21247 */
21248 - if (addr + len <= vma->vm_start &&
21249 - (!prev_vma || (addr >= prev_vma->vm_end))) {
21250 + if (check_heap_stack_gap(vma, addr, len)) {
21251 /* remember the address as a hint for next time */
21252 - mm->cached_hole_size = largest_hole;
21253 - return (mm->free_area_cache = addr);
21254 - } else {
21255 - /* pull free_area_cache down to the first hole */
21256 - if (mm->free_area_cache == vma->vm_end) {
21257 - mm->free_area_cache = vma->vm_start;
21258 - mm->cached_hole_size = largest_hole;
21259 - }
21260 + mm->cached_hole_size = largest_hole;
21261 + return (mm->free_area_cache = addr);
21262 + }
21263 + /* pull free_area_cache down to the first hole */
21264 + if (mm->free_area_cache == vma->vm_end) {
21265 + mm->free_area_cache = vma->vm_start;
21266 + mm->cached_hole_size = largest_hole;
21267 }
21268
21269 /* remember the largest hole we saw so far */
21270 if (addr + largest_hole < vma->vm_start)
21271 - largest_hole = vma->vm_start - addr;
21272 + largest_hole = vma->vm_start - addr;
21273
21274 /* try just below the current vma->vm_start */
21275 - addr = (vma->vm_start - len) & huge_page_mask(h);
21276 - } while (len <= vma->vm_start);
21277 + addr = skip_heap_stack_gap(vma, len);
21278 + } while (!IS_ERR_VALUE(addr));
21279
21280 fail:
21281 /*
21282 - * if hint left us with no space for the requested
21283 - * mapping then try again:
21284 - */
21285 - if (first_time) {
21286 - mm->free_area_cache = base;
21287 - largest_hole = 0;
21288 - first_time = 0;
21289 - goto try_again;
21290 - }
21291 - /*
21292 * A failed mmap() very likely causes application failure,
21293 * so fall back to the bottom-up function here. This scenario
21294 * can happen with large stack limits and large mmap()
21295 * allocations.
21296 */
21297 - mm->free_area_cache = TASK_UNMAPPED_BASE;
21298 +
21299 +#ifdef CONFIG_PAX_SEGMEXEC
21300 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21301 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21302 + else
21303 +#endif
21304 +
21305 + mm->mmap_base = TASK_UNMAPPED_BASE;
21306 +
21307 +#ifdef CONFIG_PAX_RANDMMAP
21308 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21309 + mm->mmap_base += mm->delta_mmap;
21310 +#endif
21311 +
21312 + mm->free_area_cache = mm->mmap_base;
21313 mm->cached_hole_size = ~0UL;
21314 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21315 len, pgoff, flags);
21316 @@ -387,6 +393,7 @@ fail:
21317 /*
21318 * Restore the topdown base:
21319 */
21320 + mm->mmap_base = base;
21321 mm->free_area_cache = base;
21322 mm->cached_hole_size = ~0UL;
21323
21324 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21325 struct hstate *h = hstate_file(file);
21326 struct mm_struct *mm = current->mm;
21327 struct vm_area_struct *vma;
21328 + unsigned long pax_task_size = TASK_SIZE;
21329
21330 if (len & ~huge_page_mask(h))
21331 return -EINVAL;
21332 - if (len > TASK_SIZE)
21333 +
21334 +#ifdef CONFIG_PAX_SEGMEXEC
21335 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21336 + pax_task_size = SEGMEXEC_TASK_SIZE;
21337 +#endif
21338 +
21339 + pax_task_size -= PAGE_SIZE;
21340 +
21341 + if (len > pax_task_size)
21342 return -ENOMEM;
21343
21344 if (flags & MAP_FIXED) {
21345 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21346 if (addr) {
21347 addr = ALIGN(addr, huge_page_size(h));
21348 vma = find_vma(mm, addr);
21349 - if (TASK_SIZE - len >= addr &&
21350 - (!vma || addr + len <= vma->vm_start))
21351 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21352 return addr;
21353 }
21354 if (mm->get_unmapped_area == arch_get_unmapped_area)
21355 diff -urNp linux-2.6.32.43/arch/x86/mm/init_32.c linux-2.6.32.43/arch/x86/mm/init_32.c
21356 --- linux-2.6.32.43/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21357 +++ linux-2.6.32.43/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21358 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21359 }
21360
21361 /*
21362 - * Creates a middle page table and puts a pointer to it in the
21363 - * given global directory entry. This only returns the gd entry
21364 - * in non-PAE compilation mode, since the middle layer is folded.
21365 - */
21366 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
21367 -{
21368 - pud_t *pud;
21369 - pmd_t *pmd_table;
21370 -
21371 -#ifdef CONFIG_X86_PAE
21372 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21373 - if (after_bootmem)
21374 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21375 - else
21376 - pmd_table = (pmd_t *)alloc_low_page();
21377 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21378 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21379 - pud = pud_offset(pgd, 0);
21380 - BUG_ON(pmd_table != pmd_offset(pud, 0));
21381 -
21382 - return pmd_table;
21383 - }
21384 -#endif
21385 - pud = pud_offset(pgd, 0);
21386 - pmd_table = pmd_offset(pud, 0);
21387 -
21388 - return pmd_table;
21389 -}
21390 -
21391 -/*
21392 * Create a page table and place a pointer to it in a middle page
21393 * directory entry:
21394 */
21395 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21396 page_table = (pte_t *)alloc_low_page();
21397
21398 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21399 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21400 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21401 +#else
21402 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21403 +#endif
21404 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21405 }
21406
21407 return pte_offset_kernel(pmd, 0);
21408 }
21409
21410 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
21411 +{
21412 + pud_t *pud;
21413 + pmd_t *pmd_table;
21414 +
21415 + pud = pud_offset(pgd, 0);
21416 + pmd_table = pmd_offset(pud, 0);
21417 +
21418 + return pmd_table;
21419 +}
21420 +
21421 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21422 {
21423 int pgd_idx = pgd_index(vaddr);
21424 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21425 int pgd_idx, pmd_idx;
21426 unsigned long vaddr;
21427 pgd_t *pgd;
21428 + pud_t *pud;
21429 pmd_t *pmd;
21430 pte_t *pte = NULL;
21431
21432 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21433 pgd = pgd_base + pgd_idx;
21434
21435 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21436 - pmd = one_md_table_init(pgd);
21437 - pmd = pmd + pmd_index(vaddr);
21438 + pud = pud_offset(pgd, vaddr);
21439 + pmd = pmd_offset(pud, vaddr);
21440 +
21441 +#ifdef CONFIG_X86_PAE
21442 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21443 +#endif
21444 +
21445 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21446 pmd++, pmd_idx++) {
21447 pte = page_table_kmap_check(one_page_table_init(pmd),
21448 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21449 }
21450 }
21451
21452 -static inline int is_kernel_text(unsigned long addr)
21453 +static inline int is_kernel_text(unsigned long start, unsigned long end)
21454 {
21455 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21456 - return 1;
21457 - return 0;
21458 + if ((start > ktla_ktva((unsigned long)_etext) ||
21459 + end <= ktla_ktva((unsigned long)_stext)) &&
21460 + (start > ktla_ktva((unsigned long)_einittext) ||
21461 + end <= ktla_ktva((unsigned long)_sinittext)) &&
21462 +
21463 +#ifdef CONFIG_ACPI_SLEEP
21464 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21465 +#endif
21466 +
21467 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21468 + return 0;
21469 + return 1;
21470 }
21471
21472 /*
21473 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21474 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21475 unsigned long start_pfn, end_pfn;
21476 pgd_t *pgd_base = swapper_pg_dir;
21477 - int pgd_idx, pmd_idx, pte_ofs;
21478 + unsigned int pgd_idx, pmd_idx, pte_ofs;
21479 unsigned long pfn;
21480 pgd_t *pgd;
21481 + pud_t *pud;
21482 pmd_t *pmd;
21483 pte_t *pte;
21484 unsigned pages_2m, pages_4k;
21485 @@ -278,8 +279,13 @@ repeat:
21486 pfn = start_pfn;
21487 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21488 pgd = pgd_base + pgd_idx;
21489 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21490 - pmd = one_md_table_init(pgd);
21491 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21492 + pud = pud_offset(pgd, 0);
21493 + pmd = pmd_offset(pud, 0);
21494 +
21495 +#ifdef CONFIG_X86_PAE
21496 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21497 +#endif
21498
21499 if (pfn >= end_pfn)
21500 continue;
21501 @@ -291,14 +297,13 @@ repeat:
21502 #endif
21503 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21504 pmd++, pmd_idx++) {
21505 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21506 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21507
21508 /*
21509 * Map with big pages if possible, otherwise
21510 * create normal page tables:
21511 */
21512 if (use_pse) {
21513 - unsigned int addr2;
21514 pgprot_t prot = PAGE_KERNEL_LARGE;
21515 /*
21516 * first pass will use the same initial
21517 @@ -308,11 +313,7 @@ repeat:
21518 __pgprot(PTE_IDENT_ATTR |
21519 _PAGE_PSE);
21520
21521 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21522 - PAGE_OFFSET + PAGE_SIZE-1;
21523 -
21524 - if (is_kernel_text(addr) ||
21525 - is_kernel_text(addr2))
21526 + if (is_kernel_text(address, address + PMD_SIZE))
21527 prot = PAGE_KERNEL_LARGE_EXEC;
21528
21529 pages_2m++;
21530 @@ -329,7 +330,7 @@ repeat:
21531 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21532 pte += pte_ofs;
21533 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21534 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21535 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21536 pgprot_t prot = PAGE_KERNEL;
21537 /*
21538 * first pass will use the same initial
21539 @@ -337,7 +338,7 @@ repeat:
21540 */
21541 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21542
21543 - if (is_kernel_text(addr))
21544 + if (is_kernel_text(address, address + PAGE_SIZE))
21545 prot = PAGE_KERNEL_EXEC;
21546
21547 pages_4k++;
21548 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21549
21550 pud = pud_offset(pgd, va);
21551 pmd = pmd_offset(pud, va);
21552 - if (!pmd_present(*pmd))
21553 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
21554 break;
21555
21556 pte = pte_offset_kernel(pmd, va);
21557 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21558
21559 static void __init pagetable_init(void)
21560 {
21561 - pgd_t *pgd_base = swapper_pg_dir;
21562 -
21563 - permanent_kmaps_init(pgd_base);
21564 + permanent_kmaps_init(swapper_pg_dir);
21565 }
21566
21567 #ifdef CONFIG_ACPI_SLEEP
21568 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21569 * ACPI suspend needs this for resume, because things like the intel-agp
21570 * driver might have split up a kernel 4MB mapping.
21571 */
21572 -char swsusp_pg_dir[PAGE_SIZE]
21573 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21574 __attribute__ ((aligned(PAGE_SIZE)));
21575
21576 static inline void save_pg_dir(void)
21577 {
21578 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21579 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21580 }
21581 #else /* !CONFIG_ACPI_SLEEP */
21582 static inline void save_pg_dir(void)
21583 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21584 flush_tlb_all();
21585 }
21586
21587 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21588 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21589 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21590
21591 /* user-defined highmem size */
21592 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21593 * Initialize the boot-time allocator (with low memory only):
21594 */
21595 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21596 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21597 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21598 PAGE_SIZE);
21599 if (bootmap == -1L)
21600 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21601 @@ -864,6 +863,12 @@ void __init mem_init(void)
21602
21603 pci_iommu_alloc();
21604
21605 +#ifdef CONFIG_PAX_PER_CPU_PGD
21606 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21607 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21608 + KERNEL_PGD_PTRS);
21609 +#endif
21610 +
21611 #ifdef CONFIG_FLATMEM
21612 BUG_ON(!mem_map);
21613 #endif
21614 @@ -881,7 +886,7 @@ void __init mem_init(void)
21615 set_highmem_pages_init();
21616
21617 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21618 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21619 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21620 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21621
21622 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21623 @@ -923,10 +928,10 @@ void __init mem_init(void)
21624 ((unsigned long)&__init_end -
21625 (unsigned long)&__init_begin) >> 10,
21626
21627 - (unsigned long)&_etext, (unsigned long)&_edata,
21628 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21629 + (unsigned long)&_sdata, (unsigned long)&_edata,
21630 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21631
21632 - (unsigned long)&_text, (unsigned long)&_etext,
21633 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21634 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21635
21636 /*
21637 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21638 if (!kernel_set_to_readonly)
21639 return;
21640
21641 + start = ktla_ktva(start);
21642 pr_debug("Set kernel text: %lx - %lx for read write\n",
21643 start, start+size);
21644
21645 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21646 if (!kernel_set_to_readonly)
21647 return;
21648
21649 + start = ktla_ktva(start);
21650 pr_debug("Set kernel text: %lx - %lx for read only\n",
21651 start, start+size);
21652
21653 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21654 unsigned long start = PFN_ALIGN(_text);
21655 unsigned long size = PFN_ALIGN(_etext) - start;
21656
21657 + start = ktla_ktva(start);
21658 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21659 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21660 size >> 10);
21661 diff -urNp linux-2.6.32.43/arch/x86/mm/init_64.c linux-2.6.32.43/arch/x86/mm/init_64.c
21662 --- linux-2.6.32.43/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21663 +++ linux-2.6.32.43/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21664 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21665 pmd = fill_pmd(pud, vaddr);
21666 pte = fill_pte(pmd, vaddr);
21667
21668 + pax_open_kernel();
21669 set_pte(pte, new_pte);
21670 + pax_close_kernel();
21671
21672 /*
21673 * It's enough to flush this one mapping.
21674 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21675 pgd = pgd_offset_k((unsigned long)__va(phys));
21676 if (pgd_none(*pgd)) {
21677 pud = (pud_t *) spp_getpage();
21678 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21679 - _PAGE_USER));
21680 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21681 }
21682 pud = pud_offset(pgd, (unsigned long)__va(phys));
21683 if (pud_none(*pud)) {
21684 pmd = (pmd_t *) spp_getpage();
21685 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21686 - _PAGE_USER));
21687 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21688 }
21689 pmd = pmd_offset(pud, phys);
21690 BUG_ON(!pmd_none(*pmd));
21691 @@ -675,6 +675,12 @@ void __init mem_init(void)
21692
21693 pci_iommu_alloc();
21694
21695 +#ifdef CONFIG_PAX_PER_CPU_PGD
21696 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21697 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21698 + KERNEL_PGD_PTRS);
21699 +#endif
21700 +
21701 /* clear_bss() already clear the empty_zero_page */
21702
21703 reservedpages = 0;
21704 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21705 static struct vm_area_struct gate_vma = {
21706 .vm_start = VSYSCALL_START,
21707 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21708 - .vm_page_prot = PAGE_READONLY_EXEC,
21709 - .vm_flags = VM_READ | VM_EXEC
21710 + .vm_page_prot = PAGE_READONLY,
21711 + .vm_flags = VM_READ
21712 };
21713
21714 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21715 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21716
21717 const char *arch_vma_name(struct vm_area_struct *vma)
21718 {
21719 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21720 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21721 return "[vdso]";
21722 if (vma == &gate_vma)
21723 return "[vsyscall]";
21724 diff -urNp linux-2.6.32.43/arch/x86/mm/init.c linux-2.6.32.43/arch/x86/mm/init.c
21725 --- linux-2.6.32.43/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21726 +++ linux-2.6.32.43/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21727 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21728 * cause a hotspot and fill up ZONE_DMA. The page tables
21729 * need roughly 0.5KB per GB.
21730 */
21731 -#ifdef CONFIG_X86_32
21732 - start = 0x7000;
21733 -#else
21734 - start = 0x8000;
21735 -#endif
21736 + start = 0x100000;
21737 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21738 tables, PAGE_SIZE);
21739 if (e820_table_start == -1UL)
21740 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21741 #endif
21742
21743 set_nx();
21744 - if (nx_enabled)
21745 + if (nx_enabled && cpu_has_nx)
21746 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21747
21748 /* Enable PSE if available */
21749 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21750 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21751 * mmio resources as well as potential bios/acpi data regions.
21752 */
21753 +
21754 int devmem_is_allowed(unsigned long pagenr)
21755 {
21756 +#ifdef CONFIG_GRKERNSEC_KMEM
21757 + /* allow BDA */
21758 + if (!pagenr)
21759 + return 1;
21760 + /* allow EBDA */
21761 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21762 + return 1;
21763 + /* allow ISA/video mem */
21764 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21765 + return 1;
21766 + /* throw out everything else below 1MB */
21767 + if (pagenr <= 256)
21768 + return 0;
21769 +#else
21770 if (pagenr <= 256)
21771 return 1;
21772 +#endif
21773 +
21774 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21775 return 0;
21776 if (!page_is_ram(pagenr))
21777 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21778
21779 void free_initmem(void)
21780 {
21781 +
21782 +#ifdef CONFIG_PAX_KERNEXEC
21783 +#ifdef CONFIG_X86_32
21784 + /* PaX: limit KERNEL_CS to actual size */
21785 + unsigned long addr, limit;
21786 + struct desc_struct d;
21787 + int cpu;
21788 +
21789 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21790 + limit = (limit - 1UL) >> PAGE_SHIFT;
21791 +
21792 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21793 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21794 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21795 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21796 + }
21797 +
21798 + /* PaX: make KERNEL_CS read-only */
21799 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21800 + if (!paravirt_enabled())
21801 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21802 +/*
21803 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21804 + pgd = pgd_offset_k(addr);
21805 + pud = pud_offset(pgd, addr);
21806 + pmd = pmd_offset(pud, addr);
21807 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21808 + }
21809 +*/
21810 +#ifdef CONFIG_X86_PAE
21811 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21812 +/*
21813 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21814 + pgd = pgd_offset_k(addr);
21815 + pud = pud_offset(pgd, addr);
21816 + pmd = pmd_offset(pud, addr);
21817 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21818 + }
21819 +*/
21820 +#endif
21821 +
21822 +#ifdef CONFIG_MODULES
21823 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21824 +#endif
21825 +
21826 +#else
21827 + pgd_t *pgd;
21828 + pud_t *pud;
21829 + pmd_t *pmd;
21830 + unsigned long addr, end;
21831 +
21832 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21833 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21834 + pgd = pgd_offset_k(addr);
21835 + pud = pud_offset(pgd, addr);
21836 + pmd = pmd_offset(pud, addr);
21837 + if (!pmd_present(*pmd))
21838 + continue;
21839 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21840 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21841 + else
21842 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21843 + }
21844 +
21845 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21846 + end = addr + KERNEL_IMAGE_SIZE;
21847 + for (; addr < end; addr += PMD_SIZE) {
21848 + pgd = pgd_offset_k(addr);
21849 + pud = pud_offset(pgd, addr);
21850 + pmd = pmd_offset(pud, addr);
21851 + if (!pmd_present(*pmd))
21852 + continue;
21853 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21854 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21855 + }
21856 +#endif
21857 +
21858 + flush_tlb_all();
21859 +#endif
21860 +
21861 free_init_pages("unused kernel memory",
21862 (unsigned long)(&__init_begin),
21863 (unsigned long)(&__init_end));
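For the devmem_is_allowed() hunk above, the constants line up with physical addresses as pagenr = phys >> PAGE_SHIFT with 4 KiB pages: pagenr 0 is the real-mode BDA, 0x9f000 >> 12 = 0x9f is the usual EBDA page, ISA_START_ADDRESS (0xa0000) through ISA_END_ADDRESS (0x100000) cover pagenrs 0xa0-0xff of legacy ISA/video memory, and pagenr 256 sits exactly at the 1 MiB boundary. Under GRKERNSEC_KMEM only those whitelisted low ranges of /dev/mem stay readable, while the remainder of the first megabyte is refused instead of being allowed wholesale as in the stock pagenr <= 256 check.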
21864 diff -urNp linux-2.6.32.43/arch/x86/mm/iomap_32.c linux-2.6.32.43/arch/x86/mm/iomap_32.c
21865 --- linux-2.6.32.43/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21866 +++ linux-2.6.32.43/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21867 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21868 debug_kmap_atomic(type);
21869 idx = type + KM_TYPE_NR * smp_processor_id();
21870 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21871 +
21872 + pax_open_kernel();
21873 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21874 + pax_close_kernel();
21875 +
21876 arch_flush_lazy_mmu_mode();
21877
21878 return (void *)vaddr;
21879 diff -urNp linux-2.6.32.43/arch/x86/mm/ioremap.c linux-2.6.32.43/arch/x86/mm/ioremap.c
21880 --- linux-2.6.32.43/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21881 +++ linux-2.6.32.43/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21882 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21883 * Second special case: Some BIOSen report the PC BIOS
21884 * area (640->1Mb) as ram even though it is not.
21885 */
21886 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21887 - pagenr < (BIOS_END >> PAGE_SHIFT))
21888 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21889 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21890 return 0;
21891
21892 for (i = 0; i < e820.nr_map; i++) {
21893 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21894 /*
21895 * Don't allow anybody to remap normal RAM that we're using..
21896 */
21897 - for (pfn = phys_addr >> PAGE_SHIFT;
21898 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21899 - pfn++) {
21900 -
21901 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21902 int is_ram = page_is_ram(pfn);
21903
21904 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21905 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21906 return NULL;
21907 WARN_ON_ONCE(is_ram);
21908 }
21909 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21910 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21911
21912 static __initdata int after_paging_init;
21913 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21914 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21915
21916 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21917 {
21918 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21919 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21920
21921 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21922 - memset(bm_pte, 0, sizeof(bm_pte));
21923 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21924 + pmd_populate_user(&init_mm, pmd, bm_pte);
21925
21926 /*
21927 * The boot-ioremap range spans multiple pmds, for which
21928 diff -urNp linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c
21929 --- linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21930 +++ linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21931 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21932 * memory (e.g. tracked pages)? For now, we need this to avoid
21933 * invoking kmemcheck for PnP BIOS calls.
21934 */
21935 - if (regs->flags & X86_VM_MASK)
21936 + if (v8086_mode(regs))
21937 return false;
21938 - if (regs->cs != __KERNEL_CS)
21939 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21940 return false;
21941
21942 pte = kmemcheck_pte_lookup(address);
21943 diff -urNp linux-2.6.32.43/arch/x86/mm/mmap.c linux-2.6.32.43/arch/x86/mm/mmap.c
21944 --- linux-2.6.32.43/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21945 +++ linux-2.6.32.43/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21946 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21947 * Leave an at least ~128 MB hole with possible stack randomization.
21948 */
21949 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21950 -#define MAX_GAP (TASK_SIZE/6*5)
21951 +#define MAX_GAP (pax_task_size/6*5)
21952
21953 /*
21954 * True on X86_32 or when emulating IA32 on X86_64
21955 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21956 return rnd << PAGE_SHIFT;
21957 }
21958
21959 -static unsigned long mmap_base(void)
21960 +static unsigned long mmap_base(struct mm_struct *mm)
21961 {
21962 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21963 + unsigned long pax_task_size = TASK_SIZE;
21964 +
21965 +#ifdef CONFIG_PAX_SEGMEXEC
21966 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21967 + pax_task_size = SEGMEXEC_TASK_SIZE;
21968 +#endif
21969
21970 if (gap < MIN_GAP)
21971 gap = MIN_GAP;
21972 else if (gap > MAX_GAP)
21973 gap = MAX_GAP;
21974
21975 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21976 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21977 }
21978
21979 /*
21980 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21981 * does, but not when emulating X86_32
21982 */
21983 -static unsigned long mmap_legacy_base(void)
21984 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21985 {
21986 - if (mmap_is_ia32())
21987 + if (mmap_is_ia32()) {
21988 +
21989 +#ifdef CONFIG_PAX_SEGMEXEC
21990 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21991 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21992 + else
21993 +#endif
21994 +
21995 return TASK_UNMAPPED_BASE;
21996 - else
21997 + } else
21998 return TASK_UNMAPPED_BASE + mmap_rnd();
21999 }
22000
22001 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22002 void arch_pick_mmap_layout(struct mm_struct *mm)
22003 {
22004 if (mmap_is_legacy()) {
22005 - mm->mmap_base = mmap_legacy_base();
22006 + mm->mmap_base = mmap_legacy_base(mm);
22007 +
22008 +#ifdef CONFIG_PAX_RANDMMAP
22009 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22010 + mm->mmap_base += mm->delta_mmap;
22011 +#endif
22012 +
22013 mm->get_unmapped_area = arch_get_unmapped_area;
22014 mm->unmap_area = arch_unmap_area;
22015 } else {
22016 - mm->mmap_base = mmap_base();
22017 + mm->mmap_base = mmap_base(mm);
22018 +
22019 +#ifdef CONFIG_PAX_RANDMMAP
22020 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22021 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22022 +#endif
22023 +
22024 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22025 mm->unmap_area = arch_unmap_area_topdown;
22026 }
22027 diff -urNp linux-2.6.32.43/arch/x86/mm/mmio-mod.c linux-2.6.32.43/arch/x86/mm/mmio-mod.c
22028 --- linux-2.6.32.43/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22029 +++ linux-2.6.32.43/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22030 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22031 break;
22032 default:
22033 {
22034 - unsigned char *ip = (unsigned char *)instptr;
22035 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22036 my_trace->opcode = MMIO_UNKNOWN_OP;
22037 my_trace->width = 0;
22038 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22039 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22040 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22041 void __iomem *addr)
22042 {
22043 - static atomic_t next_id;
22044 + static atomic_unchecked_t next_id;
22045 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22046 /* These are page-unaligned. */
22047 struct mmiotrace_map map = {
22048 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22049 .private = trace
22050 },
22051 .phys = offset,
22052 - .id = atomic_inc_return(&next_id)
22053 + .id = atomic_inc_return_unchecked(&next_id)
22054 };
22055 map.map_id = trace->id;
22056
22057 diff -urNp linux-2.6.32.43/arch/x86/mm/numa_32.c linux-2.6.32.43/arch/x86/mm/numa_32.c
22058 --- linux-2.6.32.43/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22059 +++ linux-2.6.32.43/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22060 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22061 }
22062 #endif
22063
22064 -extern unsigned long find_max_low_pfn(void);
22065 extern unsigned long highend_pfn, highstart_pfn;
22066
22067 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22068 diff -urNp linux-2.6.32.43/arch/x86/mm/pageattr.c linux-2.6.32.43/arch/x86/mm/pageattr.c
22069 --- linux-2.6.32.43/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22070 +++ linux-2.6.32.43/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22071 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22072 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22073 */
22074 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22075 - pgprot_val(forbidden) |= _PAGE_NX;
22076 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22077
22078 /*
22079 * The kernel text needs to be executable for obvious reasons
22080 * Does not cover __inittext since that is gone later on. On
22081 * 64bit we do not enforce !NX on the low mapping
22082 */
22083 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22084 - pgprot_val(forbidden) |= _PAGE_NX;
22085 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22086 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22087
22088 +#ifdef CONFIG_DEBUG_RODATA
22089 /*
22090 * The .rodata section needs to be read-only. Using the pfn
22091 * catches all aliases.
22092 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22093 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22094 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22095 pgprot_val(forbidden) |= _PAGE_RW;
22096 +#endif
22097 +
22098 +#ifdef CONFIG_PAX_KERNEXEC
22099 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22100 + pgprot_val(forbidden) |= _PAGE_RW;
22101 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22102 + }
22103 +#endif
22104
22105 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22106
22107 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22108 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22109 {
22110 /* change init_mm */
22111 + pax_open_kernel();
22112 set_pte_atomic(kpte, pte);
22113 +
22114 #ifdef CONFIG_X86_32
22115 if (!SHARED_KERNEL_PMD) {
22116 +
22117 +#ifdef CONFIG_PAX_PER_CPU_PGD
22118 + unsigned long cpu;
22119 +#else
22120 struct page *page;
22121 +#endif
22122
22123 +#ifdef CONFIG_PAX_PER_CPU_PGD
22124 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22125 + pgd_t *pgd = get_cpu_pgd(cpu);
22126 +#else
22127 list_for_each_entry(page, &pgd_list, lru) {
22128 - pgd_t *pgd;
22129 + pgd_t *pgd = (pgd_t *)page_address(page);
22130 +#endif
22131 +
22132 pud_t *pud;
22133 pmd_t *pmd;
22134
22135 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22136 + pgd += pgd_index(address);
22137 pud = pud_offset(pgd, address);
22138 pmd = pmd_offset(pud, address);
22139 set_pte_atomic((pte_t *)pmd, pte);
22140 }
22141 }
22142 #endif
22143 + pax_close_kernel();
22144 }
22145
22146 static int
22147 diff -urNp linux-2.6.32.43/arch/x86/mm/pageattr-test.c linux-2.6.32.43/arch/x86/mm/pageattr-test.c
22148 --- linux-2.6.32.43/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22149 +++ linux-2.6.32.43/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22150 @@ -36,7 +36,7 @@ enum {
22151
22152 static int pte_testbit(pte_t pte)
22153 {
22154 - return pte_flags(pte) & _PAGE_UNUSED1;
22155 + return pte_flags(pte) & _PAGE_CPA_TEST;
22156 }
22157
22158 struct split_state {
22159 diff -urNp linux-2.6.32.43/arch/x86/mm/pat.c linux-2.6.32.43/arch/x86/mm/pat.c
22160 --- linux-2.6.32.43/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22161 +++ linux-2.6.32.43/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22162 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22163
22164 conflict:
22165 printk(KERN_INFO "%s:%d conflicting memory types "
22166 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22167 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22168 new->end, cattr_name(new->type), cattr_name(entry->type));
22169 return -EBUSY;
22170 }
22171 @@ -559,7 +559,7 @@ unlock_ret:
22172
22173 if (err) {
22174 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22175 - current->comm, current->pid, start, end);
22176 + current->comm, task_pid_nr(current), start, end);
22177 }
22178
22179 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22180 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22181 while (cursor < to) {
22182 if (!devmem_is_allowed(pfn)) {
22183 printk(KERN_INFO
22184 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22185 - current->comm, from, to);
22186 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22187 + current->comm, from, to, cursor);
22188 return 0;
22189 }
22190 cursor += PAGE_SIZE;
22191 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22192 printk(KERN_INFO
22193 "%s:%d ioremap_change_attr failed %s "
22194 "for %Lx-%Lx\n",
22195 - current->comm, current->pid,
22196 + current->comm, task_pid_nr(current),
22197 cattr_name(flags),
22198 base, (unsigned long long)(base + size));
22199 return -EINVAL;
22200 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22201 free_memtype(paddr, paddr + size);
22202 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22203 " for %Lx-%Lx, got %s\n",
22204 - current->comm, current->pid,
22205 + current->comm, task_pid_nr(current),
22206 cattr_name(want_flags),
22207 (unsigned long long)paddr,
22208 (unsigned long long)(paddr + size),
22209 diff -urNp linux-2.6.32.43/arch/x86/mm/pf_in.c linux-2.6.32.43/arch/x86/mm/pf_in.c
22210 --- linux-2.6.32.43/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22211 +++ linux-2.6.32.43/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22212 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22213 int i;
22214 enum reason_type rv = OTHERS;
22215
22216 - p = (unsigned char *)ins_addr;
22217 + p = (unsigned char *)ktla_ktva(ins_addr);
22218 p += skip_prefix(p, &prf);
22219 p += get_opcode(p, &opcode);
22220
22221 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22222 struct prefix_bits prf;
22223 int i;
22224
22225 - p = (unsigned char *)ins_addr;
22226 + p = (unsigned char *)ktla_ktva(ins_addr);
22227 p += skip_prefix(p, &prf);
22228 p += get_opcode(p, &opcode);
22229
22230 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22231 struct prefix_bits prf;
22232 int i;
22233
22234 - p = (unsigned char *)ins_addr;
22235 + p = (unsigned char *)ktla_ktva(ins_addr);
22236 p += skip_prefix(p, &prf);
22237 p += get_opcode(p, &opcode);
22238
22239 @@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22240 int i;
22241 unsigned long rv;
22242
22243 - p = (unsigned char *)ins_addr;
22244 + p = (unsigned char *)ktla_ktva(ins_addr);
22245 p += skip_prefix(p, &prf);
22246 p += get_opcode(p, &opcode);
22247 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22248 @@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22249 int i;
22250 unsigned long rv;
22251
22252 - p = (unsigned char *)ins_addr;
22253 + p = (unsigned char *)ktla_ktva(ins_addr);
22254 p += skip_prefix(p, &prf);
22255 p += get_opcode(p, &opcode);
22256 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22257 diff -urNp linux-2.6.32.43/arch/x86/mm/pgtable_32.c linux-2.6.32.43/arch/x86/mm/pgtable_32.c
22258 --- linux-2.6.32.43/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22259 +++ linux-2.6.32.43/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22260 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22261 return;
22262 }
22263 pte = pte_offset_kernel(pmd, vaddr);
22264 +
22265 + pax_open_kernel();
22266 if (pte_val(pteval))
22267 set_pte_at(&init_mm, vaddr, pte, pteval);
22268 else
22269 pte_clear(&init_mm, vaddr, pte);
22270 + pax_close_kernel();
22271
22272 /*
22273 * It's enough to flush this one mapping.
22274 diff -urNp linux-2.6.32.43/arch/x86/mm/pgtable.c linux-2.6.32.43/arch/x86/mm/pgtable.c
22275 --- linux-2.6.32.43/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22276 +++ linux-2.6.32.43/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22277 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22278 list_del(&page->lru);
22279 }
22280
22281 -#define UNSHARED_PTRS_PER_PGD \
22282 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22283 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22284 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22285
22286 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22287 +{
22288 + while (count--)
22289 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22290 +}
22291 +#endif
22292 +
22293 +#ifdef CONFIG_PAX_PER_CPU_PGD
22294 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22295 +{
22296 + while (count--)
22297 +
22298 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22299 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22300 +#else
22301 + *dst++ = *src++;
22302 +#endif
22303 +
22304 +}
22305 +#endif
22306 +
22307 +#ifdef CONFIG_X86_64
22308 +#define pxd_t pud_t
22309 +#define pyd_t pgd_t
22310 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22311 +#define pxd_free(mm, pud) pud_free((mm), (pud))
22312 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22313 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22314 +#define PYD_SIZE PGDIR_SIZE
22315 +#else
22316 +#define pxd_t pmd_t
22317 +#define pyd_t pud_t
22318 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22319 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
22320 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22321 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
22322 +#define PYD_SIZE PUD_SIZE
22323 +#endif
22324 +
22325 +#ifdef CONFIG_PAX_PER_CPU_PGD
22326 +static inline void pgd_ctor(pgd_t *pgd) {}
22327 +static inline void pgd_dtor(pgd_t *pgd) {}
22328 +#else
22329 static void pgd_ctor(pgd_t *pgd)
22330 {
22331 /* If the pgd points to a shared pagetable level (either the
22332 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22333 pgd_list_del(pgd);
22334 spin_unlock_irqrestore(&pgd_lock, flags);
22335 }
22336 +#endif
22337
22338 /*
22339 * List of all pgd's needed for non-PAE so it can invalidate entries
22340 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22341 * -- wli
22342 */
22343
22344 -#ifdef CONFIG_X86_PAE
22345 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22346 /*
22347 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22348 * updating the top-level pagetable entries to guarantee the
22349 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22350 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22351 * and initialize the kernel pmds here.
22352 */
22353 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22354 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22355
22356 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22357 {
22358 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22359 */
22360 flush_tlb_mm(mm);
22361 }
22362 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22363 +#define PREALLOCATED_PXDS USER_PGD_PTRS
22364 #else /* !CONFIG_X86_PAE */
22365
22366 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22367 -#define PREALLOCATED_PMDS 0
22368 +#define PREALLOCATED_PXDS 0
22369
22370 #endif /* CONFIG_X86_PAE */
22371
22372 -static void free_pmds(pmd_t *pmds[])
22373 +static void free_pxds(pxd_t *pxds[])
22374 {
22375 int i;
22376
22377 - for(i = 0; i < PREALLOCATED_PMDS; i++)
22378 - if (pmds[i])
22379 - free_page((unsigned long)pmds[i]);
22380 + for(i = 0; i < PREALLOCATED_PXDS; i++)
22381 + if (pxds[i])
22382 + free_page((unsigned long)pxds[i]);
22383 }
22384
22385 -static int preallocate_pmds(pmd_t *pmds[])
22386 +static int preallocate_pxds(pxd_t *pxds[])
22387 {
22388 int i;
22389 bool failed = false;
22390
22391 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22392 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22393 - if (pmd == NULL)
22394 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22395 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22396 + if (pxd == NULL)
22397 failed = true;
22398 - pmds[i] = pmd;
22399 + pxds[i] = pxd;
22400 }
22401
22402 if (failed) {
22403 - free_pmds(pmds);
22404 + free_pxds(pxds);
22405 return -ENOMEM;
22406 }
22407
22408 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22409 * preallocate which never got a corresponding vma will need to be
22410 * freed manually.
22411 */
22412 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22413 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22414 {
22415 int i;
22416
22417 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
22418 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
22419 pgd_t pgd = pgdp[i];
22420
22421 if (pgd_val(pgd) != 0) {
22422 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22423 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22424
22425 - pgdp[i] = native_make_pgd(0);
22426 + set_pgd(pgdp + i, native_make_pgd(0));
22427
22428 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22429 - pmd_free(mm, pmd);
22430 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22431 + pxd_free(mm, pxd);
22432 }
22433 }
22434 }
22435
22436 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22437 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22438 {
22439 - pud_t *pud;
22440 + pyd_t *pyd;
22441 unsigned long addr;
22442 int i;
22443
22444 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22445 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22446 return;
22447
22448 - pud = pud_offset(pgd, 0);
22449 +#ifdef CONFIG_X86_64
22450 + pyd = pyd_offset(mm, 0L);
22451 +#else
22452 + pyd = pyd_offset(pgd, 0L);
22453 +#endif
22454
22455 - for (addr = i = 0; i < PREALLOCATED_PMDS;
22456 - i++, pud++, addr += PUD_SIZE) {
22457 - pmd_t *pmd = pmds[i];
22458 + for (addr = i = 0; i < PREALLOCATED_PXDS;
22459 + i++, pyd++, addr += PYD_SIZE) {
22460 + pxd_t *pxd = pxds[i];
22461
22462 if (i >= KERNEL_PGD_BOUNDARY)
22463 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22464 - sizeof(pmd_t) * PTRS_PER_PMD);
22465 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22466 + sizeof(pxd_t) * PTRS_PER_PMD);
22467
22468 - pud_populate(mm, pud, pmd);
22469 + pyd_populate(mm, pyd, pxd);
22470 }
22471 }
22472
22473 pgd_t *pgd_alloc(struct mm_struct *mm)
22474 {
22475 pgd_t *pgd;
22476 - pmd_t *pmds[PREALLOCATED_PMDS];
22477 + pxd_t *pxds[PREALLOCATED_PXDS];
22478 +
22479 unsigned long flags;
22480
22481 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22482 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22483
22484 mm->pgd = pgd;
22485
22486 - if (preallocate_pmds(pmds) != 0)
22487 + if (preallocate_pxds(pxds) != 0)
22488 goto out_free_pgd;
22489
22490 if (paravirt_pgd_alloc(mm) != 0)
22491 - goto out_free_pmds;
22492 + goto out_free_pxds;
22493
22494 /*
22495 * Make sure that pre-populating the pmds is atomic with
22496 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22497 spin_lock_irqsave(&pgd_lock, flags);
22498
22499 pgd_ctor(pgd);
22500 - pgd_prepopulate_pmd(mm, pgd, pmds);
22501 + pgd_prepopulate_pxd(mm, pgd, pxds);
22502
22503 spin_unlock_irqrestore(&pgd_lock, flags);
22504
22505 return pgd;
22506
22507 -out_free_pmds:
22508 - free_pmds(pmds);
22509 +out_free_pxds:
22510 + free_pxds(pxds);
22511 out_free_pgd:
22512 free_page((unsigned long)pgd);
22513 out:
22514 @@ -287,7 +338,7 @@ out:
22515
22516 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22517 {
22518 - pgd_mop_up_pmds(mm, pgd);
22519 + pgd_mop_up_pxds(mm, pgd);
22520 pgd_dtor(pgd);
22521 paravirt_pgd_free(mm, pgd);
22522 free_page((unsigned long)pgd);
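
The pgtable.c hunks above fold the separate pmd-under-pud (32-bit PAE) and pud-under-pgd (64-bit) preallocation paths into one body by aliasing the types and helpers behind pxd_*/pyd_* macros chosen per configuration. A rough user-space sketch of that aliasing idea follows; pmd_entry, pud_entry, pxd_entry and DEMO_64BIT are made-up stand-ins for illustration, not kernel identifiers.

/* One function body, two type bindings selected at compile time,
 * mirroring the pxd_t/pyd_t macro trick in the hunk above. */
#include <stdio.h>

typedef struct { unsigned long v; } pmd_entry;	/* stand-in for pmd_t */
typedef struct { unsigned long v; } pud_entry;	/* stand-in for pud_t */

#ifdef DEMO_64BIT
#define pxd_entry pud_entry	/* 64-bit: preallocate puds under the pgd  */
#else
#define pxd_entry pmd_entry	/* 32-bit PAE: preallocate pmds under puds */
#endif

static void init_pxds(pxd_entry *p, int n)
{
	int i;

	for (i = 0; i < n; i++)
		p[i].v = 0;	/* same body serves either binding */
}

int main(void)
{
	pxd_entry tbl[4];

	init_pxds(tbl, 4);
	printf("sizeof(pxd_entry) = %zu\n", sizeof(pxd_entry));
	return 0;
}

Compile with -DDEMO_64BIT to get the pud-style binding; without it, the pmd-style one. That is the same build-time switch the patch expresses with CONFIG_X86_64.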
22523 diff -urNp linux-2.6.32.43/arch/x86/mm/setup_nx.c linux-2.6.32.43/arch/x86/mm/setup_nx.c
22524 --- linux-2.6.32.43/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22525 +++ linux-2.6.32.43/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22526 @@ -4,11 +4,10 @@
22527
22528 #include <asm/pgtable.h>
22529
22530 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22531 int nx_enabled;
22532
22533 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22534 -static int disable_nx __cpuinitdata;
22535 -
22536 +#ifndef CONFIG_PAX_PAGEEXEC
22537 /*
22538 * noexec = on|off
22539 *
22540 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22541 if (!str)
22542 return -EINVAL;
22543 if (!strncmp(str, "on", 2)) {
22544 - __supported_pte_mask |= _PAGE_NX;
22545 - disable_nx = 0;
22546 + nx_enabled = 1;
22547 } else if (!strncmp(str, "off", 3)) {
22548 - disable_nx = 1;
22549 - __supported_pte_mask &= ~_PAGE_NX;
22550 + nx_enabled = 0;
22551 }
22552 return 0;
22553 }
22554 early_param("noexec", noexec_setup);
22555 #endif
22556 +#endif
22557
22558 #ifdef CONFIG_X86_PAE
22559 void __init set_nx(void)
22560 {
22561 - unsigned int v[4], l, h;
22562 + if (!nx_enabled && cpu_has_nx) {
22563 + unsigned l, h;
22564
22565 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22566 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22567 -
22568 - if ((v[3] & (1 << 20)) && !disable_nx) {
22569 - rdmsr(MSR_EFER, l, h);
22570 - l |= EFER_NX;
22571 - wrmsr(MSR_EFER, l, h);
22572 - nx_enabled = 1;
22573 - __supported_pte_mask |= _PAGE_NX;
22574 - }
22575 + __supported_pte_mask &= ~_PAGE_NX;
22576 + rdmsr(MSR_EFER, l, h);
22577 + l &= ~EFER_NX;
22578 + wrmsr(MSR_EFER, l, h);
22579 }
22580 }
22581 #else
22582 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22583 unsigned long efer;
22584
22585 rdmsrl(MSR_EFER, efer);
22586 - if (!(efer & EFER_NX) || disable_nx)
22587 + if (!(efer & EFER_NX) || !nx_enabled)
22588 __supported_pte_mask &= ~_PAGE_NX;
22589 }
22590 #endif
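
In the setup_nx.c rewrite above, set_nx() now clears EFER.NX and _PAGE_NX when nx_enabled is 0 (and the CPU has NX), and check_efer() keys off nx_enabled instead of the old disable_nx flag. A small user-space sketch of the bit arithmetic follows; it works on a plain variable because the real rdmsr/wrmsr need ring 0, and the initial value is just a pretend EFER low half.

/* EFER.NXE is bit 11 of IA32_EFER; "l" is the low 32 bits used above. */
#include <stdio.h>
#include <stdint.h>

#define EFER_NX (1u << 11)	/* IA32_EFER.NXE */

int main(void)
{
	uint32_t l = 0x00000d01;	/* pretend value read via rdmsr */

	l |= EFER_NX;			/* enable no-execute support */
	printf("NX on : %#x\n", l);

	l &= ~EFER_NX;			/* what the patched set_nx() does
					 * when nx_enabled is 0 */
	printf("NX off: %#x\n", l);
	return 0;
}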
22591 diff -urNp linux-2.6.32.43/arch/x86/mm/tlb.c linux-2.6.32.43/arch/x86/mm/tlb.c
22592 --- linux-2.6.32.43/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22593 +++ linux-2.6.32.43/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22594 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
22595 BUG();
22596 cpumask_clear_cpu(cpu,
22597 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22598 +
22599 +#ifndef CONFIG_PAX_PER_CPU_PGD
22600 load_cr3(swapper_pg_dir);
22601 +#endif
22602 +
22603 }
22604 EXPORT_SYMBOL_GPL(leave_mm);
22605
22606 diff -urNp linux-2.6.32.43/arch/x86/oprofile/backtrace.c linux-2.6.32.43/arch/x86/oprofile/backtrace.c
22607 --- linux-2.6.32.43/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22608 +++ linux-2.6.32.43/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22609 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22610 struct frame_head bufhead[2];
22611
22612 /* Also check accessibility of one struct frame_head beyond */
22613 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22614 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22615 return NULL;
22616 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22617 return NULL;
22618 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22619 {
22620 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22621
22622 - if (!user_mode_vm(regs)) {
22623 + if (!user_mode(regs)) {
22624 unsigned long stack = kernel_stack_pointer(regs);
22625 if (depth)
22626 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22627 diff -urNp linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c
22628 --- linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22629 +++ linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22630 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22631 #endif
22632 }
22633
22634 -static int inline addr_increment(void)
22635 +static inline int addr_increment(void)
22636 {
22637 #ifdef CONFIG_SMP
22638 return smp_num_siblings == 2 ? 2 : 1;
22639 diff -urNp linux-2.6.32.43/arch/x86/pci/common.c linux-2.6.32.43/arch/x86/pci/common.c
22640 --- linux-2.6.32.43/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22641 +++ linux-2.6.32.43/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22642 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
22643 int pcibios_last_bus = -1;
22644 unsigned long pirq_table_addr;
22645 struct pci_bus *pci_root_bus;
22646 -struct pci_raw_ops *raw_pci_ops;
22647 -struct pci_raw_ops *raw_pci_ext_ops;
22648 +const struct pci_raw_ops *raw_pci_ops;
22649 +const struct pci_raw_ops *raw_pci_ext_ops;
22650
22651 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22652 int reg, int len, u32 *val)
22653 diff -urNp linux-2.6.32.43/arch/x86/pci/direct.c linux-2.6.32.43/arch/x86/pci/direct.c
22654 --- linux-2.6.32.43/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22655 +++ linux-2.6.32.43/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22656 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22657
22658 #undef PCI_CONF1_ADDRESS
22659
22660 -struct pci_raw_ops pci_direct_conf1 = {
22661 +const struct pci_raw_ops pci_direct_conf1 = {
22662 .read = pci_conf1_read,
22663 .write = pci_conf1_write,
22664 };
22665 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22666
22667 #undef PCI_CONF2_ADDRESS
22668
22669 -struct pci_raw_ops pci_direct_conf2 = {
22670 +const struct pci_raw_ops pci_direct_conf2 = {
22671 .read = pci_conf2_read,
22672 .write = pci_conf2_write,
22673 };
22674 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22675 * This should be close to trivial, but it isn't, because there are buggy
22676 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22677 */
22678 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22679 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22680 {
22681 u32 x = 0;
22682 int year, devfn;
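
The common.c and direct.c hunks above (and the mmconfig, numaq, olpc and pcbios ones that follow) constify the pci_raw_ops tables and the pointers that refer to them, so the function-pointer tables end up in read-only data. A minimal sketch of that pattern; raw_ops, conf1_ops and the two handlers are made-up names, not kernel symbols.

/* A const ops table of function pointers lands in .rodata rather than
 * writable memory, which is the point of the constification above. */
#include <stdio.h>

struct raw_ops {
	int (*read)(unsigned reg, unsigned *val);
	int (*write)(unsigned reg, unsigned val);
};

static int demo_read(unsigned reg, unsigned *val)
{
	*val = reg ^ 0xff;	/* dummy payload */
	return 0;
}

static int demo_write(unsigned reg, unsigned val)
{
	(void)reg;
	(void)val;
	return 0;
}

static const struct raw_ops conf1_ops = {	/* const => read-only data */
	.read  = demo_read,
	.write = demo_write,
};

int main(void)
{
	unsigned v;

	conf1_ops.read(0x3c, &v);
	printf("read -> %#x\n", v);
	return 0;
}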
22683 diff -urNp linux-2.6.32.43/arch/x86/pci/mmconfig_32.c linux-2.6.32.43/arch/x86/pci/mmconfig_32.c
22684 --- linux-2.6.32.43/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22685 +++ linux-2.6.32.43/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22686 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22687 return 0;
22688 }
22689
22690 -static struct pci_raw_ops pci_mmcfg = {
22691 +static const struct pci_raw_ops pci_mmcfg = {
22692 .read = pci_mmcfg_read,
22693 .write = pci_mmcfg_write,
22694 };
22695 diff -urNp linux-2.6.32.43/arch/x86/pci/mmconfig_64.c linux-2.6.32.43/arch/x86/pci/mmconfig_64.c
22696 --- linux-2.6.32.43/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22697 +++ linux-2.6.32.43/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22698 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22699 return 0;
22700 }
22701
22702 -static struct pci_raw_ops pci_mmcfg = {
22703 +static const struct pci_raw_ops pci_mmcfg = {
22704 .read = pci_mmcfg_read,
22705 .write = pci_mmcfg_write,
22706 };
22707 diff -urNp linux-2.6.32.43/arch/x86/pci/numaq_32.c linux-2.6.32.43/arch/x86/pci/numaq_32.c
22708 --- linux-2.6.32.43/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22709 +++ linux-2.6.32.43/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22710 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22711
22712 #undef PCI_CONF1_MQ_ADDRESS
22713
22714 -static struct pci_raw_ops pci_direct_conf1_mq = {
22715 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22716 .read = pci_conf1_mq_read,
22717 .write = pci_conf1_mq_write
22718 };
22719 diff -urNp linux-2.6.32.43/arch/x86/pci/olpc.c linux-2.6.32.43/arch/x86/pci/olpc.c
22720 --- linux-2.6.32.43/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22721 +++ linux-2.6.32.43/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22722 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22723 return 0;
22724 }
22725
22726 -static struct pci_raw_ops pci_olpc_conf = {
22727 +static const struct pci_raw_ops pci_olpc_conf = {
22728 .read = pci_olpc_read,
22729 .write = pci_olpc_write,
22730 };
22731 diff -urNp linux-2.6.32.43/arch/x86/pci/pcbios.c linux-2.6.32.43/arch/x86/pci/pcbios.c
22732 --- linux-2.6.32.43/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22733 +++ linux-2.6.32.43/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22734 @@ -56,50 +56,93 @@ union bios32 {
22735 static struct {
22736 unsigned long address;
22737 unsigned short segment;
22738 -} bios32_indirect = { 0, __KERNEL_CS };
22739 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22740
22741 /*
22742 * Returns the entry point for the given service, NULL on error
22743 */
22744
22745 -static unsigned long bios32_service(unsigned long service)
22746 +static unsigned long __devinit bios32_service(unsigned long service)
22747 {
22748 unsigned char return_code; /* %al */
22749 unsigned long address; /* %ebx */
22750 unsigned long length; /* %ecx */
22751 unsigned long entry; /* %edx */
22752 unsigned long flags;
22753 + struct desc_struct d, *gdt;
22754
22755 local_irq_save(flags);
22756 - __asm__("lcall *(%%edi); cld"
22757 +
22758 + gdt = get_cpu_gdt_table(smp_processor_id());
22759 +
22760 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22761 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22762 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22763 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22764 +
22765 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22766 : "=a" (return_code),
22767 "=b" (address),
22768 "=c" (length),
22769 "=d" (entry)
22770 : "0" (service),
22771 "1" (0),
22772 - "D" (&bios32_indirect));
22773 + "D" (&bios32_indirect),
22774 + "r"(__PCIBIOS_DS)
22775 + : "memory");
22776 +
22777 + pax_open_kernel();
22778 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22779 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22780 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22781 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22782 + pax_close_kernel();
22783 +
22784 local_irq_restore(flags);
22785
22786 switch (return_code) {
22787 - case 0:
22788 - return address + entry;
22789 - case 0x80: /* Not present */
22790 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22791 - return 0;
22792 - default: /* Shouldn't happen */
22793 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22794 - service, return_code);
22795 + case 0: {
22796 + int cpu;
22797 + unsigned char flags;
22798 +
22799 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22800 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22801 + printk(KERN_WARNING "bios32_service: not valid\n");
22802 return 0;
22803 + }
22804 + address = address + PAGE_OFFSET;
22805 + length += 16UL; /* some BIOSs underreport this... */
22806 + flags = 4;
22807 + if (length >= 64*1024*1024) {
22808 + length >>= PAGE_SHIFT;
22809 + flags |= 8;
22810 + }
22811 +
22812 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22813 + gdt = get_cpu_gdt_table(cpu);
22814 + pack_descriptor(&d, address, length, 0x9b, flags);
22815 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22816 + pack_descriptor(&d, address, length, 0x93, flags);
22817 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22818 + }
22819 + return entry;
22820 + }
22821 + case 0x80: /* Not present */
22822 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22823 + return 0;
22824 + default: /* Shouldn't happen */
22825 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22826 + service, return_code);
22827 + return 0;
22828 }
22829 }
22830
22831 static struct {
22832 unsigned long address;
22833 unsigned short segment;
22834 -} pci_indirect = { 0, __KERNEL_CS };
22835 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22836
22837 -static int pci_bios_present;
22838 +static int pci_bios_present __read_only;
22839
22840 static int __devinit check_pcibios(void)
22841 {
22842 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22843 unsigned long flags, pcibios_entry;
22844
22845 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22846 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22847 + pci_indirect.address = pcibios_entry;
22848
22849 local_irq_save(flags);
22850 - __asm__(
22851 - "lcall *(%%edi); cld\n\t"
22852 + __asm__("movw %w6, %%ds\n\t"
22853 + "lcall *%%ss:(%%edi); cld\n\t"
22854 + "push %%ss\n\t"
22855 + "pop %%ds\n\t"
22856 "jc 1f\n\t"
22857 "xor %%ah, %%ah\n"
22858 "1:"
22859 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22860 "=b" (ebx),
22861 "=c" (ecx)
22862 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22863 - "D" (&pci_indirect)
22864 + "D" (&pci_indirect),
22865 + "r" (__PCIBIOS_DS)
22866 : "memory");
22867 local_irq_restore(flags);
22868
22869 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22870
22871 switch (len) {
22872 case 1:
22873 - __asm__("lcall *(%%esi); cld\n\t"
22874 + __asm__("movw %w6, %%ds\n\t"
22875 + "lcall *%%ss:(%%esi); cld\n\t"
22876 + "push %%ss\n\t"
22877 + "pop %%ds\n\t"
22878 "jc 1f\n\t"
22879 "xor %%ah, %%ah\n"
22880 "1:"
22881 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22882 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22883 "b" (bx),
22884 "D" ((long)reg),
22885 - "S" (&pci_indirect));
22886 + "S" (&pci_indirect),
22887 + "r" (__PCIBIOS_DS));
22888 /*
22889 * Zero-extend the result beyond 8 bits, do not trust the
22890 * BIOS having done it:
22891 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22892 *value &= 0xff;
22893 break;
22894 case 2:
22895 - __asm__("lcall *(%%esi); cld\n\t"
22896 + __asm__("movw %w6, %%ds\n\t"
22897 + "lcall *%%ss:(%%esi); cld\n\t"
22898 + "push %%ss\n\t"
22899 + "pop %%ds\n\t"
22900 "jc 1f\n\t"
22901 "xor %%ah, %%ah\n"
22902 "1:"
22903 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22904 : "1" (PCIBIOS_READ_CONFIG_WORD),
22905 "b" (bx),
22906 "D" ((long)reg),
22907 - "S" (&pci_indirect));
22908 + "S" (&pci_indirect),
22909 + "r" (__PCIBIOS_DS));
22910 /*
22911 * Zero-extend the result beyond 16 bits, do not trust the
22912 * BIOS having done it:
22913 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22914 *value &= 0xffff;
22915 break;
22916 case 4:
22917 - __asm__("lcall *(%%esi); cld\n\t"
22918 + __asm__("movw %w6, %%ds\n\t"
22919 + "lcall *%%ss:(%%esi); cld\n\t"
22920 + "push %%ss\n\t"
22921 + "pop %%ds\n\t"
22922 "jc 1f\n\t"
22923 "xor %%ah, %%ah\n"
22924 "1:"
22925 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22926 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22927 "b" (bx),
22928 "D" ((long)reg),
22929 - "S" (&pci_indirect));
22930 + "S" (&pci_indirect),
22931 + "r" (__PCIBIOS_DS));
22932 break;
22933 }
22934
22935 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22936
22937 switch (len) {
22938 case 1:
22939 - __asm__("lcall *(%%esi); cld\n\t"
22940 + __asm__("movw %w6, %%ds\n\t"
22941 + "lcall *%%ss:(%%esi); cld\n\t"
22942 + "push %%ss\n\t"
22943 + "pop %%ds\n\t"
22944 "jc 1f\n\t"
22945 "xor %%ah, %%ah\n"
22946 "1:"
22947 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22948 "c" (value),
22949 "b" (bx),
22950 "D" ((long)reg),
22951 - "S" (&pci_indirect));
22952 + "S" (&pci_indirect),
22953 + "r" (__PCIBIOS_DS));
22954 break;
22955 case 2:
22956 - __asm__("lcall *(%%esi); cld\n\t"
22957 + __asm__("movw %w6, %%ds\n\t"
22958 + "lcall *%%ss:(%%esi); cld\n\t"
22959 + "push %%ss\n\t"
22960 + "pop %%ds\n\t"
22961 "jc 1f\n\t"
22962 "xor %%ah, %%ah\n"
22963 "1:"
22964 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22965 "c" (value),
22966 "b" (bx),
22967 "D" ((long)reg),
22968 - "S" (&pci_indirect));
22969 + "S" (&pci_indirect),
22970 + "r" (__PCIBIOS_DS));
22971 break;
22972 case 4:
22973 - __asm__("lcall *(%%esi); cld\n\t"
22974 + __asm__("movw %w6, %%ds\n\t"
22975 + "lcall *%%ss:(%%esi); cld\n\t"
22976 + "push %%ss\n\t"
22977 + "pop %%ds\n\t"
22978 "jc 1f\n\t"
22979 "xor %%ah, %%ah\n"
22980 "1:"
22981 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22982 "c" (value),
22983 "b" (bx),
22984 "D" ((long)reg),
22985 - "S" (&pci_indirect));
22986 + "S" (&pci_indirect),
22987 + "r" (__PCIBIOS_DS));
22988 break;
22989 }
22990
22991 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22992 * Function table for BIOS32 access
22993 */
22994
22995 -static struct pci_raw_ops pci_bios_access = {
22996 +static const struct pci_raw_ops pci_bios_access = {
22997 .read = pci_bios_read,
22998 .write = pci_bios_write
22999 };
23000 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23001 * Try to find PCI BIOS.
23002 */
23003
23004 -static struct pci_raw_ops * __devinit pci_find_bios(void)
23005 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
23006 {
23007 union bios32 *check;
23008 unsigned char sum;
23009 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23010
23011 DBG("PCI: Fetching IRQ routing table... ");
23012 __asm__("push %%es\n\t"
23013 + "movw %w8, %%ds\n\t"
23014 "push %%ds\n\t"
23015 "pop %%es\n\t"
23016 - "lcall *(%%esi); cld\n\t"
23017 + "lcall *%%ss:(%%esi); cld\n\t"
23018 "pop %%es\n\t"
23019 + "push %%ss\n\t"
23020 + "pop %%ds\n"
23021 "jc 1f\n\t"
23022 "xor %%ah, %%ah\n"
23023 "1:"
23024 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23025 "1" (0),
23026 "D" ((long) &opt),
23027 "S" (&pci_indirect),
23028 - "m" (opt)
23029 + "m" (opt),
23030 + "r" (__PCIBIOS_DS)
23031 : "memory");
23032 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23033 if (ret & 0xff00)
23034 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23035 {
23036 int ret;
23037
23038 - __asm__("lcall *(%%esi); cld\n\t"
23039 + __asm__("movw %w5, %%ds\n\t"
23040 + "lcall *%%ss:(%%esi); cld\n\t"
23041 + "push %%ss\n\t"
23042 + "pop %%ds\n"
23043 "jc 1f\n\t"
23044 "xor %%ah, %%ah\n"
23045 "1:"
23046 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23047 : "0" (PCIBIOS_SET_PCI_HW_INT),
23048 "b" ((dev->bus->number << 8) | dev->devfn),
23049 "c" ((irq << 8) | (pin + 10)),
23050 - "S" (&pci_indirect));
23051 + "S" (&pci_indirect),
23052 + "r" (__PCIBIOS_DS));
23053 return !(ret & 0xff00);
23054 }
23055 EXPORT_SYMBOL(pcibios_set_irq_routing);
23056 diff -urNp linux-2.6.32.43/arch/x86/power/cpu.c linux-2.6.32.43/arch/x86/power/cpu.c
23057 --- linux-2.6.32.43/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23058 +++ linux-2.6.32.43/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23059 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
23060 static void fix_processor_context(void)
23061 {
23062 int cpu = smp_processor_id();
23063 - struct tss_struct *t = &per_cpu(init_tss, cpu);
23064 + struct tss_struct *t = init_tss + cpu;
23065
23066 set_tss_desc(cpu, t); /*
23067 * This just modifies memory; should not be
23068 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
23069 */
23070
23071 #ifdef CONFIG_X86_64
23072 + pax_open_kernel();
23073 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23074 + pax_close_kernel();
23075
23076 syscall_init(); /* This sets MSR_*STAR and related */
23077 #endif
23078 diff -urNp linux-2.6.32.43/arch/x86/vdso/Makefile linux-2.6.32.43/arch/x86/vdso/Makefile
23079 --- linux-2.6.32.43/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23080 +++ linux-2.6.32.43/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23081 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23082 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23083 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23084
23085 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23086 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23087 GCOV_PROFILE := n
23088
23089 #
23090 diff -urNp linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c
23091 --- linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23092 +++ linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23093 @@ -22,24 +22,48 @@
23094 #include <asm/hpet.h>
23095 #include <asm/unistd.h>
23096 #include <asm/io.h>
23097 +#include <asm/fixmap.h>
23098 #include "vextern.h"
23099
23100 #define gtod vdso_vsyscall_gtod_data
23101
23102 +notrace noinline long __vdso_fallback_time(long *t)
23103 +{
23104 + long secs;
23105 + asm volatile("syscall"
23106 + : "=a" (secs)
23107 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23108 + return secs;
23109 +}
23110 +
23111 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23112 {
23113 long ret;
23114 asm("syscall" : "=a" (ret) :
23115 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23116 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23117 return ret;
23118 }
23119
23120 +notrace static inline cycle_t __vdso_vread_hpet(void)
23121 +{
23122 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23123 +}
23124 +
23125 +notrace static inline cycle_t __vdso_vread_tsc(void)
23126 +{
23127 + cycle_t ret = (cycle_t)vget_cycles();
23128 +
23129 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23130 +}
23131 +
23132 notrace static inline long vgetns(void)
23133 {
23134 long v;
23135 - cycles_t (*vread)(void);
23136 - vread = gtod->clock.vread;
23137 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23138 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23139 + v = __vdso_vread_tsc();
23140 + else
23141 + v = __vdso_vread_hpet();
23142 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23143 return (v * gtod->clock.mult) >> gtod->clock.shift;
23144 }
23145
23146 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23147
23148 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23149 {
23150 - if (likely(gtod->sysctl_enabled))
23151 + if (likely(gtod->sysctl_enabled &&
23152 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23153 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23154 switch (clock) {
23155 case CLOCK_REALTIME:
23156 if (likely(gtod->clock.vread))
23157 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23158 int clock_gettime(clockid_t, struct timespec *)
23159 __attribute__((weak, alias("__vdso_clock_gettime")));
23160
23161 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23162 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23163 {
23164 long ret;
23165 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23166 + asm("syscall" : "=a" (ret) :
23167 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23168 + return ret;
23169 +}
23170 +
23171 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23172 +{
23173 + if (likely(gtod->sysctl_enabled &&
23174 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23175 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23176 + {
23177 if (likely(tv != NULL)) {
23178 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23179 offsetof(struct timespec, tv_nsec) ||
23180 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23181 }
23182 return 0;
23183 }
23184 - asm("syscall" : "=a" (ret) :
23185 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23186 - return ret;
23187 + return __vdso_fallback_gettimeofday(tv, tz);
23188 }
23189 int gettimeofday(struct timeval *, struct timezone *)
23190 __attribute__((weak, alias("__vdso_gettimeofday")));
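
Instead of calling through the gtod->clock.vread function pointer, the patched vDSO above open-codes the TSC and HPET reads and selects one by comparing gtod->clock.name byte-by-byte against "tsc" and "hpet". A user-space sketch of that name test, showing it is equivalent to a strcmp() done without a library call:

#include <stdio.h>
#include <string.h>

static int name_is_tsc(const char *name)
{
	return name[0] == 't' && name[1] == 's' && name[2] == 'c' && !name[3];
}

static int name_is_hpet(const char *name)
{
	return name[0] == 'h' && name[1] == 'p' && name[2] == 'e' &&
	       name[3] == 't' && !name[4];
}

int main(void)
{
	const char *clk = "tsc";	/* pretend clocksource name */

	printf("tsc?  %d (strcmp agrees: %d)\n",
	       name_is_tsc(clk), strcmp(clk, "tsc") == 0);
	printf("hpet? %d\n", name_is_hpet(clk));
	return 0;
}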
23191 diff -urNp linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c
23192 --- linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23193 +++ linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23194 @@ -25,6 +25,7 @@
23195 #include <asm/tlbflush.h>
23196 #include <asm/vdso.h>
23197 #include <asm/proto.h>
23198 +#include <asm/mman.h>
23199
23200 enum {
23201 VDSO_DISABLED = 0,
23202 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23203 void enable_sep_cpu(void)
23204 {
23205 int cpu = get_cpu();
23206 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
23207 + struct tss_struct *tss = init_tss + cpu;
23208
23209 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23210 put_cpu();
23211 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23212 gate_vma.vm_start = FIXADDR_USER_START;
23213 gate_vma.vm_end = FIXADDR_USER_END;
23214 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23215 - gate_vma.vm_page_prot = __P101;
23216 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23217 /*
23218 * Make sure the vDSO gets into every core dump.
23219 * Dumping its contents makes post-mortem fully interpretable later
23220 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23221 if (compat)
23222 addr = VDSO_HIGH_BASE;
23223 else {
23224 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23225 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23226 if (IS_ERR_VALUE(addr)) {
23227 ret = addr;
23228 goto up_fail;
23229 }
23230 }
23231
23232 - current->mm->context.vdso = (void *)addr;
23233 + current->mm->context.vdso = addr;
23234
23235 if (compat_uses_vma || !compat) {
23236 /*
23237 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23238 }
23239
23240 current_thread_info()->sysenter_return =
23241 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23242 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23243
23244 up_fail:
23245 if (ret)
23246 - current->mm->context.vdso = NULL;
23247 + current->mm->context.vdso = 0;
23248
23249 up_write(&mm->mmap_sem);
23250
23251 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23252
23253 const char *arch_vma_name(struct vm_area_struct *vma)
23254 {
23255 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23256 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23257 return "[vdso]";
23258 +
23259 +#ifdef CONFIG_PAX_SEGMEXEC
23260 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23261 + return "[vdso]";
23262 +#endif
23263 +
23264 return NULL;
23265 }
23266
23267 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23268 struct mm_struct *mm = tsk->mm;
23269
23270 /* Check to see if this task was created in compat vdso mode */
23271 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23272 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23273 return &gate_vma;
23274 return NULL;
23275 }
23276 diff -urNp linux-2.6.32.43/arch/x86/vdso/vdso.lds.S linux-2.6.32.43/arch/x86/vdso/vdso.lds.S
23277 --- linux-2.6.32.43/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23278 +++ linux-2.6.32.43/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23279 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23280 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23281 #include "vextern.h"
23282 #undef VEXTERN
23283 +
23284 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23285 +VEXTERN(fallback_gettimeofday)
23286 +VEXTERN(fallback_time)
23287 +VEXTERN(getcpu)
23288 +#undef VEXTERN
23289 diff -urNp linux-2.6.32.43/arch/x86/vdso/vextern.h linux-2.6.32.43/arch/x86/vdso/vextern.h
23290 --- linux-2.6.32.43/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23291 +++ linux-2.6.32.43/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23292 @@ -11,6 +11,5 @@
23293 put into vextern.h and be referenced as a pointer with vdso prefix.
23294 The main kernel later fills in the values. */
23295
23296 -VEXTERN(jiffies)
23297 VEXTERN(vgetcpu_mode)
23298 VEXTERN(vsyscall_gtod_data)
23299 diff -urNp linux-2.6.32.43/arch/x86/vdso/vma.c linux-2.6.32.43/arch/x86/vdso/vma.c
23300 --- linux-2.6.32.43/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23301 +++ linux-2.6.32.43/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23302 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23303 if (!vbase)
23304 goto oom;
23305
23306 - if (memcmp(vbase, "\177ELF", 4)) {
23307 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
23308 printk("VDSO: I'm broken; not ELF\n");
23309 vdso_enabled = 0;
23310 }
23311 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23312 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23313 #include "vextern.h"
23314 #undef VEXTERN
23315 + vunmap(vbase);
23316 return 0;
23317
23318 oom:
23319 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23320 goto up_fail;
23321 }
23322
23323 - current->mm->context.vdso = (void *)addr;
23324 + current->mm->context.vdso = addr;
23325
23326 ret = install_special_mapping(mm, addr, vdso_size,
23327 VM_READ|VM_EXEC|
23328 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23329 VM_ALWAYSDUMP,
23330 vdso_pages);
23331 if (ret) {
23332 - current->mm->context.vdso = NULL;
23333 + current->mm->context.vdso = 0;
23334 goto up_fail;
23335 }
23336
23337 @@ -132,10 +133,3 @@ up_fail:
23338 up_write(&mm->mmap_sem);
23339 return ret;
23340 }
23341 -
23342 -static __init int vdso_setup(char *s)
23343 -{
23344 - vdso_enabled = simple_strtoul(s, NULL, 0);
23345 - return 0;
23346 -}
23347 -__setup("vdso=", vdso_setup);
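
The vma.c hunk above switches the ELF-magic check to the standard ELFMAG/SELFMAG constants, vunmap()s the temporary mapping once the symbols are patched in, and drops the vdso= setup handler. The magic check is equivalent to comparing against the literal "\177ELF"; a runnable user-space sketch using the same <elf.h> constants:

#include <stdio.h>
#include <string.h>
#include <elf.h>

int main(void)
{
	unsigned char hdr[SELFMAG] = { 0x7f, 'E', 'L', 'F' };

	/* ELFMAG is "\177ELF" and SELFMAG is 4, so this matches the
	 * patched memcmp(vbase, ELFMAG, SELFMAG) exactly. */
	if (memcmp(hdr, ELFMAG, SELFMAG) == 0)
		printf("looks like an ELF image\n");
	else
		printf("not ELF\n");
	return 0;
}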
23348 diff -urNp linux-2.6.32.43/arch/x86/xen/enlighten.c linux-2.6.32.43/arch/x86/xen/enlighten.c
23349 --- linux-2.6.32.43/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23350 +++ linux-2.6.32.43/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23351 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23352
23353 struct shared_info xen_dummy_shared_info;
23354
23355 -void *xen_initial_gdt;
23356 -
23357 /*
23358 * Point at some empty memory to start with. We map the real shared_info
23359 * page as soon as fixmap is up and running.
23360 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23361
23362 preempt_disable();
23363
23364 - start = __get_cpu_var(idt_desc).address;
23365 + start = (unsigned long)__get_cpu_var(idt_desc).address;
23366 end = start + __get_cpu_var(idt_desc).size + 1;
23367
23368 xen_mc_flush();
23369 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23370 #endif
23371 };
23372
23373 -static void xen_reboot(int reason)
23374 +static __noreturn void xen_reboot(int reason)
23375 {
23376 struct sched_shutdown r = { .reason = reason };
23377
23378 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23379 BUG();
23380 }
23381
23382 -static void xen_restart(char *msg)
23383 +static __noreturn void xen_restart(char *msg)
23384 {
23385 xen_reboot(SHUTDOWN_reboot);
23386 }
23387
23388 -static void xen_emergency_restart(void)
23389 +static __noreturn void xen_emergency_restart(void)
23390 {
23391 xen_reboot(SHUTDOWN_reboot);
23392 }
23393
23394 -static void xen_machine_halt(void)
23395 +static __noreturn void xen_machine_halt(void)
23396 {
23397 xen_reboot(SHUTDOWN_poweroff);
23398 }
23399 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23400 */
23401 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23402
23403 -#ifdef CONFIG_X86_64
23404 /* Work out if we support NX */
23405 - check_efer();
23406 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23407 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23408 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23409 + unsigned l, h;
23410 +
23411 +#ifdef CONFIG_X86_PAE
23412 + nx_enabled = 1;
23413 +#endif
23414 + __supported_pte_mask |= _PAGE_NX;
23415 + rdmsr(MSR_EFER, l, h);
23416 + l |= EFER_NX;
23417 + wrmsr(MSR_EFER, l, h);
23418 + }
23419 #endif
23420
23421 xen_setup_features();
23422 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23423
23424 machine_ops = xen_machine_ops;
23425
23426 - /*
23427 - * The only reliable way to retain the initial address of the
23428 - * percpu gdt_page is to remember it here, so we can go and
23429 - * mark it RW later, when the initial percpu area is freed.
23430 - */
23431 - xen_initial_gdt = &per_cpu(gdt_page, 0);
23432 -
23433 xen_smp_init();
23434
23435 pgd = (pgd_t *)xen_start_info->pt_base;
23436 diff -urNp linux-2.6.32.43/arch/x86/xen/mmu.c linux-2.6.32.43/arch/x86/xen/mmu.c
23437 --- linux-2.6.32.43/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23438 +++ linux-2.6.32.43/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23439 @@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23440 convert_pfn_mfn(init_level4_pgt);
23441 convert_pfn_mfn(level3_ident_pgt);
23442 convert_pfn_mfn(level3_kernel_pgt);
23443 + convert_pfn_mfn(level3_vmalloc_pgt);
23444 + convert_pfn_mfn(level3_vmemmap_pgt);
23445
23446 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23447 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23448 @@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23449 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23450 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23451 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23452 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23453 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23454 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23455 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23456 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23457 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23458
23459 diff -urNp linux-2.6.32.43/arch/x86/xen/smp.c linux-2.6.32.43/arch/x86/xen/smp.c
23460 --- linux-2.6.32.43/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23461 +++ linux-2.6.32.43/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23462 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23463 {
23464 BUG_ON(smp_processor_id() != 0);
23465 native_smp_prepare_boot_cpu();
23466 -
23467 - /* We've switched to the "real" per-cpu gdt, so make sure the
23468 - old memory can be recycled */
23469 - make_lowmem_page_readwrite(xen_initial_gdt);
23470 -
23471 xen_setup_vcpu_info_placement();
23472 }
23473
23474 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23475 gdt = get_cpu_gdt_table(cpu);
23476
23477 ctxt->flags = VGCF_IN_KERNEL;
23478 - ctxt->user_regs.ds = __USER_DS;
23479 - ctxt->user_regs.es = __USER_DS;
23480 + ctxt->user_regs.ds = __KERNEL_DS;
23481 + ctxt->user_regs.es = __KERNEL_DS;
23482 ctxt->user_regs.ss = __KERNEL_DS;
23483 #ifdef CONFIG_X86_32
23484 ctxt->user_regs.fs = __KERNEL_PERCPU;
23485 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23486 + savesegment(gs, ctxt->user_regs.gs);
23487 #else
23488 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23489 #endif
23490 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23491 int rc;
23492
23493 per_cpu(current_task, cpu) = idle;
23494 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
23495 #ifdef CONFIG_X86_32
23496 irq_ctx_init(cpu);
23497 #else
23498 clear_tsk_thread_flag(idle, TIF_FORK);
23499 - per_cpu(kernel_stack, cpu) =
23500 - (unsigned long)task_stack_page(idle) -
23501 - KERNEL_STACK_OFFSET + THREAD_SIZE;
23502 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23503 #endif
23504 xen_setup_runstate_info(cpu);
23505 xen_setup_timer(cpu);
23506 diff -urNp linux-2.6.32.43/arch/x86/xen/xen-asm_32.S linux-2.6.32.43/arch/x86/xen/xen-asm_32.S
23507 --- linux-2.6.32.43/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23508 +++ linux-2.6.32.43/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23509 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
23510 ESP_OFFSET=4 # bytes pushed onto stack
23511
23512 /*
23513 - * Store vcpu_info pointer for easy access. Do it this way to
23514 - * avoid having to reload %fs
23515 + * Store vcpu_info pointer for easy access.
23516 */
23517 #ifdef CONFIG_SMP
23518 - GET_THREAD_INFO(%eax)
23519 - movl TI_cpu(%eax), %eax
23520 - movl __per_cpu_offset(,%eax,4), %eax
23521 - mov per_cpu__xen_vcpu(%eax), %eax
23522 + push %fs
23523 + mov $(__KERNEL_PERCPU), %eax
23524 + mov %eax, %fs
23525 + mov PER_CPU_VAR(xen_vcpu), %eax
23526 + pop %fs
23527 #else
23528 movl per_cpu__xen_vcpu, %eax
23529 #endif
23530 diff -urNp linux-2.6.32.43/arch/x86/xen/xen-head.S linux-2.6.32.43/arch/x86/xen/xen-head.S
23531 --- linux-2.6.32.43/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23532 +++ linux-2.6.32.43/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23533 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
23534 #ifdef CONFIG_X86_32
23535 mov %esi,xen_start_info
23536 mov $init_thread_union+THREAD_SIZE,%esp
23537 +#ifdef CONFIG_SMP
23538 + movl $cpu_gdt_table,%edi
23539 + movl $__per_cpu_load,%eax
23540 + movw %ax,__KERNEL_PERCPU + 2(%edi)
23541 + rorl $16,%eax
23542 + movb %al,__KERNEL_PERCPU + 4(%edi)
23543 + movb %ah,__KERNEL_PERCPU + 7(%edi)
23544 + movl $__per_cpu_end - 1,%eax
23545 + subl $__per_cpu_start,%eax
23546 + movw %ax,__KERNEL_PERCPU + 0(%edi)
23547 +#endif
23548 #else
23549 mov %rsi,xen_start_info
23550 mov $init_thread_union+THREAD_SIZE,%rsp
23551 diff -urNp linux-2.6.32.43/arch/x86/xen/xen-ops.h linux-2.6.32.43/arch/x86/xen/xen-ops.h
23552 --- linux-2.6.32.43/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23553 +++ linux-2.6.32.43/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23554 @@ -10,8 +10,6 @@
23555 extern const char xen_hypervisor_callback[];
23556 extern const char xen_failsafe_callback[];
23557
23558 -extern void *xen_initial_gdt;
23559 -
23560 struct trap_info;
23561 void xen_copy_trap_info(struct trap_info *traps);
23562
23563 diff -urNp linux-2.6.32.43/block/blk-integrity.c linux-2.6.32.43/block/blk-integrity.c
23564 --- linux-2.6.32.43/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23565 +++ linux-2.6.32.43/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23566 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23567 NULL,
23568 };
23569
23570 -static struct sysfs_ops integrity_ops = {
23571 +static const struct sysfs_ops integrity_ops = {
23572 .show = &integrity_attr_show,
23573 .store = &integrity_attr_store,
23574 };
23575 diff -urNp linux-2.6.32.43/block/blk-iopoll.c linux-2.6.32.43/block/blk-iopoll.c
23576 --- linux-2.6.32.43/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23577 +++ linux-2.6.32.43/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23578 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23579 }
23580 EXPORT_SYMBOL(blk_iopoll_complete);
23581
23582 -static void blk_iopoll_softirq(struct softirq_action *h)
23583 +static void blk_iopoll_softirq(void)
23584 {
23585 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23586 int rearm = 0, budget = blk_iopoll_budget;
23587 diff -urNp linux-2.6.32.43/block/blk-map.c linux-2.6.32.43/block/blk-map.c
23588 --- linux-2.6.32.43/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23589 +++ linux-2.6.32.43/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23590 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23591 * direct dma. else, set up kernel bounce buffers
23592 */
23593 uaddr = (unsigned long) ubuf;
23594 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
23595 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23596 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23597 else
23598 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23599 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23600 for (i = 0; i < iov_count; i++) {
23601 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23602
23603 + if (!iov[i].iov_len)
23604 + return -EINVAL;
23605 +
23606 if (uaddr & queue_dma_alignment(q)) {
23607 unaligned = 1;
23608 break;
23609 }
23610 - if (!iov[i].iov_len)
23611 - return -EINVAL;
23612 }
23613
23614 if (unaligned || (q->dma_pad_mask & len) || map_data)
23615 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23616 if (!len || !kbuf)
23617 return -EINVAL;
23618
23619 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23620 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23621 if (do_copy)
23622 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23623 else
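
The blk-map.c hunk above moves the zero-length iovec test ahead of the alignment test in blk_rq_map_user_iov(), so a segment that is both empty and unaligned draws -EINVAL instead of merely being flagged unaligned. A hedged user-space sketch of the reordered loop; struct seg and validate() are made-up stand-ins for struct iovec and the kernel loop.

#include <stdio.h>
#include <stddef.h>

struct seg { unsigned long base; size_t len; };

static int validate(const struct seg *v, int n, unsigned long align_mask)
{
	int unaligned = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!v[i].len)
			return -1;		/* -EINVAL in the kernel */
		if (v[i].base & align_mask) {
			unaligned = 1;		/* fall back to bounce copy */
			break;
		}
	}
	return unaligned;
}

int main(void)
{
	struct seg v[] = { { 0x1003, 0 } };	/* unaligned and empty */

	/* With the new ordering this prints -1; checking alignment first
	 * would only have reported it as unaligned. */
	printf("result = %d\n", validate(v, 1, 511));
	return 0;
}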
23624 diff -urNp linux-2.6.32.43/block/blk-softirq.c linux-2.6.32.43/block/blk-softirq.c
23625 --- linux-2.6.32.43/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23626 +++ linux-2.6.32.43/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23627 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23628 * Softirq action handler - move entries to local list and loop over them
23629 * while passing them to the queue registered handler.
23630 */
23631 -static void blk_done_softirq(struct softirq_action *h)
23632 +static void blk_done_softirq(void)
23633 {
23634 struct list_head *cpu_list, local_list;
23635
23636 diff -urNp linux-2.6.32.43/block/blk-sysfs.c linux-2.6.32.43/block/blk-sysfs.c
23637 --- linux-2.6.32.43/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23638 +++ linux-2.6.32.43/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23639 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23640 kmem_cache_free(blk_requestq_cachep, q);
23641 }
23642
23643 -static struct sysfs_ops queue_sysfs_ops = {
23644 +static const struct sysfs_ops queue_sysfs_ops = {
23645 .show = queue_attr_show,
23646 .store = queue_attr_store,
23647 };
23648 diff -urNp linux-2.6.32.43/block/bsg.c linux-2.6.32.43/block/bsg.c
23649 --- linux-2.6.32.43/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23650 +++ linux-2.6.32.43/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23651 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23652 struct sg_io_v4 *hdr, struct bsg_device *bd,
23653 fmode_t has_write_perm)
23654 {
23655 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23656 + unsigned char *cmdptr;
23657 +
23658 if (hdr->request_len > BLK_MAX_CDB) {
23659 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23660 if (!rq->cmd)
23661 return -ENOMEM;
23662 - }
23663 + cmdptr = rq->cmd;
23664 + } else
23665 + cmdptr = tmpcmd;
23666
23667 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23668 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23669 hdr->request_len))
23670 return -EFAULT;
23671
23672 + if (cmdptr != rq->cmd)
23673 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23674 +
23675 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23676 if (blk_verify_command(rq->cmd, has_write_perm))
23677 return -EPERM;
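
The bsg.c hunk above stages the user-supplied command in a local tmpcmd buffer and only memcpy()s it into rq->cmd afterwards (the same pattern is applied in scsi_ioctl.c below), so copy_from_user() always targets a plain stack buffer rather than the command field embedded in the request. A hedged user-space sketch of the flow; fake_copy_from_user(), fill_cmd() and CMD_MAX are illustrative stand-ins, not kernel names.

#include <stdio.h>
#include <string.h>

#define CMD_MAX 16

/* returns the number of bytes NOT copied, like copy_from_user() */
static unsigned long fake_copy_from_user(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static int fill_cmd(unsigned char *rq_cmd, const unsigned char *user_cmd,
		    unsigned long len)
{
	unsigned char tmpcmd[CMD_MAX];

	if (len > sizeof(tmpcmd))
		return -1;
	if (fake_copy_from_user(tmpcmd, user_cmd, len))
		return -14;			/* -EFAULT */
	memcpy(rq_cmd, tmpcmd, len);		/* commit only after success */
	return 0;
}

int main(void)
{
	unsigned char rq_cmd[CMD_MAX] = { 0 };
	const unsigned char user_cmd[6] = { 0x12, 0, 0, 0, 36, 0 }; /* INQUIRY */

	printf("fill_cmd -> %d, opcode %#x\n",
	       fill_cmd(rq_cmd, user_cmd, sizeof(user_cmd)), rq_cmd[0]);
	return 0;
}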
23678 diff -urNp linux-2.6.32.43/block/elevator.c linux-2.6.32.43/block/elevator.c
23679 --- linux-2.6.32.43/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23680 +++ linux-2.6.32.43/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23681 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23682 return error;
23683 }
23684
23685 -static struct sysfs_ops elv_sysfs_ops = {
23686 +static const struct sysfs_ops elv_sysfs_ops = {
23687 .show = elv_attr_show,
23688 .store = elv_attr_store,
23689 };
23690 diff -urNp linux-2.6.32.43/block/scsi_ioctl.c linux-2.6.32.43/block/scsi_ioctl.c
23691 --- linux-2.6.32.43/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23692 +++ linux-2.6.32.43/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23693 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23694 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23695 struct sg_io_hdr *hdr, fmode_t mode)
23696 {
23697 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23698 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23699 + unsigned char *cmdptr;
23700 +
23701 + if (rq->cmd != rq->__cmd)
23702 + cmdptr = rq->cmd;
23703 + else
23704 + cmdptr = tmpcmd;
23705 +
23706 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23707 return -EFAULT;
23708 +
23709 + if (cmdptr != rq->cmd)
23710 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23711 +
23712 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23713 return -EPERM;
23714
23715 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23716 int err;
23717 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23718 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23719 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23720 + unsigned char *cmdptr;
23721
23722 if (!sic)
23723 return -EINVAL;
23724 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23725 */
23726 err = -EFAULT;
23727 rq->cmd_len = cmdlen;
23728 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23729 +
23730 + if (rq->cmd != rq->__cmd)
23731 + cmdptr = rq->cmd;
23732 + else
23733 + cmdptr = tmpcmd;
23734 +
23735 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23736 goto error;
23737
23738 + if (rq->cmd != cmdptr)
23739 + memcpy(rq->cmd, cmdptr, cmdlen);
23740 +
23741 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23742 goto error;
23743
23744 diff -urNp linux-2.6.32.43/crypto/gf128mul.c linux-2.6.32.43/crypto/gf128mul.c
23745 --- linux-2.6.32.43/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
23746 +++ linux-2.6.32.43/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
23747 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23748 for (i = 0; i < 7; ++i)
23749 gf128mul_x_lle(&p[i + 1], &p[i]);
23750
23751 - memset(r, 0, sizeof(r));
23752 + memset(r, 0, sizeof(*r));
23753 for (i = 0;;) {
23754 u8 ch = ((u8 *)b)[15 - i];
23755
23756 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
23757 for (i = 0; i < 7; ++i)
23758 gf128mul_x_bbe(&p[i + 1], &p[i]);
23759
23760 - memset(r, 0, sizeof(r));
23761 + memset(r, 0, sizeof(*r));
23762 for (i = 0;;) {
23763 u8 ch = ((u8 *)b)[i];
23764
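
The gf128mul.c hunk above fixes a classic sizeof slip: with a pointer parameter, sizeof(r) is the size of the pointer (4 or 8 bytes), not the 16-byte be128 block, so the old memset only cleared part of the result; sizeof(*r) clears the whole block. A runnable sketch of the difference; be128_like is a stand-in struct, not the kernel's be128.

#include <stdio.h>
#include <string.h>

struct be128_like { unsigned long long a, b; };	/* 16 bytes, like be128 */

static void clear_block(struct be128_like *r)
{
	printf("sizeof(r)  = %zu\n", sizeof(r));	/* pointer size */
	printf("sizeof(*r) = %zu\n", sizeof(*r));	/* full 16-byte block */
	memset(r, 0, sizeof(*r));			/* the corrected form */
}

int main(void)
{
	struct be128_like r = { ~0ULL, ~0ULL };

	clear_block(&r);
	printf("r = %llx %llx\n", r.a, r.b);
	return 0;
}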
23765 diff -urNp linux-2.6.32.43/crypto/serpent.c linux-2.6.32.43/crypto/serpent.c
23766 --- linux-2.6.32.43/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23767 +++ linux-2.6.32.43/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23768 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23769 u32 r0,r1,r2,r3,r4;
23770 int i;
23771
23772 + pax_track_stack();
23773 +
23774 /* Copy key, add padding */
23775
23776 for (i = 0; i < keylen; ++i)
23777 diff -urNp linux-2.6.32.43/Documentation/dontdiff linux-2.6.32.43/Documentation/dontdiff
23778 --- linux-2.6.32.43/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23779 +++ linux-2.6.32.43/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23780 @@ -1,13 +1,16 @@
23781 *.a
23782 *.aux
23783 *.bin
23784 +*.cis
23785 *.cpio
23786 *.csp
23787 +*.dbg
23788 *.dsp
23789 *.dvi
23790 *.elf
23791 *.eps
23792 *.fw
23793 +*.gcno
23794 *.gen.S
23795 *.gif
23796 *.grep
23797 @@ -38,8 +41,10 @@
23798 *.tab.h
23799 *.tex
23800 *.ver
23801 +*.vim
23802 *.xml
23803 *_MODULES
23804 +*_reg_safe.h
23805 *_vga16.c
23806 *~
23807 *.9
23808 @@ -49,11 +54,16 @@
23809 53c700_d.h
23810 CVS
23811 ChangeSet
23812 +GPATH
23813 +GRTAGS
23814 +GSYMS
23815 +GTAGS
23816 Image
23817 Kerntypes
23818 Module.markers
23819 Module.symvers
23820 PENDING
23821 +PERF*
23822 SCCS
23823 System.map*
23824 TAGS
23825 @@ -76,7 +86,11 @@ btfixupprep
23826 build
23827 bvmlinux
23828 bzImage*
23829 +capability_names.h
23830 +capflags.c
23831 classlist.h*
23832 +clut_vga16.c
23833 +common-cmds.h
23834 comp*.log
23835 compile.h*
23836 conf
23837 @@ -103,13 +117,14 @@ gen_crc32table
23838 gen_init_cpio
23839 genksyms
23840 *_gray256.c
23841 +hash
23842 ihex2fw
23843 ikconfig.h*
23844 initramfs_data.cpio
23845 +initramfs_data.cpio.bz2
23846 initramfs_data.cpio.gz
23847 initramfs_list
23848 kallsyms
23849 -kconfig
23850 keywords.c
23851 ksym.c*
23852 ksym.h*
23853 @@ -133,7 +148,9 @@ mkboot
23854 mkbugboot
23855 mkcpustr
23856 mkdep
23857 +mkpiggy
23858 mkprep
23859 +mkregtable
23860 mktables
23861 mktree
23862 modpost
23863 @@ -149,6 +166,7 @@ patches*
23864 pca200e.bin
23865 pca200e_ecd.bin2
23866 piggy.gz
23867 +piggy.S
23868 piggyback
23869 pnmtologo
23870 ppc_defs.h*
23871 @@ -157,12 +175,15 @@ qconf
23872 raid6altivec*.c
23873 raid6int*.c
23874 raid6tables.c
23875 +regdb.c
23876 relocs
23877 +rlim_names.h
23878 series
23879 setup
23880 setup.bin
23881 setup.elf
23882 sImage
23883 +slabinfo
23884 sm_tbl*
23885 split-include
23886 syscalltab.h
23887 @@ -186,14 +207,20 @@ version.h*
23888 vmlinux
23889 vmlinux-*
23890 vmlinux.aout
23891 +vmlinux.bin.all
23892 +vmlinux.bin.bz2
23893 vmlinux.lds
23894 +vmlinux.relocs
23895 +voffset.h
23896 vsyscall.lds
23897 vsyscall_32.lds
23898 wanxlfw.inc
23899 uImage
23900 unifdef
23901 +utsrelease.h
23902 wakeup.bin
23903 wakeup.elf
23904 wakeup.lds
23905 zImage*
23906 zconf.hash.c
23907 +zoffset.h
23908 diff -urNp linux-2.6.32.43/Documentation/kernel-parameters.txt linux-2.6.32.43/Documentation/kernel-parameters.txt
23909 --- linux-2.6.32.43/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23910 +++ linux-2.6.32.43/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23911 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23912 the specified number of seconds. This is to be used if
23913 your oopses keep scrolling off the screen.
23914
23915 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23916 + virtualization environments that don't cope well with the
23917 + expand down segment used by UDEREF on X86-32 or the frequent
23918 + page table updates on X86-64.
23919 +
23920 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23921 +
23922 pcbit= [HW,ISDN]
23923
23924 pcd. [PARIDE]
23925 diff -urNp linux-2.6.32.43/drivers/acpi/acpi_pad.c linux-2.6.32.43/drivers/acpi/acpi_pad.c
23926 --- linux-2.6.32.43/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23927 +++ linux-2.6.32.43/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23928 @@ -30,7 +30,7 @@
23929 #include <acpi/acpi_bus.h>
23930 #include <acpi/acpi_drivers.h>
23931
23932 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23933 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23934 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23935 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23936 static DEFINE_MUTEX(isolated_cpus_lock);
23937 diff -urNp linux-2.6.32.43/drivers/acpi/battery.c linux-2.6.32.43/drivers/acpi/battery.c
23938 --- linux-2.6.32.43/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23939 +++ linux-2.6.32.43/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23940 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23941 }
23942
23943 static struct battery_file {
23944 - struct file_operations ops;
23945 + const struct file_operations ops;
23946 mode_t mode;
23947 const char *name;
23948 } acpi_battery_file[] = {
23949 diff -urNp linux-2.6.32.43/drivers/acpi/dock.c linux-2.6.32.43/drivers/acpi/dock.c
23950 --- linux-2.6.32.43/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23951 +++ linux-2.6.32.43/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23952 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23953 struct list_head list;
23954 struct list_head hotplug_list;
23955 acpi_handle handle;
23956 - struct acpi_dock_ops *ops;
23957 + const struct acpi_dock_ops *ops;
23958 void *context;
23959 };
23960
23961 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23962 * the dock driver after _DCK is executed.
23963 */
23964 int
23965 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23966 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23967 void *context)
23968 {
23969 struct dock_dependent_device *dd;
23970 diff -urNp linux-2.6.32.43/drivers/acpi/osl.c linux-2.6.32.43/drivers/acpi/osl.c
23971 --- linux-2.6.32.43/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23972 +++ linux-2.6.32.43/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23973 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23974 void __iomem *virt_addr;
23975
23976 virt_addr = ioremap(phys_addr, width);
23977 + if (!virt_addr)
23978 + return AE_NO_MEMORY;
23979 if (!value)
23980 value = &dummy;
23981
23982 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23983 void __iomem *virt_addr;
23984
23985 virt_addr = ioremap(phys_addr, width);
23986 + if (!virt_addr)
23987 + return AE_NO_MEMORY;
23988
23989 switch (width) {
23990 case 8:
23991 diff -urNp linux-2.6.32.43/drivers/acpi/power_meter.c linux-2.6.32.43/drivers/acpi/power_meter.c
23992 --- linux-2.6.32.43/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23993 +++ linux-2.6.32.43/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23994 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23995 return res;
23996
23997 temp /= 1000;
23998 - if (temp < 0)
23999 - return -EINVAL;
24000
24001 mutex_lock(&resource->lock);
24002 resource->trip[attr->index - 7] = temp;
24003 diff -urNp linux-2.6.32.43/drivers/acpi/proc.c linux-2.6.32.43/drivers/acpi/proc.c
24004 --- linux-2.6.32.43/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24005 +++ linux-2.6.32.43/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24006 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24007 size_t count, loff_t * ppos)
24008 {
24009 struct list_head *node, *next;
24010 - char strbuf[5];
24011 - char str[5] = "";
24012 - unsigned int len = count;
24013 + char strbuf[5] = {0};
24014 struct acpi_device *found_dev = NULL;
24015
24016 - if (len > 4)
24017 - len = 4;
24018 - if (len < 0)
24019 - return -EFAULT;
24020 + if (count > 4)
24021 + count = 4;
24022
24023 - if (copy_from_user(strbuf, buffer, len))
24024 + if (copy_from_user(strbuf, buffer, count))
24025 return -EFAULT;
24026 - strbuf[len] = '\0';
24027 - sscanf(strbuf, "%s", str);
24028 + strbuf[count] = '\0';
24029
24030 mutex_lock(&acpi_device_lock);
24031 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24032 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24033 if (!dev->wakeup.flags.valid)
24034 continue;
24035
24036 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24037 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24038 dev->wakeup.state.enabled =
24039 dev->wakeup.state.enabled ? 0 : 1;
24040 found_dev = dev;
24041 diff -urNp linux-2.6.32.43/drivers/acpi/processor_core.c linux-2.6.32.43/drivers/acpi/processor_core.c
24042 --- linux-2.6.32.43/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24043 +++ linux-2.6.32.43/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24044 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24045 return 0;
24046 }
24047
24048 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24049 + BUG_ON(pr->id >= nr_cpu_ids);
24050
24051 /*
24052 * Buggy BIOS check
24053 diff -urNp linux-2.6.32.43/drivers/acpi/sbshc.c linux-2.6.32.43/drivers/acpi/sbshc.c
24054 --- linux-2.6.32.43/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24055 +++ linux-2.6.32.43/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24056 @@ -17,7 +17,7 @@
24057
24058 #define PREFIX "ACPI: "
24059
24060 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24061 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24062 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24063
24064 struct acpi_smb_hc {
24065 diff -urNp linux-2.6.32.43/drivers/acpi/sleep.c linux-2.6.32.43/drivers/acpi/sleep.c
24066 --- linux-2.6.32.43/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24067 +++ linux-2.6.32.43/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24068 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24069 }
24070 }
24071
24072 -static struct platform_suspend_ops acpi_suspend_ops = {
24073 +static const struct platform_suspend_ops acpi_suspend_ops = {
24074 .valid = acpi_suspend_state_valid,
24075 .begin = acpi_suspend_begin,
24076 .prepare_late = acpi_pm_prepare,
24077 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24078 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24079 * been requested.
24080 */
24081 -static struct platform_suspend_ops acpi_suspend_ops_old = {
24082 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
24083 .valid = acpi_suspend_state_valid,
24084 .begin = acpi_suspend_begin_old,
24085 .prepare_late = acpi_pm_disable_gpes,
24086 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24087 acpi_enable_all_runtime_gpes();
24088 }
24089
24090 -static struct platform_hibernation_ops acpi_hibernation_ops = {
24091 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
24092 .begin = acpi_hibernation_begin,
24093 .end = acpi_pm_end,
24094 .pre_snapshot = acpi_hibernation_pre_snapshot,
24095 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24096 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24097 * been requested.
24098 */
24099 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24100 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24101 .begin = acpi_hibernation_begin_old,
24102 .end = acpi_pm_end,
24103 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24104 diff -urNp linux-2.6.32.43/drivers/acpi/video.c linux-2.6.32.43/drivers/acpi/video.c
24105 --- linux-2.6.32.43/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24106 +++ linux-2.6.32.43/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24107 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24108 vd->brightness->levels[request_level]);
24109 }
24110
24111 -static struct backlight_ops acpi_backlight_ops = {
24112 +static const struct backlight_ops acpi_backlight_ops = {
24113 .get_brightness = acpi_video_get_brightness,
24114 .update_status = acpi_video_set_brightness,
24115 };
24116 diff -urNp linux-2.6.32.43/drivers/ata/ahci.c linux-2.6.32.43/drivers/ata/ahci.c
24117 --- linux-2.6.32.43/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24118 +++ linux-2.6.32.43/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24119 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24120 .sdev_attrs = ahci_sdev_attrs,
24121 };
24122
24123 -static struct ata_port_operations ahci_ops = {
24124 +static const struct ata_port_operations ahci_ops = {
24125 .inherits = &sata_pmp_port_ops,
24126
24127 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24128 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24129 .port_stop = ahci_port_stop,
24130 };
24131
24132 -static struct ata_port_operations ahci_vt8251_ops = {
24133 +static const struct ata_port_operations ahci_vt8251_ops = {
24134 .inherits = &ahci_ops,
24135 .hardreset = ahci_vt8251_hardreset,
24136 };
24137
24138 -static struct ata_port_operations ahci_p5wdh_ops = {
24139 +static const struct ata_port_operations ahci_p5wdh_ops = {
24140 .inherits = &ahci_ops,
24141 .hardreset = ahci_p5wdh_hardreset,
24142 };
24143
24144 -static struct ata_port_operations ahci_sb600_ops = {
24145 +static const struct ata_port_operations ahci_sb600_ops = {
24146 .inherits = &ahci_ops,
24147 .softreset = ahci_sb600_softreset,
24148 .pmp_softreset = ahci_sb600_softreset,
24149 diff -urNp linux-2.6.32.43/drivers/ata/ata_generic.c linux-2.6.32.43/drivers/ata/ata_generic.c
24150 --- linux-2.6.32.43/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24151 +++ linux-2.6.32.43/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24152 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
24153 ATA_BMDMA_SHT(DRV_NAME),
24154 };
24155
24156 -static struct ata_port_operations generic_port_ops = {
24157 +static const struct ata_port_operations generic_port_ops = {
24158 .inherits = &ata_bmdma_port_ops,
24159 .cable_detect = ata_cable_unknown,
24160 .set_mode = generic_set_mode,
24161 diff -urNp linux-2.6.32.43/drivers/ata/ata_piix.c linux-2.6.32.43/drivers/ata/ata_piix.c
24162 --- linux-2.6.32.43/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24163 +++ linux-2.6.32.43/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24164 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24165 ATA_BMDMA_SHT(DRV_NAME),
24166 };
24167
24168 -static struct ata_port_operations piix_pata_ops = {
24169 +static const struct ata_port_operations piix_pata_ops = {
24170 .inherits = &ata_bmdma32_port_ops,
24171 .cable_detect = ata_cable_40wire,
24172 .set_piomode = piix_set_piomode,
24173 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24174 .prereset = piix_pata_prereset,
24175 };
24176
24177 -static struct ata_port_operations piix_vmw_ops = {
24178 +static const struct ata_port_operations piix_vmw_ops = {
24179 .inherits = &piix_pata_ops,
24180 .bmdma_status = piix_vmw_bmdma_status,
24181 };
24182
24183 -static struct ata_port_operations ich_pata_ops = {
24184 +static const struct ata_port_operations ich_pata_ops = {
24185 .inherits = &piix_pata_ops,
24186 .cable_detect = ich_pata_cable_detect,
24187 .set_dmamode = ich_set_dmamode,
24188 };
24189
24190 -static struct ata_port_operations piix_sata_ops = {
24191 +static const struct ata_port_operations piix_sata_ops = {
24192 .inherits = &ata_bmdma_port_ops,
24193 };
24194
24195 -static struct ata_port_operations piix_sidpr_sata_ops = {
24196 +static const struct ata_port_operations piix_sidpr_sata_ops = {
24197 .inherits = &piix_sata_ops,
24198 .hardreset = sata_std_hardreset,
24199 .scr_read = piix_sidpr_scr_read,
24200 diff -urNp linux-2.6.32.43/drivers/ata/libata-acpi.c linux-2.6.32.43/drivers/ata/libata-acpi.c
24201 --- linux-2.6.32.43/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24202 +++ linux-2.6.32.43/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24203 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24204 ata_acpi_uevent(dev->link->ap, dev, event);
24205 }
24206
24207 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24208 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24209 .handler = ata_acpi_dev_notify_dock,
24210 .uevent = ata_acpi_dev_uevent,
24211 };
24212
24213 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24214 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24215 .handler = ata_acpi_ap_notify_dock,
24216 .uevent = ata_acpi_ap_uevent,
24217 };
24218 diff -urNp linux-2.6.32.43/drivers/ata/libata-core.c linux-2.6.32.43/drivers/ata/libata-core.c
24219 --- linux-2.6.32.43/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24220 +++ linux-2.6.32.43/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
24221 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24222 struct ata_port *ap;
24223 unsigned int tag;
24224
24225 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24226 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24227 ap = qc->ap;
24228
24229 qc->flags = 0;
24230 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24231 struct ata_port *ap;
24232 struct ata_link *link;
24233
24234 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24235 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24236 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24237 ap = qc->ap;
24238 link = qc->dev->link;
24239 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24240 * LOCKING:
24241 * None.
24242 */
24243 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
24244 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24245 {
24246 static DEFINE_SPINLOCK(lock);
24247 const struct ata_port_operations *cur;
24248 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24249 return;
24250
24251 spin_lock(&lock);
24252 + pax_open_kernel();
24253
24254 for (cur = ops->inherits; cur; cur = cur->inherits) {
24255 void **inherit = (void **)cur;
24256 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24257 if (IS_ERR(*pp))
24258 *pp = NULL;
24259
24260 - ops->inherits = NULL;
24261 + ((struct ata_port_operations *)ops)->inherits = NULL;
24262
24263 + pax_close_kernel();
24264 spin_unlock(&lock);
24265 }
24266
24267 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24268 */
24269 /* KILLME - the only user left is ipr */
24270 void ata_host_init(struct ata_host *host, struct device *dev,
24271 - unsigned long flags, struct ata_port_operations *ops)
24272 + unsigned long flags, const struct ata_port_operations *ops)
24273 {
24274 spin_lock_init(&host->lock);
24275 host->dev = dev;
24276 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24277 /* truly dummy */
24278 }
24279
24280 -struct ata_port_operations ata_dummy_port_ops = {
24281 +const struct ata_port_operations ata_dummy_port_ops = {
24282 .qc_prep = ata_noop_qc_prep,
24283 .qc_issue = ata_dummy_qc_issue,
24284 .error_handler = ata_dummy_error_handler,
24285 diff -urNp linux-2.6.32.43/drivers/ata/libata-eh.c linux-2.6.32.43/drivers/ata/libata-eh.c
24286 --- linux-2.6.32.43/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
24287 +++ linux-2.6.32.43/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
24288 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24289 {
24290 struct ata_link *link;
24291
24292 + pax_track_stack();
24293 +
24294 ata_for_each_link(link, ap, HOST_FIRST)
24295 ata_eh_link_report(link);
24296 }
24297 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24298 */
24299 void ata_std_error_handler(struct ata_port *ap)
24300 {
24301 - struct ata_port_operations *ops = ap->ops;
24302 + const struct ata_port_operations *ops = ap->ops;
24303 ata_reset_fn_t hardreset = ops->hardreset;
24304
24305 /* ignore built-in hardreset if SCR access is not available */
24306 diff -urNp linux-2.6.32.43/drivers/ata/libata-pmp.c linux-2.6.32.43/drivers/ata/libata-pmp.c
24307 --- linux-2.6.32.43/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24308 +++ linux-2.6.32.43/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24309 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24310 */
24311 static int sata_pmp_eh_recover(struct ata_port *ap)
24312 {
24313 - struct ata_port_operations *ops = ap->ops;
24314 + const struct ata_port_operations *ops = ap->ops;
24315 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24316 struct ata_link *pmp_link = &ap->link;
24317 struct ata_device *pmp_dev = pmp_link->device;
24318 diff -urNp linux-2.6.32.43/drivers/ata/pata_acpi.c linux-2.6.32.43/drivers/ata/pata_acpi.c
24319 --- linux-2.6.32.43/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24320 +++ linux-2.6.32.43/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24321 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24322 ATA_BMDMA_SHT(DRV_NAME),
24323 };
24324
24325 -static struct ata_port_operations pacpi_ops = {
24326 +static const struct ata_port_operations pacpi_ops = {
24327 .inherits = &ata_bmdma_port_ops,
24328 .qc_issue = pacpi_qc_issue,
24329 .cable_detect = pacpi_cable_detect,
24330 diff -urNp linux-2.6.32.43/drivers/ata/pata_ali.c linux-2.6.32.43/drivers/ata/pata_ali.c
24331 --- linux-2.6.32.43/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24332 +++ linux-2.6.32.43/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24333 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24334 * Port operations for PIO only ALi
24335 */
24336
24337 -static struct ata_port_operations ali_early_port_ops = {
24338 +static const struct ata_port_operations ali_early_port_ops = {
24339 .inherits = &ata_sff_port_ops,
24340 .cable_detect = ata_cable_40wire,
24341 .set_piomode = ali_set_piomode,
24342 @@ -382,7 +382,7 @@ static const struct ata_port_operations
24343 * Port operations for DMA capable ALi without cable
24344 * detect
24345 */
24346 -static struct ata_port_operations ali_20_port_ops = {
24347 +static const struct ata_port_operations ali_20_port_ops = {
24348 .inherits = &ali_dma_base_ops,
24349 .cable_detect = ata_cable_40wire,
24350 .mode_filter = ali_20_filter,
24351 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24352 /*
24353 * Port operations for DMA capable ALi with cable detect
24354 */
24355 -static struct ata_port_operations ali_c2_port_ops = {
24356 +static const struct ata_port_operations ali_c2_port_ops = {
24357 .inherits = &ali_dma_base_ops,
24358 .check_atapi_dma = ali_check_atapi_dma,
24359 .cable_detect = ali_c2_cable_detect,
24360 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24361 /*
24362 * Port operations for DMA capable ALi with cable detect
24363 */
24364 -static struct ata_port_operations ali_c4_port_ops = {
24365 +static const struct ata_port_operations ali_c4_port_ops = {
24366 .inherits = &ali_dma_base_ops,
24367 .check_atapi_dma = ali_check_atapi_dma,
24368 .cable_detect = ali_c2_cable_detect,
24369 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24370 /*
24371 * Port operations for DMA capable ALi with cable detect and LBA48
24372 */
24373 -static struct ata_port_operations ali_c5_port_ops = {
24374 +static const struct ata_port_operations ali_c5_port_ops = {
24375 .inherits = &ali_dma_base_ops,
24376 .check_atapi_dma = ali_check_atapi_dma,
24377 .dev_config = ali_warn_atapi_dma,
24378 diff -urNp linux-2.6.32.43/drivers/ata/pata_amd.c linux-2.6.32.43/drivers/ata/pata_amd.c
24379 --- linux-2.6.32.43/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24380 +++ linux-2.6.32.43/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24381 @@ -397,28 +397,28 @@ static const struct ata_port_operations
24382 .prereset = amd_pre_reset,
24383 };
24384
24385 -static struct ata_port_operations amd33_port_ops = {
24386 +static const struct ata_port_operations amd33_port_ops = {
24387 .inherits = &amd_base_port_ops,
24388 .cable_detect = ata_cable_40wire,
24389 .set_piomode = amd33_set_piomode,
24390 .set_dmamode = amd33_set_dmamode,
24391 };
24392
24393 -static struct ata_port_operations amd66_port_ops = {
24394 +static const struct ata_port_operations amd66_port_ops = {
24395 .inherits = &amd_base_port_ops,
24396 .cable_detect = ata_cable_unknown,
24397 .set_piomode = amd66_set_piomode,
24398 .set_dmamode = amd66_set_dmamode,
24399 };
24400
24401 -static struct ata_port_operations amd100_port_ops = {
24402 +static const struct ata_port_operations amd100_port_ops = {
24403 .inherits = &amd_base_port_ops,
24404 .cable_detect = ata_cable_unknown,
24405 .set_piomode = amd100_set_piomode,
24406 .set_dmamode = amd100_set_dmamode,
24407 };
24408
24409 -static struct ata_port_operations amd133_port_ops = {
24410 +static const struct ata_port_operations amd133_port_ops = {
24411 .inherits = &amd_base_port_ops,
24412 .cable_detect = amd_cable_detect,
24413 .set_piomode = amd133_set_piomode,
24414 @@ -433,13 +433,13 @@ static const struct ata_port_operations
24415 .host_stop = nv_host_stop,
24416 };
24417
24418 -static struct ata_port_operations nv100_port_ops = {
24419 +static const struct ata_port_operations nv100_port_ops = {
24420 .inherits = &nv_base_port_ops,
24421 .set_piomode = nv100_set_piomode,
24422 .set_dmamode = nv100_set_dmamode,
24423 };
24424
24425 -static struct ata_port_operations nv133_port_ops = {
24426 +static const struct ata_port_operations nv133_port_ops = {
24427 .inherits = &nv_base_port_ops,
24428 .set_piomode = nv133_set_piomode,
24429 .set_dmamode = nv133_set_dmamode,
24430 diff -urNp linux-2.6.32.43/drivers/ata/pata_artop.c linux-2.6.32.43/drivers/ata/pata_artop.c
24431 --- linux-2.6.32.43/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24432 +++ linux-2.6.32.43/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24433 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24434 ATA_BMDMA_SHT(DRV_NAME),
24435 };
24436
24437 -static struct ata_port_operations artop6210_ops = {
24438 +static const struct ata_port_operations artop6210_ops = {
24439 .inherits = &ata_bmdma_port_ops,
24440 .cable_detect = ata_cable_40wire,
24441 .set_piomode = artop6210_set_piomode,
24442 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24443 .qc_defer = artop6210_qc_defer,
24444 };
24445
24446 -static struct ata_port_operations artop6260_ops = {
24447 +static const struct ata_port_operations artop6260_ops = {
24448 .inherits = &ata_bmdma_port_ops,
24449 .cable_detect = artop6260_cable_detect,
24450 .set_piomode = artop6260_set_piomode,
24451 diff -urNp linux-2.6.32.43/drivers/ata/pata_at32.c linux-2.6.32.43/drivers/ata/pata_at32.c
24452 --- linux-2.6.32.43/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24453 +++ linux-2.6.32.43/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24454 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24455 ATA_PIO_SHT(DRV_NAME),
24456 };
24457
24458 -static struct ata_port_operations at32_port_ops = {
24459 +static const struct ata_port_operations at32_port_ops = {
24460 .inherits = &ata_sff_port_ops,
24461 .cable_detect = ata_cable_40wire,
24462 .set_piomode = pata_at32_set_piomode,
24463 diff -urNp linux-2.6.32.43/drivers/ata/pata_at91.c linux-2.6.32.43/drivers/ata/pata_at91.c
24464 --- linux-2.6.32.43/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24465 +++ linux-2.6.32.43/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24466 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24467 ATA_PIO_SHT(DRV_NAME),
24468 };
24469
24470 -static struct ata_port_operations pata_at91_port_ops = {
24471 +static const struct ata_port_operations pata_at91_port_ops = {
24472 .inherits = &ata_sff_port_ops,
24473
24474 .sff_data_xfer = pata_at91_data_xfer_noirq,
24475 diff -urNp linux-2.6.32.43/drivers/ata/pata_atiixp.c linux-2.6.32.43/drivers/ata/pata_atiixp.c
24476 --- linux-2.6.32.43/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24477 +++ linux-2.6.32.43/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24478 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24479 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24480 };
24481
24482 -static struct ata_port_operations atiixp_port_ops = {
24483 +static const struct ata_port_operations atiixp_port_ops = {
24484 .inherits = &ata_bmdma_port_ops,
24485
24486 .qc_prep = ata_sff_dumb_qc_prep,
24487 diff -urNp linux-2.6.32.43/drivers/ata/pata_atp867x.c linux-2.6.32.43/drivers/ata/pata_atp867x.c
24488 --- linux-2.6.32.43/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24489 +++ linux-2.6.32.43/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24490 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24491 ATA_BMDMA_SHT(DRV_NAME),
24492 };
24493
24494 -static struct ata_port_operations atp867x_ops = {
24495 +static const struct ata_port_operations atp867x_ops = {
24496 .inherits = &ata_bmdma_port_ops,
24497 .cable_detect = atp867x_cable_detect,
24498 .set_piomode = atp867x_set_piomode,
24499 diff -urNp linux-2.6.32.43/drivers/ata/pata_bf54x.c linux-2.6.32.43/drivers/ata/pata_bf54x.c
24500 --- linux-2.6.32.43/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24501 +++ linux-2.6.32.43/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24502 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24503 .dma_boundary = ATA_DMA_BOUNDARY,
24504 };
24505
24506 -static struct ata_port_operations bfin_pata_ops = {
24507 +static const struct ata_port_operations bfin_pata_ops = {
24508 .inherits = &ata_sff_port_ops,
24509
24510 .set_piomode = bfin_set_piomode,
24511 diff -urNp linux-2.6.32.43/drivers/ata/pata_cmd640.c linux-2.6.32.43/drivers/ata/pata_cmd640.c
24512 --- linux-2.6.32.43/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24513 +++ linux-2.6.32.43/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24514 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24515 ATA_BMDMA_SHT(DRV_NAME),
24516 };
24517
24518 -static struct ata_port_operations cmd640_port_ops = {
24519 +static const struct ata_port_operations cmd640_port_ops = {
24520 .inherits = &ata_bmdma_port_ops,
24521 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24522 .sff_data_xfer = ata_sff_data_xfer_noirq,
24523 diff -urNp linux-2.6.32.43/drivers/ata/pata_cmd64x.c linux-2.6.32.43/drivers/ata/pata_cmd64x.c
24524 --- linux-2.6.32.43/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24525 +++ linux-2.6.32.43/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24526 @@ -271,18 +271,18 @@ static const struct ata_port_operations
24527 .set_dmamode = cmd64x_set_dmamode,
24528 };
24529
24530 -static struct ata_port_operations cmd64x_port_ops = {
24531 +static const struct ata_port_operations cmd64x_port_ops = {
24532 .inherits = &cmd64x_base_ops,
24533 .cable_detect = ata_cable_40wire,
24534 };
24535
24536 -static struct ata_port_operations cmd646r1_port_ops = {
24537 +static const struct ata_port_operations cmd646r1_port_ops = {
24538 .inherits = &cmd64x_base_ops,
24539 .bmdma_stop = cmd646r1_bmdma_stop,
24540 .cable_detect = ata_cable_40wire,
24541 };
24542
24543 -static struct ata_port_operations cmd648_port_ops = {
24544 +static const struct ata_port_operations cmd648_port_ops = {
24545 .inherits = &cmd64x_base_ops,
24546 .bmdma_stop = cmd648_bmdma_stop,
24547 .cable_detect = cmd648_cable_detect,
24548 diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5520.c linux-2.6.32.43/drivers/ata/pata_cs5520.c
24549 --- linux-2.6.32.43/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24550 +++ linux-2.6.32.43/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24551 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24552 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24553 };
24554
24555 -static struct ata_port_operations cs5520_port_ops = {
24556 +static const struct ata_port_operations cs5520_port_ops = {
24557 .inherits = &ata_bmdma_port_ops,
24558 .qc_prep = ata_sff_dumb_qc_prep,
24559 .cable_detect = ata_cable_40wire,
24560 diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5530.c linux-2.6.32.43/drivers/ata/pata_cs5530.c
24561 --- linux-2.6.32.43/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24562 +++ linux-2.6.32.43/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24563 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24564 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24565 };
24566
24567 -static struct ata_port_operations cs5530_port_ops = {
24568 +static const struct ata_port_operations cs5530_port_ops = {
24569 .inherits = &ata_bmdma_port_ops,
24570
24571 .qc_prep = ata_sff_dumb_qc_prep,
24572 diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5535.c linux-2.6.32.43/drivers/ata/pata_cs5535.c
24573 --- linux-2.6.32.43/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24574 +++ linux-2.6.32.43/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24575 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24576 ATA_BMDMA_SHT(DRV_NAME),
24577 };
24578
24579 -static struct ata_port_operations cs5535_port_ops = {
24580 +static const struct ata_port_operations cs5535_port_ops = {
24581 .inherits = &ata_bmdma_port_ops,
24582 .cable_detect = cs5535_cable_detect,
24583 .set_piomode = cs5535_set_piomode,
24584 diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5536.c linux-2.6.32.43/drivers/ata/pata_cs5536.c
24585 --- linux-2.6.32.43/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24586 +++ linux-2.6.32.43/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24587 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24588 ATA_BMDMA_SHT(DRV_NAME),
24589 };
24590
24591 -static struct ata_port_operations cs5536_port_ops = {
24592 +static const struct ata_port_operations cs5536_port_ops = {
24593 .inherits = &ata_bmdma_port_ops,
24594 .cable_detect = cs5536_cable_detect,
24595 .set_piomode = cs5536_set_piomode,
24596 diff -urNp linux-2.6.32.43/drivers/ata/pata_cypress.c linux-2.6.32.43/drivers/ata/pata_cypress.c
24597 --- linux-2.6.32.43/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24598 +++ linux-2.6.32.43/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24599 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24600 ATA_BMDMA_SHT(DRV_NAME),
24601 };
24602
24603 -static struct ata_port_operations cy82c693_port_ops = {
24604 +static const struct ata_port_operations cy82c693_port_ops = {
24605 .inherits = &ata_bmdma_port_ops,
24606 .cable_detect = ata_cable_40wire,
24607 .set_piomode = cy82c693_set_piomode,
24608 diff -urNp linux-2.6.32.43/drivers/ata/pata_efar.c linux-2.6.32.43/drivers/ata/pata_efar.c
24609 --- linux-2.6.32.43/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24610 +++ linux-2.6.32.43/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24611 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24612 ATA_BMDMA_SHT(DRV_NAME),
24613 };
24614
24615 -static struct ata_port_operations efar_ops = {
24616 +static const struct ata_port_operations efar_ops = {
24617 .inherits = &ata_bmdma_port_ops,
24618 .cable_detect = efar_cable_detect,
24619 .set_piomode = efar_set_piomode,
24620 diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt366.c linux-2.6.32.43/drivers/ata/pata_hpt366.c
24621 --- linux-2.6.32.43/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24622 +++ linux-2.6.32.43/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24623 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24624 * Configuration for HPT366/68
24625 */
24626
24627 -static struct ata_port_operations hpt366_port_ops = {
24628 +static const struct ata_port_operations hpt366_port_ops = {
24629 .inherits = &ata_bmdma_port_ops,
24630 .cable_detect = hpt36x_cable_detect,
24631 .mode_filter = hpt366_filter,
24632 diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt37x.c linux-2.6.32.43/drivers/ata/pata_hpt37x.c
24633 --- linux-2.6.32.43/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24634 +++ linux-2.6.32.43/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24635 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24636 * Configuration for HPT370
24637 */
24638
24639 -static struct ata_port_operations hpt370_port_ops = {
24640 +static const struct ata_port_operations hpt370_port_ops = {
24641 .inherits = &ata_bmdma_port_ops,
24642
24643 .bmdma_stop = hpt370_bmdma_stop,
24644 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24645 * Configuration for HPT370A. Close to 370 but less filters
24646 */
24647
24648 -static struct ata_port_operations hpt370a_port_ops = {
24649 +static const struct ata_port_operations hpt370a_port_ops = {
24650 .inherits = &hpt370_port_ops,
24651 .mode_filter = hpt370a_filter,
24652 };
24653 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24654 * and DMA mode setting functionality.
24655 */
24656
24657 -static struct ata_port_operations hpt372_port_ops = {
24658 +static const struct ata_port_operations hpt372_port_ops = {
24659 .inherits = &ata_bmdma_port_ops,
24660
24661 .bmdma_stop = hpt37x_bmdma_stop,
24662 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24663 * but we have a different cable detection procedure for function 1.
24664 */
24665
24666 -static struct ata_port_operations hpt374_fn1_port_ops = {
24667 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24668 .inherits = &hpt372_port_ops,
24669 .prereset = hpt374_fn1_pre_reset,
24670 };
24671 diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c
24672 --- linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24673 +++ linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24674 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24675 * Configuration for HPT3x2n.
24676 */
24677
24678 -static struct ata_port_operations hpt3x2n_port_ops = {
24679 +static const struct ata_port_operations hpt3x2n_port_ops = {
24680 .inherits = &ata_bmdma_port_ops,
24681
24682 .bmdma_stop = hpt3x2n_bmdma_stop,
24683 diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt3x3.c linux-2.6.32.43/drivers/ata/pata_hpt3x3.c
24684 --- linux-2.6.32.43/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24685 +++ linux-2.6.32.43/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24686 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24687 ATA_BMDMA_SHT(DRV_NAME),
24688 };
24689
24690 -static struct ata_port_operations hpt3x3_port_ops = {
24691 +static const struct ata_port_operations hpt3x3_port_ops = {
24692 .inherits = &ata_bmdma_port_ops,
24693 .cable_detect = ata_cable_40wire,
24694 .set_piomode = hpt3x3_set_piomode,
24695 diff -urNp linux-2.6.32.43/drivers/ata/pata_icside.c linux-2.6.32.43/drivers/ata/pata_icside.c
24696 --- linux-2.6.32.43/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24697 +++ linux-2.6.32.43/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24698 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24699 }
24700 }
24701
24702 -static struct ata_port_operations pata_icside_port_ops = {
24703 +static const struct ata_port_operations pata_icside_port_ops = {
24704 .inherits = &ata_sff_port_ops,
24705 /* no need to build any PRD tables for DMA */
24706 .qc_prep = ata_noop_qc_prep,
24707 diff -urNp linux-2.6.32.43/drivers/ata/pata_isapnp.c linux-2.6.32.43/drivers/ata/pata_isapnp.c
24708 --- linux-2.6.32.43/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24709 +++ linux-2.6.32.43/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24710 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24711 ATA_PIO_SHT(DRV_NAME),
24712 };
24713
24714 -static struct ata_port_operations isapnp_port_ops = {
24715 +static const struct ata_port_operations isapnp_port_ops = {
24716 .inherits = &ata_sff_port_ops,
24717 .cable_detect = ata_cable_40wire,
24718 };
24719
24720 -static struct ata_port_operations isapnp_noalt_port_ops = {
24721 +static const struct ata_port_operations isapnp_noalt_port_ops = {
24722 .inherits = &ata_sff_port_ops,
24723 .cable_detect = ata_cable_40wire,
24724 /* No altstatus so we don't want to use the lost interrupt poll */
24725 diff -urNp linux-2.6.32.43/drivers/ata/pata_it8213.c linux-2.6.32.43/drivers/ata/pata_it8213.c
24726 --- linux-2.6.32.43/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24727 +++ linux-2.6.32.43/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24728 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24729 };
24730
24731
24732 -static struct ata_port_operations it8213_ops = {
24733 +static const struct ata_port_operations it8213_ops = {
24734 .inherits = &ata_bmdma_port_ops,
24735 .cable_detect = it8213_cable_detect,
24736 .set_piomode = it8213_set_piomode,
24737 diff -urNp linux-2.6.32.43/drivers/ata/pata_it821x.c linux-2.6.32.43/drivers/ata/pata_it821x.c
24738 --- linux-2.6.32.43/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24739 +++ linux-2.6.32.43/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24740 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24741 ATA_BMDMA_SHT(DRV_NAME),
24742 };
24743
24744 -static struct ata_port_operations it821x_smart_port_ops = {
24745 +static const struct ata_port_operations it821x_smart_port_ops = {
24746 .inherits = &ata_bmdma_port_ops,
24747
24748 .check_atapi_dma= it821x_check_atapi_dma,
24749 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24750 .port_start = it821x_port_start,
24751 };
24752
24753 -static struct ata_port_operations it821x_passthru_port_ops = {
24754 +static const struct ata_port_operations it821x_passthru_port_ops = {
24755 .inherits = &ata_bmdma_port_ops,
24756
24757 .check_atapi_dma= it821x_check_atapi_dma,
24758 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24759 .port_start = it821x_port_start,
24760 };
24761
24762 -static struct ata_port_operations it821x_rdc_port_ops = {
24763 +static const struct ata_port_operations it821x_rdc_port_ops = {
24764 .inherits = &ata_bmdma_port_ops,
24765
24766 .check_atapi_dma= it821x_check_atapi_dma,
24767 diff -urNp linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c
24768 --- linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24769 +++ linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24770 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24771 ATA_PIO_SHT(DRV_NAME),
24772 };
24773
24774 -static struct ata_port_operations ixp4xx_port_ops = {
24775 +static const struct ata_port_operations ixp4xx_port_ops = {
24776 .inherits = &ata_sff_port_ops,
24777 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24778 .cable_detect = ata_cable_40wire,
24779 diff -urNp linux-2.6.32.43/drivers/ata/pata_jmicron.c linux-2.6.32.43/drivers/ata/pata_jmicron.c
24780 --- linux-2.6.32.43/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24781 +++ linux-2.6.32.43/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24782 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24783 ATA_BMDMA_SHT(DRV_NAME),
24784 };
24785
24786 -static struct ata_port_operations jmicron_ops = {
24787 +static const struct ata_port_operations jmicron_ops = {
24788 .inherits = &ata_bmdma_port_ops,
24789 .prereset = jmicron_pre_reset,
24790 };
24791 diff -urNp linux-2.6.32.43/drivers/ata/pata_legacy.c linux-2.6.32.43/drivers/ata/pata_legacy.c
24792 --- linux-2.6.32.43/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24793 +++ linux-2.6.32.43/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24794 @@ -106,7 +106,7 @@ struct legacy_probe {
24795
24796 struct legacy_controller {
24797 const char *name;
24798 - struct ata_port_operations *ops;
24799 + const struct ata_port_operations *ops;
24800 unsigned int pio_mask;
24801 unsigned int flags;
24802 unsigned int pflags;
24803 @@ -223,12 +223,12 @@ static const struct ata_port_operations
24804 * pio_mask as well.
24805 */
24806
24807 -static struct ata_port_operations simple_port_ops = {
24808 +static const struct ata_port_operations simple_port_ops = {
24809 .inherits = &legacy_base_port_ops,
24810 .sff_data_xfer = ata_sff_data_xfer_noirq,
24811 };
24812
24813 -static struct ata_port_operations legacy_port_ops = {
24814 +static const struct ata_port_operations legacy_port_ops = {
24815 .inherits = &legacy_base_port_ops,
24816 .sff_data_xfer = ata_sff_data_xfer_noirq,
24817 .set_mode = legacy_set_mode,
24818 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24819 return buflen;
24820 }
24821
24822 -static struct ata_port_operations pdc20230_port_ops = {
24823 +static const struct ata_port_operations pdc20230_port_ops = {
24824 .inherits = &legacy_base_port_ops,
24825 .set_piomode = pdc20230_set_piomode,
24826 .sff_data_xfer = pdc_data_xfer_vlb,
24827 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24828 ioread8(ap->ioaddr.status_addr);
24829 }
24830
24831 -static struct ata_port_operations ht6560a_port_ops = {
24832 +static const struct ata_port_operations ht6560a_port_ops = {
24833 .inherits = &legacy_base_port_ops,
24834 .set_piomode = ht6560a_set_piomode,
24835 };
24836 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24837 ioread8(ap->ioaddr.status_addr);
24838 }
24839
24840 -static struct ata_port_operations ht6560b_port_ops = {
24841 +static const struct ata_port_operations ht6560b_port_ops = {
24842 .inherits = &legacy_base_port_ops,
24843 .set_piomode = ht6560b_set_piomode,
24844 };
24845 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24846 }
24847
24848
24849 -static struct ata_port_operations opti82c611a_port_ops = {
24850 +static const struct ata_port_operations opti82c611a_port_ops = {
24851 .inherits = &legacy_base_port_ops,
24852 .set_piomode = opti82c611a_set_piomode,
24853 };
24854 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24855 return ata_sff_qc_issue(qc);
24856 }
24857
24858 -static struct ata_port_operations opti82c46x_port_ops = {
24859 +static const struct ata_port_operations opti82c46x_port_ops = {
24860 .inherits = &legacy_base_port_ops,
24861 .set_piomode = opti82c46x_set_piomode,
24862 .qc_issue = opti82c46x_qc_issue,
24863 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24864 return 0;
24865 }
24866
24867 -static struct ata_port_operations qdi6500_port_ops = {
24868 +static const struct ata_port_operations qdi6500_port_ops = {
24869 .inherits = &legacy_base_port_ops,
24870 .set_piomode = qdi6500_set_piomode,
24871 .qc_issue = qdi_qc_issue,
24872 .sff_data_xfer = vlb32_data_xfer,
24873 };
24874
24875 -static struct ata_port_operations qdi6580_port_ops = {
24876 +static const struct ata_port_operations qdi6580_port_ops = {
24877 .inherits = &legacy_base_port_ops,
24878 .set_piomode = qdi6580_set_piomode,
24879 .sff_data_xfer = vlb32_data_xfer,
24880 };
24881
24882 -static struct ata_port_operations qdi6580dp_port_ops = {
24883 +static const struct ata_port_operations qdi6580dp_port_ops = {
24884 .inherits = &legacy_base_port_ops,
24885 .set_piomode = qdi6580dp_set_piomode,
24886 .sff_data_xfer = vlb32_data_xfer,
24887 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24888 return 0;
24889 }
24890
24891 -static struct ata_port_operations winbond_port_ops = {
24892 +static const struct ata_port_operations winbond_port_ops = {
24893 .inherits = &legacy_base_port_ops,
24894 .set_piomode = winbond_set_piomode,
24895 .sff_data_xfer = vlb32_data_xfer,
24896 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24897 int pio_modes = controller->pio_mask;
24898 unsigned long io = probe->port;
24899 u32 mask = (1 << probe->slot);
24900 - struct ata_port_operations *ops = controller->ops;
24901 + const struct ata_port_operations *ops = controller->ops;
24902 struct legacy_data *ld = &legacy_data[probe->slot];
24903 struct ata_host *host = NULL;
24904 struct ata_port *ap;
24905 diff -urNp linux-2.6.32.43/drivers/ata/pata_marvell.c linux-2.6.32.43/drivers/ata/pata_marvell.c
24906 --- linux-2.6.32.43/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24907 +++ linux-2.6.32.43/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24908 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24909 ATA_BMDMA_SHT(DRV_NAME),
24910 };
24911
24912 -static struct ata_port_operations marvell_ops = {
24913 +static const struct ata_port_operations marvell_ops = {
24914 .inherits = &ata_bmdma_port_ops,
24915 .cable_detect = marvell_cable_detect,
24916 .prereset = marvell_pre_reset,
24917 diff -urNp linux-2.6.32.43/drivers/ata/pata_mpc52xx.c linux-2.6.32.43/drivers/ata/pata_mpc52xx.c
24918 --- linux-2.6.32.43/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24919 +++ linux-2.6.32.43/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24920 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24921 ATA_PIO_SHT(DRV_NAME),
24922 };
24923
24924 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24925 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24926 .inherits = &ata_bmdma_port_ops,
24927 .sff_dev_select = mpc52xx_ata_dev_select,
24928 .set_piomode = mpc52xx_ata_set_piomode,
24929 diff -urNp linux-2.6.32.43/drivers/ata/pata_mpiix.c linux-2.6.32.43/drivers/ata/pata_mpiix.c
24930 --- linux-2.6.32.43/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24931 +++ linux-2.6.32.43/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24932 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24933 ATA_PIO_SHT(DRV_NAME),
24934 };
24935
24936 -static struct ata_port_operations mpiix_port_ops = {
24937 +static const struct ata_port_operations mpiix_port_ops = {
24938 .inherits = &ata_sff_port_ops,
24939 .qc_issue = mpiix_qc_issue,
24940 .cable_detect = ata_cable_40wire,
24941 diff -urNp linux-2.6.32.43/drivers/ata/pata_netcell.c linux-2.6.32.43/drivers/ata/pata_netcell.c
24942 --- linux-2.6.32.43/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24943 +++ linux-2.6.32.43/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24944 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24945 ATA_BMDMA_SHT(DRV_NAME),
24946 };
24947
24948 -static struct ata_port_operations netcell_ops = {
24949 +static const struct ata_port_operations netcell_ops = {
24950 .inherits = &ata_bmdma_port_ops,
24951 .cable_detect = ata_cable_80wire,
24952 .read_id = netcell_read_id,
24953 diff -urNp linux-2.6.32.43/drivers/ata/pata_ninja32.c linux-2.6.32.43/drivers/ata/pata_ninja32.c
24954 --- linux-2.6.32.43/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24955 +++ linux-2.6.32.43/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24956 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24957 ATA_BMDMA_SHT(DRV_NAME),
24958 };
24959
24960 -static struct ata_port_operations ninja32_port_ops = {
24961 +static const struct ata_port_operations ninja32_port_ops = {
24962 .inherits = &ata_bmdma_port_ops,
24963 .sff_dev_select = ninja32_dev_select,
24964 .cable_detect = ata_cable_40wire,
24965 diff -urNp linux-2.6.32.43/drivers/ata/pata_ns87410.c linux-2.6.32.43/drivers/ata/pata_ns87410.c
24966 --- linux-2.6.32.43/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24967 +++ linux-2.6.32.43/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24968 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24969 ATA_PIO_SHT(DRV_NAME),
24970 };
24971
24972 -static struct ata_port_operations ns87410_port_ops = {
24973 +static const struct ata_port_operations ns87410_port_ops = {
24974 .inherits = &ata_sff_port_ops,
24975 .qc_issue = ns87410_qc_issue,
24976 .cable_detect = ata_cable_40wire,
24977 diff -urNp linux-2.6.32.43/drivers/ata/pata_ns87415.c linux-2.6.32.43/drivers/ata/pata_ns87415.c
24978 --- linux-2.6.32.43/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24979 +++ linux-2.6.32.43/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24980 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24981 }
24982 #endif /* 87560 SuperIO Support */
24983
24984 -static struct ata_port_operations ns87415_pata_ops = {
24985 +static const struct ata_port_operations ns87415_pata_ops = {
24986 .inherits = &ata_bmdma_port_ops,
24987
24988 .check_atapi_dma = ns87415_check_atapi_dma,
24989 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24990 };
24991
24992 #if defined(CONFIG_SUPERIO)
24993 -static struct ata_port_operations ns87560_pata_ops = {
24994 +static const struct ata_port_operations ns87560_pata_ops = {
24995 .inherits = &ns87415_pata_ops,
24996 .sff_tf_read = ns87560_tf_read,
24997 .sff_check_status = ns87560_check_status,
24998 diff -urNp linux-2.6.32.43/drivers/ata/pata_octeon_cf.c linux-2.6.32.43/drivers/ata/pata_octeon_cf.c
24999 --- linux-2.6.32.43/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25000 +++ linux-2.6.32.43/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25001 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25002 return 0;
25003 }
25004
25005 +/* cannot be const */
25006 static struct ata_port_operations octeon_cf_ops = {
25007 .inherits = &ata_sff_port_ops,
25008 .check_atapi_dma = octeon_cf_check_atapi_dma,
25009 diff -urNp linux-2.6.32.43/drivers/ata/pata_oldpiix.c linux-2.6.32.43/drivers/ata/pata_oldpiix.c
25010 --- linux-2.6.32.43/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25011 +++ linux-2.6.32.43/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25012 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25013 ATA_BMDMA_SHT(DRV_NAME),
25014 };
25015
25016 -static struct ata_port_operations oldpiix_pata_ops = {
25017 +static const struct ata_port_operations oldpiix_pata_ops = {
25018 .inherits = &ata_bmdma_port_ops,
25019 .qc_issue = oldpiix_qc_issue,
25020 .cable_detect = ata_cable_40wire,
25021 diff -urNp linux-2.6.32.43/drivers/ata/pata_opti.c linux-2.6.32.43/drivers/ata/pata_opti.c
25022 --- linux-2.6.32.43/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25023 +++ linux-2.6.32.43/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25024 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25025 ATA_PIO_SHT(DRV_NAME),
25026 };
25027
25028 -static struct ata_port_operations opti_port_ops = {
25029 +static const struct ata_port_operations opti_port_ops = {
25030 .inherits = &ata_sff_port_ops,
25031 .cable_detect = ata_cable_40wire,
25032 .set_piomode = opti_set_piomode,
25033 diff -urNp linux-2.6.32.43/drivers/ata/pata_optidma.c linux-2.6.32.43/drivers/ata/pata_optidma.c
25034 --- linux-2.6.32.43/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25035 +++ linux-2.6.32.43/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25036 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25037 ATA_BMDMA_SHT(DRV_NAME),
25038 };
25039
25040 -static struct ata_port_operations optidma_port_ops = {
25041 +static const struct ata_port_operations optidma_port_ops = {
25042 .inherits = &ata_bmdma_port_ops,
25043 .cable_detect = ata_cable_40wire,
25044 .set_piomode = optidma_set_pio_mode,
25045 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25046 .prereset = optidma_pre_reset,
25047 };
25048
25049 -static struct ata_port_operations optiplus_port_ops = {
25050 +static const struct ata_port_operations optiplus_port_ops = {
25051 .inherits = &optidma_port_ops,
25052 .set_piomode = optiplus_set_pio_mode,
25053 .set_dmamode = optiplus_set_dma_mode,
25054 diff -urNp linux-2.6.32.43/drivers/ata/pata_palmld.c linux-2.6.32.43/drivers/ata/pata_palmld.c
25055 --- linux-2.6.32.43/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25056 +++ linux-2.6.32.43/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25057 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25058 ATA_PIO_SHT(DRV_NAME),
25059 };
25060
25061 -static struct ata_port_operations palmld_port_ops = {
25062 +static const struct ata_port_operations palmld_port_ops = {
25063 .inherits = &ata_sff_port_ops,
25064 .sff_data_xfer = ata_sff_data_xfer_noirq,
25065 .cable_detect = ata_cable_40wire,
25066 diff -urNp linux-2.6.32.43/drivers/ata/pata_pcmcia.c linux-2.6.32.43/drivers/ata/pata_pcmcia.c
25067 --- linux-2.6.32.43/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25068 +++ linux-2.6.32.43/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25069 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25070 ATA_PIO_SHT(DRV_NAME),
25071 };
25072
25073 -static struct ata_port_operations pcmcia_port_ops = {
25074 +static const struct ata_port_operations pcmcia_port_ops = {
25075 .inherits = &ata_sff_port_ops,
25076 .sff_data_xfer = ata_sff_data_xfer_noirq,
25077 .cable_detect = ata_cable_40wire,
25078 .set_mode = pcmcia_set_mode,
25079 };
25080
25081 -static struct ata_port_operations pcmcia_8bit_port_ops = {
25082 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
25083 .inherits = &ata_sff_port_ops,
25084 .sff_data_xfer = ata_data_xfer_8bit,
25085 .cable_detect = ata_cable_40wire,
25086 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25087 unsigned long io_base, ctl_base;
25088 void __iomem *io_addr, *ctl_addr;
25089 int n_ports = 1;
25090 - struct ata_port_operations *ops = &pcmcia_port_ops;
25091 + const struct ata_port_operations *ops = &pcmcia_port_ops;
25092
25093 info = kzalloc(sizeof(*info), GFP_KERNEL);
25094 if (info == NULL)
25095 diff -urNp linux-2.6.32.43/drivers/ata/pata_pdc2027x.c linux-2.6.32.43/drivers/ata/pata_pdc2027x.c
25096 --- linux-2.6.32.43/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25097 +++ linux-2.6.32.43/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25098 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25099 ATA_BMDMA_SHT(DRV_NAME),
25100 };
25101
25102 -static struct ata_port_operations pdc2027x_pata100_ops = {
25103 +static const struct ata_port_operations pdc2027x_pata100_ops = {
25104 .inherits = &ata_bmdma_port_ops,
25105 .check_atapi_dma = pdc2027x_check_atapi_dma,
25106 .cable_detect = pdc2027x_cable_detect,
25107 .prereset = pdc2027x_prereset,
25108 };
25109
25110 -static struct ata_port_operations pdc2027x_pata133_ops = {
25111 +static const struct ata_port_operations pdc2027x_pata133_ops = {
25112 .inherits = &pdc2027x_pata100_ops,
25113 .mode_filter = pdc2027x_mode_filter,
25114 .set_piomode = pdc2027x_set_piomode,
25115 diff -urNp linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c
25116 --- linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25117 +++ linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25118 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25119 ATA_BMDMA_SHT(DRV_NAME),
25120 };
25121
25122 -static struct ata_port_operations pdc2024x_port_ops = {
25123 +static const struct ata_port_operations pdc2024x_port_ops = {
25124 .inherits = &ata_bmdma_port_ops,
25125
25126 .cable_detect = ata_cable_40wire,
25127 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25128 .sff_exec_command = pdc202xx_exec_command,
25129 };
25130
25131 -static struct ata_port_operations pdc2026x_port_ops = {
25132 +static const struct ata_port_operations pdc2026x_port_ops = {
25133 .inherits = &pdc2024x_port_ops,
25134
25135 .check_atapi_dma = pdc2026x_check_atapi_dma,
25136 diff -urNp linux-2.6.32.43/drivers/ata/pata_platform.c linux-2.6.32.43/drivers/ata/pata_platform.c
25137 --- linux-2.6.32.43/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25138 +++ linux-2.6.32.43/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25139 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25140 ATA_PIO_SHT(DRV_NAME),
25141 };
25142
25143 -static struct ata_port_operations pata_platform_port_ops = {
25144 +static const struct ata_port_operations pata_platform_port_ops = {
25145 .inherits = &ata_sff_port_ops,
25146 .sff_data_xfer = ata_sff_data_xfer_noirq,
25147 .cable_detect = ata_cable_unknown,
25148 diff -urNp linux-2.6.32.43/drivers/ata/pata_qdi.c linux-2.6.32.43/drivers/ata/pata_qdi.c
25149 --- linux-2.6.32.43/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25150 +++ linux-2.6.32.43/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25151 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25152 ATA_PIO_SHT(DRV_NAME),
25153 };
25154
25155 -static struct ata_port_operations qdi6500_port_ops = {
25156 +static const struct ata_port_operations qdi6500_port_ops = {
25157 .inherits = &ata_sff_port_ops,
25158 .qc_issue = qdi_qc_issue,
25159 .sff_data_xfer = qdi_data_xfer,
25160 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25161 .set_piomode = qdi6500_set_piomode,
25162 };
25163
25164 -static struct ata_port_operations qdi6580_port_ops = {
25165 +static const struct ata_port_operations qdi6580_port_ops = {
25166 .inherits = &qdi6500_port_ops,
25167 .set_piomode = qdi6580_set_piomode,
25168 };
25169 diff -urNp linux-2.6.32.43/drivers/ata/pata_radisys.c linux-2.6.32.43/drivers/ata/pata_radisys.c
25170 --- linux-2.6.32.43/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25171 +++ linux-2.6.32.43/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25172 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25173 ATA_BMDMA_SHT(DRV_NAME),
25174 };
25175
25176 -static struct ata_port_operations radisys_pata_ops = {
25177 +static const struct ata_port_operations radisys_pata_ops = {
25178 .inherits = &ata_bmdma_port_ops,
25179 .qc_issue = radisys_qc_issue,
25180 .cable_detect = ata_cable_unknown,
25181 diff -urNp linux-2.6.32.43/drivers/ata/pata_rb532_cf.c linux-2.6.32.43/drivers/ata/pata_rb532_cf.c
25182 --- linux-2.6.32.43/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25183 +++ linux-2.6.32.43/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25184 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25185 return IRQ_HANDLED;
25186 }
25187
25188 -static struct ata_port_operations rb532_pata_port_ops = {
25189 +static const struct ata_port_operations rb532_pata_port_ops = {
25190 .inherits = &ata_sff_port_ops,
25191 .sff_data_xfer = ata_sff_data_xfer32,
25192 };
25193 diff -urNp linux-2.6.32.43/drivers/ata/pata_rdc.c linux-2.6.32.43/drivers/ata/pata_rdc.c
25194 --- linux-2.6.32.43/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25195 +++ linux-2.6.32.43/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25196 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25197 pci_write_config_byte(dev, 0x48, udma_enable);
25198 }
25199
25200 -static struct ata_port_operations rdc_pata_ops = {
25201 +static const struct ata_port_operations rdc_pata_ops = {
25202 .inherits = &ata_bmdma32_port_ops,
25203 .cable_detect = rdc_pata_cable_detect,
25204 .set_piomode = rdc_set_piomode,
25205 diff -urNp linux-2.6.32.43/drivers/ata/pata_rz1000.c linux-2.6.32.43/drivers/ata/pata_rz1000.c
25206 --- linux-2.6.32.43/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25207 +++ linux-2.6.32.43/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25208 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25209 ATA_PIO_SHT(DRV_NAME),
25210 };
25211
25212 -static struct ata_port_operations rz1000_port_ops = {
25213 +static const struct ata_port_operations rz1000_port_ops = {
25214 .inherits = &ata_sff_port_ops,
25215 .cable_detect = ata_cable_40wire,
25216 .set_mode = rz1000_set_mode,
25217 diff -urNp linux-2.6.32.43/drivers/ata/pata_sc1200.c linux-2.6.32.43/drivers/ata/pata_sc1200.c
25218 --- linux-2.6.32.43/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25219 +++ linux-2.6.32.43/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25220 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25221 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25222 };
25223
25224 -static struct ata_port_operations sc1200_port_ops = {
25225 +static const struct ata_port_operations sc1200_port_ops = {
25226 .inherits = &ata_bmdma_port_ops,
25227 .qc_prep = ata_sff_dumb_qc_prep,
25228 .qc_issue = sc1200_qc_issue,
25229 diff -urNp linux-2.6.32.43/drivers/ata/pata_scc.c linux-2.6.32.43/drivers/ata/pata_scc.c
25230 --- linux-2.6.32.43/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25231 +++ linux-2.6.32.43/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25232 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25233 ATA_BMDMA_SHT(DRV_NAME),
25234 };
25235
25236 -static struct ata_port_operations scc_pata_ops = {
25237 +static const struct ata_port_operations scc_pata_ops = {
25238 .inherits = &ata_bmdma_port_ops,
25239
25240 .set_piomode = scc_set_piomode,
25241 diff -urNp linux-2.6.32.43/drivers/ata/pata_sch.c linux-2.6.32.43/drivers/ata/pata_sch.c
25242 --- linux-2.6.32.43/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25243 +++ linux-2.6.32.43/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25244 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25245 ATA_BMDMA_SHT(DRV_NAME),
25246 };
25247
25248 -static struct ata_port_operations sch_pata_ops = {
25249 +static const struct ata_port_operations sch_pata_ops = {
25250 .inherits = &ata_bmdma_port_ops,
25251 .cable_detect = ata_cable_unknown,
25252 .set_piomode = sch_set_piomode,
25253 diff -urNp linux-2.6.32.43/drivers/ata/pata_serverworks.c linux-2.6.32.43/drivers/ata/pata_serverworks.c
25254 --- linux-2.6.32.43/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25255 +++ linux-2.6.32.43/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25256 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25257 ATA_BMDMA_SHT(DRV_NAME),
25258 };
25259
25260 -static struct ata_port_operations serverworks_osb4_port_ops = {
25261 +static const struct ata_port_operations serverworks_osb4_port_ops = {
25262 .inherits = &ata_bmdma_port_ops,
25263 .cable_detect = serverworks_cable_detect,
25264 .mode_filter = serverworks_osb4_filter,
25265 @@ -307,7 +307,7 @@ static struct ata_port_operations server
25266 .set_dmamode = serverworks_set_dmamode,
25267 };
25268
25269 -static struct ata_port_operations serverworks_csb_port_ops = {
25270 +static const struct ata_port_operations serverworks_csb_port_ops = {
25271 .inherits = &serverworks_osb4_port_ops,
25272 .mode_filter = serverworks_csb_filter,
25273 };
25274 diff -urNp linux-2.6.32.43/drivers/ata/pata_sil680.c linux-2.6.32.43/drivers/ata/pata_sil680.c
25275 --- linux-2.6.32.43/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25276 +++ linux-2.6.32.43/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25277 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25278 ATA_BMDMA_SHT(DRV_NAME),
25279 };
25280
25281 -static struct ata_port_operations sil680_port_ops = {
25282 +static const struct ata_port_operations sil680_port_ops = {
25283 .inherits = &ata_bmdma32_port_ops,
25284 .cable_detect = sil680_cable_detect,
25285 .set_piomode = sil680_set_piomode,
25286 diff -urNp linux-2.6.32.43/drivers/ata/pata_sis.c linux-2.6.32.43/drivers/ata/pata_sis.c
25287 --- linux-2.6.32.43/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25288 +++ linux-2.6.32.43/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25289 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25290 ATA_BMDMA_SHT(DRV_NAME),
25291 };
25292
25293 -static struct ata_port_operations sis_133_for_sata_ops = {
25294 +static const struct ata_port_operations sis_133_for_sata_ops = {
25295 .inherits = &ata_bmdma_port_ops,
25296 .set_piomode = sis_133_set_piomode,
25297 .set_dmamode = sis_133_set_dmamode,
25298 .cable_detect = sis_133_cable_detect,
25299 };
25300
25301 -static struct ata_port_operations sis_base_ops = {
25302 +static const struct ata_port_operations sis_base_ops = {
25303 .inherits = &ata_bmdma_port_ops,
25304 .prereset = sis_pre_reset,
25305 };
25306
25307 -static struct ata_port_operations sis_133_ops = {
25308 +static const struct ata_port_operations sis_133_ops = {
25309 .inherits = &sis_base_ops,
25310 .set_piomode = sis_133_set_piomode,
25311 .set_dmamode = sis_133_set_dmamode,
25312 .cable_detect = sis_133_cable_detect,
25313 };
25314
25315 -static struct ata_port_operations sis_133_early_ops = {
25316 +static const struct ata_port_operations sis_133_early_ops = {
25317 .inherits = &sis_base_ops,
25318 .set_piomode = sis_100_set_piomode,
25319 .set_dmamode = sis_133_early_set_dmamode,
25320 .cable_detect = sis_66_cable_detect,
25321 };
25322
25323 -static struct ata_port_operations sis_100_ops = {
25324 +static const struct ata_port_operations sis_100_ops = {
25325 .inherits = &sis_base_ops,
25326 .set_piomode = sis_100_set_piomode,
25327 .set_dmamode = sis_100_set_dmamode,
25328 .cable_detect = sis_66_cable_detect,
25329 };
25330
25331 -static struct ata_port_operations sis_66_ops = {
25332 +static const struct ata_port_operations sis_66_ops = {
25333 .inherits = &sis_base_ops,
25334 .set_piomode = sis_old_set_piomode,
25335 .set_dmamode = sis_66_set_dmamode,
25336 .cable_detect = sis_66_cable_detect,
25337 };
25338
25339 -static struct ata_port_operations sis_old_ops = {
25340 +static const struct ata_port_operations sis_old_ops = {
25341 .inherits = &sis_base_ops,
25342 .set_piomode = sis_old_set_piomode,
25343 .set_dmamode = sis_old_set_dmamode,
25344 diff -urNp linux-2.6.32.43/drivers/ata/pata_sl82c105.c linux-2.6.32.43/drivers/ata/pata_sl82c105.c
25345 --- linux-2.6.32.43/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25346 +++ linux-2.6.32.43/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25347 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25348 ATA_BMDMA_SHT(DRV_NAME),
25349 };
25350
25351 -static struct ata_port_operations sl82c105_port_ops = {
25352 +static const struct ata_port_operations sl82c105_port_ops = {
25353 .inherits = &ata_bmdma_port_ops,
25354 .qc_defer = sl82c105_qc_defer,
25355 .bmdma_start = sl82c105_bmdma_start,
25356 diff -urNp linux-2.6.32.43/drivers/ata/pata_triflex.c linux-2.6.32.43/drivers/ata/pata_triflex.c
25357 --- linux-2.6.32.43/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25358 +++ linux-2.6.32.43/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25359 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25360 ATA_BMDMA_SHT(DRV_NAME),
25361 };
25362
25363 -static struct ata_port_operations triflex_port_ops = {
25364 +static const struct ata_port_operations triflex_port_ops = {
25365 .inherits = &ata_bmdma_port_ops,
25366 .bmdma_start = triflex_bmdma_start,
25367 .bmdma_stop = triflex_bmdma_stop,
25368 diff -urNp linux-2.6.32.43/drivers/ata/pata_via.c linux-2.6.32.43/drivers/ata/pata_via.c
25369 --- linux-2.6.32.43/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25370 +++ linux-2.6.32.43/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25371 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25372 ATA_BMDMA_SHT(DRV_NAME),
25373 };
25374
25375 -static struct ata_port_operations via_port_ops = {
25376 +static const struct ata_port_operations via_port_ops = {
25377 .inherits = &ata_bmdma_port_ops,
25378 .cable_detect = via_cable_detect,
25379 .set_piomode = via_set_piomode,
25380 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25381 .port_start = via_port_start,
25382 };
25383
25384 -static struct ata_port_operations via_port_ops_noirq = {
25385 +static const struct ata_port_operations via_port_ops_noirq = {
25386 .inherits = &via_port_ops,
25387 .sff_data_xfer = ata_sff_data_xfer_noirq,
25388 };
25389 diff -urNp linux-2.6.32.43/drivers/ata/pata_winbond.c linux-2.6.32.43/drivers/ata/pata_winbond.c
25390 --- linux-2.6.32.43/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25391 +++ linux-2.6.32.43/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25392 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25393 ATA_PIO_SHT(DRV_NAME),
25394 };
25395
25396 -static struct ata_port_operations winbond_port_ops = {
25397 +static const struct ata_port_operations winbond_port_ops = {
25398 .inherits = &ata_sff_port_ops,
25399 .sff_data_xfer = winbond_data_xfer,
25400 .cable_detect = ata_cable_40wire,
25401 diff -urNp linux-2.6.32.43/drivers/ata/pdc_adma.c linux-2.6.32.43/drivers/ata/pdc_adma.c
25402 --- linux-2.6.32.43/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25403 +++ linux-2.6.32.43/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25404 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25405 .dma_boundary = ADMA_DMA_BOUNDARY,
25406 };
25407
25408 -static struct ata_port_operations adma_ata_ops = {
25409 +static const struct ata_port_operations adma_ata_ops = {
25410 .inherits = &ata_sff_port_ops,
25411
25412 .lost_interrupt = ATA_OP_NULL,
25413 diff -urNp linux-2.6.32.43/drivers/ata/sata_fsl.c linux-2.6.32.43/drivers/ata/sata_fsl.c
25414 --- linux-2.6.32.43/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25415 +++ linux-2.6.32.43/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25416 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25417 .dma_boundary = ATA_DMA_BOUNDARY,
25418 };
25419
25420 -static struct ata_port_operations sata_fsl_ops = {
25421 +static const struct ata_port_operations sata_fsl_ops = {
25422 .inherits = &sata_pmp_port_ops,
25423
25424 .qc_defer = ata_std_qc_defer,
25425 diff -urNp linux-2.6.32.43/drivers/ata/sata_inic162x.c linux-2.6.32.43/drivers/ata/sata_inic162x.c
25426 --- linux-2.6.32.43/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25427 +++ linux-2.6.32.43/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25428 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25429 return 0;
25430 }
25431
25432 -static struct ata_port_operations inic_port_ops = {
25433 +static const struct ata_port_operations inic_port_ops = {
25434 .inherits = &sata_port_ops,
25435
25436 .check_atapi_dma = inic_check_atapi_dma,
25437 diff -urNp linux-2.6.32.43/drivers/ata/sata_mv.c linux-2.6.32.43/drivers/ata/sata_mv.c
25438 --- linux-2.6.32.43/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25439 +++ linux-2.6.32.43/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25440 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25441 .dma_boundary = MV_DMA_BOUNDARY,
25442 };
25443
25444 -static struct ata_port_operations mv5_ops = {
25445 +static const struct ata_port_operations mv5_ops = {
25446 .inherits = &ata_sff_port_ops,
25447
25448 .lost_interrupt = ATA_OP_NULL,
25449 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25450 .port_stop = mv_port_stop,
25451 };
25452
25453 -static struct ata_port_operations mv6_ops = {
25454 +static const struct ata_port_operations mv6_ops = {
25455 .inherits = &mv5_ops,
25456 .dev_config = mv6_dev_config,
25457 .scr_read = mv_scr_read,
25458 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25459 .bmdma_status = mv_bmdma_status,
25460 };
25461
25462 -static struct ata_port_operations mv_iie_ops = {
25463 +static const struct ata_port_operations mv_iie_ops = {
25464 .inherits = &mv6_ops,
25465 .dev_config = ATA_OP_NULL,
25466 .qc_prep = mv_qc_prep_iie,
25467 diff -urNp linux-2.6.32.43/drivers/ata/sata_nv.c linux-2.6.32.43/drivers/ata/sata_nv.c
25468 --- linux-2.6.32.43/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25469 +++ linux-2.6.32.43/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25470 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25471 * cases. Define nv_hardreset() which only kicks in for post-boot
25472 * probing and use it for all variants.
25473 */
25474 -static struct ata_port_operations nv_generic_ops = {
25475 +static const struct ata_port_operations nv_generic_ops = {
25476 .inherits = &ata_bmdma_port_ops,
25477 .lost_interrupt = ATA_OP_NULL,
25478 .scr_read = nv_scr_read,
25479 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25480 .hardreset = nv_hardreset,
25481 };
25482
25483 -static struct ata_port_operations nv_nf2_ops = {
25484 +static const struct ata_port_operations nv_nf2_ops = {
25485 .inherits = &nv_generic_ops,
25486 .freeze = nv_nf2_freeze,
25487 .thaw = nv_nf2_thaw,
25488 };
25489
25490 -static struct ata_port_operations nv_ck804_ops = {
25491 +static const struct ata_port_operations nv_ck804_ops = {
25492 .inherits = &nv_generic_ops,
25493 .freeze = nv_ck804_freeze,
25494 .thaw = nv_ck804_thaw,
25495 .host_stop = nv_ck804_host_stop,
25496 };
25497
25498 -static struct ata_port_operations nv_adma_ops = {
25499 +static const struct ata_port_operations nv_adma_ops = {
25500 .inherits = &nv_ck804_ops,
25501
25502 .check_atapi_dma = nv_adma_check_atapi_dma,
25503 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25504 .host_stop = nv_adma_host_stop,
25505 };
25506
25507 -static struct ata_port_operations nv_swncq_ops = {
25508 +static const struct ata_port_operations nv_swncq_ops = {
25509 .inherits = &nv_generic_ops,
25510
25511 .qc_defer = ata_std_qc_defer,
25512 diff -urNp linux-2.6.32.43/drivers/ata/sata_promise.c linux-2.6.32.43/drivers/ata/sata_promise.c
25513 --- linux-2.6.32.43/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25514 +++ linux-2.6.32.43/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25515 @@ -195,7 +195,7 @@ static const struct ata_port_operations
25516 .error_handler = pdc_error_handler,
25517 };
25518
25519 -static struct ata_port_operations pdc_sata_ops = {
25520 +static const struct ata_port_operations pdc_sata_ops = {
25521 .inherits = &pdc_common_ops,
25522 .cable_detect = pdc_sata_cable_detect,
25523 .freeze = pdc_sata_freeze,
25524 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25525
25526 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25527 and ->freeze/thaw that ignore the hotplug controls. */
25528 -static struct ata_port_operations pdc_old_sata_ops = {
25529 +static const struct ata_port_operations pdc_old_sata_ops = {
25530 .inherits = &pdc_sata_ops,
25531 .freeze = pdc_freeze,
25532 .thaw = pdc_thaw,
25533 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25534 };
25535
25536 -static struct ata_port_operations pdc_pata_ops = {
25537 +static const struct ata_port_operations pdc_pata_ops = {
25538 .inherits = &pdc_common_ops,
25539 .cable_detect = pdc_pata_cable_detect,
25540 .freeze = pdc_freeze,
25541 diff -urNp linux-2.6.32.43/drivers/ata/sata_qstor.c linux-2.6.32.43/drivers/ata/sata_qstor.c
25542 --- linux-2.6.32.43/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25543 +++ linux-2.6.32.43/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25544 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25545 .dma_boundary = QS_DMA_BOUNDARY,
25546 };
25547
25548 -static struct ata_port_operations qs_ata_ops = {
25549 +static const struct ata_port_operations qs_ata_ops = {
25550 .inherits = &ata_sff_port_ops,
25551
25552 .check_atapi_dma = qs_check_atapi_dma,
25553 diff -urNp linux-2.6.32.43/drivers/ata/sata_sil24.c linux-2.6.32.43/drivers/ata/sata_sil24.c
25554 --- linux-2.6.32.43/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25555 +++ linux-2.6.32.43/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25556 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25557 .dma_boundary = ATA_DMA_BOUNDARY,
25558 };
25559
25560 -static struct ata_port_operations sil24_ops = {
25561 +static const struct ata_port_operations sil24_ops = {
25562 .inherits = &sata_pmp_port_ops,
25563
25564 .qc_defer = sil24_qc_defer,
25565 diff -urNp linux-2.6.32.43/drivers/ata/sata_sil.c linux-2.6.32.43/drivers/ata/sata_sil.c
25566 --- linux-2.6.32.43/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25567 +++ linux-2.6.32.43/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25568 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25569 .sg_tablesize = ATA_MAX_PRD
25570 };
25571
25572 -static struct ata_port_operations sil_ops = {
25573 +static const struct ata_port_operations sil_ops = {
25574 .inherits = &ata_bmdma32_port_ops,
25575 .dev_config = sil_dev_config,
25576 .set_mode = sil_set_mode,
25577 diff -urNp linux-2.6.32.43/drivers/ata/sata_sis.c linux-2.6.32.43/drivers/ata/sata_sis.c
25578 --- linux-2.6.32.43/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25579 +++ linux-2.6.32.43/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25580 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25581 ATA_BMDMA_SHT(DRV_NAME),
25582 };
25583
25584 -static struct ata_port_operations sis_ops = {
25585 +static const struct ata_port_operations sis_ops = {
25586 .inherits = &ata_bmdma_port_ops,
25587 .scr_read = sis_scr_read,
25588 .scr_write = sis_scr_write,
25589 diff -urNp linux-2.6.32.43/drivers/ata/sata_svw.c linux-2.6.32.43/drivers/ata/sata_svw.c
25590 --- linux-2.6.32.43/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25591 +++ linux-2.6.32.43/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25592 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25593 };
25594
25595
25596 -static struct ata_port_operations k2_sata_ops = {
25597 +static const struct ata_port_operations k2_sata_ops = {
25598 .inherits = &ata_bmdma_port_ops,
25599 .sff_tf_load = k2_sata_tf_load,
25600 .sff_tf_read = k2_sata_tf_read,
25601 diff -urNp linux-2.6.32.43/drivers/ata/sata_sx4.c linux-2.6.32.43/drivers/ata/sata_sx4.c
25602 --- linux-2.6.32.43/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25603 +++ linux-2.6.32.43/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25604 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25605 };
25606
25607 /* TODO: inherit from base port_ops after converting to new EH */
25608 -static struct ata_port_operations pdc_20621_ops = {
25609 +static const struct ata_port_operations pdc_20621_ops = {
25610 .inherits = &ata_sff_port_ops,
25611
25612 .check_atapi_dma = pdc_check_atapi_dma,
25613 diff -urNp linux-2.6.32.43/drivers/ata/sata_uli.c linux-2.6.32.43/drivers/ata/sata_uli.c
25614 --- linux-2.6.32.43/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25615 +++ linux-2.6.32.43/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25616 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25617 ATA_BMDMA_SHT(DRV_NAME),
25618 };
25619
25620 -static struct ata_port_operations uli_ops = {
25621 +static const struct ata_port_operations uli_ops = {
25622 .inherits = &ata_bmdma_port_ops,
25623 .scr_read = uli_scr_read,
25624 .scr_write = uli_scr_write,
25625 diff -urNp linux-2.6.32.43/drivers/ata/sata_via.c linux-2.6.32.43/drivers/ata/sata_via.c
25626 --- linux-2.6.32.43/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25627 +++ linux-2.6.32.43/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25628 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25629 ATA_BMDMA_SHT(DRV_NAME),
25630 };
25631
25632 -static struct ata_port_operations svia_base_ops = {
25633 +static const struct ata_port_operations svia_base_ops = {
25634 .inherits = &ata_bmdma_port_ops,
25635 .sff_tf_load = svia_tf_load,
25636 };
25637
25638 -static struct ata_port_operations vt6420_sata_ops = {
25639 +static const struct ata_port_operations vt6420_sata_ops = {
25640 .inherits = &svia_base_ops,
25641 .freeze = svia_noop_freeze,
25642 .prereset = vt6420_prereset,
25643 .bmdma_start = vt6420_bmdma_start,
25644 };
25645
25646 -static struct ata_port_operations vt6421_pata_ops = {
25647 +static const struct ata_port_operations vt6421_pata_ops = {
25648 .inherits = &svia_base_ops,
25649 .cable_detect = vt6421_pata_cable_detect,
25650 .set_piomode = vt6421_set_pio_mode,
25651 .set_dmamode = vt6421_set_dma_mode,
25652 };
25653
25654 -static struct ata_port_operations vt6421_sata_ops = {
25655 +static const struct ata_port_operations vt6421_sata_ops = {
25656 .inherits = &svia_base_ops,
25657 .scr_read = svia_scr_read,
25658 .scr_write = svia_scr_write,
25659 };
25660
25661 -static struct ata_port_operations vt8251_ops = {
25662 +static const struct ata_port_operations vt8251_ops = {
25663 .inherits = &svia_base_ops,
25664 .hardreset = sata_std_hardreset,
25665 .scr_read = vt8251_scr_read,
25666 diff -urNp linux-2.6.32.43/drivers/ata/sata_vsc.c linux-2.6.32.43/drivers/ata/sata_vsc.c
25667 --- linux-2.6.32.43/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25668 +++ linux-2.6.32.43/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25669 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25670 };
25671
25672
25673 -static struct ata_port_operations vsc_sata_ops = {
25674 +static const struct ata_port_operations vsc_sata_ops = {
25675 .inherits = &ata_bmdma_port_ops,
25676 /* The IRQ handling is not quite standard SFF behaviour so we
25677 cannot use the default lost interrupt handler */
25678 diff -urNp linux-2.6.32.43/drivers/atm/adummy.c linux-2.6.32.43/drivers/atm/adummy.c
25679 --- linux-2.6.32.43/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25680 +++ linux-2.6.32.43/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25681 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25682 vcc->pop(vcc, skb);
25683 else
25684 dev_kfree_skb_any(skb);
25685 - atomic_inc(&vcc->stats->tx);
25686 + atomic_inc_unchecked(&vcc->stats->tx);
25687
25688 return 0;
25689 }
25690 diff -urNp linux-2.6.32.43/drivers/atm/ambassador.c linux-2.6.32.43/drivers/atm/ambassador.c
25691 --- linux-2.6.32.43/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25692 +++ linux-2.6.32.43/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25693 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25694 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25695
25696 // VC layer stats
25697 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25698 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25699
25700 // free the descriptor
25701 kfree (tx_descr);
25702 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25703 dump_skb ("<<<", vc, skb);
25704
25705 // VC layer stats
25706 - atomic_inc(&atm_vcc->stats->rx);
25707 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25708 __net_timestamp(skb);
25709 // end of our responsability
25710 atm_vcc->push (atm_vcc, skb);
25711 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25712 } else {
25713 PRINTK (KERN_INFO, "dropped over-size frame");
25714 // should we count this?
25715 - atomic_inc(&atm_vcc->stats->rx_drop);
25716 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25717 }
25718
25719 } else {
25720 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25721 }
25722
25723 if (check_area (skb->data, skb->len)) {
25724 - atomic_inc(&atm_vcc->stats->tx_err);
25725 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25726 return -ENOMEM; // ?
25727 }
25728
25729 diff -urNp linux-2.6.32.43/drivers/atm/atmtcp.c linux-2.6.32.43/drivers/atm/atmtcp.c
25730 --- linux-2.6.32.43/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25731 +++ linux-2.6.32.43/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25732 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25733 if (vcc->pop) vcc->pop(vcc,skb);
25734 else dev_kfree_skb(skb);
25735 if (dev_data) return 0;
25736 - atomic_inc(&vcc->stats->tx_err);
25737 + atomic_inc_unchecked(&vcc->stats->tx_err);
25738 return -ENOLINK;
25739 }
25740 size = skb->len+sizeof(struct atmtcp_hdr);
25741 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25742 if (!new_skb) {
25743 if (vcc->pop) vcc->pop(vcc,skb);
25744 else dev_kfree_skb(skb);
25745 - atomic_inc(&vcc->stats->tx_err);
25746 + atomic_inc_unchecked(&vcc->stats->tx_err);
25747 return -ENOBUFS;
25748 }
25749 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25750 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25751 if (vcc->pop) vcc->pop(vcc,skb);
25752 else dev_kfree_skb(skb);
25753 out_vcc->push(out_vcc,new_skb);
25754 - atomic_inc(&vcc->stats->tx);
25755 - atomic_inc(&out_vcc->stats->rx);
25756 + atomic_inc_unchecked(&vcc->stats->tx);
25757 + atomic_inc_unchecked(&out_vcc->stats->rx);
25758 return 0;
25759 }
25760
25761 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25762 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25763 read_unlock(&vcc_sklist_lock);
25764 if (!out_vcc) {
25765 - atomic_inc(&vcc->stats->tx_err);
25766 + atomic_inc_unchecked(&vcc->stats->tx_err);
25767 goto done;
25768 }
25769 skb_pull(skb,sizeof(struct atmtcp_hdr));
25770 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25771 __net_timestamp(new_skb);
25772 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25773 out_vcc->push(out_vcc,new_skb);
25774 - atomic_inc(&vcc->stats->tx);
25775 - atomic_inc(&out_vcc->stats->rx);
25776 + atomic_inc_unchecked(&vcc->stats->tx);
25777 + atomic_inc_unchecked(&out_vcc->stats->rx);
25778 done:
25779 if (vcc->pop) vcc->pop(vcc,skb);
25780 else dev_kfree_skb(skb);
25781 diff -urNp linux-2.6.32.43/drivers/atm/eni.c linux-2.6.32.43/drivers/atm/eni.c
25782 --- linux-2.6.32.43/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25783 +++ linux-2.6.32.43/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25784 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25785 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25786 vcc->dev->number);
25787 length = 0;
25788 - atomic_inc(&vcc->stats->rx_err);
25789 + atomic_inc_unchecked(&vcc->stats->rx_err);
25790 }
25791 else {
25792 length = ATM_CELL_SIZE-1; /* no HEC */
25793 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25794 size);
25795 }
25796 eff = length = 0;
25797 - atomic_inc(&vcc->stats->rx_err);
25798 + atomic_inc_unchecked(&vcc->stats->rx_err);
25799 }
25800 else {
25801 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25802 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25803 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25804 vcc->dev->number,vcc->vci,length,size << 2,descr);
25805 length = eff = 0;
25806 - atomic_inc(&vcc->stats->rx_err);
25807 + atomic_inc_unchecked(&vcc->stats->rx_err);
25808 }
25809 }
25810 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25811 @@ -770,7 +770,7 @@ rx_dequeued++;
25812 vcc->push(vcc,skb);
25813 pushed++;
25814 }
25815 - atomic_inc(&vcc->stats->rx);
25816 + atomic_inc_unchecked(&vcc->stats->rx);
25817 }
25818 wake_up(&eni_dev->rx_wait);
25819 }
25820 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25821 PCI_DMA_TODEVICE);
25822 if (vcc->pop) vcc->pop(vcc,skb);
25823 else dev_kfree_skb_irq(skb);
25824 - atomic_inc(&vcc->stats->tx);
25825 + atomic_inc_unchecked(&vcc->stats->tx);
25826 wake_up(&eni_dev->tx_wait);
25827 dma_complete++;
25828 }
25829 diff -urNp linux-2.6.32.43/drivers/atm/firestream.c linux-2.6.32.43/drivers/atm/firestream.c
25830 --- linux-2.6.32.43/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25831 +++ linux-2.6.32.43/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25832 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25833 }
25834 }
25835
25836 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25837 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25838
25839 fs_dprintk (FS_DEBUG_TXMEM, "i");
25840 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25841 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25842 #endif
25843 skb_put (skb, qe->p1 & 0xffff);
25844 ATM_SKB(skb)->vcc = atm_vcc;
25845 - atomic_inc(&atm_vcc->stats->rx);
25846 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25847 __net_timestamp(skb);
25848 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25849 atm_vcc->push (atm_vcc, skb);
25850 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25851 kfree (pe);
25852 }
25853 if (atm_vcc)
25854 - atomic_inc(&atm_vcc->stats->rx_drop);
25855 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25856 break;
25857 case 0x1f: /* Reassembly abort: no buffers. */
25858 /* Silently increment error counter. */
25859 if (atm_vcc)
25860 - atomic_inc(&atm_vcc->stats->rx_drop);
25861 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25862 break;
25863 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25864 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25865 diff -urNp linux-2.6.32.43/drivers/atm/fore200e.c linux-2.6.32.43/drivers/atm/fore200e.c
25866 --- linux-2.6.32.43/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25867 +++ linux-2.6.32.43/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25868 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25869 #endif
25870 /* check error condition */
25871 if (*entry->status & STATUS_ERROR)
25872 - atomic_inc(&vcc->stats->tx_err);
25873 + atomic_inc_unchecked(&vcc->stats->tx_err);
25874 else
25875 - atomic_inc(&vcc->stats->tx);
25876 + atomic_inc_unchecked(&vcc->stats->tx);
25877 }
25878 }
25879
25880 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25881 if (skb == NULL) {
25882 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25883
25884 - atomic_inc(&vcc->stats->rx_drop);
25885 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25886 return -ENOMEM;
25887 }
25888
25889 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25890
25891 dev_kfree_skb_any(skb);
25892
25893 - atomic_inc(&vcc->stats->rx_drop);
25894 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25895 return -ENOMEM;
25896 }
25897
25898 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25899
25900 vcc->push(vcc, skb);
25901 - atomic_inc(&vcc->stats->rx);
25902 + atomic_inc_unchecked(&vcc->stats->rx);
25903
25904 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25905
25906 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25907 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25908 fore200e->atm_dev->number,
25909 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25910 - atomic_inc(&vcc->stats->rx_err);
25911 + atomic_inc_unchecked(&vcc->stats->rx_err);
25912 }
25913 }
25914
25915 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25916 goto retry_here;
25917 }
25918
25919 - atomic_inc(&vcc->stats->tx_err);
25920 + atomic_inc_unchecked(&vcc->stats->tx_err);
25921
25922 fore200e->tx_sat++;
25923 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25924 diff -urNp linux-2.6.32.43/drivers/atm/he.c linux-2.6.32.43/drivers/atm/he.c
25925 --- linux-2.6.32.43/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25926 +++ linux-2.6.32.43/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25927 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25928
25929 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25930 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25931 - atomic_inc(&vcc->stats->rx_drop);
25932 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25933 goto return_host_buffers;
25934 }
25935
25936 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25937 RBRQ_LEN_ERR(he_dev->rbrq_head)
25938 ? "LEN_ERR" : "",
25939 vcc->vpi, vcc->vci);
25940 - atomic_inc(&vcc->stats->rx_err);
25941 + atomic_inc_unchecked(&vcc->stats->rx_err);
25942 goto return_host_buffers;
25943 }
25944
25945 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25946 vcc->push(vcc, skb);
25947 spin_lock(&he_dev->global_lock);
25948
25949 - atomic_inc(&vcc->stats->rx);
25950 + atomic_inc_unchecked(&vcc->stats->rx);
25951
25952 return_host_buffers:
25953 ++pdus_assembled;
25954 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25955 tpd->vcc->pop(tpd->vcc, tpd->skb);
25956 else
25957 dev_kfree_skb_any(tpd->skb);
25958 - atomic_inc(&tpd->vcc->stats->tx_err);
25959 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25960 }
25961 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25962 return;
25963 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25964 vcc->pop(vcc, skb);
25965 else
25966 dev_kfree_skb_any(skb);
25967 - atomic_inc(&vcc->stats->tx_err);
25968 + atomic_inc_unchecked(&vcc->stats->tx_err);
25969 return -EINVAL;
25970 }
25971
25972 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25973 vcc->pop(vcc, skb);
25974 else
25975 dev_kfree_skb_any(skb);
25976 - atomic_inc(&vcc->stats->tx_err);
25977 + atomic_inc_unchecked(&vcc->stats->tx_err);
25978 return -EINVAL;
25979 }
25980 #endif
25981 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25982 vcc->pop(vcc, skb);
25983 else
25984 dev_kfree_skb_any(skb);
25985 - atomic_inc(&vcc->stats->tx_err);
25986 + atomic_inc_unchecked(&vcc->stats->tx_err);
25987 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25988 return -ENOMEM;
25989 }
25990 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25991 vcc->pop(vcc, skb);
25992 else
25993 dev_kfree_skb_any(skb);
25994 - atomic_inc(&vcc->stats->tx_err);
25995 + atomic_inc_unchecked(&vcc->stats->tx_err);
25996 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25997 return -ENOMEM;
25998 }
25999 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26000 __enqueue_tpd(he_dev, tpd, cid);
26001 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26002
26003 - atomic_inc(&vcc->stats->tx);
26004 + atomic_inc_unchecked(&vcc->stats->tx);
26005
26006 return 0;
26007 }
26008 diff -urNp linux-2.6.32.43/drivers/atm/horizon.c linux-2.6.32.43/drivers/atm/horizon.c
26009 --- linux-2.6.32.43/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26010 +++ linux-2.6.32.43/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26011 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26012 {
26013 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26014 // VC layer stats
26015 - atomic_inc(&vcc->stats->rx);
26016 + atomic_inc_unchecked(&vcc->stats->rx);
26017 __net_timestamp(skb);
26018 // end of our responsability
26019 vcc->push (vcc, skb);
26020 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26021 dev->tx_iovec = NULL;
26022
26023 // VC layer stats
26024 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26025 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26026
26027 // free the skb
26028 hrz_kfree_skb (skb);
26029 diff -urNp linux-2.6.32.43/drivers/atm/idt77252.c linux-2.6.32.43/drivers/atm/idt77252.c
26030 --- linux-2.6.32.43/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26031 +++ linux-2.6.32.43/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26032 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26033 else
26034 dev_kfree_skb(skb);
26035
26036 - atomic_inc(&vcc->stats->tx);
26037 + atomic_inc_unchecked(&vcc->stats->tx);
26038 }
26039
26040 atomic_dec(&scq->used);
26041 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26042 if ((sb = dev_alloc_skb(64)) == NULL) {
26043 printk("%s: Can't allocate buffers for aal0.\n",
26044 card->name);
26045 - atomic_add(i, &vcc->stats->rx_drop);
26046 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26047 break;
26048 }
26049 if (!atm_charge(vcc, sb->truesize)) {
26050 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26051 card->name);
26052 - atomic_add(i - 1, &vcc->stats->rx_drop);
26053 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26054 dev_kfree_skb(sb);
26055 break;
26056 }
26057 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26058 ATM_SKB(sb)->vcc = vcc;
26059 __net_timestamp(sb);
26060 vcc->push(vcc, sb);
26061 - atomic_inc(&vcc->stats->rx);
26062 + atomic_inc_unchecked(&vcc->stats->rx);
26063
26064 cell += ATM_CELL_PAYLOAD;
26065 }
26066 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26067 "(CDC: %08x)\n",
26068 card->name, len, rpp->len, readl(SAR_REG_CDC));
26069 recycle_rx_pool_skb(card, rpp);
26070 - atomic_inc(&vcc->stats->rx_err);
26071 + atomic_inc_unchecked(&vcc->stats->rx_err);
26072 return;
26073 }
26074 if (stat & SAR_RSQE_CRC) {
26075 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26076 recycle_rx_pool_skb(card, rpp);
26077 - atomic_inc(&vcc->stats->rx_err);
26078 + atomic_inc_unchecked(&vcc->stats->rx_err);
26079 return;
26080 }
26081 if (skb_queue_len(&rpp->queue) > 1) {
26082 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26083 RXPRINTK("%s: Can't alloc RX skb.\n",
26084 card->name);
26085 recycle_rx_pool_skb(card, rpp);
26086 - atomic_inc(&vcc->stats->rx_err);
26087 + atomic_inc_unchecked(&vcc->stats->rx_err);
26088 return;
26089 }
26090 if (!atm_charge(vcc, skb->truesize)) {
26091 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26092 __net_timestamp(skb);
26093
26094 vcc->push(vcc, skb);
26095 - atomic_inc(&vcc->stats->rx);
26096 + atomic_inc_unchecked(&vcc->stats->rx);
26097
26098 return;
26099 }
26100 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26101 __net_timestamp(skb);
26102
26103 vcc->push(vcc, skb);
26104 - atomic_inc(&vcc->stats->rx);
26105 + atomic_inc_unchecked(&vcc->stats->rx);
26106
26107 if (skb->truesize > SAR_FB_SIZE_3)
26108 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26109 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26110 if (vcc->qos.aal != ATM_AAL0) {
26111 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26112 card->name, vpi, vci);
26113 - atomic_inc(&vcc->stats->rx_drop);
26114 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26115 goto drop;
26116 }
26117
26118 if ((sb = dev_alloc_skb(64)) == NULL) {
26119 printk("%s: Can't allocate buffers for AAL0.\n",
26120 card->name);
26121 - atomic_inc(&vcc->stats->rx_err);
26122 + atomic_inc_unchecked(&vcc->stats->rx_err);
26123 goto drop;
26124 }
26125
26126 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26127 ATM_SKB(sb)->vcc = vcc;
26128 __net_timestamp(sb);
26129 vcc->push(vcc, sb);
26130 - atomic_inc(&vcc->stats->rx);
26131 + atomic_inc_unchecked(&vcc->stats->rx);
26132
26133 drop:
26134 skb_pull(queue, 64);
26135 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26136
26137 if (vc == NULL) {
26138 printk("%s: NULL connection in send().\n", card->name);
26139 - atomic_inc(&vcc->stats->tx_err);
26140 + atomic_inc_unchecked(&vcc->stats->tx_err);
26141 dev_kfree_skb(skb);
26142 return -EINVAL;
26143 }
26144 if (!test_bit(VCF_TX, &vc->flags)) {
26145 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26146 - atomic_inc(&vcc->stats->tx_err);
26147 + atomic_inc_unchecked(&vcc->stats->tx_err);
26148 dev_kfree_skb(skb);
26149 return -EINVAL;
26150 }
26151 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26152 break;
26153 default:
26154 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26155 - atomic_inc(&vcc->stats->tx_err);
26156 + atomic_inc_unchecked(&vcc->stats->tx_err);
26157 dev_kfree_skb(skb);
26158 return -EINVAL;
26159 }
26160
26161 if (skb_shinfo(skb)->nr_frags != 0) {
26162 printk("%s: No scatter-gather yet.\n", card->name);
26163 - atomic_inc(&vcc->stats->tx_err);
26164 + atomic_inc_unchecked(&vcc->stats->tx_err);
26165 dev_kfree_skb(skb);
26166 return -EINVAL;
26167 }
26168 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26169
26170 err = queue_skb(card, vc, skb, oam);
26171 if (err) {
26172 - atomic_inc(&vcc->stats->tx_err);
26173 + atomic_inc_unchecked(&vcc->stats->tx_err);
26174 dev_kfree_skb(skb);
26175 return err;
26176 }
26177 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26178 skb = dev_alloc_skb(64);
26179 if (!skb) {
26180 printk("%s: Out of memory in send_oam().\n", card->name);
26181 - atomic_inc(&vcc->stats->tx_err);
26182 + atomic_inc_unchecked(&vcc->stats->tx_err);
26183 return -ENOMEM;
26184 }
26185 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26186 diff -urNp linux-2.6.32.43/drivers/atm/iphase.c linux-2.6.32.43/drivers/atm/iphase.c
26187 --- linux-2.6.32.43/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26188 +++ linux-2.6.32.43/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26189 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26190 status = (u_short) (buf_desc_ptr->desc_mode);
26191 if (status & (RX_CER | RX_PTE | RX_OFL))
26192 {
26193 - atomic_inc(&vcc->stats->rx_err);
26194 + atomic_inc_unchecked(&vcc->stats->rx_err);
26195 IF_ERR(printk("IA: bad packet, dropping it");)
26196 if (status & RX_CER) {
26197 IF_ERR(printk(" cause: packet CRC error\n");)
26198 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26199 len = dma_addr - buf_addr;
26200 if (len > iadev->rx_buf_sz) {
26201 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26202 - atomic_inc(&vcc->stats->rx_err);
26203 + atomic_inc_unchecked(&vcc->stats->rx_err);
26204 goto out_free_desc;
26205 }
26206
26207 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26208 ia_vcc = INPH_IA_VCC(vcc);
26209 if (ia_vcc == NULL)
26210 {
26211 - atomic_inc(&vcc->stats->rx_err);
26212 + atomic_inc_unchecked(&vcc->stats->rx_err);
26213 dev_kfree_skb_any(skb);
26214 atm_return(vcc, atm_guess_pdu2truesize(len));
26215 goto INCR_DLE;
26216 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26217 if ((length > iadev->rx_buf_sz) || (length >
26218 (skb->len - sizeof(struct cpcs_trailer))))
26219 {
26220 - atomic_inc(&vcc->stats->rx_err);
26221 + atomic_inc_unchecked(&vcc->stats->rx_err);
26222 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26223 length, skb->len);)
26224 dev_kfree_skb_any(skb);
26225 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26226
26227 IF_RX(printk("rx_dle_intr: skb push");)
26228 vcc->push(vcc,skb);
26229 - atomic_inc(&vcc->stats->rx);
26230 + atomic_inc_unchecked(&vcc->stats->rx);
26231 iadev->rx_pkt_cnt++;
26232 }
26233 INCR_DLE:
26234 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26235 {
26236 struct k_sonet_stats *stats;
26237 stats = &PRIV(_ia_dev[board])->sonet_stats;
26238 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26239 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26240 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26241 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26242 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26243 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26244 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26245 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26246 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26247 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26248 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26249 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26250 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26251 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26252 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26253 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26254 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26255 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26256 }
26257 ia_cmds.status = 0;
26258 break;
26259 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26260 if ((desc == 0) || (desc > iadev->num_tx_desc))
26261 {
26262 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26263 - atomic_inc(&vcc->stats->tx);
26264 + atomic_inc_unchecked(&vcc->stats->tx);
26265 if (vcc->pop)
26266 vcc->pop(vcc, skb);
26267 else
26268 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26269 ATM_DESC(skb) = vcc->vci;
26270 skb_queue_tail(&iadev->tx_dma_q, skb);
26271
26272 - atomic_inc(&vcc->stats->tx);
26273 + atomic_inc_unchecked(&vcc->stats->tx);
26274 iadev->tx_pkt_cnt++;
26275 /* Increment transaction counter */
26276 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26277
26278 #if 0
26279 /* add flow control logic */
26280 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26281 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26282 if (iavcc->vc_desc_cnt > 10) {
26283 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26284 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26285 diff -urNp linux-2.6.32.43/drivers/atm/lanai.c linux-2.6.32.43/drivers/atm/lanai.c
26286 --- linux-2.6.32.43/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26287 +++ linux-2.6.32.43/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26288 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26289 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26290 lanai_endtx(lanai, lvcc);
26291 lanai_free_skb(lvcc->tx.atmvcc, skb);
26292 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26293 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26294 }
26295
26296 /* Try to fill the buffer - don't call unless there is backlog */
26297 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26298 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26299 __net_timestamp(skb);
26300 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26301 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26302 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26303 out:
26304 lvcc->rx.buf.ptr = end;
26305 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26306 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26307 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26308 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26309 lanai->stats.service_rxnotaal5++;
26310 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26311 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26312 return 0;
26313 }
26314 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26315 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26316 int bytes;
26317 read_unlock(&vcc_sklist_lock);
26318 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26319 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26320 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26321 lvcc->stats.x.aal5.service_trash++;
26322 bytes = (SERVICE_GET_END(s) * 16) -
26323 (((unsigned long) lvcc->rx.buf.ptr) -
26324 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26325 }
26326 if (s & SERVICE_STREAM) {
26327 read_unlock(&vcc_sklist_lock);
26328 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26329 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26330 lvcc->stats.x.aal5.service_stream++;
26331 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26332 "PDU on VCI %d!\n", lanai->number, vci);
26333 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26334 return 0;
26335 }
26336 DPRINTK("got rx crc error on vci %d\n", vci);
26337 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26338 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26339 lvcc->stats.x.aal5.service_rxcrc++;
26340 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26341 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26342 diff -urNp linux-2.6.32.43/drivers/atm/nicstar.c linux-2.6.32.43/drivers/atm/nicstar.c
26343 --- linux-2.6.32.43/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26344 +++ linux-2.6.32.43/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26345 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26346 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26347 {
26348 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26349 - atomic_inc(&vcc->stats->tx_err);
26350 + atomic_inc_unchecked(&vcc->stats->tx_err);
26351 dev_kfree_skb_any(skb);
26352 return -EINVAL;
26353 }
26354 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26355 if (!vc->tx)
26356 {
26357 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26358 - atomic_inc(&vcc->stats->tx_err);
26359 + atomic_inc_unchecked(&vcc->stats->tx_err);
26360 dev_kfree_skb_any(skb);
26361 return -EINVAL;
26362 }
26363 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26364 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26365 {
26366 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26367 - atomic_inc(&vcc->stats->tx_err);
26368 + atomic_inc_unchecked(&vcc->stats->tx_err);
26369 dev_kfree_skb_any(skb);
26370 return -EINVAL;
26371 }
26372 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26373 if (skb_shinfo(skb)->nr_frags != 0)
26374 {
26375 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26376 - atomic_inc(&vcc->stats->tx_err);
26377 + atomic_inc_unchecked(&vcc->stats->tx_err);
26378 dev_kfree_skb_any(skb);
26379 return -EINVAL;
26380 }
26381 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26382
26383 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26384 {
26385 - atomic_inc(&vcc->stats->tx_err);
26386 + atomic_inc_unchecked(&vcc->stats->tx_err);
26387 dev_kfree_skb_any(skb);
26388 return -EIO;
26389 }
26390 - atomic_inc(&vcc->stats->tx);
26391 + atomic_inc_unchecked(&vcc->stats->tx);
26392
26393 return 0;
26394 }
26395 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26396 {
26397 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26398 card->index);
26399 - atomic_add(i,&vcc->stats->rx_drop);
26400 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
26401 break;
26402 }
26403 if (!atm_charge(vcc, sb->truesize))
26404 {
26405 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26406 card->index);
26407 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26408 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26409 dev_kfree_skb_any(sb);
26410 break;
26411 }
26412 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26413 ATM_SKB(sb)->vcc = vcc;
26414 __net_timestamp(sb);
26415 vcc->push(vcc, sb);
26416 - atomic_inc(&vcc->stats->rx);
26417 + atomic_inc_unchecked(&vcc->stats->rx);
26418 cell += ATM_CELL_PAYLOAD;
26419 }
26420
26421 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26422 if (iovb == NULL)
26423 {
26424 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26425 - atomic_inc(&vcc->stats->rx_drop);
26426 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26427 recycle_rx_buf(card, skb);
26428 return;
26429 }
26430 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26431 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26432 {
26433 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26434 - atomic_inc(&vcc->stats->rx_err);
26435 + atomic_inc_unchecked(&vcc->stats->rx_err);
26436 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26437 NS_SKB(iovb)->iovcnt = 0;
26438 iovb->len = 0;
26439 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26440 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26441 card->index);
26442 which_list(card, skb);
26443 - atomic_inc(&vcc->stats->rx_err);
26444 + atomic_inc_unchecked(&vcc->stats->rx_err);
26445 recycle_rx_buf(card, skb);
26446 vc->rx_iov = NULL;
26447 recycle_iov_buf(card, iovb);
26448 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26449 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26450 card->index);
26451 which_list(card, skb);
26452 - atomic_inc(&vcc->stats->rx_err);
26453 + atomic_inc_unchecked(&vcc->stats->rx_err);
26454 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26455 NS_SKB(iovb)->iovcnt);
26456 vc->rx_iov = NULL;
26457 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26458 printk(" - PDU size mismatch.\n");
26459 else
26460 printk(".\n");
26461 - atomic_inc(&vcc->stats->rx_err);
26462 + atomic_inc_unchecked(&vcc->stats->rx_err);
26463 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26464 NS_SKB(iovb)->iovcnt);
26465 vc->rx_iov = NULL;
26466 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26467 if (!atm_charge(vcc, skb->truesize))
26468 {
26469 push_rxbufs(card, skb);
26470 - atomic_inc(&vcc->stats->rx_drop);
26471 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26472 }
26473 else
26474 {
26475 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26476 ATM_SKB(skb)->vcc = vcc;
26477 __net_timestamp(skb);
26478 vcc->push(vcc, skb);
26479 - atomic_inc(&vcc->stats->rx);
26480 + atomic_inc_unchecked(&vcc->stats->rx);
26481 }
26482 }
26483 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26484 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26485 if (!atm_charge(vcc, sb->truesize))
26486 {
26487 push_rxbufs(card, sb);
26488 - atomic_inc(&vcc->stats->rx_drop);
26489 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26490 }
26491 else
26492 {
26493 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26494 ATM_SKB(sb)->vcc = vcc;
26495 __net_timestamp(sb);
26496 vcc->push(vcc, sb);
26497 - atomic_inc(&vcc->stats->rx);
26498 + atomic_inc_unchecked(&vcc->stats->rx);
26499 }
26500
26501 push_rxbufs(card, skb);
26502 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26503 if (!atm_charge(vcc, skb->truesize))
26504 {
26505 push_rxbufs(card, skb);
26506 - atomic_inc(&vcc->stats->rx_drop);
26507 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26508 }
26509 else
26510 {
26511 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26512 ATM_SKB(skb)->vcc = vcc;
26513 __net_timestamp(skb);
26514 vcc->push(vcc, skb);
26515 - atomic_inc(&vcc->stats->rx);
26516 + atomic_inc_unchecked(&vcc->stats->rx);
26517 }
26518
26519 push_rxbufs(card, sb);
26520 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26521 if (hb == NULL)
26522 {
26523 printk("nicstar%d: Out of huge buffers.\n", card->index);
26524 - atomic_inc(&vcc->stats->rx_drop);
26525 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26526 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26527 NS_SKB(iovb)->iovcnt);
26528 vc->rx_iov = NULL;
26529 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26530 }
26531 else
26532 dev_kfree_skb_any(hb);
26533 - atomic_inc(&vcc->stats->rx_drop);
26534 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26535 }
26536 else
26537 {
26538 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26539 #endif /* NS_USE_DESTRUCTORS */
26540 __net_timestamp(hb);
26541 vcc->push(vcc, hb);
26542 - atomic_inc(&vcc->stats->rx);
26543 + atomic_inc_unchecked(&vcc->stats->rx);
26544 }
26545 }
26546
26547 diff -urNp linux-2.6.32.43/drivers/atm/solos-pci.c linux-2.6.32.43/drivers/atm/solos-pci.c
26548 --- linux-2.6.32.43/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26549 +++ linux-2.6.32.43/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26550 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26551 }
26552 atm_charge(vcc, skb->truesize);
26553 vcc->push(vcc, skb);
26554 - atomic_inc(&vcc->stats->rx);
26555 + atomic_inc_unchecked(&vcc->stats->rx);
26556 break;
26557
26558 case PKT_STATUS:
26559 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26560 char msg[500];
26561 char item[10];
26562
26563 + pax_track_stack();
26564 +
26565 len = buf->len;
26566 for (i = 0; i < len; i++){
26567 if(i % 8 == 0)
26568 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26569 vcc = SKB_CB(oldskb)->vcc;
26570
26571 if (vcc) {
26572 - atomic_inc(&vcc->stats->tx);
26573 + atomic_inc_unchecked(&vcc->stats->tx);
26574 solos_pop(vcc, oldskb);
26575 } else
26576 dev_kfree_skb_irq(oldskb);
26577 diff -urNp linux-2.6.32.43/drivers/atm/suni.c linux-2.6.32.43/drivers/atm/suni.c
26578 --- linux-2.6.32.43/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26579 +++ linux-2.6.32.43/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26580 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26581
26582
26583 #define ADD_LIMITED(s,v) \
26584 - atomic_add((v),&stats->s); \
26585 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26586 + atomic_add_unchecked((v),&stats->s); \
26587 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26588
26589
26590 static void suni_hz(unsigned long from_timer)
26591 diff -urNp linux-2.6.32.43/drivers/atm/uPD98402.c linux-2.6.32.43/drivers/atm/uPD98402.c
26592 --- linux-2.6.32.43/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26593 +++ linux-2.6.32.43/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26594 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26595 struct sonet_stats tmp;
26596 int error = 0;
26597
26598 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26599 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26600 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26601 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26602 if (zero && !error) {
26603 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26604
26605
26606 #define ADD_LIMITED(s,v) \
26607 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26608 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26609 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26610 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26611 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26612 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26613
26614
26615 static void stat_event(struct atm_dev *dev)
26616 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26617 if (reason & uPD98402_INT_PFM) stat_event(dev);
26618 if (reason & uPD98402_INT_PCO) {
26619 (void) GET(PCOCR); /* clear interrupt cause */
26620 - atomic_add(GET(HECCT),
26621 + atomic_add_unchecked(GET(HECCT),
26622 &PRIV(dev)->sonet_stats.uncorr_hcs);
26623 }
26624 if ((reason & uPD98402_INT_RFO) &&
26625 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26626 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26627 uPD98402_INT_LOS),PIMR); /* enable them */
26628 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26629 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26630 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26631 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26632 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26633 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26634 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26635 return 0;
26636 }
26637
26638 diff -urNp linux-2.6.32.43/drivers/atm/zatm.c linux-2.6.32.43/drivers/atm/zatm.c
26639 --- linux-2.6.32.43/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26640 +++ linux-2.6.32.43/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26641 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26642 }
26643 if (!size) {
26644 dev_kfree_skb_irq(skb);
26645 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26646 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26647 continue;
26648 }
26649 if (!atm_charge(vcc,skb->truesize)) {
26650 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26651 skb->len = size;
26652 ATM_SKB(skb)->vcc = vcc;
26653 vcc->push(vcc,skb);
26654 - atomic_inc(&vcc->stats->rx);
26655 + atomic_inc_unchecked(&vcc->stats->rx);
26656 }
26657 zout(pos & 0xffff,MTA(mbx));
26658 #if 0 /* probably a stupid idea */
26659 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26660 skb_queue_head(&zatm_vcc->backlog,skb);
26661 break;
26662 }
26663 - atomic_inc(&vcc->stats->tx);
26664 + atomic_inc_unchecked(&vcc->stats->tx);
26665 wake_up(&zatm_vcc->tx_wait);
26666 }
26667
26668 diff -urNp linux-2.6.32.43/drivers/base/bus.c linux-2.6.32.43/drivers/base/bus.c
26669 --- linux-2.6.32.43/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26670 +++ linux-2.6.32.43/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26671 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26672 return ret;
26673 }
26674
26675 -static struct sysfs_ops driver_sysfs_ops = {
26676 +static const struct sysfs_ops driver_sysfs_ops = {
26677 .show = drv_attr_show,
26678 .store = drv_attr_store,
26679 };
26680 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26681 return ret;
26682 }
26683
26684 -static struct sysfs_ops bus_sysfs_ops = {
26685 +static const struct sysfs_ops bus_sysfs_ops = {
26686 .show = bus_attr_show,
26687 .store = bus_attr_store,
26688 };
26689 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26690 return 0;
26691 }
26692
26693 -static struct kset_uevent_ops bus_uevent_ops = {
26694 +static const struct kset_uevent_ops bus_uevent_ops = {
26695 .filter = bus_uevent_filter,
26696 };
26697
26698 diff -urNp linux-2.6.32.43/drivers/base/class.c linux-2.6.32.43/drivers/base/class.c
26699 --- linux-2.6.32.43/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26700 +++ linux-2.6.32.43/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26701 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26702 kfree(cp);
26703 }
26704
26705 -static struct sysfs_ops class_sysfs_ops = {
26706 +static const struct sysfs_ops class_sysfs_ops = {
26707 .show = class_attr_show,
26708 .store = class_attr_store,
26709 };
26710 diff -urNp linux-2.6.32.43/drivers/base/core.c linux-2.6.32.43/drivers/base/core.c
26711 --- linux-2.6.32.43/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26712 +++ linux-2.6.32.43/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26713 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26714 return ret;
26715 }
26716
26717 -static struct sysfs_ops dev_sysfs_ops = {
26718 +static const struct sysfs_ops dev_sysfs_ops = {
26719 .show = dev_attr_show,
26720 .store = dev_attr_store,
26721 };
26722 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26723 return retval;
26724 }
26725
26726 -static struct kset_uevent_ops device_uevent_ops = {
26727 +static const struct kset_uevent_ops device_uevent_ops = {
26728 .filter = dev_uevent_filter,
26729 .name = dev_uevent_name,
26730 .uevent = dev_uevent,
26731 diff -urNp linux-2.6.32.43/drivers/base/memory.c linux-2.6.32.43/drivers/base/memory.c
26732 --- linux-2.6.32.43/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26733 +++ linux-2.6.32.43/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26734 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26735 return retval;
26736 }
26737
26738 -static struct kset_uevent_ops memory_uevent_ops = {
26739 +static const struct kset_uevent_ops memory_uevent_ops = {
26740 .name = memory_uevent_name,
26741 .uevent = memory_uevent,
26742 };
26743 diff -urNp linux-2.6.32.43/drivers/base/sys.c linux-2.6.32.43/drivers/base/sys.c
26744 --- linux-2.6.32.43/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26745 +++ linux-2.6.32.43/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26746 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26747 return -EIO;
26748 }
26749
26750 -static struct sysfs_ops sysfs_ops = {
26751 +static const struct sysfs_ops sysfs_ops = {
26752 .show = sysdev_show,
26753 .store = sysdev_store,
26754 };
26755 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26756 return -EIO;
26757 }
26758
26759 -static struct sysfs_ops sysfs_class_ops = {
26760 +static const struct sysfs_ops sysfs_class_ops = {
26761 .show = sysdev_class_show,
26762 .store = sysdev_class_store,
26763 };
26764 diff -urNp linux-2.6.32.43/drivers/block/cciss.c linux-2.6.32.43/drivers/block/cciss.c
26765 --- linux-2.6.32.43/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26766 +++ linux-2.6.32.43/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26767 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26768 int err;
26769 u32 cp;
26770
26771 + memset(&arg64, 0, sizeof(arg64));
26772 +
26773 err = 0;
26774 err |=
26775 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26776 diff -urNp linux-2.6.32.43/drivers/block/cpqarray.c linux-2.6.32.43/drivers/block/cpqarray.c
26777 --- linux-2.6.32.43/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26778 +++ linux-2.6.32.43/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26779 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26780 struct scatterlist tmp_sg[SG_MAX];
26781 int i, dir, seg;
26782
26783 + pax_track_stack();
26784 +
26785 if (blk_queue_plugged(q))
26786 goto startio;
26787
26788 diff -urNp linux-2.6.32.43/drivers/block/DAC960.c linux-2.6.32.43/drivers/block/DAC960.c
26789 --- linux-2.6.32.43/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26790 +++ linux-2.6.32.43/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26791 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26792 unsigned long flags;
26793 int Channel, TargetID;
26794
26795 + pax_track_stack();
26796 +
26797 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26798 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26799 sizeof(DAC960_SCSI_Inquiry_T) +
26800 diff -urNp linux-2.6.32.43/drivers/block/nbd.c linux-2.6.32.43/drivers/block/nbd.c
26801 --- linux-2.6.32.43/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
26802 +++ linux-2.6.32.43/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
26803 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26804 struct kvec iov;
26805 sigset_t blocked, oldset;
26806
26807 + pax_track_stack();
26808 +
26809 if (unlikely(!sock)) {
26810 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26811 lo->disk->disk_name, (send ? "send" : "recv"));
26812 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26813 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26814 unsigned int cmd, unsigned long arg)
26815 {
26816 + pax_track_stack();
26817 +
26818 switch (cmd) {
26819 case NBD_DISCONNECT: {
26820 struct request sreq;
26821 diff -urNp linux-2.6.32.43/drivers/block/pktcdvd.c linux-2.6.32.43/drivers/block/pktcdvd.c
26822 --- linux-2.6.32.43/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26823 +++ linux-2.6.32.43/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26824 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26825 return len;
26826 }
26827
26828 -static struct sysfs_ops kobj_pkt_ops = {
26829 +static const struct sysfs_ops kobj_pkt_ops = {
26830 .show = kobj_pkt_show,
26831 .store = kobj_pkt_store
26832 };
26833 diff -urNp linux-2.6.32.43/drivers/char/agp/frontend.c linux-2.6.32.43/drivers/char/agp/frontend.c
26834 --- linux-2.6.32.43/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26835 +++ linux-2.6.32.43/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26836 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26837 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26838 return -EFAULT;
26839
26840 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26841 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26842 return -EFAULT;
26843
26844 client = agp_find_client_by_pid(reserve.pid);
26845 diff -urNp linux-2.6.32.43/drivers/char/briq_panel.c linux-2.6.32.43/drivers/char/briq_panel.c
26846 --- linux-2.6.32.43/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26847 +++ linux-2.6.32.43/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26848 @@ -10,6 +10,7 @@
26849 #include <linux/types.h>
26850 #include <linux/errno.h>
26851 #include <linux/tty.h>
26852 +#include <linux/mutex.h>
26853 #include <linux/timer.h>
26854 #include <linux/kernel.h>
26855 #include <linux/wait.h>
26856 @@ -36,6 +37,7 @@ static int vfd_is_open;
26857 static unsigned char vfd[40];
26858 static int vfd_cursor;
26859 static unsigned char ledpb, led;
26860 +static DEFINE_MUTEX(vfd_mutex);
26861
26862 static void update_vfd(void)
26863 {
26864 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26865 if (!vfd_is_open)
26866 return -EBUSY;
26867
26868 + mutex_lock(&vfd_mutex);
26869 for (;;) {
26870 char c;
26871 if (!indx)
26872 break;
26873 - if (get_user(c, buf))
26874 + if (get_user(c, buf)) {
26875 + mutex_unlock(&vfd_mutex);
26876 return -EFAULT;
26877 + }
26878 if (esc) {
26879 set_led(c);
26880 esc = 0;
26881 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26882 buf++;
26883 }
26884 update_vfd();
26885 + mutex_unlock(&vfd_mutex);
26886
26887 return len;
26888 }
26889 diff -urNp linux-2.6.32.43/drivers/char/genrtc.c linux-2.6.32.43/drivers/char/genrtc.c
26890 --- linux-2.6.32.43/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26891 +++ linux-2.6.32.43/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26892 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26893 switch (cmd) {
26894
26895 case RTC_PLL_GET:
26896 + memset(&pll, 0, sizeof(pll));
26897 if (get_rtc_pll(&pll))
26898 return -EINVAL;
26899 else
26900 diff -urNp linux-2.6.32.43/drivers/char/hpet.c linux-2.6.32.43/drivers/char/hpet.c
26901 --- linux-2.6.32.43/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26902 +++ linux-2.6.32.43/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26903 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26904 return 0;
26905 }
26906
26907 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26908 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26909
26910 static int
26911 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26912 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26913 }
26914
26915 static int
26916 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26917 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26918 {
26919 struct hpet_timer __iomem *timer;
26920 struct hpet __iomem *hpet;
26921 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26922 {
26923 struct hpet_info info;
26924
26925 + memset(&info, 0, sizeof(info));
26926 +
26927 if (devp->hd_ireqfreq)
26928 info.hi_ireqfreq =
26929 hpet_time_div(hpetp, devp->hd_ireqfreq);
26930 - else
26931 - info.hi_ireqfreq = 0;
26932 info.hi_flags =
26933 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26934 info.hi_hpet = hpetp->hp_which;
26935 diff -urNp linux-2.6.32.43/drivers/char/hvc_beat.c linux-2.6.32.43/drivers/char/hvc_beat.c
26936 --- linux-2.6.32.43/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26937 +++ linux-2.6.32.43/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26938 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26939 return cnt;
26940 }
26941
26942 -static struct hv_ops hvc_beat_get_put_ops = {
26943 +static const struct hv_ops hvc_beat_get_put_ops = {
26944 .get_chars = hvc_beat_get_chars,
26945 .put_chars = hvc_beat_put_chars,
26946 };
26947 diff -urNp linux-2.6.32.43/drivers/char/hvc_console.c linux-2.6.32.43/drivers/char/hvc_console.c
26948 --- linux-2.6.32.43/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26949 +++ linux-2.6.32.43/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26950 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26951 * console interfaces but can still be used as a tty device. This has to be
26952 * static because kmalloc will not work during early console init.
26953 */
26954 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26955 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26956 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26957 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26958
26959 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26960 * vty adapters do NOT get an hvc_instantiate() callback since they
26961 * appear after early console init.
26962 */
26963 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26964 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26965 {
26966 struct hvc_struct *hp;
26967
26968 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26969 };
26970
26971 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26972 - struct hv_ops *ops, int outbuf_size)
26973 + const struct hv_ops *ops, int outbuf_size)
26974 {
26975 struct hvc_struct *hp;
26976 int i;
26977 diff -urNp linux-2.6.32.43/drivers/char/hvc_console.h linux-2.6.32.43/drivers/char/hvc_console.h
26978 --- linux-2.6.32.43/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26979 +++ linux-2.6.32.43/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26980 @@ -55,7 +55,7 @@ struct hvc_struct {
26981 int outbuf_size;
26982 int n_outbuf;
26983 uint32_t vtermno;
26984 - struct hv_ops *ops;
26985 + const struct hv_ops *ops;
26986 int irq_requested;
26987 int data;
26988 struct winsize ws;
26989 @@ -76,11 +76,11 @@ struct hv_ops {
26990 };
26991
26992 /* Register a vterm and a slot index for use as a console (console_init) */
26993 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26994 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26995
26996 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26997 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26998 - struct hv_ops *ops, int outbuf_size);
26999 + const struct hv_ops *ops, int outbuf_size);
27000 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27001 extern int hvc_remove(struct hvc_struct *hp);
27002
27003 diff -urNp linux-2.6.32.43/drivers/char/hvc_iseries.c linux-2.6.32.43/drivers/char/hvc_iseries.c
27004 --- linux-2.6.32.43/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27005 +++ linux-2.6.32.43/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27006 @@ -197,7 +197,7 @@ done:
27007 return sent;
27008 }
27009
27010 -static struct hv_ops hvc_get_put_ops = {
27011 +static const struct hv_ops hvc_get_put_ops = {
27012 .get_chars = get_chars,
27013 .put_chars = put_chars,
27014 .notifier_add = notifier_add_irq,
27015 diff -urNp linux-2.6.32.43/drivers/char/hvc_iucv.c linux-2.6.32.43/drivers/char/hvc_iucv.c
27016 --- linux-2.6.32.43/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27017 +++ linux-2.6.32.43/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27018 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27019
27020
27021 /* HVC operations */
27022 -static struct hv_ops hvc_iucv_ops = {
27023 +static const struct hv_ops hvc_iucv_ops = {
27024 .get_chars = hvc_iucv_get_chars,
27025 .put_chars = hvc_iucv_put_chars,
27026 .notifier_add = hvc_iucv_notifier_add,
27027 diff -urNp linux-2.6.32.43/drivers/char/hvc_rtas.c linux-2.6.32.43/drivers/char/hvc_rtas.c
27028 --- linux-2.6.32.43/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27029 +++ linux-2.6.32.43/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27030 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27031 return i;
27032 }
27033
27034 -static struct hv_ops hvc_rtas_get_put_ops = {
27035 +static const struct hv_ops hvc_rtas_get_put_ops = {
27036 .get_chars = hvc_rtas_read_console,
27037 .put_chars = hvc_rtas_write_console,
27038 };
27039 diff -urNp linux-2.6.32.43/drivers/char/hvcs.c linux-2.6.32.43/drivers/char/hvcs.c
27040 --- linux-2.6.32.43/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27041 +++ linux-2.6.32.43/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27042 @@ -82,6 +82,7 @@
27043 #include <asm/hvcserver.h>
27044 #include <asm/uaccess.h>
27045 #include <asm/vio.h>
27046 +#include <asm/local.h>
27047
27048 /*
27049 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27050 @@ -269,7 +270,7 @@ struct hvcs_struct {
27051 unsigned int index;
27052
27053 struct tty_struct *tty;
27054 - int open_count;
27055 + local_t open_count;
27056
27057 /*
27058 * Used to tell the driver kernel_thread what operations need to take
27059 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27060
27061 spin_lock_irqsave(&hvcsd->lock, flags);
27062
27063 - if (hvcsd->open_count > 0) {
27064 + if (local_read(&hvcsd->open_count) > 0) {
27065 spin_unlock_irqrestore(&hvcsd->lock, flags);
27066 printk(KERN_INFO "HVCS: vterm state unchanged. "
27067 "The hvcs device node is still in use.\n");
27068 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27069 if ((retval = hvcs_partner_connect(hvcsd)))
27070 goto error_release;
27071
27072 - hvcsd->open_count = 1;
27073 + local_set(&hvcsd->open_count, 1);
27074 hvcsd->tty = tty;
27075 tty->driver_data = hvcsd;
27076
27077 @@ -1169,7 +1170,7 @@ fast_open:
27078
27079 spin_lock_irqsave(&hvcsd->lock, flags);
27080 kref_get(&hvcsd->kref);
27081 - hvcsd->open_count++;
27082 + local_inc(&hvcsd->open_count);
27083 hvcsd->todo_mask |= HVCS_SCHED_READ;
27084 spin_unlock_irqrestore(&hvcsd->lock, flags);
27085
27086 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27087 hvcsd = tty->driver_data;
27088
27089 spin_lock_irqsave(&hvcsd->lock, flags);
27090 - if (--hvcsd->open_count == 0) {
27091 + if (local_dec_and_test(&hvcsd->open_count)) {
27092
27093 vio_disable_interrupts(hvcsd->vdev);
27094
27095 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27096 free_irq(irq, hvcsd);
27097 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27098 return;
27099 - } else if (hvcsd->open_count < 0) {
27100 + } else if (local_read(&hvcsd->open_count) < 0) {
27101 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27102 " is missmanaged.\n",
27103 - hvcsd->vdev->unit_address, hvcsd->open_count);
27104 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27105 }
27106
27107 spin_unlock_irqrestore(&hvcsd->lock, flags);
27108 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27109
27110 spin_lock_irqsave(&hvcsd->lock, flags);
27111 /* Preserve this so that we know how many kref refs to put */
27112 - temp_open_count = hvcsd->open_count;
27113 + temp_open_count = local_read(&hvcsd->open_count);
27114
27115 /*
27116 * Don't kref put inside the spinlock because the destruction
27117 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27118 hvcsd->tty->driver_data = NULL;
27119 hvcsd->tty = NULL;
27120
27121 - hvcsd->open_count = 0;
27122 + local_set(&hvcsd->open_count, 0);
27123
27124 /* This will drop any buffered data on the floor which is OK in a hangup
27125 * scenario. */
27126 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27127 * the middle of a write operation? This is a crummy place to do this
27128 * but we want to keep it all in the spinlock.
27129 */
27130 - if (hvcsd->open_count <= 0) {
27131 + if (local_read(&hvcsd->open_count) <= 0) {
27132 spin_unlock_irqrestore(&hvcsd->lock, flags);
27133 return -ENODEV;
27134 }
27135 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27136 {
27137 struct hvcs_struct *hvcsd = tty->driver_data;
27138
27139 - if (!hvcsd || hvcsd->open_count <= 0)
27140 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27141 return 0;
27142
27143 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27144 diff -urNp linux-2.6.32.43/drivers/char/hvc_udbg.c linux-2.6.32.43/drivers/char/hvc_udbg.c
27145 --- linux-2.6.32.43/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27146 +++ linux-2.6.32.43/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27147 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27148 return i;
27149 }
27150
27151 -static struct hv_ops hvc_udbg_ops = {
27152 +static const struct hv_ops hvc_udbg_ops = {
27153 .get_chars = hvc_udbg_get,
27154 .put_chars = hvc_udbg_put,
27155 };
27156 diff -urNp linux-2.6.32.43/drivers/char/hvc_vio.c linux-2.6.32.43/drivers/char/hvc_vio.c
27157 --- linux-2.6.32.43/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27158 +++ linux-2.6.32.43/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27159 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27160 return got;
27161 }
27162
27163 -static struct hv_ops hvc_get_put_ops = {
27164 +static const struct hv_ops hvc_get_put_ops = {
27165 .get_chars = filtered_get_chars,
27166 .put_chars = hvc_put_chars,
27167 .notifier_add = notifier_add_irq,
27168 diff -urNp linux-2.6.32.43/drivers/char/hvc_xen.c linux-2.6.32.43/drivers/char/hvc_xen.c
27169 --- linux-2.6.32.43/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27170 +++ linux-2.6.32.43/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27171 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27172 return recv;
27173 }
27174
27175 -static struct hv_ops hvc_ops = {
27176 +static const struct hv_ops hvc_ops = {
27177 .get_chars = read_console,
27178 .put_chars = write_console,
27179 .notifier_add = notifier_add_irq,
27180 diff -urNp linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c
27181 --- linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27182 +++ linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27183 @@ -414,7 +414,7 @@ struct ipmi_smi {
27184 struct proc_dir_entry *proc_dir;
27185 char proc_dir_name[10];
27186
27187 - atomic_t stats[IPMI_NUM_STATS];
27188 + atomic_unchecked_t stats[IPMI_NUM_STATS];
27189
27190 /*
27191 * run_to_completion duplicate of smb_info, smi_info
27192 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27193
27194
27195 #define ipmi_inc_stat(intf, stat) \
27196 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27197 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27198 #define ipmi_get_stat(intf, stat) \
27199 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27200 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27201
27202 static int is_lan_addr(struct ipmi_addr *addr)
27203 {
27204 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27205 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27206 init_waitqueue_head(&intf->waitq);
27207 for (i = 0; i < IPMI_NUM_STATS; i++)
27208 - atomic_set(&intf->stats[i], 0);
27209 + atomic_set_unchecked(&intf->stats[i], 0);
27210
27211 intf->proc_dir = NULL;
27212
27213 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27214 struct ipmi_smi_msg smi_msg;
27215 struct ipmi_recv_msg recv_msg;
27216
27217 + pax_track_stack();
27218 +
27219 si = (struct ipmi_system_interface_addr *) &addr;
27220 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27221 si->channel = IPMI_BMC_CHANNEL;
27222 diff -urNp linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c
27223 --- linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27224 +++ linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27225 @@ -277,7 +277,7 @@ struct smi_info {
27226 unsigned char slave_addr;
27227
27228 /* Counters and things for the proc filesystem. */
27229 - atomic_t stats[SI_NUM_STATS];
27230 + atomic_unchecked_t stats[SI_NUM_STATS];
27231
27232 struct task_struct *thread;
27233
27234 @@ -285,9 +285,9 @@ struct smi_info {
27235 };
27236
27237 #define smi_inc_stat(smi, stat) \
27238 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27239 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27240 #define smi_get_stat(smi, stat) \
27241 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27242 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27243
27244 #define SI_MAX_PARMS 4
27245
27246 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27247 atomic_set(&new_smi->req_events, 0);
27248 new_smi->run_to_completion = 0;
27249 for (i = 0; i < SI_NUM_STATS; i++)
27250 - atomic_set(&new_smi->stats[i], 0);
27251 + atomic_set_unchecked(&new_smi->stats[i], 0);
27252
27253 new_smi->interrupt_disabled = 0;
27254 atomic_set(&new_smi->stop_operation, 0);
27255 diff -urNp linux-2.6.32.43/drivers/char/istallion.c linux-2.6.32.43/drivers/char/istallion.c
27256 --- linux-2.6.32.43/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27257 +++ linux-2.6.32.43/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27258 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27259 * re-used for each stats call.
27260 */
27261 static comstats_t stli_comstats;
27262 -static combrd_t stli_brdstats;
27263 static struct asystats stli_cdkstats;
27264
27265 /*****************************************************************************/
27266 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27267 {
27268 struct stlibrd *brdp;
27269 unsigned int i;
27270 + combrd_t stli_brdstats;
27271
27272 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27273 return -EFAULT;
27274 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27275 struct stliport stli_dummyport;
27276 struct stliport *portp;
27277
27278 + pax_track_stack();
27279 +
27280 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27281 return -EFAULT;
27282 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27283 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27284 struct stlibrd stli_dummybrd;
27285 struct stlibrd *brdp;
27286
27287 + pax_track_stack();
27288 +
27289 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27290 return -EFAULT;
27291 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27292 diff -urNp linux-2.6.32.43/drivers/char/Kconfig linux-2.6.32.43/drivers/char/Kconfig
27293 --- linux-2.6.32.43/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27294 +++ linux-2.6.32.43/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27295 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27296
27297 config DEVKMEM
27298 bool "/dev/kmem virtual device support"
27299 - default y
27300 + default n
27301 + depends on !GRKERNSEC_KMEM
27302 help
27303 Say Y here if you want to support the /dev/kmem device. The
27304 /dev/kmem device is rarely used, but can be used for certain
27305 @@ -1114,6 +1115,7 @@ config DEVPORT
27306 bool
27307 depends on !M68K
27308 depends on ISA || PCI
27309 + depends on !GRKERNSEC_KMEM
27310 default y
27311
27312 source "drivers/s390/char/Kconfig"
27313 diff -urNp linux-2.6.32.43/drivers/char/keyboard.c linux-2.6.32.43/drivers/char/keyboard.c
27314 --- linux-2.6.32.43/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27315 +++ linux-2.6.32.43/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27316 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27317 kbd->kbdmode == VC_MEDIUMRAW) &&
27318 value != KVAL(K_SAK))
27319 return; /* SAK is allowed even in raw mode */
27320 +
27321 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27322 + {
27323 + void *func = fn_handler[value];
27324 + if (func == fn_show_state || func == fn_show_ptregs ||
27325 + func == fn_show_mem)
27326 + return;
27327 + }
27328 +#endif
27329 +
27330 fn_handler[value](vc);
27331 }
27332
27333 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27334 .evbit = { BIT_MASK(EV_SND) },
27335 },
27336
27337 - { }, /* Terminating entry */
27338 + { 0 }, /* Terminating entry */
27339 };
27340
27341 MODULE_DEVICE_TABLE(input, kbd_ids);
27342 diff -urNp linux-2.6.32.43/drivers/char/mem.c linux-2.6.32.43/drivers/char/mem.c
27343 --- linux-2.6.32.43/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27344 +++ linux-2.6.32.43/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27345 @@ -18,6 +18,7 @@
27346 #include <linux/raw.h>
27347 #include <linux/tty.h>
27348 #include <linux/capability.h>
27349 +#include <linux/security.h>
27350 #include <linux/ptrace.h>
27351 #include <linux/device.h>
27352 #include <linux/highmem.h>
27353 @@ -35,6 +36,10 @@
27354 # include <linux/efi.h>
27355 #endif
27356
27357 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27358 +extern struct file_operations grsec_fops;
27359 +#endif
27360 +
27361 static inline unsigned long size_inside_page(unsigned long start,
27362 unsigned long size)
27363 {
27364 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27365
27366 while (cursor < to) {
27367 if (!devmem_is_allowed(pfn)) {
27368 +#ifdef CONFIG_GRKERNSEC_KMEM
27369 + gr_handle_mem_readwrite(from, to);
27370 +#else
27371 printk(KERN_INFO
27372 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27373 current->comm, from, to);
27374 +#endif
27375 return 0;
27376 }
27377 cursor += PAGE_SIZE;
27378 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27379 }
27380 return 1;
27381 }
27382 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27383 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27384 +{
27385 + return 0;
27386 +}
27387 #else
27388 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27389 {
27390 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27391 #endif
27392
27393 while (count > 0) {
27394 + char *temp;
27395 +
27396 /*
27397 * Handle first page in case it's not aligned
27398 */
27399 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27400 if (!ptr)
27401 return -EFAULT;
27402
27403 - if (copy_to_user(buf, ptr, sz)) {
27404 +#ifdef CONFIG_PAX_USERCOPY
27405 + temp = kmalloc(sz, GFP_KERNEL);
27406 + if (!temp) {
27407 + unxlate_dev_mem_ptr(p, ptr);
27408 + return -ENOMEM;
27409 + }
27410 + memcpy(temp, ptr, sz);
27411 +#else
27412 + temp = ptr;
27413 +#endif
27414 +
27415 + if (copy_to_user(buf, temp, sz)) {
27416 +
27417 +#ifdef CONFIG_PAX_USERCOPY
27418 + kfree(temp);
27419 +#endif
27420 +
27421 unxlate_dev_mem_ptr(p, ptr);
27422 return -EFAULT;
27423 }
27424
27425 +#ifdef CONFIG_PAX_USERCOPY
27426 + kfree(temp);
27427 +#endif
27428 +
27429 unxlate_dev_mem_ptr(p, ptr);
27430
27431 buf += sz;
27432 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27433 size_t count, loff_t *ppos)
27434 {
27435 unsigned long p = *ppos;
27436 - ssize_t low_count, read, sz;
27437 + ssize_t low_count, read, sz, err = 0;
27438 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27439 - int err = 0;
27440
27441 read = 0;
27442 if (p < (unsigned long) high_memory) {
27443 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27444 }
27445 #endif
27446 while (low_count > 0) {
27447 + char *temp;
27448 +
27449 sz = size_inside_page(p, low_count);
27450
27451 /*
27452 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27453 */
27454 kbuf = xlate_dev_kmem_ptr((char *)p);
27455
27456 - if (copy_to_user(buf, kbuf, sz))
27457 +#ifdef CONFIG_PAX_USERCOPY
27458 + temp = kmalloc(sz, GFP_KERNEL);
27459 + if (!temp)
27460 + return -ENOMEM;
27461 + memcpy(temp, kbuf, sz);
27462 +#else
27463 + temp = kbuf;
27464 +#endif
27465 +
27466 + err = copy_to_user(buf, temp, sz);
27467 +
27468 +#ifdef CONFIG_PAX_USERCOPY
27469 + kfree(temp);
27470 +#endif
27471 +
27472 + if (err)
27473 return -EFAULT;
27474 buf += sz;
27475 p += sz;
27476 @@ -889,6 +941,9 @@ static const struct memdev {
27477 #ifdef CONFIG_CRASH_DUMP
27478 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27479 #endif
27480 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27481 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27482 +#endif
27483 };
27484
27485 static int memory_open(struct inode *inode, struct file *filp)
27486 diff -urNp linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c
27487 --- linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
27488 +++ linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
27489 @@ -29,6 +29,7 @@
27490 #include <linux/tty_driver.h>
27491 #include <linux/tty_flip.h>
27492 #include <linux/uaccess.h>
27493 +#include <asm/local.h>
27494
27495 #include "tty.h"
27496 #include "network.h"
27497 @@ -51,7 +52,7 @@ struct ipw_tty {
27498 int tty_type;
27499 struct ipw_network *network;
27500 struct tty_struct *linux_tty;
27501 - int open_count;
27502 + local_t open_count;
27503 unsigned int control_lines;
27504 struct mutex ipw_tty_mutex;
27505 int tx_bytes_queued;
27506 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
27507 mutex_unlock(&tty->ipw_tty_mutex);
27508 return -ENODEV;
27509 }
27510 - if (tty->open_count == 0)
27511 + if (local_read(&tty->open_count) == 0)
27512 tty->tx_bytes_queued = 0;
27513
27514 - tty->open_count++;
27515 + local_inc(&tty->open_count);
27516
27517 tty->linux_tty = linux_tty;
27518 linux_tty->driver_data = tty;
27519 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
27520
27521 static void do_ipw_close(struct ipw_tty *tty)
27522 {
27523 - tty->open_count--;
27524 -
27525 - if (tty->open_count == 0) {
27526 + if (local_dec_return(&tty->open_count) == 0) {
27527 struct tty_struct *linux_tty = tty->linux_tty;
27528
27529 if (linux_tty != NULL) {
27530 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
27531 return;
27532
27533 mutex_lock(&tty->ipw_tty_mutex);
27534 - if (tty->open_count == 0) {
27535 + if (local_read(&tty->open_count) == 0) {
27536 mutex_unlock(&tty->ipw_tty_mutex);
27537 return;
27538 }
27539 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
27540 return;
27541 }
27542
27543 - if (!tty->open_count) {
27544 + if (!local_read(&tty->open_count)) {
27545 mutex_unlock(&tty->ipw_tty_mutex);
27546 return;
27547 }
27548 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
27549 return -ENODEV;
27550
27551 mutex_lock(&tty->ipw_tty_mutex);
27552 - if (!tty->open_count) {
27553 + if (!local_read(&tty->open_count)) {
27554 mutex_unlock(&tty->ipw_tty_mutex);
27555 return -EINVAL;
27556 }
27557 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
27558 if (!tty)
27559 return -ENODEV;
27560
27561 - if (!tty->open_count)
27562 + if (!local_read(&tty->open_count))
27563 return -EINVAL;
27564
27565 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
27566 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
27567 if (!tty)
27568 return 0;
27569
27570 - if (!tty->open_count)
27571 + if (!local_read(&tty->open_count))
27572 return 0;
27573
27574 return tty->tx_bytes_queued;
27575 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
27576 if (!tty)
27577 return -ENODEV;
27578
27579 - if (!tty->open_count)
27580 + if (!local_read(&tty->open_count))
27581 return -EINVAL;
27582
27583 return get_control_lines(tty);
27584 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
27585 if (!tty)
27586 return -ENODEV;
27587
27588 - if (!tty->open_count)
27589 + if (!local_read(&tty->open_count))
27590 return -EINVAL;
27591
27592 return set_control_lines(tty, set, clear);
27593 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
27594 if (!tty)
27595 return -ENODEV;
27596
27597 - if (!tty->open_count)
27598 + if (!local_read(&tty->open_count))
27599 return -EINVAL;
27600
27601 /* FIXME: Exactly how is the tty object locked here .. */
27602 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
27603 against a parallel ioctl etc */
27604 mutex_lock(&ttyj->ipw_tty_mutex);
27605 }
27606 - while (ttyj->open_count)
27607 + while (local_read(&ttyj->open_count))
27608 do_ipw_close(ttyj);
27609 ipwireless_disassociate_network_ttys(network,
27610 ttyj->channel_idx);
27611 diff -urNp linux-2.6.32.43/drivers/char/pty.c linux-2.6.32.43/drivers/char/pty.c
27612 --- linux-2.6.32.43/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
27613 +++ linux-2.6.32.43/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
27614 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
27615 return ret;
27616 }
27617
27618 -static struct file_operations ptmx_fops;
27619 +static const struct file_operations ptmx_fops = {
27620 + .llseek = no_llseek,
27621 + .read = tty_read,
27622 + .write = tty_write,
27623 + .poll = tty_poll,
27624 + .unlocked_ioctl = tty_ioctl,
27625 + .compat_ioctl = tty_compat_ioctl,
27626 + .open = ptmx_open,
27627 + .release = tty_release,
27628 + .fasync = tty_fasync,
27629 +};
27630 +
27631
27632 static void __init unix98_pty_init(void)
27633 {
27634 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
27635 register_sysctl_table(pty_root_table);
27636
27637 /* Now create the /dev/ptmx special device */
27638 - tty_default_fops(&ptmx_fops);
27639 - ptmx_fops.open = ptmx_open;
27640 -
27641 cdev_init(&ptmx_cdev, &ptmx_fops);
27642 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
27643 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
27644 diff -urNp linux-2.6.32.43/drivers/char/random.c linux-2.6.32.43/drivers/char/random.c
27645 --- linux-2.6.32.43/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
27646 +++ linux-2.6.32.43/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
27647 @@ -254,8 +254,13 @@
27648 /*
27649 * Configuration information
27650 */
27651 +#ifdef CONFIG_GRKERNSEC_RANDNET
27652 +#define INPUT_POOL_WORDS 512
27653 +#define OUTPUT_POOL_WORDS 128
27654 +#else
27655 #define INPUT_POOL_WORDS 128
27656 #define OUTPUT_POOL_WORDS 32
27657 +#endif
27658 #define SEC_XFER_SIZE 512
27659
27660 /*
27661 @@ -292,10 +297,17 @@ static struct poolinfo {
27662 int poolwords;
27663 int tap1, tap2, tap3, tap4, tap5;
27664 } poolinfo_table[] = {
27665 +#ifdef CONFIG_GRKERNSEC_RANDNET
27666 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27667 + { 512, 411, 308, 208, 104, 1 },
27668 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27669 + { 128, 103, 76, 51, 25, 1 },
27670 +#else
27671 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27672 { 128, 103, 76, 51, 25, 1 },
27673 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27674 { 32, 26, 20, 14, 7, 1 },
27675 +#endif
27676 #if 0
27677 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27678 { 2048, 1638, 1231, 819, 411, 1 },
27679 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27680 #include <linux/sysctl.h>
27681
27682 static int min_read_thresh = 8, min_write_thresh;
27683 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27684 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27685 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27686 static char sysctl_bootid[16];
27687
27688 diff -urNp linux-2.6.32.43/drivers/char/rocket.c linux-2.6.32.43/drivers/char/rocket.c
27689 --- linux-2.6.32.43/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
27690 +++ linux-2.6.32.43/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
27691 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
27692 struct rocket_ports tmp;
27693 int board;
27694
27695 + pax_track_stack();
27696 +
27697 if (!retports)
27698 return -EFAULT;
27699 memset(&tmp, 0, sizeof (tmp));
27700 diff -urNp linux-2.6.32.43/drivers/char/sonypi.c linux-2.6.32.43/drivers/char/sonypi.c
27701 --- linux-2.6.32.43/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
27702 +++ linux-2.6.32.43/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
27703 @@ -55,6 +55,7 @@
27704 #include <asm/uaccess.h>
27705 #include <asm/io.h>
27706 #include <asm/system.h>
27707 +#include <asm/local.h>
27708
27709 #include <linux/sonypi.h>
27710
27711 @@ -491,7 +492,7 @@ static struct sonypi_device {
27712 spinlock_t fifo_lock;
27713 wait_queue_head_t fifo_proc_list;
27714 struct fasync_struct *fifo_async;
27715 - int open_count;
27716 + local_t open_count;
27717 int model;
27718 struct input_dev *input_jog_dev;
27719 struct input_dev *input_key_dev;
27720 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
27721 static int sonypi_misc_release(struct inode *inode, struct file *file)
27722 {
27723 mutex_lock(&sonypi_device.lock);
27724 - sonypi_device.open_count--;
27725 + local_dec(&sonypi_device.open_count);
27726 mutex_unlock(&sonypi_device.lock);
27727 return 0;
27728 }
27729 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
27730 lock_kernel();
27731 mutex_lock(&sonypi_device.lock);
27732 /* Flush input queue on first open */
27733 - if (!sonypi_device.open_count)
27734 + if (!local_read(&sonypi_device.open_count))
27735 kfifo_reset(sonypi_device.fifo);
27736 - sonypi_device.open_count++;
27737 + local_inc(&sonypi_device.open_count);
27738 mutex_unlock(&sonypi_device.lock);
27739 unlock_kernel();
27740 return 0;
27741 diff -urNp linux-2.6.32.43/drivers/char/stallion.c linux-2.6.32.43/drivers/char/stallion.c
27742 --- linux-2.6.32.43/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
27743 +++ linux-2.6.32.43/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
27744 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
27745 struct stlport stl_dummyport;
27746 struct stlport *portp;
27747
27748 + pax_track_stack();
27749 +
27750 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27751 return -EFAULT;
27752 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27753 diff -urNp linux-2.6.32.43/drivers/char/tpm/tpm_bios.c linux-2.6.32.43/drivers/char/tpm/tpm_bios.c
27754 --- linux-2.6.32.43/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27755 +++ linux-2.6.32.43/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27756 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27757 event = addr;
27758
27759 if ((event->event_type == 0 && event->event_size == 0) ||
27760 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27761 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27762 return NULL;
27763
27764 return addr;
27765 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27766 return NULL;
27767
27768 if ((event->event_type == 0 && event->event_size == 0) ||
27769 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27770 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27771 return NULL;
27772
27773 (*pos)++;
27774 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27775 int i;
27776
27777 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27778 - seq_putc(m, data[i]);
27779 + if (!seq_putc(m, data[i]))
27780 + return -EFAULT;
27781
27782 return 0;
27783 }
27784 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27785 log->bios_event_log_end = log->bios_event_log + len;
27786
27787 virt = acpi_os_map_memory(start, len);
27788 + if (!virt) {
27789 + kfree(log->bios_event_log);
27790 + log->bios_event_log = NULL;
27791 + return -EFAULT;
27792 + }
27793
27794 memcpy(log->bios_event_log, virt, len);
27795
27796 diff -urNp linux-2.6.32.43/drivers/char/tpm/tpm.c linux-2.6.32.43/drivers/char/tpm/tpm.c
27797 --- linux-2.6.32.43/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27798 +++ linux-2.6.32.43/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27799 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27800 chip->vendor.req_complete_val)
27801 goto out_recv;
27802
27803 - if ((status == chip->vendor.req_canceled)) {
27804 + if (status == chip->vendor.req_canceled) {
27805 dev_err(chip->dev, "Operation Canceled\n");
27806 rc = -ECANCELED;
27807 goto out;
27808 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27809
27810 struct tpm_chip *chip = dev_get_drvdata(dev);
27811
27812 + pax_track_stack();
27813 +
27814 tpm_cmd.header.in = tpm_readpubek_header;
27815 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27816 "attempting to read the PUBEK");
27817 diff -urNp linux-2.6.32.43/drivers/char/tty_io.c linux-2.6.32.43/drivers/char/tty_io.c
27818 --- linux-2.6.32.43/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27819 +++ linux-2.6.32.43/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27820 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27821 DEFINE_MUTEX(tty_mutex);
27822 EXPORT_SYMBOL(tty_mutex);
27823
27824 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27825 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27826 ssize_t redirected_tty_write(struct file *, const char __user *,
27827 size_t, loff_t *);
27828 -static unsigned int tty_poll(struct file *, poll_table *);
27829 static int tty_open(struct inode *, struct file *);
27830 -static int tty_release(struct inode *, struct file *);
27831 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27832 -#ifdef CONFIG_COMPAT
27833 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27834 - unsigned long arg);
27835 -#else
27836 -#define tty_compat_ioctl NULL
27837 -#endif
27838 -static int tty_fasync(int fd, struct file *filp, int on);
27839 static void release_tty(struct tty_struct *tty, int idx);
27840 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27841 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27842 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27843 * read calls may be outstanding in parallel.
27844 */
27845
27846 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27847 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27848 loff_t *ppos)
27849 {
27850 int i;
27851 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27852 return i;
27853 }
27854
27855 +EXPORT_SYMBOL(tty_read);
27856 +
27857 void tty_write_unlock(struct tty_struct *tty)
27858 {
27859 mutex_unlock(&tty->atomic_write_lock);
27860 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27861 * write method will not be invoked in parallel for each device.
27862 */
27863
27864 -static ssize_t tty_write(struct file *file, const char __user *buf,
27865 +ssize_t tty_write(struct file *file, const char __user *buf,
27866 size_t count, loff_t *ppos)
27867 {
27868 struct tty_struct *tty;
27869 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27870 return ret;
27871 }
27872
27873 +EXPORT_SYMBOL(tty_write);
27874 +
27875 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27876 size_t count, loff_t *ppos)
27877 {
27878 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27879 * Takes bkl. See tty_release_dev
27880 */
27881
27882 -static int tty_release(struct inode *inode, struct file *filp)
27883 +int tty_release(struct inode *inode, struct file *filp)
27884 {
27885 lock_kernel();
27886 tty_release_dev(filp);
27887 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27888 return 0;
27889 }
27890
27891 +EXPORT_SYMBOL(tty_release);
27892 +
27893 /**
27894 * tty_poll - check tty status
27895 * @filp: file being polled
27896 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27897 * may be re-entered freely by other callers.
27898 */
27899
27900 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27901 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27902 {
27903 struct tty_struct *tty;
27904 struct tty_ldisc *ld;
27905 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27906 return ret;
27907 }
27908
27909 -static int tty_fasync(int fd, struct file *filp, int on)
27910 +EXPORT_SYMBOL(tty_poll);
27911 +
27912 +int tty_fasync(int fd, struct file *filp, int on)
27913 {
27914 struct tty_struct *tty;
27915 unsigned long flags;
27916 @@ -1948,6 +1945,8 @@ out:
27917 return retval;
27918 }
27919
27920 +EXPORT_SYMBOL(tty_fasync);
27921 +
27922 /**
27923 * tiocsti - fake input character
27924 * @tty: tty to fake input into
27925 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27926 return retval;
27927 }
27928
27929 +EXPORT_SYMBOL(tty_ioctl);
27930 +
27931 #ifdef CONFIG_COMPAT
27932 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27933 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27934 unsigned long arg)
27935 {
27936 struct inode *inode = file->f_dentry->d_inode;
27937 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27938
27939 return retval;
27940 }
27941 +
27942 +EXPORT_SYMBOL(tty_compat_ioctl);
27943 #endif
27944
27945 /*
27946 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27947 }
27948 EXPORT_SYMBOL_GPL(get_current_tty);
27949
27950 -void tty_default_fops(struct file_operations *fops)
27951 -{
27952 - *fops = tty_fops;
27953 -}
27954 -
27955 /*
27956 * Initialize the console device. This is called *early*, so
27957 * we can't necessarily depend on lots of kernel help here.
27958 diff -urNp linux-2.6.32.43/drivers/char/tty_ldisc.c linux-2.6.32.43/drivers/char/tty_ldisc.c
27959 --- linux-2.6.32.43/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
27960 +++ linux-2.6.32.43/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
27961 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27962 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27963 struct tty_ldisc_ops *ldo = ld->ops;
27964
27965 - ldo->refcount--;
27966 + atomic_dec(&ldo->refcount);
27967 module_put(ldo->owner);
27968 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27969
27970 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27971 spin_lock_irqsave(&tty_ldisc_lock, flags);
27972 tty_ldiscs[disc] = new_ldisc;
27973 new_ldisc->num = disc;
27974 - new_ldisc->refcount = 0;
27975 + atomic_set(&new_ldisc->refcount, 0);
27976 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27977
27978 return ret;
27979 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27980 return -EINVAL;
27981
27982 spin_lock_irqsave(&tty_ldisc_lock, flags);
27983 - if (tty_ldiscs[disc]->refcount)
27984 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27985 ret = -EBUSY;
27986 else
27987 tty_ldiscs[disc] = NULL;
27988 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27989 if (ldops) {
27990 ret = ERR_PTR(-EAGAIN);
27991 if (try_module_get(ldops->owner)) {
27992 - ldops->refcount++;
27993 + atomic_inc(&ldops->refcount);
27994 ret = ldops;
27995 }
27996 }
27997 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27998 unsigned long flags;
27999
28000 spin_lock_irqsave(&tty_ldisc_lock, flags);
28001 - ldops->refcount--;
28002 + atomic_dec(&ldops->refcount);
28003 module_put(ldops->owner);
28004 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28005 }
28006 diff -urNp linux-2.6.32.43/drivers/char/virtio_console.c linux-2.6.32.43/drivers/char/virtio_console.c
28007 --- linux-2.6.32.43/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28008 +++ linux-2.6.32.43/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
28009 @@ -44,6 +44,7 @@ static unsigned int in_len;
28010 static char *in, *inbuf;
28011
28012 /* The operations for our console. */
28013 +/* cannot be const */
28014 static struct hv_ops virtio_cons;
28015
28016 /* The hvc device */
28017 diff -urNp linux-2.6.32.43/drivers/char/vt.c linux-2.6.32.43/drivers/char/vt.c
28018 --- linux-2.6.32.43/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28019 +++ linux-2.6.32.43/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28020 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28021
28022 static void notify_write(struct vc_data *vc, unsigned int unicode)
28023 {
28024 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28025 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
28026 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28027 }
28028
28029 diff -urNp linux-2.6.32.43/drivers/char/vt_ioctl.c linux-2.6.32.43/drivers/char/vt_ioctl.c
28030 --- linux-2.6.32.43/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28031 +++ linux-2.6.32.43/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28032 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28033 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28034 return -EFAULT;
28035
28036 - if (!capable(CAP_SYS_TTY_CONFIG))
28037 - perm = 0;
28038 -
28039 switch (cmd) {
28040 case KDGKBENT:
28041 key_map = key_maps[s];
28042 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28043 val = (i ? K_HOLE : K_NOSUCHMAP);
28044 return put_user(val, &user_kbe->kb_value);
28045 case KDSKBENT:
28046 + if (!capable(CAP_SYS_TTY_CONFIG))
28047 + perm = 0;
28048 +
28049 if (!perm)
28050 return -EPERM;
28051 +
28052 if (!i && v == K_NOSUCHMAP) {
28053 /* deallocate map */
28054 key_map = key_maps[s];
28055 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28056 int i, j, k;
28057 int ret;
28058
28059 - if (!capable(CAP_SYS_TTY_CONFIG))
28060 - perm = 0;
28061 -
28062 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28063 if (!kbs) {
28064 ret = -ENOMEM;
28065 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28066 kfree(kbs);
28067 return ((p && *p) ? -EOVERFLOW : 0);
28068 case KDSKBSENT:
28069 + if (!capable(CAP_SYS_TTY_CONFIG))
28070 + perm = 0;
28071 +
28072 if (!perm) {
28073 ret = -EPERM;
28074 goto reterr;
28075 diff -urNp linux-2.6.32.43/drivers/cpufreq/cpufreq.c linux-2.6.32.43/drivers/cpufreq/cpufreq.c
28076 --- linux-2.6.32.43/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28077 +++ linux-2.6.32.43/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28078 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28079 complete(&policy->kobj_unregister);
28080 }
28081
28082 -static struct sysfs_ops sysfs_ops = {
28083 +static const struct sysfs_ops sysfs_ops = {
28084 .show = show,
28085 .store = store,
28086 };
28087 diff -urNp linux-2.6.32.43/drivers/cpuidle/sysfs.c linux-2.6.32.43/drivers/cpuidle/sysfs.c
28088 --- linux-2.6.32.43/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28089 +++ linux-2.6.32.43/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28090 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28091 return ret;
28092 }
28093
28094 -static struct sysfs_ops cpuidle_sysfs_ops = {
28095 +static const struct sysfs_ops cpuidle_sysfs_ops = {
28096 .show = cpuidle_show,
28097 .store = cpuidle_store,
28098 };
28099 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28100 return ret;
28101 }
28102
28103 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
28104 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28105 .show = cpuidle_state_show,
28106 };
28107
28108 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28109 .release = cpuidle_state_sysfs_release,
28110 };
28111
28112 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28113 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28114 {
28115 kobject_put(&device->kobjs[i]->kobj);
28116 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28117 diff -urNp linux-2.6.32.43/drivers/crypto/hifn_795x.c linux-2.6.32.43/drivers/crypto/hifn_795x.c
28118 --- linux-2.6.32.43/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28119 +++ linux-2.6.32.43/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28120 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28121 0xCA, 0x34, 0x2B, 0x2E};
28122 struct scatterlist sg;
28123
28124 + pax_track_stack();
28125 +
28126 memset(src, 0, sizeof(src));
28127 memset(ctx.key, 0, sizeof(ctx.key));
28128
28129 diff -urNp linux-2.6.32.43/drivers/crypto/padlock-aes.c linux-2.6.32.43/drivers/crypto/padlock-aes.c
28130 --- linux-2.6.32.43/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28131 +++ linux-2.6.32.43/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28132 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28133 struct crypto_aes_ctx gen_aes;
28134 int cpu;
28135
28136 + pax_track_stack();
28137 +
28138 if (key_len % 8) {
28139 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28140 return -EINVAL;
28141 diff -urNp linux-2.6.32.43/drivers/dma/ioat/dma.c linux-2.6.32.43/drivers/dma/ioat/dma.c
28142 --- linux-2.6.32.43/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28143 +++ linux-2.6.32.43/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28144 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28145 return entry->show(&chan->common, page);
28146 }
28147
28148 -struct sysfs_ops ioat_sysfs_ops = {
28149 +const struct sysfs_ops ioat_sysfs_ops = {
28150 .show = ioat_attr_show,
28151 };
28152
28153 diff -urNp linux-2.6.32.43/drivers/dma/ioat/dma.h linux-2.6.32.43/drivers/dma/ioat/dma.h
28154 --- linux-2.6.32.43/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28155 +++ linux-2.6.32.43/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28156 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28157 unsigned long *phys_complete);
28158 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28159 void ioat_kobject_del(struct ioatdma_device *device);
28160 -extern struct sysfs_ops ioat_sysfs_ops;
28161 +extern const struct sysfs_ops ioat_sysfs_ops;
28162 extern struct ioat_sysfs_entry ioat_version_attr;
28163 extern struct ioat_sysfs_entry ioat_cap_attr;
28164 #endif /* IOATDMA_H */
28165 diff -urNp linux-2.6.32.43/drivers/edac/edac_device_sysfs.c linux-2.6.32.43/drivers/edac/edac_device_sysfs.c
28166 --- linux-2.6.32.43/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28167 +++ linux-2.6.32.43/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28168 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28169 }
28170
28171 /* edac_dev file operations for an 'ctl_info' */
28172 -static struct sysfs_ops device_ctl_info_ops = {
28173 +static const struct sysfs_ops device_ctl_info_ops = {
28174 .show = edac_dev_ctl_info_show,
28175 .store = edac_dev_ctl_info_store
28176 };
28177 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28178 }
28179
28180 /* edac_dev file operations for an 'instance' */
28181 -static struct sysfs_ops device_instance_ops = {
28182 +static const struct sysfs_ops device_instance_ops = {
28183 .show = edac_dev_instance_show,
28184 .store = edac_dev_instance_store
28185 };
28186 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28187 }
28188
28189 /* edac_dev file operations for a 'block' */
28190 -static struct sysfs_ops device_block_ops = {
28191 +static const struct sysfs_ops device_block_ops = {
28192 .show = edac_dev_block_show,
28193 .store = edac_dev_block_store
28194 };
28195 diff -urNp linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c
28196 --- linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28197 +++ linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28198 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28199 return -EIO;
28200 }
28201
28202 -static struct sysfs_ops csrowfs_ops = {
28203 +static const struct sysfs_ops csrowfs_ops = {
28204 .show = csrowdev_show,
28205 .store = csrowdev_store
28206 };
28207 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28208 }
28209
28210 /* Intermediate show/store table */
28211 -static struct sysfs_ops mci_ops = {
28212 +static const struct sysfs_ops mci_ops = {
28213 .show = mcidev_show,
28214 .store = mcidev_store
28215 };
28216 diff -urNp linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c
28217 --- linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28218 +++ linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28219 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28220 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28221 static int edac_pci_poll_msec = 1000; /* one second workq period */
28222
28223 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28224 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28225 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28226 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28227
28228 static struct kobject *edac_pci_top_main_kobj;
28229 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28230 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28231 }
28232
28233 /* fs_ops table */
28234 -static struct sysfs_ops pci_instance_ops = {
28235 +static const struct sysfs_ops pci_instance_ops = {
28236 .show = edac_pci_instance_show,
28237 .store = edac_pci_instance_store
28238 };
28239 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28240 return -EIO;
28241 }
28242
28243 -static struct sysfs_ops edac_pci_sysfs_ops = {
28244 +static const struct sysfs_ops edac_pci_sysfs_ops = {
28245 .show = edac_pci_dev_show,
28246 .store = edac_pci_dev_store
28247 };
28248 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28249 edac_printk(KERN_CRIT, EDAC_PCI,
28250 "Signaled System Error on %s\n",
28251 pci_name(dev));
28252 - atomic_inc(&pci_nonparity_count);
28253 + atomic_inc_unchecked(&pci_nonparity_count);
28254 }
28255
28256 if (status & (PCI_STATUS_PARITY)) {
28257 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28258 "Master Data Parity Error on %s\n",
28259 pci_name(dev));
28260
28261 - atomic_inc(&pci_parity_count);
28262 + atomic_inc_unchecked(&pci_parity_count);
28263 }
28264
28265 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28266 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28267 "Detected Parity Error on %s\n",
28268 pci_name(dev));
28269
28270 - atomic_inc(&pci_parity_count);
28271 + atomic_inc_unchecked(&pci_parity_count);
28272 }
28273 }
28274
28275 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28276 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28277 "Signaled System Error on %s\n",
28278 pci_name(dev));
28279 - atomic_inc(&pci_nonparity_count);
28280 + atomic_inc_unchecked(&pci_nonparity_count);
28281 }
28282
28283 if (status & (PCI_STATUS_PARITY)) {
28284 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28285 "Master Data Parity Error on "
28286 "%s\n", pci_name(dev));
28287
28288 - atomic_inc(&pci_parity_count);
28289 + atomic_inc_unchecked(&pci_parity_count);
28290 }
28291
28292 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28293 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28294 "Detected Parity Error on %s\n",
28295 pci_name(dev));
28296
28297 - atomic_inc(&pci_parity_count);
28298 + atomic_inc_unchecked(&pci_parity_count);
28299 }
28300 }
28301 }
28302 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28303 if (!check_pci_errors)
28304 return;
28305
28306 - before_count = atomic_read(&pci_parity_count);
28307 + before_count = atomic_read_unchecked(&pci_parity_count);
28308
28309 /* scan all PCI devices looking for a Parity Error on devices and
28310 * bridges.
28311 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28312 /* Only if operator has selected panic on PCI Error */
28313 if (edac_pci_get_panic_on_pe()) {
28314 /* If the count is different 'after' from 'before' */
28315 - if (before_count != atomic_read(&pci_parity_count))
28316 + if (before_count != atomic_read_unchecked(&pci_parity_count))
28317 panic("EDAC: PCI Parity Error");
28318 }
28319 }
28320 diff -urNp linux-2.6.32.43/drivers/firewire/core-cdev.c linux-2.6.32.43/drivers/firewire/core-cdev.c
28321 --- linux-2.6.32.43/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28322 +++ linux-2.6.32.43/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28323 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28324 int ret;
28325
28326 if ((request->channels == 0 && request->bandwidth == 0) ||
28327 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28328 - request->bandwidth < 0)
28329 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28330 return -EINVAL;
28331
28332 r = kmalloc(sizeof(*r), GFP_KERNEL);
28333 diff -urNp linux-2.6.32.43/drivers/firewire/core-transaction.c linux-2.6.32.43/drivers/firewire/core-transaction.c
28334 --- linux-2.6.32.43/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28335 +++ linux-2.6.32.43/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28336 @@ -36,6 +36,7 @@
28337 #include <linux/string.h>
28338 #include <linux/timer.h>
28339 #include <linux/types.h>
28340 +#include <linux/sched.h>
28341
28342 #include <asm/byteorder.h>
28343
28344 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28345 struct transaction_callback_data d;
28346 struct fw_transaction t;
28347
28348 + pax_track_stack();
28349 +
28350 init_completion(&d.done);
28351 d.payload = payload;
28352 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28353 diff -urNp linux-2.6.32.43/drivers/firmware/dmi_scan.c linux-2.6.32.43/drivers/firmware/dmi_scan.c
28354 --- linux-2.6.32.43/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28355 +++ linux-2.6.32.43/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28356 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28357 }
28358 }
28359 else {
28360 - /*
28361 - * no iounmap() for that ioremap(); it would be a no-op, but
28362 - * it's so early in setup that sucker gets confused into doing
28363 - * what it shouldn't if we actually call it.
28364 - */
28365 p = dmi_ioremap(0xF0000, 0x10000);
28366 if (p == NULL)
28367 goto error;
28368 diff -urNp linux-2.6.32.43/drivers/firmware/edd.c linux-2.6.32.43/drivers/firmware/edd.c
28369 --- linux-2.6.32.43/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28370 +++ linux-2.6.32.43/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28371 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28372 return ret;
28373 }
28374
28375 -static struct sysfs_ops edd_attr_ops = {
28376 +static const struct sysfs_ops edd_attr_ops = {
28377 .show = edd_attr_show,
28378 };
28379
28380 diff -urNp linux-2.6.32.43/drivers/firmware/efivars.c linux-2.6.32.43/drivers/firmware/efivars.c
28381 --- linux-2.6.32.43/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28382 +++ linux-2.6.32.43/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28383 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28384 return ret;
28385 }
28386
28387 -static struct sysfs_ops efivar_attr_ops = {
28388 +static const struct sysfs_ops efivar_attr_ops = {
28389 .show = efivar_attr_show,
28390 .store = efivar_attr_store,
28391 };
28392 diff -urNp linux-2.6.32.43/drivers/firmware/iscsi_ibft.c linux-2.6.32.43/drivers/firmware/iscsi_ibft.c
28393 --- linux-2.6.32.43/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28394 +++ linux-2.6.32.43/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28395 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28396 return ret;
28397 }
28398
28399 -static struct sysfs_ops ibft_attr_ops = {
28400 +static const struct sysfs_ops ibft_attr_ops = {
28401 .show = ibft_show_attribute,
28402 };
28403
28404 diff -urNp linux-2.6.32.43/drivers/firmware/memmap.c linux-2.6.32.43/drivers/firmware/memmap.c
28405 --- linux-2.6.32.43/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28406 +++ linux-2.6.32.43/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28407 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28408 NULL
28409 };
28410
28411 -static struct sysfs_ops memmap_attr_ops = {
28412 +static const struct sysfs_ops memmap_attr_ops = {
28413 .show = memmap_attr_show,
28414 };
28415
28416 diff -urNp linux-2.6.32.43/drivers/gpio/vr41xx_giu.c linux-2.6.32.43/drivers/gpio/vr41xx_giu.c
28417 --- linux-2.6.32.43/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28418 +++ linux-2.6.32.43/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28419 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28420 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28421 maskl, pendl, maskh, pendh);
28422
28423 - atomic_inc(&irq_err_count);
28424 + atomic_inc_unchecked(&irq_err_count);
28425
28426 return -EINVAL;
28427 }
28428 diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c
28429 --- linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28430 +++ linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28431 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28432 struct drm_crtc *tmp;
28433 int crtc_mask = 1;
28434
28435 - WARN(!crtc, "checking null crtc?");
28436 + BUG_ON(!crtc);
28437
28438 dev = crtc->dev;
28439
28440 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28441
28442 adjusted_mode = drm_mode_duplicate(dev, mode);
28443
28444 + pax_track_stack();
28445 +
28446 crtc->enabled = drm_helper_crtc_in_use(crtc);
28447
28448 if (!crtc->enabled)
28449 diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_drv.c linux-2.6.32.43/drivers/gpu/drm/drm_drv.c
28450 --- linux-2.6.32.43/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28451 +++ linux-2.6.32.43/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28452 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28453 char *kdata = NULL;
28454
28455 atomic_inc(&dev->ioctl_count);
28456 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28457 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28458 ++file_priv->ioctl_count;
28459
28460 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28461 diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_fops.c linux-2.6.32.43/drivers/gpu/drm/drm_fops.c
28462 --- linux-2.6.32.43/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28463 +++ linux-2.6.32.43/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28464 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28465 }
28466
28467 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28468 - atomic_set(&dev->counts[i], 0);
28469 + atomic_set_unchecked(&dev->counts[i], 0);
28470
28471 dev->sigdata.lock = NULL;
28472
28473 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28474
28475 retcode = drm_open_helper(inode, filp, dev);
28476 if (!retcode) {
28477 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28478 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28479 spin_lock(&dev->count_lock);
28480 - if (!dev->open_count++) {
28481 + if (local_inc_return(&dev->open_count) == 1) {
28482 spin_unlock(&dev->count_lock);
28483 retcode = drm_setup(dev);
28484 goto out;
28485 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28486
28487 lock_kernel();
28488
28489 - DRM_DEBUG("open_count = %d\n", dev->open_count);
28490 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28491
28492 if (dev->driver->preclose)
28493 dev->driver->preclose(dev, file_priv);
28494 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28495 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28496 task_pid_nr(current),
28497 (long)old_encode_dev(file_priv->minor->device),
28498 - dev->open_count);
28499 + local_read(&dev->open_count));
28500
28501 /* if the master has gone away we can't do anything with the lock */
28502 if (file_priv->minor->master)
28503 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28504 * End inline drm_release
28505 */
28506
28507 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28508 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28509 spin_lock(&dev->count_lock);
28510 - if (!--dev->open_count) {
28511 + if (local_dec_and_test(&dev->open_count)) {
28512 if (atomic_read(&dev->ioctl_count)) {
28513 DRM_ERROR("Device busy: %d\n",
28514 atomic_read(&dev->ioctl_count));
28515 diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_gem.c linux-2.6.32.43/drivers/gpu/drm/drm_gem.c
28516 --- linux-2.6.32.43/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28517 +++ linux-2.6.32.43/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28518 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28519 spin_lock_init(&dev->object_name_lock);
28520 idr_init(&dev->object_name_idr);
28521 atomic_set(&dev->object_count, 0);
28522 - atomic_set(&dev->object_memory, 0);
28523 + atomic_set_unchecked(&dev->object_memory, 0);
28524 atomic_set(&dev->pin_count, 0);
28525 - atomic_set(&dev->pin_memory, 0);
28526 + atomic_set_unchecked(&dev->pin_memory, 0);
28527 atomic_set(&dev->gtt_count, 0);
28528 - atomic_set(&dev->gtt_memory, 0);
28529 + atomic_set_unchecked(&dev->gtt_memory, 0);
28530
28531 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28532 if (!mm) {
28533 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28534 goto fput;
28535 }
28536 atomic_inc(&dev->object_count);
28537 - atomic_add(obj->size, &dev->object_memory);
28538 + atomic_add_unchecked(obj->size, &dev->object_memory);
28539 return obj;
28540 fput:
28541 fput(obj->filp);
28542 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28543
28544 fput(obj->filp);
28545 atomic_dec(&dev->object_count);
28546 - atomic_sub(obj->size, &dev->object_memory);
28547 + atomic_sub_unchecked(obj->size, &dev->object_memory);
28548 kfree(obj);
28549 }
28550 EXPORT_SYMBOL(drm_gem_object_free);
28551 diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_info.c linux-2.6.32.43/drivers/gpu/drm/drm_info.c
28552 --- linux-2.6.32.43/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28553 +++ linux-2.6.32.43/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28554 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28555 struct drm_local_map *map;
28556 struct drm_map_list *r_list;
28557
28558 - /* Hardcoded from _DRM_FRAME_BUFFER,
28559 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28560 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28561 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28562 + static const char * const types[] = {
28563 + [_DRM_FRAME_BUFFER] = "FB",
28564 + [_DRM_REGISTERS] = "REG",
28565 + [_DRM_SHM] = "SHM",
28566 + [_DRM_AGP] = "AGP",
28567 + [_DRM_SCATTER_GATHER] = "SG",
28568 + [_DRM_CONSISTENT] = "PCI",
28569 + [_DRM_GEM] = "GEM" };
28570 const char *type;
28571 int i;
28572
28573 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
28574 map = r_list->map;
28575 if (!map)
28576 continue;
28577 - if (map->type < 0 || map->type > 5)
28578 + if (map->type >= ARRAY_SIZE(types))
28579 type = "??";
28580 else
28581 type = types[map->type];
28582 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
28583 struct drm_device *dev = node->minor->dev;
28584
28585 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
28586 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
28587 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
28588 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
28589 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
28590 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
28591 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
28592 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
28593 seq_printf(m, "%d gtt total\n", dev->gtt_total);
28594 return 0;
28595 }
28596 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
28597 mutex_lock(&dev->struct_mutex);
28598 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
28599 atomic_read(&dev->vma_count),
28600 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28601 + NULL, 0);
28602 +#else
28603 high_memory, (u64)virt_to_phys(high_memory));
28604 +#endif
28605
28606 list_for_each_entry(pt, &dev->vmalist, head) {
28607 vma = pt->vma;
28608 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
28609 continue;
28610 seq_printf(m,
28611 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
28612 - pt->pid, vma->vm_start, vma->vm_end,
28613 + pt->pid,
28614 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28615 + 0, 0,
28616 +#else
28617 + vma->vm_start, vma->vm_end,
28618 +#endif
28619 vma->vm_flags & VM_READ ? 'r' : '-',
28620 vma->vm_flags & VM_WRITE ? 'w' : '-',
28621 vma->vm_flags & VM_EXEC ? 'x' : '-',
28622 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28623 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28624 vma->vm_flags & VM_IO ? 'i' : '-',
28625 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28626 + 0);
28627 +#else
28628 vma->vm_pgoff);
28629 +#endif
28630
28631 #if defined(__i386__)
28632 pgprot = pgprot_val(vma->vm_page_prot);
28633 diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c
28634 --- linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28635 +++ linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28636 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
28637 stats->data[i].value =
28638 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28639 else
28640 - stats->data[i].value = atomic_read(&dev->counts[i]);
28641 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28642 stats->data[i].type = dev->types[i];
28643 }
28644
28645 diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_lock.c linux-2.6.32.43/drivers/gpu/drm/drm_lock.c
28646 --- linux-2.6.32.43/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
28647 +++ linux-2.6.32.43/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
28648 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
28649 if (drm_lock_take(&master->lock, lock->context)) {
28650 master->lock.file_priv = file_priv;
28651 master->lock.lock_time = jiffies;
28652 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28653 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28654 break; /* Got lock */
28655 }
28656
28657 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
28658 return -EINVAL;
28659 }
28660
28661 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28662 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28663
28664 /* kernel_context_switch isn't used by any of the x86 drm
28665 * modules but is required by the Sparc driver.
28666 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c
28667 --- linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
28668 +++ linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
28669 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
28670 dma->buflist[vertex->idx],
28671 vertex->discard, vertex->used);
28672
28673 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28674 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28675 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28676 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28677 sarea_priv->last_enqueue = dev_priv->counter - 1;
28678 sarea_priv->last_dispatch = (int)hw_status[5];
28679
28680 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
28681 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28682 mc->last_render);
28683
28684 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28685 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28686 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28687 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28688 sarea_priv->last_enqueue = dev_priv->counter - 1;
28689 sarea_priv->last_dispatch = (int)hw_status[5];
28690
28691 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h
28692 --- linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
28693 +++ linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
28694 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28695 int page_flipping;
28696
28697 wait_queue_head_t irq_queue;
28698 - atomic_t irq_received;
28699 - atomic_t irq_emitted;
28700 + atomic_unchecked_t irq_received;
28701 + atomic_unchecked_t irq_emitted;
28702
28703 int front_offset;
28704 } drm_i810_private_t;
28705 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h
28706 --- linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
28707 +++ linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
28708 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
28709 int page_flipping;
28710
28711 wait_queue_head_t irq_queue;
28712 - atomic_t irq_received;
28713 - atomic_t irq_emitted;
28714 + atomic_unchecked_t irq_received;
28715 + atomic_unchecked_t irq_emitted;
28716
28717 int use_mi_batchbuffer_start;
28718
28719 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c
28720 --- linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
28721 +++ linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
28722 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
28723
28724 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
28725
28726 - atomic_inc(&dev_priv->irq_received);
28727 + atomic_inc_unchecked(&dev_priv->irq_received);
28728 wake_up_interruptible(&dev_priv->irq_queue);
28729
28730 return IRQ_HANDLED;
28731 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
28732
28733 DRM_DEBUG("%s\n", __func__);
28734
28735 - atomic_inc(&dev_priv->irq_emitted);
28736 + atomic_inc_unchecked(&dev_priv->irq_emitted);
28737
28738 BEGIN_LP_RING(2);
28739 OUT_RING(0);
28740 OUT_RING(GFX_OP_USER_INTERRUPT);
28741 ADVANCE_LP_RING();
28742
28743 - return atomic_read(&dev_priv->irq_emitted);
28744 + return atomic_read_unchecked(&dev_priv->irq_emitted);
28745 }
28746
28747 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
28748 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28749
28750 DRM_DEBUG("%s\n", __func__);
28751
28752 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28753 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28754 return 0;
28755
28756 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28757 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28758
28759 for (;;) {
28760 __set_current_state(TASK_INTERRUPTIBLE);
28761 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28762 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28763 break;
28764 if ((signed)(end - jiffies) <= 0) {
28765 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28766 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28767 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28768 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28769 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28770 - atomic_set(&dev_priv->irq_received, 0);
28771 - atomic_set(&dev_priv->irq_emitted, 0);
28772 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28773 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28774 init_waitqueue_head(&dev_priv->irq_queue);
28775 }
28776
28777 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c
28778 --- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28779 +++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28780 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28781 }
28782 }
28783
28784 -struct intel_dvo_dev_ops ch7017_ops = {
28785 +const struct intel_dvo_dev_ops ch7017_ops = {
28786 .init = ch7017_init,
28787 .detect = ch7017_detect,
28788 .mode_valid = ch7017_mode_valid,
28789 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c
28790 --- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28791 +++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28792 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28793 }
28794 }
28795
28796 -struct intel_dvo_dev_ops ch7xxx_ops = {
28797 +const struct intel_dvo_dev_ops ch7xxx_ops = {
28798 .init = ch7xxx_init,
28799 .detect = ch7xxx_detect,
28800 .mode_valid = ch7xxx_mode_valid,
28801 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h
28802 --- linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28803 +++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28804 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28805 *
28806 * \return singly-linked list of modes or NULL if no modes found.
28807 */
28808 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28809 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28810
28811 /**
28812 * Clean up driver-specific bits of the output
28813 */
28814 - void (*destroy) (struct intel_dvo_device *dvo);
28815 + void (* const destroy) (struct intel_dvo_device *dvo);
28816
28817 /**
28818 * Debugging hook to dump device registers to log file
28819 */
28820 - void (*dump_regs)(struct intel_dvo_device *dvo);
28821 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28822 };
28823
28824 -extern struct intel_dvo_dev_ops sil164_ops;
28825 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28826 -extern struct intel_dvo_dev_ops ivch_ops;
28827 -extern struct intel_dvo_dev_ops tfp410_ops;
28828 -extern struct intel_dvo_dev_ops ch7017_ops;
28829 +extern const struct intel_dvo_dev_ops sil164_ops;
28830 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28831 +extern const struct intel_dvo_dev_ops ivch_ops;
28832 +extern const struct intel_dvo_dev_ops tfp410_ops;
28833 +extern const struct intel_dvo_dev_ops ch7017_ops;
28834
28835 #endif /* _INTEL_DVO_H */
28836 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c
28837 --- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28838 +++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28839 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28840 }
28841 }
28842
28843 -struct intel_dvo_dev_ops ivch_ops= {
28844 +const struct intel_dvo_dev_ops ivch_ops= {
28845 .init = ivch_init,
28846 .dpms = ivch_dpms,
28847 .save = ivch_save,
28848 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c
28849 --- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28850 +++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28851 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28852 }
28853 }
28854
28855 -struct intel_dvo_dev_ops sil164_ops = {
28856 +const struct intel_dvo_dev_ops sil164_ops = {
28857 .init = sil164_init,
28858 .detect = sil164_detect,
28859 .mode_valid = sil164_mode_valid,
28860 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c
28861 --- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28862 +++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28863 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28864 }
28865 }
28866
28867 -struct intel_dvo_dev_ops tfp410_ops = {
28868 +const struct intel_dvo_dev_ops tfp410_ops = {
28869 .init = tfp410_init,
28870 .detect = tfp410_detect,
28871 .mode_valid = tfp410_mode_valid,
28872 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c
28873 --- linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28874 +++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28875 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28876 I915_READ(GTIMR));
28877 }
28878 seq_printf(m, "Interrupts received: %d\n",
28879 - atomic_read(&dev_priv->irq_received));
28880 + atomic_read_unchecked(&dev_priv->irq_received));
28881 if (dev_priv->hw_status_page != NULL) {
28882 seq_printf(m, "Current sequence: %d\n",
28883 i915_get_gem_seqno(dev));
28884 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c
28885 --- linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28886 +++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28887 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28888 return i915_resume(dev);
28889 }
28890
28891 -static struct vm_operations_struct i915_gem_vm_ops = {
28892 +static const struct vm_operations_struct i915_gem_vm_ops = {
28893 .fault = i915_gem_fault,
28894 .open = drm_gem_vm_open,
28895 .close = drm_gem_vm_close,
28896 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h
28897 --- linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28898 +++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28899 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28900 int page_flipping;
28901
28902 wait_queue_head_t irq_queue;
28903 - atomic_t irq_received;
28904 + atomic_unchecked_t irq_received;
28905 /** Protects user_irq_refcount and irq_mask_reg */
28906 spinlock_t user_irq_lock;
28907 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28908 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c
28909 --- linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28910 +++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28911 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28912
28913 args->aper_size = dev->gtt_total;
28914 args->aper_available_size = (args->aper_size -
28915 - atomic_read(&dev->pin_memory));
28916 + atomic_read_unchecked(&dev->pin_memory));
28917
28918 return 0;
28919 }
28920 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28921 return -EINVAL;
28922 }
28923
28924 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28925 + drm_gem_object_unreference(obj);
28926 + return -EFAULT;
28927 + }
28928 +
28929 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28930 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28931 } else {
28932 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28933 return -EINVAL;
28934 }
28935
28936 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28937 + drm_gem_object_unreference(obj);
28938 + return -EFAULT;
28939 + }
28940 +
28941 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28942 * it would end up going through the fenced access, and we'll get
28943 * different detiling behavior between reading and writing.
28944 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28945
28946 if (obj_priv->gtt_space) {
28947 atomic_dec(&dev->gtt_count);
28948 - atomic_sub(obj->size, &dev->gtt_memory);
28949 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28950
28951 drm_mm_put_block(obj_priv->gtt_space);
28952 obj_priv->gtt_space = NULL;
28953 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28954 goto search_free;
28955 }
28956 atomic_inc(&dev->gtt_count);
28957 - atomic_add(obj->size, &dev->gtt_memory);
28958 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28959
28960 /* Assert that the object is not currently in any GPU domain. As it
28961 * wasn't in the GTT, there shouldn't be any way it could have been in
28962 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28963 "%d/%d gtt bytes\n",
28964 atomic_read(&dev->object_count),
28965 atomic_read(&dev->pin_count),
28966 - atomic_read(&dev->object_memory),
28967 - atomic_read(&dev->pin_memory),
28968 - atomic_read(&dev->gtt_memory),
28969 + atomic_read_unchecked(&dev->object_memory),
28970 + atomic_read_unchecked(&dev->pin_memory),
28971 + atomic_read_unchecked(&dev->gtt_memory),
28972 dev->gtt_total);
28973 }
28974 goto err;
28975 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28976 */
28977 if (obj_priv->pin_count == 1) {
28978 atomic_inc(&dev->pin_count);
28979 - atomic_add(obj->size, &dev->pin_memory);
28980 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28981 if (!obj_priv->active &&
28982 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28983 !list_empty(&obj_priv->list))
28984 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28985 list_move_tail(&obj_priv->list,
28986 &dev_priv->mm.inactive_list);
28987 atomic_dec(&dev->pin_count);
28988 - atomic_sub(obj->size, &dev->pin_memory);
28989 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28990 }
28991 i915_verify_inactive(dev, __FILE__, __LINE__);
28992 }
28993 diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c
28994 --- linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28995 +++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28996 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28997 int irq_received;
28998 int ret = IRQ_NONE;
28999
29000 - atomic_inc(&dev_priv->irq_received);
29001 + atomic_inc_unchecked(&dev_priv->irq_received);
29002
29003 if (IS_IGDNG(dev))
29004 return igdng_irq_handler(dev);
29005 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29006 {
29007 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29008
29009 - atomic_set(&dev_priv->irq_received, 0);
29010 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29011
29012 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29013 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29014 diff -urNp linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h
29015 --- linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29016 +++ linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29017 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29018 u32 clear_cmd;
29019 u32 maccess;
29020
29021 - atomic_t vbl_received; /**< Number of vblanks received. */
29022 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29023 wait_queue_head_t fence_queue;
29024 - atomic_t last_fence_retired;
29025 + atomic_unchecked_t last_fence_retired;
29026 u32 next_fence_to_post;
29027
29028 unsigned int fb_cpp;
29029 diff -urNp linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c
29030 --- linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29031 +++ linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29032 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29033 if (crtc != 0)
29034 return 0;
29035
29036 - return atomic_read(&dev_priv->vbl_received);
29037 + return atomic_read_unchecked(&dev_priv->vbl_received);
29038 }
29039
29040
29041 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29042 /* VBLANK interrupt */
29043 if (status & MGA_VLINEPEN) {
29044 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29045 - atomic_inc(&dev_priv->vbl_received);
29046 + atomic_inc_unchecked(&dev_priv->vbl_received);
29047 drm_handle_vblank(dev, 0);
29048 handled = 1;
29049 }
29050 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29051 MGA_WRITE(MGA_PRIMEND, prim_end);
29052 }
29053
29054 - atomic_inc(&dev_priv->last_fence_retired);
29055 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29056 DRM_WAKEUP(&dev_priv->fence_queue);
29057 handled = 1;
29058 }
29059 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29060 * using fences.
29061 */
29062 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29063 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29064 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29065 - *sequence) <= (1 << 23)));
29066
29067 *sequence = cur_fence;
29068 diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c
29069 --- linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29070 +++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29071 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29072
29073 /* GH: Simple idle check.
29074 */
29075 - atomic_set(&dev_priv->idle_count, 0);
29076 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29077
29078 /* We don't support anything other than bus-mastering ring mode,
29079 * but the ring can be in either AGP or PCI space for the ring
29080 diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h
29081 --- linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29082 +++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29083 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29084 int is_pci;
29085 unsigned long cce_buffers_offset;
29086
29087 - atomic_t idle_count;
29088 + atomic_unchecked_t idle_count;
29089
29090 int page_flipping;
29091 int current_page;
29092 u32 crtc_offset;
29093 u32 crtc_offset_cntl;
29094
29095 - atomic_t vbl_received;
29096 + atomic_unchecked_t vbl_received;
29097
29098 u32 color_fmt;
29099 unsigned int front_offset;
29100 diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c
29101 --- linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29102 +++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29103 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29104 if (crtc != 0)
29105 return 0;
29106
29107 - return atomic_read(&dev_priv->vbl_received);
29108 + return atomic_read_unchecked(&dev_priv->vbl_received);
29109 }
29110
29111 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29112 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29113 /* VBLANK interrupt */
29114 if (status & R128_CRTC_VBLANK_INT) {
29115 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29116 - atomic_inc(&dev_priv->vbl_received);
29117 + atomic_inc_unchecked(&dev_priv->vbl_received);
29118 drm_handle_vblank(dev, 0);
29119 return IRQ_HANDLED;
29120 }
29121 diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c
29122 --- linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29123 +++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29124 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29125
29126 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29127 {
29128 - if (atomic_read(&dev_priv->idle_count) == 0) {
29129 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29130 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29131 } else {
29132 - atomic_set(&dev_priv->idle_count, 0);
29133 + atomic_set_unchecked(&dev_priv->idle_count, 0);
29134 }
29135 }
29136
29137 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c
29138 --- linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29139 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29140 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29141 char name[512];
29142 int i;
29143
29144 + pax_track_stack();
29145 +
29146 ctx->card = card;
29147 ctx->bios = bios;
29148
29149 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c
29150 --- linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29151 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29152 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29153 regex_t mask_rex;
29154 regmatch_t match[4];
29155 char buf[1024];
29156 - size_t end;
29157 + long end;
29158 int len;
29159 int done = 0;
29160 int r;
29161 unsigned o;
29162 struct offset *offset;
29163 char last_reg_s[10];
29164 - int last_reg;
29165 + unsigned long last_reg;
29166
29167 if (regcomp
29168 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29169 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c
29170 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29171 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29172 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29173 bool linkb;
29174 struct radeon_i2c_bus_rec ddc_bus;
29175
29176 + pax_track_stack();
29177 +
29178 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29179
29180 if (data_offset == 0)
29181 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29182 }
29183 }
29184
29185 -struct bios_connector {
29186 +static struct bios_connector {
29187 bool valid;
29188 uint16_t line_mux;
29189 uint16_t devices;
29190 int connector_type;
29191 struct radeon_i2c_bus_rec ddc_bus;
29192 -};
29193 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29194
29195 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29196 drm_device
29197 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29198 uint8_t dac;
29199 union atom_supported_devices *supported_devices;
29200 int i, j;
29201 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29202
29203 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29204
29205 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c
29206 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29207 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29208 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29209
29210 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29211 error = freq - current_freq;
29212 - error = error < 0 ? 0xffffffff : error;
29213 + error = (int32_t)error < 0 ? 0xffffffff : error;
29214 } else
29215 error = abs(current_freq - freq);
29216 vco_diff = abs(vco - best_vco);
29217 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h
29218 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29219 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29220 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29221
29222 /* SW interrupt */
29223 wait_queue_head_t swi_queue;
29224 - atomic_t swi_emitted;
29225 + atomic_unchecked_t swi_emitted;
29226 int vblank_crtc;
29227 uint32_t irq_enable_reg;
29228 uint32_t r500_disp_irq_reg;
29229 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c
29230 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29231 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29232 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29233 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29234 return 0;
29235 }
29236 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29237 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29238 if (!rdev->cp.ready) {
29239 /* FIXME: cp is not running assume everythings is done right
29240 * away
29241 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29242 return r;
29243 }
29244 WREG32(rdev->fence_drv.scratch_reg, 0);
29245 - atomic_set(&rdev->fence_drv.seq, 0);
29246 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29247 INIT_LIST_HEAD(&rdev->fence_drv.created);
29248 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29249 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29250 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h
29251 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29252 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
29253 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29254 */
29255 struct radeon_fence_driver {
29256 uint32_t scratch_reg;
29257 - atomic_t seq;
29258 + atomic_unchecked_t seq;
29259 uint32_t last_seq;
29260 unsigned long count_timeout;
29261 wait_queue_head_t queue;
29262 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c
29263 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29264 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29265 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29266 request = compat_alloc_user_space(sizeof(*request));
29267 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29268 || __put_user(req32.param, &request->param)
29269 - || __put_user((void __user *)(unsigned long)req32.value,
29270 + || __put_user((unsigned long)req32.value,
29271 &request->value))
29272 return -EFAULT;
29273
29274 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c
29275 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29276 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29277 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29278 unsigned int ret;
29279 RING_LOCALS;
29280
29281 - atomic_inc(&dev_priv->swi_emitted);
29282 - ret = atomic_read(&dev_priv->swi_emitted);
29283 + atomic_inc_unchecked(&dev_priv->swi_emitted);
29284 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29285
29286 BEGIN_RING(4);
29287 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29288 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29289 drm_radeon_private_t *dev_priv =
29290 (drm_radeon_private_t *) dev->dev_private;
29291
29292 - atomic_set(&dev_priv->swi_emitted, 0);
29293 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29294 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29295
29296 dev->max_vblank_count = 0x001fffff;
29297 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c
29298 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29299 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29300 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29301 {
29302 drm_radeon_private_t *dev_priv = dev->dev_private;
29303 drm_radeon_getparam_t *param = data;
29304 - int value;
29305 + int value = 0;
29306
29307 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29308
29309 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c
29310 --- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29311 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29312 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29313 DRM_INFO("radeon: ttm finalized\n");
29314 }
29315
29316 -static struct vm_operations_struct radeon_ttm_vm_ops;
29317 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
29318 -
29319 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29320 -{
29321 - struct ttm_buffer_object *bo;
29322 - int r;
29323 -
29324 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
29325 - if (bo == NULL) {
29326 - return VM_FAULT_NOPAGE;
29327 - }
29328 - r = ttm_vm_ops->fault(vma, vmf);
29329 - return r;
29330 -}
29331 -
29332 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29333 {
29334 struct drm_file *file_priv;
29335 struct radeon_device *rdev;
29336 - int r;
29337
29338 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29339 return drm_mmap(filp, vma);
29340 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29341
29342 file_priv = (struct drm_file *)filp->private_data;
29343 rdev = file_priv->minor->dev->dev_private;
29344 - if (rdev == NULL) {
29345 + if (!rdev)
29346 return -EINVAL;
29347 - }
29348 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29349 - if (unlikely(r != 0)) {
29350 - return r;
29351 - }
29352 - if (unlikely(ttm_vm_ops == NULL)) {
29353 - ttm_vm_ops = vma->vm_ops;
29354 - radeon_ttm_vm_ops = *ttm_vm_ops;
29355 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29356 - }
29357 - vma->vm_ops = &radeon_ttm_vm_ops;
29358 - return 0;
29359 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29360 }
29361
29362
29363 diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c
29364 --- linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29365 +++ linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29366 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29367 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29368 rdev->pm.sideport_bandwidth.full)
29369 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29370 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29371 + read_delay_latency.full = rfixed_const(800 * 1000);
29372 read_delay_latency.full = rfixed_div(read_delay_latency,
29373 rdev->pm.igp_sideport_mclk);
29374 + a.full = rfixed_const(370);
29375 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29376 } else {
29377 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29378 rdev->pm.k8_bandwidth.full)
29379 diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c
29380 --- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29381 +++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29382 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29383 NULL
29384 };
29385
29386 -static struct sysfs_ops ttm_bo_global_ops = {
29387 +static const struct sysfs_ops ttm_bo_global_ops = {
29388 .show = &ttm_bo_global_show
29389 };
29390
29391 diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c
29392 --- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29393 +++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29394 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29395 {
29396 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29397 vma->vm_private_data;
29398 - struct ttm_bo_device *bdev = bo->bdev;
29399 + struct ttm_bo_device *bdev;
29400 unsigned long bus_base;
29401 unsigned long bus_offset;
29402 unsigned long bus_size;
29403 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29404 unsigned long address = (unsigned long)vmf->virtual_address;
29405 int retval = VM_FAULT_NOPAGE;
29406
29407 + if (!bo)
29408 + return VM_FAULT_NOPAGE;
29409 + bdev = bo->bdev;
29410 +
29411 /*
29412 * Work around locking order reversal in fault / nopfn
29413 * between mmap_sem and bo_reserve: Perform a trylock operation
29414 diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c
29415 --- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29416 +++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29417 @@ -36,7 +36,7 @@
29418 struct ttm_global_item {
29419 struct mutex mutex;
29420 void *object;
29421 - int refcount;
29422 + atomic_t refcount;
29423 };
29424
29425 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29426 @@ -49,7 +49,7 @@ void ttm_global_init(void)
29427 struct ttm_global_item *item = &glob[i];
29428 mutex_init(&item->mutex);
29429 item->object = NULL;
29430 - item->refcount = 0;
29431 + atomic_set(&item->refcount, 0);
29432 }
29433 }
29434
29435 @@ -59,7 +59,7 @@ void ttm_global_release(void)
29436 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29437 struct ttm_global_item *item = &glob[i];
29438 BUG_ON(item->object != NULL);
29439 - BUG_ON(item->refcount != 0);
29440 + BUG_ON(atomic_read(&item->refcount) != 0);
29441 }
29442 }
29443
29444 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29445 void *object;
29446
29447 mutex_lock(&item->mutex);
29448 - if (item->refcount == 0) {
29449 + if (atomic_read(&item->refcount) == 0) {
29450 item->object = kzalloc(ref->size, GFP_KERNEL);
29451 if (unlikely(item->object == NULL)) {
29452 ret = -ENOMEM;
29453 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29454 goto out_err;
29455
29456 }
29457 - ++item->refcount;
29458 + atomic_inc(&item->refcount);
29459 ref->object = item->object;
29460 object = item->object;
29461 mutex_unlock(&item->mutex);
29462 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29463 struct ttm_global_item *item = &glob[ref->global_type];
29464
29465 mutex_lock(&item->mutex);
29466 - BUG_ON(item->refcount == 0);
29467 + BUG_ON(atomic_read(&item->refcount) == 0);
29468 BUG_ON(ref->object != item->object);
29469 - if (--item->refcount == 0) {
29470 + if (atomic_dec_and_test(&item->refcount)) {
29471 ref->release(ref);
29472 item->object = NULL;
29473 }
29474 diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c
29475 --- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29476 +++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29477 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29478 NULL
29479 };
29480
29481 -static struct sysfs_ops ttm_mem_zone_ops = {
29482 +static const struct sysfs_ops ttm_mem_zone_ops = {
29483 .show = &ttm_mem_zone_show,
29484 .store = &ttm_mem_zone_store
29485 };
29486 diff -urNp linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h
29487 --- linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29488 +++ linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29489 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29490 typedef uint32_t maskarray_t[5];
29491
29492 typedef struct drm_via_irq {
29493 - atomic_t irq_received;
29494 + atomic_unchecked_t irq_received;
29495 uint32_t pending_mask;
29496 uint32_t enable_mask;
29497 wait_queue_head_t irq_queue;
29498 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
29499 struct timeval last_vblank;
29500 int last_vblank_valid;
29501 unsigned usec_per_vblank;
29502 - atomic_t vbl_received;
29503 + atomic_unchecked_t vbl_received;
29504 drm_via_state_t hc_state;
29505 char pci_buf[VIA_PCI_BUF_SIZE];
29506 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29507 diff -urNp linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c
29508 --- linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29509 +++ linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29510 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29511 if (crtc != 0)
29512 return 0;
29513
29514 - return atomic_read(&dev_priv->vbl_received);
29515 + return atomic_read_unchecked(&dev_priv->vbl_received);
29516 }
29517
29518 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29519 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29520
29521 status = VIA_READ(VIA_REG_INTERRUPT);
29522 if (status & VIA_IRQ_VBLANK_PENDING) {
29523 - atomic_inc(&dev_priv->vbl_received);
29524 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29525 + atomic_inc_unchecked(&dev_priv->vbl_received);
29526 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29527 do_gettimeofday(&cur_vblank);
29528 if (dev_priv->last_vblank_valid) {
29529 dev_priv->usec_per_vblank =
29530 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29531 dev_priv->last_vblank = cur_vblank;
29532 dev_priv->last_vblank_valid = 1;
29533 }
29534 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29535 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29536 DRM_DEBUG("US per vblank is: %u\n",
29537 dev_priv->usec_per_vblank);
29538 }
29539 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29540
29541 for (i = 0; i < dev_priv->num_irqs; ++i) {
29542 if (status & cur_irq->pending_mask) {
29543 - atomic_inc(&cur_irq->irq_received);
29544 + atomic_inc_unchecked(&cur_irq->irq_received);
29545 DRM_WAKEUP(&cur_irq->irq_queue);
29546 handled = 1;
29547 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
29548 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
29549 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29550 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29551 masks[irq][4]));
29552 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29553 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29554 } else {
29555 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29556 (((cur_irq_sequence =
29557 - atomic_read(&cur_irq->irq_received)) -
29558 + atomic_read_unchecked(&cur_irq->irq_received)) -
29559 *sequence) <= (1 << 23)));
29560 }
29561 *sequence = cur_irq_sequence;
29562 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
29563 }
29564
29565 for (i = 0; i < dev_priv->num_irqs; ++i) {
29566 - atomic_set(&cur_irq->irq_received, 0);
29567 + atomic_set_unchecked(&cur_irq->irq_received, 0);
29568 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29569 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29570 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29571 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
29572 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29573 case VIA_IRQ_RELATIVE:
29574 irqwait->request.sequence +=
29575 - atomic_read(&cur_irq->irq_received);
29576 + atomic_read_unchecked(&cur_irq->irq_received);
29577 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29578 case VIA_IRQ_ABSOLUTE:
29579 break;
29580 diff -urNp linux-2.6.32.43/drivers/hid/hid-core.c linux-2.6.32.43/drivers/hid/hid-core.c
29581 --- linux-2.6.32.43/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
29582 +++ linux-2.6.32.43/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
29583 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
29584
29585 int hid_add_device(struct hid_device *hdev)
29586 {
29587 - static atomic_t id = ATOMIC_INIT(0);
29588 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29589 int ret;
29590
29591 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29592 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
29593 /* XXX hack, any other cleaner solution after the driver core
29594 * is converted to allow more than 20 bytes as the device name? */
29595 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29596 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29597 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29598
29599 ret = device_add(&hdev->dev);
29600 if (!ret)
29601 diff -urNp linux-2.6.32.43/drivers/hid/usbhid/hiddev.c linux-2.6.32.43/drivers/hid/usbhid/hiddev.c
29602 --- linux-2.6.32.43/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
29603 +++ linux-2.6.32.43/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
29604 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
29605 return put_user(HID_VERSION, (int __user *)arg);
29606
29607 case HIDIOCAPPLICATION:
29608 - if (arg < 0 || arg >= hid->maxapplication)
29609 + if (arg >= hid->maxapplication)
29610 return -EINVAL;
29611
29612 for (i = 0; i < hid->maxcollection; i++)
29613 diff -urNp linux-2.6.32.43/drivers/hwmon/lis3lv02d.c linux-2.6.32.43/drivers/hwmon/lis3lv02d.c
29614 --- linux-2.6.32.43/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
29615 +++ linux-2.6.32.43/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
29616 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
29617 * the lid is closed. This leads to interrupts as soon as a little move
29618 * is done.
29619 */
29620 - atomic_inc(&lis3_dev.count);
29621 + atomic_inc_unchecked(&lis3_dev.count);
29622
29623 wake_up_interruptible(&lis3_dev.misc_wait);
29624 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29625 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
29626 if (test_and_set_bit(0, &lis3_dev.misc_opened))
29627 return -EBUSY; /* already open */
29628
29629 - atomic_set(&lis3_dev.count, 0);
29630 + atomic_set_unchecked(&lis3_dev.count, 0);
29631
29632 /*
29633 * The sensor can generate interrupts for free-fall and direction
29634 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
29635 add_wait_queue(&lis3_dev.misc_wait, &wait);
29636 while (true) {
29637 set_current_state(TASK_INTERRUPTIBLE);
29638 - data = atomic_xchg(&lis3_dev.count, 0);
29639 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29640 if (data)
29641 break;
29642
29643 @@ -244,7 +244,7 @@ out:
29644 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29645 {
29646 poll_wait(file, &lis3_dev.misc_wait, wait);
29647 - if (atomic_read(&lis3_dev.count))
29648 + if (atomic_read_unchecked(&lis3_dev.count))
29649 return POLLIN | POLLRDNORM;
29650 return 0;
29651 }
29652 diff -urNp linux-2.6.32.43/drivers/hwmon/lis3lv02d.h linux-2.6.32.43/drivers/hwmon/lis3lv02d.h
29653 --- linux-2.6.32.43/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
29654 +++ linux-2.6.32.43/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
29655 @@ -201,7 +201,7 @@ struct lis3lv02d {
29656
29657 struct input_polled_dev *idev; /* input device */
29658 struct platform_device *pdev; /* platform device */
29659 - atomic_t count; /* interrupt count after last read */
29660 + atomic_unchecked_t count; /* interrupt count after last read */
29661 int xcalib; /* calibrated null value for x */
29662 int ycalib; /* calibrated null value for y */
29663 int zcalib; /* calibrated null value for z */
29664 diff -urNp linux-2.6.32.43/drivers/hwmon/sht15.c linux-2.6.32.43/drivers/hwmon/sht15.c
29665 --- linux-2.6.32.43/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
29666 +++ linux-2.6.32.43/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
29667 @@ -112,7 +112,7 @@ struct sht15_data {
29668 int supply_uV;
29669 int supply_uV_valid;
29670 struct work_struct update_supply_work;
29671 - atomic_t interrupt_handled;
29672 + atomic_unchecked_t interrupt_handled;
29673 };
29674
29675 /**
29676 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
29677 return ret;
29678
29679 gpio_direction_input(data->pdata->gpio_data);
29680 - atomic_set(&data->interrupt_handled, 0);
29681 + atomic_set_unchecked(&data->interrupt_handled, 0);
29682
29683 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29684 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29685 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29686 /* Only relevant if the interrupt hasn't occured. */
29687 - if (!atomic_read(&data->interrupt_handled))
29688 + if (!atomic_read_unchecked(&data->interrupt_handled))
29689 schedule_work(&data->read_work);
29690 }
29691 ret = wait_event_timeout(data->wait_queue,
29692 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
29693 struct sht15_data *data = d;
29694 /* First disable the interrupt */
29695 disable_irq_nosync(irq);
29696 - atomic_inc(&data->interrupt_handled);
29697 + atomic_inc_unchecked(&data->interrupt_handled);
29698 /* Then schedule a reading work struct */
29699 if (data->flag != SHT15_READING_NOTHING)
29700 schedule_work(&data->read_work);
29701 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
29702 here as could have gone low in meantime so verify
29703 it hasn't!
29704 */
29705 - atomic_set(&data->interrupt_handled, 0);
29706 + atomic_set_unchecked(&data->interrupt_handled, 0);
29707 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29708 /* If still not occured or another handler has been scheduled */
29709 if (gpio_get_value(data->pdata->gpio_data)
29710 - || atomic_read(&data->interrupt_handled))
29711 + || atomic_read_unchecked(&data->interrupt_handled))
29712 return;
29713 }
29714 /* Read the data back from the device */
29715 diff -urNp linux-2.6.32.43/drivers/hwmon/w83791d.c linux-2.6.32.43/drivers/hwmon/w83791d.c
29716 --- linux-2.6.32.43/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
29717 +++ linux-2.6.32.43/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
29718 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
29719 struct i2c_board_info *info);
29720 static int w83791d_remove(struct i2c_client *client);
29721
29722 -static int w83791d_read(struct i2c_client *client, u8 register);
29723 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
29724 +static int w83791d_read(struct i2c_client *client, u8 reg);
29725 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
29726 static struct w83791d_data *w83791d_update_device(struct device *dev);
29727
29728 #ifdef DEBUG
29729 diff -urNp linux-2.6.32.43/drivers/ide/ide-cd.c linux-2.6.32.43/drivers/ide/ide-cd.c
29730 --- linux-2.6.32.43/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
29731 +++ linux-2.6.32.43/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
29732 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
29733 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29734 if ((unsigned long)buf & alignment
29735 || blk_rq_bytes(rq) & q->dma_pad_mask
29736 - || object_is_on_stack(buf))
29737 + || object_starts_on_stack(buf))
29738 drive->dma = 0;
29739 }
29740 }
29741 diff -urNp linux-2.6.32.43/drivers/ide/ide-floppy.c linux-2.6.32.43/drivers/ide/ide-floppy.c
29742 --- linux-2.6.32.43/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
29743 +++ linux-2.6.32.43/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
29744 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
29745 u8 pc_buf[256], header_len, desc_cnt;
29746 int i, rc = 1, blocks, length;
29747
29748 + pax_track_stack();
29749 +
29750 ide_debug_log(IDE_DBG_FUNC, "enter");
29751
29752 drive->bios_cyl = 0;
29753 diff -urNp linux-2.6.32.43/drivers/ide/setup-pci.c linux-2.6.32.43/drivers/ide/setup-pci.c
29754 --- linux-2.6.32.43/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29755 +++ linux-2.6.32.43/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29756 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29757 int ret, i, n_ports = dev2 ? 4 : 2;
29758 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29759
29760 + pax_track_stack();
29761 +
29762 for (i = 0; i < n_ports / 2; i++) {
29763 ret = ide_setup_pci_controller(pdev[i], d, !i);
29764 if (ret < 0)
29765 diff -urNp linux-2.6.32.43/drivers/ieee1394/dv1394.c linux-2.6.32.43/drivers/ieee1394/dv1394.c
29766 --- linux-2.6.32.43/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29767 +++ linux-2.6.32.43/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29768 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29769 based upon DIF section and sequence
29770 */
29771
29772 -static void inline
29773 +static inline void
29774 frame_put_packet (struct frame *f, struct packet *p)
29775 {
29776 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29777 diff -urNp linux-2.6.32.43/drivers/ieee1394/hosts.c linux-2.6.32.43/drivers/ieee1394/hosts.c
29778 --- linux-2.6.32.43/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29779 +++ linux-2.6.32.43/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29780 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29781 }
29782
29783 static struct hpsb_host_driver dummy_driver = {
29784 + .name = "dummy",
29785 .transmit_packet = dummy_transmit_packet,
29786 .devctl = dummy_devctl,
29787 .isoctl = dummy_isoctl
29788 diff -urNp linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c
29789 --- linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29790 +++ linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29791 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29792 for (func = 0; func < 8; func++) {
29793 u32 class = read_pci_config(num,slot,func,
29794 PCI_CLASS_REVISION);
29795 - if ((class == 0xffffffff))
29796 + if (class == 0xffffffff)
29797 continue; /* No device at this func */
29798
29799 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29800 diff -urNp linux-2.6.32.43/drivers/ieee1394/ohci1394.c linux-2.6.32.43/drivers/ieee1394/ohci1394.c
29801 --- linux-2.6.32.43/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29802 +++ linux-2.6.32.43/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29803 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29804 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29805
29806 /* Module Parameters */
29807 -static int phys_dma = 1;
29808 +static int phys_dma;
29809 module_param(phys_dma, int, 0444);
29810 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29811 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29812
29813 static void dma_trm_tasklet(unsigned long data);
29814 static void dma_trm_reset(struct dma_trm_ctx *d);
29815 diff -urNp linux-2.6.32.43/drivers/ieee1394/sbp2.c linux-2.6.32.43/drivers/ieee1394/sbp2.c
29816 --- linux-2.6.32.43/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29817 +++ linux-2.6.32.43/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29818 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29819 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29820 MODULE_LICENSE("GPL");
29821
29822 -static int sbp2_module_init(void)
29823 +static int __init sbp2_module_init(void)
29824 {
29825 int ret;
29826
29827 diff -urNp linux-2.6.32.43/drivers/infiniband/core/cm.c linux-2.6.32.43/drivers/infiniband/core/cm.c
29828 --- linux-2.6.32.43/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29829 +++ linux-2.6.32.43/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29830 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29831
29832 struct cm_counter_group {
29833 struct kobject obj;
29834 - atomic_long_t counter[CM_ATTR_COUNT];
29835 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29836 };
29837
29838 struct cm_counter_attribute {
29839 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29840 struct ib_mad_send_buf *msg = NULL;
29841 int ret;
29842
29843 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29844 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29845 counter[CM_REQ_COUNTER]);
29846
29847 /* Quick state check to discard duplicate REQs. */
29848 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29849 if (!cm_id_priv)
29850 return;
29851
29852 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29853 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29854 counter[CM_REP_COUNTER]);
29855 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29856 if (ret)
29857 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29858 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29859 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29860 spin_unlock_irq(&cm_id_priv->lock);
29861 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29862 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29863 counter[CM_RTU_COUNTER]);
29864 goto out;
29865 }
29866 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29867 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29868 dreq_msg->local_comm_id);
29869 if (!cm_id_priv) {
29870 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29871 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29872 counter[CM_DREQ_COUNTER]);
29873 cm_issue_drep(work->port, work->mad_recv_wc);
29874 return -EINVAL;
29875 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29876 case IB_CM_MRA_REP_RCVD:
29877 break;
29878 case IB_CM_TIMEWAIT:
29879 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29880 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29881 counter[CM_DREQ_COUNTER]);
29882 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29883 goto unlock;
29884 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29885 cm_free_msg(msg);
29886 goto deref;
29887 case IB_CM_DREQ_RCVD:
29888 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29889 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29890 counter[CM_DREQ_COUNTER]);
29891 goto unlock;
29892 default:
29893 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29894 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29895 cm_id_priv->msg, timeout)) {
29896 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29897 - atomic_long_inc(&work->port->
29898 + atomic_long_inc_unchecked(&work->port->
29899 counter_group[CM_RECV_DUPLICATES].
29900 counter[CM_MRA_COUNTER]);
29901 goto out;
29902 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29903 break;
29904 case IB_CM_MRA_REQ_RCVD:
29905 case IB_CM_MRA_REP_RCVD:
29906 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29907 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29908 counter[CM_MRA_COUNTER]);
29909 /* fall through */
29910 default:
29911 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29912 case IB_CM_LAP_IDLE:
29913 break;
29914 case IB_CM_MRA_LAP_SENT:
29915 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29916 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29917 counter[CM_LAP_COUNTER]);
29918 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29919 goto unlock;
29920 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29921 cm_free_msg(msg);
29922 goto deref;
29923 case IB_CM_LAP_RCVD:
29924 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29925 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29926 counter[CM_LAP_COUNTER]);
29927 goto unlock;
29928 default:
29929 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29930 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29931 if (cur_cm_id_priv) {
29932 spin_unlock_irq(&cm.lock);
29933 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29934 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29935 counter[CM_SIDR_REQ_COUNTER]);
29936 goto out; /* Duplicate message. */
29937 }
29938 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29939 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29940 msg->retries = 1;
29941
29942 - atomic_long_add(1 + msg->retries,
29943 + atomic_long_add_unchecked(1 + msg->retries,
29944 &port->counter_group[CM_XMIT].counter[attr_index]);
29945 if (msg->retries)
29946 - atomic_long_add(msg->retries,
29947 + atomic_long_add_unchecked(msg->retries,
29948 &port->counter_group[CM_XMIT_RETRIES].
29949 counter[attr_index]);
29950
29951 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29952 }
29953
29954 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29955 - atomic_long_inc(&port->counter_group[CM_RECV].
29956 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29957 counter[attr_id - CM_ATTR_ID_OFFSET]);
29958
29959 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29960 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29961 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29962
29963 return sprintf(buf, "%ld\n",
29964 - atomic_long_read(&group->counter[cm_attr->index]));
29965 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29966 }
29967
29968 -static struct sysfs_ops cm_counter_ops = {
29969 +static const struct sysfs_ops cm_counter_ops = {
29970 .show = cm_show_counter
29971 };
29972
29973 diff -urNp linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c
29974 --- linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29975 +++ linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29976 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29977
29978 struct task_struct *thread;
29979
29980 - atomic_t req_ser;
29981 - atomic_t flush_ser;
29982 + atomic_unchecked_t req_ser;
29983 + atomic_unchecked_t flush_ser;
29984
29985 wait_queue_head_t force_wait;
29986 };
29987 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29988 struct ib_fmr_pool *pool = pool_ptr;
29989
29990 do {
29991 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29992 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29993 ib_fmr_batch_release(pool);
29994
29995 - atomic_inc(&pool->flush_ser);
29996 + atomic_inc_unchecked(&pool->flush_ser);
29997 wake_up_interruptible(&pool->force_wait);
29998
29999 if (pool->flush_function)
30000 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30001 }
30002
30003 set_current_state(TASK_INTERRUPTIBLE);
30004 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30005 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30006 !kthread_should_stop())
30007 schedule();
30008 __set_current_state(TASK_RUNNING);
30009 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30010 pool->dirty_watermark = params->dirty_watermark;
30011 pool->dirty_len = 0;
30012 spin_lock_init(&pool->pool_lock);
30013 - atomic_set(&pool->req_ser, 0);
30014 - atomic_set(&pool->flush_ser, 0);
30015 + atomic_set_unchecked(&pool->req_ser, 0);
30016 + atomic_set_unchecked(&pool->flush_ser, 0);
30017 init_waitqueue_head(&pool->force_wait);
30018
30019 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30020 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30021 }
30022 spin_unlock_irq(&pool->pool_lock);
30023
30024 - serial = atomic_inc_return(&pool->req_ser);
30025 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30026 wake_up_process(pool->thread);
30027
30028 if (wait_event_interruptible(pool->force_wait,
30029 - atomic_read(&pool->flush_ser) - serial >= 0))
30030 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30031 return -EINTR;
30032
30033 return 0;
30034 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30035 } else {
30036 list_add_tail(&fmr->list, &pool->dirty_list);
30037 if (++pool->dirty_len >= pool->dirty_watermark) {
30038 - atomic_inc(&pool->req_ser);
30039 + atomic_inc_unchecked(&pool->req_ser);
30040 wake_up_process(pool->thread);
30041 }
30042 }
30043 diff -urNp linux-2.6.32.43/drivers/infiniband/core/sysfs.c linux-2.6.32.43/drivers/infiniband/core/sysfs.c
30044 --- linux-2.6.32.43/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30045 +++ linux-2.6.32.43/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30046 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30047 return port_attr->show(p, port_attr, buf);
30048 }
30049
30050 -static struct sysfs_ops port_sysfs_ops = {
30051 +static const struct sysfs_ops port_sysfs_ops = {
30052 .show = port_attr_show
30053 };
30054
30055 diff -urNp linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c
30056 --- linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30057 +++ linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30058 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30059 dst->grh.sgid_index = src->grh.sgid_index;
30060 dst->grh.hop_limit = src->grh.hop_limit;
30061 dst->grh.traffic_class = src->grh.traffic_class;
30062 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30063 dst->dlid = src->dlid;
30064 dst->sl = src->sl;
30065 dst->src_path_bits = src->src_path_bits;
30066 dst->static_rate = src->static_rate;
30067 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30068 dst->port_num = src->port_num;
30069 + dst->reserved = 0;
30070 }
30071 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30072
30073 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30074 struct ib_qp_attr *src)
30075 {
30076 + dst->qp_state = src->qp_state;
30077 dst->cur_qp_state = src->cur_qp_state;
30078 dst->path_mtu = src->path_mtu;
30079 dst->path_mig_state = src->path_mig_state;
30080 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30081 dst->rnr_retry = src->rnr_retry;
30082 dst->alt_port_num = src->alt_port_num;
30083 dst->alt_timeout = src->alt_timeout;
30084 + memset(dst->reserved, 0, sizeof(dst->reserved));
30085 }
30086 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30087
30088 diff -urNp linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c
30089 --- linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30090 +++ linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30091 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30092 struct infinipath_counters counters;
30093 struct ipath_devdata *dd;
30094
30095 + pax_track_stack();
30096 +
30097 dd = file->f_path.dentry->d_inode->i_private;
30098 dd->ipath_f_read_counters(dd, &counters);
30099
30100 diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c
30101 --- linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30102 +++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30103 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30104 LIST_HEAD(nes_adapter_list);
30105 static LIST_HEAD(nes_dev_list);
30106
30107 -atomic_t qps_destroyed;
30108 +atomic_unchecked_t qps_destroyed;
30109
30110 static unsigned int ee_flsh_adapter;
30111 static unsigned int sysfs_nonidx_addr;
30112 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30113 struct nes_adapter *nesadapter = nesdev->nesadapter;
30114 u32 qp_id;
30115
30116 - atomic_inc(&qps_destroyed);
30117 + atomic_inc_unchecked(&qps_destroyed);
30118
30119 /* Free the control structures */
30120
30121 diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c
30122 --- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30123 +++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30124 @@ -69,11 +69,11 @@ u32 cm_packets_received;
30125 u32 cm_listens_created;
30126 u32 cm_listens_destroyed;
30127 u32 cm_backlog_drops;
30128 -atomic_t cm_loopbacks;
30129 -atomic_t cm_nodes_created;
30130 -atomic_t cm_nodes_destroyed;
30131 -atomic_t cm_accel_dropped_pkts;
30132 -atomic_t cm_resets_recvd;
30133 +atomic_unchecked_t cm_loopbacks;
30134 +atomic_unchecked_t cm_nodes_created;
30135 +atomic_unchecked_t cm_nodes_destroyed;
30136 +atomic_unchecked_t cm_accel_dropped_pkts;
30137 +atomic_unchecked_t cm_resets_recvd;
30138
30139 static inline int mini_cm_accelerated(struct nes_cm_core *,
30140 struct nes_cm_node *);
30141 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30142
30143 static struct nes_cm_core *g_cm_core;
30144
30145 -atomic_t cm_connects;
30146 -atomic_t cm_accepts;
30147 -atomic_t cm_disconnects;
30148 -atomic_t cm_closes;
30149 -atomic_t cm_connecteds;
30150 -atomic_t cm_connect_reqs;
30151 -atomic_t cm_rejects;
30152 +atomic_unchecked_t cm_connects;
30153 +atomic_unchecked_t cm_accepts;
30154 +atomic_unchecked_t cm_disconnects;
30155 +atomic_unchecked_t cm_closes;
30156 +atomic_unchecked_t cm_connecteds;
30157 +atomic_unchecked_t cm_connect_reqs;
30158 +atomic_unchecked_t cm_rejects;
30159
30160
30161 /**
30162 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30163 cm_node->rem_mac);
30164
30165 add_hte_node(cm_core, cm_node);
30166 - atomic_inc(&cm_nodes_created);
30167 + atomic_inc_unchecked(&cm_nodes_created);
30168
30169 return cm_node;
30170 }
30171 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30172 }
30173
30174 atomic_dec(&cm_core->node_cnt);
30175 - atomic_inc(&cm_nodes_destroyed);
30176 + atomic_inc_unchecked(&cm_nodes_destroyed);
30177 nesqp = cm_node->nesqp;
30178 if (nesqp) {
30179 nesqp->cm_node = NULL;
30180 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30181
30182 static void drop_packet(struct sk_buff *skb)
30183 {
30184 - atomic_inc(&cm_accel_dropped_pkts);
30185 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30186 dev_kfree_skb_any(skb);
30187 }
30188
30189 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30190
30191 int reset = 0; /* whether to send reset in case of err.. */
30192 int passive_state;
30193 - atomic_inc(&cm_resets_recvd);
30194 + atomic_inc_unchecked(&cm_resets_recvd);
30195 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30196 " refcnt=%d\n", cm_node, cm_node->state,
30197 atomic_read(&cm_node->ref_count));
30198 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30199 rem_ref_cm_node(cm_node->cm_core, cm_node);
30200 return NULL;
30201 }
30202 - atomic_inc(&cm_loopbacks);
30203 + atomic_inc_unchecked(&cm_loopbacks);
30204 loopbackremotenode->loopbackpartner = cm_node;
30205 loopbackremotenode->tcp_cntxt.rcv_wscale =
30206 NES_CM_DEFAULT_RCV_WND_SCALE;
30207 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30208 add_ref_cm_node(cm_node);
30209 } else if (cm_node->state == NES_CM_STATE_TSA) {
30210 rem_ref_cm_node(cm_core, cm_node);
30211 - atomic_inc(&cm_accel_dropped_pkts);
30212 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30213 dev_kfree_skb_any(skb);
30214 break;
30215 }
30216 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30217
30218 if ((cm_id) && (cm_id->event_handler)) {
30219 if (issue_disconn) {
30220 - atomic_inc(&cm_disconnects);
30221 + atomic_inc_unchecked(&cm_disconnects);
30222 cm_event.event = IW_CM_EVENT_DISCONNECT;
30223 cm_event.status = disconn_status;
30224 cm_event.local_addr = cm_id->local_addr;
30225 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30226 }
30227
30228 if (issue_close) {
30229 - atomic_inc(&cm_closes);
30230 + atomic_inc_unchecked(&cm_closes);
30231 nes_disconnect(nesqp, 1);
30232
30233 cm_id->provider_data = nesqp;
30234 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30235
30236 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30237 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30238 - atomic_inc(&cm_accepts);
30239 + atomic_inc_unchecked(&cm_accepts);
30240
30241 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30242 atomic_read(&nesvnic->netdev->refcnt));
30243 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30244
30245 struct nes_cm_core *cm_core;
30246
30247 - atomic_inc(&cm_rejects);
30248 + atomic_inc_unchecked(&cm_rejects);
30249 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30250 loopback = cm_node->loopbackpartner;
30251 cm_core = cm_node->cm_core;
30252 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30253 ntohl(cm_id->local_addr.sin_addr.s_addr),
30254 ntohs(cm_id->local_addr.sin_port));
30255
30256 - atomic_inc(&cm_connects);
30257 + atomic_inc_unchecked(&cm_connects);
30258 nesqp->active_conn = 1;
30259
30260 /* cache the cm_id in the qp */
30261 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30262 if (nesqp->destroyed) {
30263 return;
30264 }
30265 - atomic_inc(&cm_connecteds);
30266 + atomic_inc_unchecked(&cm_connecteds);
30267 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30268 " local port 0x%04X. jiffies = %lu.\n",
30269 nesqp->hwqp.qp_id,
30270 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30271
30272 ret = cm_id->event_handler(cm_id, &cm_event);
30273 cm_id->add_ref(cm_id);
30274 - atomic_inc(&cm_closes);
30275 + atomic_inc_unchecked(&cm_closes);
30276 cm_event.event = IW_CM_EVENT_CLOSE;
30277 cm_event.status = IW_CM_EVENT_STATUS_OK;
30278 cm_event.provider_data = cm_id->provider_data;
30279 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30280 return;
30281 cm_id = cm_node->cm_id;
30282
30283 - atomic_inc(&cm_connect_reqs);
30284 + atomic_inc_unchecked(&cm_connect_reqs);
30285 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30286 cm_node, cm_id, jiffies);
30287
30288 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30289 return;
30290 cm_id = cm_node->cm_id;
30291
30292 - atomic_inc(&cm_connect_reqs);
30293 + atomic_inc_unchecked(&cm_connect_reqs);
30294 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30295 cm_node, cm_id, jiffies);
30296
30297 diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h
30298 --- linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30299 +++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30300 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30301 extern unsigned int wqm_quanta;
30302 extern struct list_head nes_adapter_list;
30303
30304 -extern atomic_t cm_connects;
30305 -extern atomic_t cm_accepts;
30306 -extern atomic_t cm_disconnects;
30307 -extern atomic_t cm_closes;
30308 -extern atomic_t cm_connecteds;
30309 -extern atomic_t cm_connect_reqs;
30310 -extern atomic_t cm_rejects;
30311 -extern atomic_t mod_qp_timouts;
30312 -extern atomic_t qps_created;
30313 -extern atomic_t qps_destroyed;
30314 -extern atomic_t sw_qps_destroyed;
30315 +extern atomic_unchecked_t cm_connects;
30316 +extern atomic_unchecked_t cm_accepts;
30317 +extern atomic_unchecked_t cm_disconnects;
30318 +extern atomic_unchecked_t cm_closes;
30319 +extern atomic_unchecked_t cm_connecteds;
30320 +extern atomic_unchecked_t cm_connect_reqs;
30321 +extern atomic_unchecked_t cm_rejects;
30322 +extern atomic_unchecked_t mod_qp_timouts;
30323 +extern atomic_unchecked_t qps_created;
30324 +extern atomic_unchecked_t qps_destroyed;
30325 +extern atomic_unchecked_t sw_qps_destroyed;
30326 extern u32 mh_detected;
30327 extern u32 mh_pauses_sent;
30328 extern u32 cm_packets_sent;
30329 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30330 extern u32 cm_listens_created;
30331 extern u32 cm_listens_destroyed;
30332 extern u32 cm_backlog_drops;
30333 -extern atomic_t cm_loopbacks;
30334 -extern atomic_t cm_nodes_created;
30335 -extern atomic_t cm_nodes_destroyed;
30336 -extern atomic_t cm_accel_dropped_pkts;
30337 -extern atomic_t cm_resets_recvd;
30338 +extern atomic_unchecked_t cm_loopbacks;
30339 +extern atomic_unchecked_t cm_nodes_created;
30340 +extern atomic_unchecked_t cm_nodes_destroyed;
30341 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30342 +extern atomic_unchecked_t cm_resets_recvd;
30343
30344 extern u32 int_mod_timer_init;
30345 extern u32 int_mod_cq_depth_256;
30346 diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c
30347 --- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30348 +++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30349 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30350 target_stat_values[++index] = mh_detected;
30351 target_stat_values[++index] = mh_pauses_sent;
30352 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30353 - target_stat_values[++index] = atomic_read(&cm_connects);
30354 - target_stat_values[++index] = atomic_read(&cm_accepts);
30355 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30356 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30357 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30358 - target_stat_values[++index] = atomic_read(&cm_rejects);
30359 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30360 - target_stat_values[++index] = atomic_read(&qps_created);
30361 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30362 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30363 - target_stat_values[++index] = atomic_read(&cm_closes);
30364 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30365 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30366 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30367 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30368 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30369 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30370 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30371 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30372 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30373 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30374 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30375 target_stat_values[++index] = cm_packets_sent;
30376 target_stat_values[++index] = cm_packets_bounced;
30377 target_stat_values[++index] = cm_packets_created;
30378 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30379 target_stat_values[++index] = cm_listens_created;
30380 target_stat_values[++index] = cm_listens_destroyed;
30381 target_stat_values[++index] = cm_backlog_drops;
30382 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30383 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30384 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30385 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30386 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30387 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30388 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30389 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30390 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30391 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30392 target_stat_values[++index] = int_mod_timer_init;
30393 target_stat_values[++index] = int_mod_cq_depth_1;
30394 target_stat_values[++index] = int_mod_cq_depth_4;
30395 diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c
30396 --- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30397 +++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30398 @@ -45,9 +45,9 @@
30399
30400 #include <rdma/ib_umem.h>
30401
30402 -atomic_t mod_qp_timouts;
30403 -atomic_t qps_created;
30404 -atomic_t sw_qps_destroyed;
30405 +atomic_unchecked_t mod_qp_timouts;
30406 +atomic_unchecked_t qps_created;
30407 +atomic_unchecked_t sw_qps_destroyed;
30408
30409 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30410
30411 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30412 if (init_attr->create_flags)
30413 return ERR_PTR(-EINVAL);
30414
30415 - atomic_inc(&qps_created);
30416 + atomic_inc_unchecked(&qps_created);
30417 switch (init_attr->qp_type) {
30418 case IB_QPT_RC:
30419 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30420 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30421 struct iw_cm_event cm_event;
30422 int ret;
30423
30424 - atomic_inc(&sw_qps_destroyed);
30425 + atomic_inc_unchecked(&sw_qps_destroyed);
30426 nesqp->destroyed = 1;
30427
30428 /* Blow away the connection if it exists. */
30429 diff -urNp linux-2.6.32.43/drivers/input/gameport/gameport.c linux-2.6.32.43/drivers/input/gameport/gameport.c
30430 --- linux-2.6.32.43/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30431 +++ linux-2.6.32.43/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30432 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30433 */
30434 static void gameport_init_port(struct gameport *gameport)
30435 {
30436 - static atomic_t gameport_no = ATOMIC_INIT(0);
30437 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30438
30439 __module_get(THIS_MODULE);
30440
30441 mutex_init(&gameport->drv_mutex);
30442 device_initialize(&gameport->dev);
30443 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30444 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30445 gameport->dev.bus = &gameport_bus;
30446 gameport->dev.release = gameport_release_port;
30447 if (gameport->parent)
30448 diff -urNp linux-2.6.32.43/drivers/input/input.c linux-2.6.32.43/drivers/input/input.c
30449 --- linux-2.6.32.43/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30450 +++ linux-2.6.32.43/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30451 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30452 */
30453 int input_register_device(struct input_dev *dev)
30454 {
30455 - static atomic_t input_no = ATOMIC_INIT(0);
30456 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30457 struct input_handler *handler;
30458 const char *path;
30459 int error;
30460 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
30461 dev->setkeycode = input_default_setkeycode;
30462
30463 dev_set_name(&dev->dev, "input%ld",
30464 - (unsigned long) atomic_inc_return(&input_no) - 1);
30465 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30466
30467 error = device_add(&dev->dev);
30468 if (error)
30469 diff -urNp linux-2.6.32.43/drivers/input/joystick/sidewinder.c linux-2.6.32.43/drivers/input/joystick/sidewinder.c
30470 --- linux-2.6.32.43/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
30471 +++ linux-2.6.32.43/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
30472 @@ -30,6 +30,7 @@
30473 #include <linux/kernel.h>
30474 #include <linux/module.h>
30475 #include <linux/slab.h>
30476 +#include <linux/sched.h>
30477 #include <linux/init.h>
30478 #include <linux/input.h>
30479 #include <linux/gameport.h>
30480 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30481 unsigned char buf[SW_LENGTH];
30482 int i;
30483
30484 + pax_track_stack();
30485 +
30486 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30487
30488 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30489 diff -urNp linux-2.6.32.43/drivers/input/joystick/xpad.c linux-2.6.32.43/drivers/input/joystick/xpad.c
30490 --- linux-2.6.32.43/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
30491 +++ linux-2.6.32.43/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
30492 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
30493
30494 static int xpad_led_probe(struct usb_xpad *xpad)
30495 {
30496 - static atomic_t led_seq = ATOMIC_INIT(0);
30497 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30498 long led_no;
30499 struct xpad_led *led;
30500 struct led_classdev *led_cdev;
30501 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
30502 if (!led)
30503 return -ENOMEM;
30504
30505 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30506 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30507
30508 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30509 led->xpad = xpad;
30510 diff -urNp linux-2.6.32.43/drivers/input/serio/serio.c linux-2.6.32.43/drivers/input/serio/serio.c
30511 --- linux-2.6.32.43/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
30512 +++ linux-2.6.32.43/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
30513 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
30514 */
30515 static void serio_init_port(struct serio *serio)
30516 {
30517 - static atomic_t serio_no = ATOMIC_INIT(0);
30518 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30519
30520 __module_get(THIS_MODULE);
30521
30522 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
30523 mutex_init(&serio->drv_mutex);
30524 device_initialize(&serio->dev);
30525 dev_set_name(&serio->dev, "serio%ld",
30526 - (long)atomic_inc_return(&serio_no) - 1);
30527 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30528 serio->dev.bus = &serio_bus;
30529 serio->dev.release = serio_release_port;
30530 if (serio->parent) {
30531 diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/common.c linux-2.6.32.43/drivers/isdn/gigaset/common.c
30532 --- linux-2.6.32.43/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
30533 +++ linux-2.6.32.43/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
30534 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
30535 cs->commands_pending = 0;
30536 cs->cur_at_seq = 0;
30537 cs->gotfwver = -1;
30538 - cs->open_count = 0;
30539 + local_set(&cs->open_count, 0);
30540 cs->dev = NULL;
30541 cs->tty = NULL;
30542 cs->tty_dev = NULL;
30543 diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h
30544 --- linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
30545 +++ linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
30546 @@ -34,6 +34,7 @@
30547 #include <linux/tty_driver.h>
30548 #include <linux/list.h>
30549 #include <asm/atomic.h>
30550 +#include <asm/local.h>
30551
30552 #define GIG_VERSION {0,5,0,0}
30553 #define GIG_COMPAT {0,4,0,0}
30554 @@ -446,7 +447,7 @@ struct cardstate {
30555 spinlock_t cmdlock;
30556 unsigned curlen, cmdbytes;
30557
30558 - unsigned open_count;
30559 + local_t open_count;
30560 struct tty_struct *tty;
30561 struct tasklet_struct if_wake_tasklet;
30562 unsigned control_state;
30563 diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/interface.c linux-2.6.32.43/drivers/isdn/gigaset/interface.c
30564 --- linux-2.6.32.43/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
30565 +++ linux-2.6.32.43/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
30566 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
30567 return -ERESTARTSYS; // FIXME -EINTR?
30568 tty->driver_data = cs;
30569
30570 - ++cs->open_count;
30571 -
30572 - if (cs->open_count == 1) {
30573 + if (local_inc_return(&cs->open_count) == 1) {
30574 spin_lock_irqsave(&cs->lock, flags);
30575 cs->tty = tty;
30576 spin_unlock_irqrestore(&cs->lock, flags);
30577 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
30578
30579 if (!cs->connected)
30580 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30581 - else if (!cs->open_count)
30582 + else if (!local_read(&cs->open_count))
30583 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30584 else {
30585 - if (!--cs->open_count) {
30586 + if (!local_dec_return(&cs->open_count)) {
30587 spin_lock_irqsave(&cs->lock, flags);
30588 cs->tty = NULL;
30589 spin_unlock_irqrestore(&cs->lock, flags);
30590 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
30591 if (!cs->connected) {
30592 gig_dbg(DEBUG_IF, "not connected");
30593 retval = -ENODEV;
30594 - } else if (!cs->open_count)
30595 + } else if (!local_read(&cs->open_count))
30596 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30597 else {
30598 retval = 0;
30599 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
30600 if (!cs->connected) {
30601 gig_dbg(DEBUG_IF, "not connected");
30602 retval = -ENODEV;
30603 - } else if (!cs->open_count)
30604 + } else if (!local_read(&cs->open_count))
30605 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30606 else if (cs->mstate != MS_LOCKED) {
30607 dev_warn(cs->dev, "can't write to unlocked device\n");
30608 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
30609 if (!cs->connected) {
30610 gig_dbg(DEBUG_IF, "not connected");
30611 retval = -ENODEV;
30612 - } else if (!cs->open_count)
30613 + } else if (!local_read(&cs->open_count))
30614 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30615 else if (cs->mstate != MS_LOCKED) {
30616 dev_warn(cs->dev, "can't write to unlocked device\n");
30617 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
30618
30619 if (!cs->connected)
30620 gig_dbg(DEBUG_IF, "not connected");
30621 - else if (!cs->open_count)
30622 + else if (!local_read(&cs->open_count))
30623 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30624 else if (cs->mstate != MS_LOCKED)
30625 dev_warn(cs->dev, "can't write to unlocked device\n");
30626 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
30627
30628 if (!cs->connected)
30629 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30630 - else if (!cs->open_count)
30631 + else if (!local_read(&cs->open_count))
30632 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30633 else {
30634 //FIXME
30635 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
30636
30637 if (!cs->connected)
30638 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30639 - else if (!cs->open_count)
30640 + else if (!local_read(&cs->open_count))
30641 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30642 else {
30643 //FIXME
30644 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
30645 goto out;
30646 }
30647
30648 - if (!cs->open_count) {
30649 + if (!local_read(&cs->open_count)) {
30650 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30651 goto out;
30652 }
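
The gigaset hunks above turn the driver's plain `unsigned open_count`, previously modified with bare ++ and --, into a local_t driven by local_inc_return()/local_dec_return(), so the "first open" and "last close" decisions come from the value returned by a single read-modify-write rather than from a separate increment and test. A small sketch of that shape, using C11 atomics in place of the kernel's local_t and leaving out the driver's locking:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int open_count = 0;

    static void if_open_sketch(void)
    {
        /* if (local_inc_return(&cs->open_count) == 1) ... in the patch */
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
            puts("first open: attach tty");
    }

    static void if_close_sketch(void)
    {
        if (atomic_load(&open_count) == 0) {
            puts("device not opened");
            return;
        }
        /* if (!local_dec_return(&cs->open_count)) ... in the patch */
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
            puts("last close: detach tty");
    }

    int main(void)
    {
        if_open_sketch();
        if_open_sketch();
        if_close_sketch();
        if_close_sketch();
        if_close_sketch();
        return 0;
    }
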
30653 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c
30654 --- linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
30655 +++ linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
30656 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
30657 }
30658 if (left) {
30659 if (t4file->user) {
30660 - if (copy_from_user(buf, dp, left))
30661 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30662 return -EFAULT;
30663 } else {
30664 memcpy(buf, dp, left);
30665 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
30666 }
30667 if (left) {
30668 if (config->user) {
30669 - if (copy_from_user(buf, dp, left))
30670 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30671 return -EFAULT;
30672 } else {
30673 memcpy(buf, dp, left);
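
Both b1.c hunks (and the icn.c one further down) add an explicit `left > sizeof buf` test in front of copy_from_user() into a fixed-size stack buffer: copy_from_user() only validates the user-space side of the copy, so without the check a caller-controlled length could overrun the kernel buffer. A userspace sketch of the same shape; the helper name is made up and -EFAULT is simply the error code the driver returns:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirror of: if (left > sizeof buf || copy_from_user(buf, dp, left)) */
    static int load_chunk(unsigned char *dst, size_t dstsize,
                          const unsigned char *src, size_t left)
    {
        if (left > dstsize)
            return -EFAULT;      /* reject the oversized request before copying */
        memcpy(dst, src, left);
        return 0;
    }

    int main(void)
    {
        unsigned char buf[256];
        unsigned char src[1024] = { 0 };

        printf("in-bounds copy:  %d\n", load_chunk(buf, sizeof(buf), src, 100));
        printf("oversized copy:  %d\n", load_chunk(buf, sizeof(buf), src, 1024));
        return 0;
    }
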
30674 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c
30675 --- linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
30676 +++ linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
30677 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
30678 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30679 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30680
30681 + pax_track_stack();
30682
30683 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30684 {
30685 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c
30686 --- linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
30687 +++ linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
30688 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30689 IDI_SYNC_REQ req;
30690 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30691
30692 + pax_track_stack();
30693 +
30694 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30695
30696 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30697 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c
30698 --- linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
30699 +++ linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
30700 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
30701 IDI_SYNC_REQ req;
30702 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30703
30704 + pax_track_stack();
30705 +
30706 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30707
30708 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30709 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c
30710 --- linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
30711 +++ linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
30712 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
30713 IDI_SYNC_REQ req;
30714 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30715
30716 + pax_track_stack();
30717 +
30718 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30719
30720 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30721 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c
30722 --- linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
30723 +++ linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
30724 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
30725 IDI_SYNC_REQ req;
30726 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30727
30728 + pax_track_stack();
30729 +
30730 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30731
30732 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30733 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c
30734 --- linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
30735 +++ linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
30736 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
30737 dword d;
30738 word w;
30739
30740 + pax_track_stack();
30741 +
30742 a = plci->adapter;
30743 Id = ((word)plci->Id<<8)|a->Id;
30744 PUT_WORD(&SS_Ind[4],0x0000);
30745 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
30746 word j, n, w;
30747 dword d;
30748
30749 + pax_track_stack();
30750 +
30751
30752 for(i=0;i<8;i++) bp_parms[i].length = 0;
30753 for(i=0;i<2;i++) global_config[i].length = 0;
30754 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30755 const byte llc3[] = {4,3,2,2,6,6,0};
30756 const byte header[] = {0,2,3,3,0,0,0};
30757
30758 + pax_track_stack();
30759 +
30760 for(i=0;i<8;i++) bp_parms[i].length = 0;
30761 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30762 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30763 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30764 word appl_number_group_type[MAX_APPL];
30765 PLCI *auxplci;
30766
30767 + pax_track_stack();
30768 +
30769 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30770
30771 if(!a->group_optimization_enabled)
30772 diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c
30773 --- linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30774 +++ linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30775 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30776 IDI_SYNC_REQ req;
30777 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30778
30779 + pax_track_stack();
30780 +
30781 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30782
30783 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30784 diff -urNp linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c
30785 --- linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30786 +++ linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30787 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30788 } iocpar;
30789 void __user *argp = (void __user *)arg;
30790
30791 + pax_track_stack();
30792 +
30793 #define name iocpar.name
30794 #define bname iocpar.bname
30795 #define iocts iocpar.iocts
30796 diff -urNp linux-2.6.32.43/drivers/isdn/icn/icn.c linux-2.6.32.43/drivers/isdn/icn/icn.c
30797 --- linux-2.6.32.43/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30798 +++ linux-2.6.32.43/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30799 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30800 if (count > len)
30801 count = len;
30802 if (user) {
30803 - if (copy_from_user(msg, buf, count))
30804 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30805 return -EFAULT;
30806 } else
30807 memcpy(msg, buf, count);
30808 diff -urNp linux-2.6.32.43/drivers/isdn/mISDN/socket.c linux-2.6.32.43/drivers/isdn/mISDN/socket.c
30809 --- linux-2.6.32.43/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30810 +++ linux-2.6.32.43/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30811 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30812 if (dev) {
30813 struct mISDN_devinfo di;
30814
30815 + memset(&di, 0, sizeof(di));
30816 di.id = dev->id;
30817 di.Dprotocols = dev->Dprotocols;
30818 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30819 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30820 if (dev) {
30821 struct mISDN_devinfo di;
30822
30823 + memset(&di, 0, sizeof(di));
30824 di.id = dev->id;
30825 di.Dprotocols = dev->Dprotocols;
30826 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
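
The two socket.c hunks zero the on-stack struct mISDN_devinfo before its fields are assigned. Without the memset(), struct padding and any field the ioctl path does not set would reach user space as stale kernel stack bytes. A small demonstration of why the added line matters; the struct layout and field names below are illustrative only:

    #include <stdio.h>
    #include <string.h>

    struct devinfo_sketch {
        int  id;
        char name[16];           /* deliberately left unset below */
        long protocols;
    };

    static void fill(struct devinfo_sketch *di, int zero_first)
    {
        if (zero_first)
            memset(di, 0, sizeof(*di));   /* the line the patch adds */
        di->id = 7;
        di->protocols = 0x3;
    }

    int main(void)
    {
        struct devinfo_sketch di;

        memset(&di, 0xAA, sizeof(di));    /* stand-in for stale stack contents */
        fill(&di, 0);
        printf("without memset, name[0] = %#x (leaked)\n", (unsigned char)di.name[0]);
        fill(&di, 1);
        printf("with memset,    name[0] = %#x\n", (unsigned char)di.name[0]);
        return 0;
    }
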
30827 diff -urNp linux-2.6.32.43/drivers/isdn/sc/interrupt.c linux-2.6.32.43/drivers/isdn/sc/interrupt.c
30828 --- linux-2.6.32.43/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30829 +++ linux-2.6.32.43/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30830 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30831 }
30832 else if(callid>=0x0000 && callid<=0x7FFF)
30833 {
30834 + int len;
30835 +
30836 pr_debug("%s: Got Incoming Call\n",
30837 sc_adapter[card]->devicename);
30838 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30839 - strcpy(setup.eazmsn,
30840 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30841 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30842 + sizeof(setup.phone));
30843 + if (len >= sizeof(setup.phone))
30844 + continue;
30845 + len = strlcpy(setup.eazmsn,
30846 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30847 + sizeof(setup.eazmsn));
30848 + if (len >= sizeof(setup.eazmsn))
30849 + continue;
30850 setup.si1 = 7;
30851 setup.si2 = 0;
30852 setup.plan = 0;
30853 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30854 * Handle a GetMyNumber Rsp
30855 */
30856 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30857 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30858 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30859 + rcvmsg.msg_data.byte_array,
30860 + sizeof(rcvmsg.msg_data.byte_array));
30861 continue;
30862 }
30863
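
The interrupt.c hunk replaces two unbounded strcpy() calls into fixed-size setup fields with strlcpy() and checks the return value: strlcpy() reports the length of the source string, so a result >= the destination size means the copy would have truncated and the incoming call is skipped (`continue`) instead of being processed with a mangled number. A sketch of the pattern with a minimal strlcpy-style helper, since strlcpy() is not part of every C library:

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy-alike: bounded, NUL-terminated copy; returns strlen(src). */
    static size_t bounded_copy(char *dst, const char *src, size_t dstsize)
    {
        size_t srclen = strlen(src);

        if (dstsize) {
            size_t n = srclen < dstsize - 1 ? srclen : dstsize - 1;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return srclen;
    }

    int main(void)
    {
        char phone[8];
        const char *incoming = "015551234567";   /* longer than phone[] */

        if (bounded_copy(phone, incoming, sizeof(phone)) >= sizeof(phone))
            puts("truncated: skip this call setup");   /* the 'continue' path */
        else
            printf("phone = %s\n", phone);
        return 0;
    }
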
30864 diff -urNp linux-2.6.32.43/drivers/lguest/core.c linux-2.6.32.43/drivers/lguest/core.c
30865 --- linux-2.6.32.43/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30866 +++ linux-2.6.32.43/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30867 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30868 * it's worked so far. The end address needs +1 because __get_vm_area
30869 * allocates an extra guard page, so we need space for that.
30870 */
30871 +
30872 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30873 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30874 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30875 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30876 +#else
30877 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30878 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30879 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30880 +#endif
30881 +
30882 if (!switcher_vma) {
30883 err = -ENOMEM;
30884 printk("lguest: could not map switcher pages high\n");
30885 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30886 * Now the Switcher is mapped at the right address, we can't fail!
30887 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30888 */
30889 - memcpy(switcher_vma->addr, start_switcher_text,
30890 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30891 end_switcher_text - start_switcher_text);
30892
30893 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30894 diff -urNp linux-2.6.32.43/drivers/lguest/x86/core.c linux-2.6.32.43/drivers/lguest/x86/core.c
30895 --- linux-2.6.32.43/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30896 +++ linux-2.6.32.43/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30897 @@ -59,7 +59,7 @@ static struct {
30898 /* Offset from where switcher.S was compiled to where we've copied it */
30899 static unsigned long switcher_offset(void)
30900 {
30901 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30902 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30903 }
30904
30905 /* This cpu's struct lguest_pages. */
30906 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30907 * These copies are pretty cheap, so we do them unconditionally: */
30908 /* Save the current Host top-level page directory.
30909 */
30910 +
30911 +#ifdef CONFIG_PAX_PER_CPU_PGD
30912 + pages->state.host_cr3 = read_cr3();
30913 +#else
30914 pages->state.host_cr3 = __pa(current->mm->pgd);
30915 +#endif
30916 +
30917 /*
30918 * Set up the Guest's page tables to see this CPU's pages (and no
30919 * other CPU's pages).
30920 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30921 * compiled-in switcher code and the high-mapped copy we just made.
30922 */
30923 for (i = 0; i < IDT_ENTRIES; i++)
30924 - default_idt_entries[i] += switcher_offset();
30925 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30926
30927 /*
30928 * Set up the Switcher's per-cpu areas.
30929 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30930 * it will be undisturbed when we switch. To change %cs and jump we
30931 * need this structure to feed to Intel's "lcall" instruction.
30932 */
30933 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30934 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30935 lguest_entry.segment = LGUEST_CS;
30936
30937 /*
30938 diff -urNp linux-2.6.32.43/drivers/lguest/x86/switcher_32.S linux-2.6.32.43/drivers/lguest/x86/switcher_32.S
30939 --- linux-2.6.32.43/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30940 +++ linux-2.6.32.43/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30941 @@ -87,6 +87,7 @@
30942 #include <asm/page.h>
30943 #include <asm/segment.h>
30944 #include <asm/lguest.h>
30945 +#include <asm/processor-flags.h>
30946
30947 // We mark the start of the code to copy
30948 // It's placed in .text tho it's never run here
30949 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30950 // Changes type when we load it: damn Intel!
30951 // For after we switch over our page tables
30952 // That entry will be read-only: we'd crash.
30953 +
30954 +#ifdef CONFIG_PAX_KERNEXEC
30955 + mov %cr0, %edx
30956 + xor $X86_CR0_WP, %edx
30957 + mov %edx, %cr0
30958 +#endif
30959 +
30960 movl $(GDT_ENTRY_TSS*8), %edx
30961 ltr %dx
30962
30963 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30964 // Let's clear it again for our return.
30965 // The GDT descriptor of the Host
30966 // Points to the table after two "size" bytes
30967 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30968 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30969 // Clear "used" from type field (byte 5, bit 2)
30970 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30971 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30972 +
30973 +#ifdef CONFIG_PAX_KERNEXEC
30974 + mov %cr0, %eax
30975 + xor $X86_CR0_WP, %eax
30976 + mov %eax, %cr0
30977 +#endif
30978
30979 // Once our page table's switched, the Guest is live!
30980 // The Host fades as we run this final step.
30981 @@ -295,13 +309,12 @@ deliver_to_host:
30982 // I consulted gcc, and it gave
30983 // These instructions, which I gladly credit:
30984 leal (%edx,%ebx,8), %eax
30985 - movzwl (%eax),%edx
30986 - movl 4(%eax), %eax
30987 - xorw %ax, %ax
30988 - orl %eax, %edx
30989 + movl 4(%eax), %edx
30990 + movw (%eax), %dx
30991 // Now the address of the handler's in %edx
30992 // We call it now: its "iret" drops us home.
30993 - jmp *%edx
30994 + ljmp $__KERNEL_CS, $1f
30995 +1: jmp *%edx
30996
30997 // Every interrupt can come to us here
30998 // But we must truly tell each apart.
30999 diff -urNp linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c
31000 --- linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31001 +++ linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31002 @@ -15,7 +15,7 @@
31003
31004 #define MAX_PMU_LEVEL 0xFF
31005
31006 -static struct backlight_ops pmu_backlight_data;
31007 +static const struct backlight_ops pmu_backlight_data;
31008 static DEFINE_SPINLOCK(pmu_backlight_lock);
31009 static int sleeping, uses_pmu_bl;
31010 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31011 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31012 return bd->props.brightness;
31013 }
31014
31015 -static struct backlight_ops pmu_backlight_data = {
31016 +static const struct backlight_ops pmu_backlight_data = {
31017 .get_brightness = pmu_backlight_get_brightness,
31018 .update_status = pmu_backlight_update_status,
31019
31020 diff -urNp linux-2.6.32.43/drivers/macintosh/via-pmu.c linux-2.6.32.43/drivers/macintosh/via-pmu.c
31021 --- linux-2.6.32.43/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31022 +++ linux-2.6.32.43/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31023 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31024 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31025 }
31026
31027 -static struct platform_suspend_ops pmu_pm_ops = {
31028 +static const struct platform_suspend_ops pmu_pm_ops = {
31029 .enter = powerbook_sleep,
31030 .valid = pmu_sleep_valid,
31031 };
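
The via-pmu hunks, like the sysfs_ops constifications further down in this patch, add const to static operations structures. Once const, the function-pointer tables can live in read-only data, so they are no longer a convenient target for an attacker with a kernel write primitive, and accidental modification becomes a compile error. A trivial sketch of the pattern; the struct here is a stand-in, not the real backlight_ops:

    #include <stdio.h>

    struct backlight_ops_sketch {
        int  (*get_brightness)(void);
        void (*update_status)(int level);
    };

    static int  get_brightness(void)      { return 42; }
    static void update_status(int level)  { printf("brightness -> %d\n", level); }

    /* const: the pointer table ends up in .rodata and cannot be rewritten */
    static const struct backlight_ops_sketch pmu_backlight_data = {
        .get_brightness = get_brightness,
        .update_status  = update_status,
    };

    int main(void)
    {
        pmu_backlight_data.update_status(pmu_backlight_data.get_brightness());
        /* pmu_backlight_data.update_status = NULL;  -- would no longer compile */
        return 0;
    }
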
31032 diff -urNp linux-2.6.32.43/drivers/md/dm.c linux-2.6.32.43/drivers/md/dm.c
31033 --- linux-2.6.32.43/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
31034 +++ linux-2.6.32.43/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
31035 @@ -163,9 +163,9 @@ struct mapped_device {
31036 /*
31037 * Event handling.
31038 */
31039 - atomic_t event_nr;
31040 + atomic_unchecked_t event_nr;
31041 wait_queue_head_t eventq;
31042 - atomic_t uevent_seq;
31043 + atomic_unchecked_t uevent_seq;
31044 struct list_head uevent_list;
31045 spinlock_t uevent_lock; /* Protect access to uevent_list */
31046
31047 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
31048 rwlock_init(&md->map_lock);
31049 atomic_set(&md->holders, 1);
31050 atomic_set(&md->open_count, 0);
31051 - atomic_set(&md->event_nr, 0);
31052 - atomic_set(&md->uevent_seq, 0);
31053 + atomic_set_unchecked(&md->event_nr, 0);
31054 + atomic_set_unchecked(&md->uevent_seq, 0);
31055 INIT_LIST_HEAD(&md->uevent_list);
31056 spin_lock_init(&md->uevent_lock);
31057
31058 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
31059
31060 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31061
31062 - atomic_inc(&md->event_nr);
31063 + atomic_inc_unchecked(&md->event_nr);
31064 wake_up(&md->eventq);
31065 }
31066
31067 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
31068
31069 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31070 {
31071 - return atomic_add_return(1, &md->uevent_seq);
31072 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31073 }
31074
31075 uint32_t dm_get_event_nr(struct mapped_device *md)
31076 {
31077 - return atomic_read(&md->event_nr);
31078 + return atomic_read_unchecked(&md->event_nr);
31079 }
31080
31081 int dm_wait_event(struct mapped_device *md, int event_nr)
31082 {
31083 return wait_event_interruptible(md->eventq,
31084 - (event_nr != atomic_read(&md->event_nr)));
31085 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31086 }
31087
31088 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31089 diff -urNp linux-2.6.32.43/drivers/md/dm-ioctl.c linux-2.6.32.43/drivers/md/dm-ioctl.c
31090 --- linux-2.6.32.43/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31091 +++ linux-2.6.32.43/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31092 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31093 cmd == DM_LIST_VERSIONS_CMD)
31094 return 0;
31095
31096 - if ((cmd == DM_DEV_CREATE_CMD)) {
31097 + if (cmd == DM_DEV_CREATE_CMD) {
31098 if (!*param->name) {
31099 DMWARN("name not supplied when creating device");
31100 return -EINVAL;
31101 diff -urNp linux-2.6.32.43/drivers/md/dm-raid1.c linux-2.6.32.43/drivers/md/dm-raid1.c
31102 --- linux-2.6.32.43/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31103 +++ linux-2.6.32.43/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31104 @@ -41,7 +41,7 @@ enum dm_raid1_error {
31105
31106 struct mirror {
31107 struct mirror_set *ms;
31108 - atomic_t error_count;
31109 + atomic_unchecked_t error_count;
31110 unsigned long error_type;
31111 struct dm_dev *dev;
31112 sector_t offset;
31113 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31114 * simple way to tell if a device has encountered
31115 * errors.
31116 */
31117 - atomic_inc(&m->error_count);
31118 + atomic_inc_unchecked(&m->error_count);
31119
31120 if (test_and_set_bit(error_type, &m->error_type))
31121 return;
31122 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31123 }
31124
31125 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31126 - if (!atomic_read(&new->error_count)) {
31127 + if (!atomic_read_unchecked(&new->error_count)) {
31128 set_default_mirror(new);
31129 break;
31130 }
31131 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31132 struct mirror *m = get_default_mirror(ms);
31133
31134 do {
31135 - if (likely(!atomic_read(&m->error_count)))
31136 + if (likely(!atomic_read_unchecked(&m->error_count)))
31137 return m;
31138
31139 if (m-- == ms->mirror)
31140 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31141 {
31142 struct mirror *default_mirror = get_default_mirror(m->ms);
31143
31144 - return !atomic_read(&default_mirror->error_count);
31145 + return !atomic_read_unchecked(&default_mirror->error_count);
31146 }
31147
31148 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31149 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31150 */
31151 if (likely(region_in_sync(ms, region, 1)))
31152 m = choose_mirror(ms, bio->bi_sector);
31153 - else if (m && atomic_read(&m->error_count))
31154 + else if (m && atomic_read_unchecked(&m->error_count))
31155 m = NULL;
31156
31157 if (likely(m))
31158 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31159 }
31160
31161 ms->mirror[mirror].ms = ms;
31162 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31163 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31164 ms->mirror[mirror].error_type = 0;
31165 ms->mirror[mirror].offset = offset;
31166
31167 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31168 */
31169 static char device_status_char(struct mirror *m)
31170 {
31171 - if (!atomic_read(&(m->error_count)))
31172 + if (!atomic_read_unchecked(&(m->error_count)))
31173 return 'A';
31174
31175 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31176 diff -urNp linux-2.6.32.43/drivers/md/dm-stripe.c linux-2.6.32.43/drivers/md/dm-stripe.c
31177 --- linux-2.6.32.43/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31178 +++ linux-2.6.32.43/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31179 @@ -20,7 +20,7 @@ struct stripe {
31180 struct dm_dev *dev;
31181 sector_t physical_start;
31182
31183 - atomic_t error_count;
31184 + atomic_unchecked_t error_count;
31185 };
31186
31187 struct stripe_c {
31188 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31189 kfree(sc);
31190 return r;
31191 }
31192 - atomic_set(&(sc->stripe[i].error_count), 0);
31193 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31194 }
31195
31196 ti->private = sc;
31197 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31198 DMEMIT("%d ", sc->stripes);
31199 for (i = 0; i < sc->stripes; i++) {
31200 DMEMIT("%s ", sc->stripe[i].dev->name);
31201 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31202 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31203 'D' : 'A';
31204 }
31205 buffer[i] = '\0';
31206 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31207 */
31208 for (i = 0; i < sc->stripes; i++)
31209 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31210 - atomic_inc(&(sc->stripe[i].error_count));
31211 - if (atomic_read(&(sc->stripe[i].error_count)) <
31212 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31213 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31214 DM_IO_ERROR_THRESHOLD)
31215 queue_work(kstriped, &sc->kstriped_ws);
31216 }
31217 diff -urNp linux-2.6.32.43/drivers/md/dm-sysfs.c linux-2.6.32.43/drivers/md/dm-sysfs.c
31218 --- linux-2.6.32.43/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31219 +++ linux-2.6.32.43/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31220 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31221 NULL,
31222 };
31223
31224 -static struct sysfs_ops dm_sysfs_ops = {
31225 +static const struct sysfs_ops dm_sysfs_ops = {
31226 .show = dm_attr_show,
31227 };
31228
31229 diff -urNp linux-2.6.32.43/drivers/md/dm-table.c linux-2.6.32.43/drivers/md/dm-table.c
31230 --- linux-2.6.32.43/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31231 +++ linux-2.6.32.43/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31232 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31233 if (!dev_size)
31234 return 0;
31235
31236 - if ((start >= dev_size) || (start + len > dev_size)) {
31237 + if ((start >= dev_size) || (len > dev_size - start)) {
31238 DMWARN("%s: %s too small for target: "
31239 "start=%llu, len=%llu, dev_size=%llu",
31240 dm_device_name(ti->table->md), bdevname(bdev, b),
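
The dm-table.c hunk rewrites the range check `start + len > dev_size` as `len > dev_size - start`. The subtraction is safe because the preceding `start >= dev_size` clause has already failed, whereas `start + len` can wrap around for a large len and make an out-of-range target look valid. A quick demonstration with unsigned 64-bit sector counts:

    #include <stdint.h>
    #include <stdio.h>

    static int area_invalid_old(uint64_t start, uint64_t len, uint64_t dev_size)
    {
        return (start >= dev_size) || (start + len > dev_size);   /* sum can wrap */
    }

    static int area_invalid_new(uint64_t start, uint64_t len, uint64_t dev_size)
    {
        return (start >= dev_size) || (len > dev_size - start);   /* no overflow */
    }

    int main(void)
    {
        uint64_t dev_size = 1000, start = 999, len = UINT64_MAX - 500;

        printf("old check flags it invalid: %d\n", area_invalid_old(start, len, dev_size));
        printf("new check flags it invalid: %d\n", area_invalid_new(start, len, dev_size));
        return 0;
    }
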
31241 diff -urNp linux-2.6.32.43/drivers/md/md.c linux-2.6.32.43/drivers/md/md.c
31242 --- linux-2.6.32.43/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31243 +++ linux-2.6.32.43/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31244 @@ -153,10 +153,10 @@ static int start_readonly;
31245 * start build, activate spare
31246 */
31247 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31248 -static atomic_t md_event_count;
31249 +static atomic_unchecked_t md_event_count;
31250 void md_new_event(mddev_t *mddev)
31251 {
31252 - atomic_inc(&md_event_count);
31253 + atomic_inc_unchecked(&md_event_count);
31254 wake_up(&md_event_waiters);
31255 }
31256 EXPORT_SYMBOL_GPL(md_new_event);
31257 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31258 */
31259 static void md_new_event_inintr(mddev_t *mddev)
31260 {
31261 - atomic_inc(&md_event_count);
31262 + atomic_inc_unchecked(&md_event_count);
31263 wake_up(&md_event_waiters);
31264 }
31265
31266 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31267
31268 rdev->preferred_minor = 0xffff;
31269 rdev->data_offset = le64_to_cpu(sb->data_offset);
31270 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31271 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31272
31273 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31274 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31275 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31276 else
31277 sb->resync_offset = cpu_to_le64(0);
31278
31279 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31280 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31281
31282 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31283 sb->size = cpu_to_le64(mddev->dev_sectors);
31284 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31285 static ssize_t
31286 errors_show(mdk_rdev_t *rdev, char *page)
31287 {
31288 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31289 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31290 }
31291
31292 static ssize_t
31293 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31294 char *e;
31295 unsigned long n = simple_strtoul(buf, &e, 10);
31296 if (*buf && (*e == 0 || *e == '\n')) {
31297 - atomic_set(&rdev->corrected_errors, n);
31298 + atomic_set_unchecked(&rdev->corrected_errors, n);
31299 return len;
31300 }
31301 return -EINVAL;
31302 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31303 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31304 kfree(rdev);
31305 }
31306 -static struct sysfs_ops rdev_sysfs_ops = {
31307 +static const struct sysfs_ops rdev_sysfs_ops = {
31308 .show = rdev_attr_show,
31309 .store = rdev_attr_store,
31310 };
31311 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31312 rdev->data_offset = 0;
31313 rdev->sb_events = 0;
31314 atomic_set(&rdev->nr_pending, 0);
31315 - atomic_set(&rdev->read_errors, 0);
31316 - atomic_set(&rdev->corrected_errors, 0);
31317 + atomic_set_unchecked(&rdev->read_errors, 0);
31318 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31319
31320 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31321 if (!size) {
31322 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31323 kfree(mddev);
31324 }
31325
31326 -static struct sysfs_ops md_sysfs_ops = {
31327 +static const struct sysfs_ops md_sysfs_ops = {
31328 .show = md_attr_show,
31329 .store = md_attr_store,
31330 };
31331 @@ -4474,7 +4474,8 @@ out:
31332 err = 0;
31333 blk_integrity_unregister(disk);
31334 md_new_event(mddev);
31335 - sysfs_notify_dirent(mddev->sysfs_state);
31336 + if (mddev->sysfs_state)
31337 + sysfs_notify_dirent(mddev->sysfs_state);
31338 return err;
31339 }
31340
31341 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31342
31343 spin_unlock(&pers_lock);
31344 seq_printf(seq, "\n");
31345 - mi->event = atomic_read(&md_event_count);
31346 + mi->event = atomic_read_unchecked(&md_event_count);
31347 return 0;
31348 }
31349 if (v == (void*)2) {
31350 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31351 chunk_kb ? "KB" : "B");
31352 if (bitmap->file) {
31353 seq_printf(seq, ", file: ");
31354 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31355 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31356 }
31357
31358 seq_printf(seq, "\n");
31359 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31360 else {
31361 struct seq_file *p = file->private_data;
31362 p->private = mi;
31363 - mi->event = atomic_read(&md_event_count);
31364 + mi->event = atomic_read_unchecked(&md_event_count);
31365 }
31366 return error;
31367 }
31368 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31369 /* always allow read */
31370 mask = POLLIN | POLLRDNORM;
31371
31372 - if (mi->event != atomic_read(&md_event_count))
31373 + if (mi->event != atomic_read_unchecked(&md_event_count))
31374 mask |= POLLERR | POLLPRI;
31375 return mask;
31376 }
31377 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31378 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31379 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31380 (int)part_stat_read(&disk->part0, sectors[1]) -
31381 - atomic_read(&disk->sync_io);
31382 + atomic_read_unchecked(&disk->sync_io);
31383 /* sync IO will cause sync_io to increase before the disk_stats
31384 * as sync_io is counted when a request starts, and
31385 * disk_stats is counted when it completes.
31386 diff -urNp linux-2.6.32.43/drivers/md/md.h linux-2.6.32.43/drivers/md/md.h
31387 --- linux-2.6.32.43/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31388 +++ linux-2.6.32.43/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31389 @@ -94,10 +94,10 @@ struct mdk_rdev_s
31390 * only maintained for arrays that
31391 * support hot removal
31392 */
31393 - atomic_t read_errors; /* number of consecutive read errors that
31394 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31395 * we have tried to ignore.
31396 */
31397 - atomic_t corrected_errors; /* number of corrected read errors,
31398 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31399 * for reporting to userspace and storing
31400 * in superblock.
31401 */
31402 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31403
31404 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31405 {
31406 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31407 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31408 }
31409
31410 struct mdk_personality
31411 diff -urNp linux-2.6.32.43/drivers/md/raid10.c linux-2.6.32.43/drivers/md/raid10.c
31412 --- linux-2.6.32.43/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31413 +++ linux-2.6.32.43/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31414 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31415 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31416 set_bit(R10BIO_Uptodate, &r10_bio->state);
31417 else {
31418 - atomic_add(r10_bio->sectors,
31419 + atomic_add_unchecked(r10_bio->sectors,
31420 &conf->mirrors[d].rdev->corrected_errors);
31421 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31422 md_error(r10_bio->mddev,
31423 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31424 test_bit(In_sync, &rdev->flags)) {
31425 atomic_inc(&rdev->nr_pending);
31426 rcu_read_unlock();
31427 - atomic_add(s, &rdev->corrected_errors);
31428 + atomic_add_unchecked(s, &rdev->corrected_errors);
31429 if (sync_page_io(rdev->bdev,
31430 r10_bio->devs[sl].addr +
31431 sect + rdev->data_offset,
31432 diff -urNp linux-2.6.32.43/drivers/md/raid1.c linux-2.6.32.43/drivers/md/raid1.c
31433 --- linux-2.6.32.43/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31434 +++ linux-2.6.32.43/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
31435 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
31436 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
31437 continue;
31438 rdev = conf->mirrors[d].rdev;
31439 - atomic_add(s, &rdev->corrected_errors);
31440 + atomic_add_unchecked(s, &rdev->corrected_errors);
31441 if (sync_page_io(rdev->bdev,
31442 sect + rdev->data_offset,
31443 s<<9,
31444 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
31445 /* Well, this device is dead */
31446 md_error(mddev, rdev);
31447 else {
31448 - atomic_add(s, &rdev->corrected_errors);
31449 + atomic_add_unchecked(s, &rdev->corrected_errors);
31450 printk(KERN_INFO
31451 "raid1:%s: read error corrected "
31452 "(%d sectors at %llu on %s)\n",
31453 diff -urNp linux-2.6.32.43/drivers/md/raid5.c linux-2.6.32.43/drivers/md/raid5.c
31454 --- linux-2.6.32.43/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
31455 +++ linux-2.6.32.43/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
31456 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
31457 bi->bi_next = NULL;
31458 if ((rw & WRITE) &&
31459 test_bit(R5_ReWrite, &sh->dev[i].flags))
31460 - atomic_add(STRIPE_SECTORS,
31461 + atomic_add_unchecked(STRIPE_SECTORS,
31462 &rdev->corrected_errors);
31463 generic_make_request(bi);
31464 } else {
31465 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
31466 clear_bit(R5_ReadError, &sh->dev[i].flags);
31467 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31468 }
31469 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31470 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31471 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31472 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31473 } else {
31474 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31475 int retry = 0;
31476 rdev = conf->disks[i].rdev;
31477
31478 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31479 - atomic_inc(&rdev->read_errors);
31480 + atomic_inc_unchecked(&rdev->read_errors);
31481 if (conf->mddev->degraded >= conf->max_degraded)
31482 printk_rl(KERN_WARNING
31483 "raid5:%s: read error not correctable "
31484 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
31485 (unsigned long long)(sh->sector
31486 + rdev->data_offset),
31487 bdn);
31488 - else if (atomic_read(&rdev->read_errors)
31489 + else if (atomic_read_unchecked(&rdev->read_errors)
31490 > conf->max_nr_stripes)
31491 printk(KERN_WARNING
31492 "raid5:%s: Too many read errors, failing device %s.\n",
31493 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
31494 sector_t r_sector;
31495 struct stripe_head sh2;
31496
31497 + pax_track_stack();
31498
31499 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31500 stripe = new_sector;
31501 diff -urNp linux-2.6.32.43/drivers/media/common/saa7146_hlp.c linux-2.6.32.43/drivers/media/common/saa7146_hlp.c
31502 --- linux-2.6.32.43/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
31503 +++ linux-2.6.32.43/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
31504 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
31505
31506 int x[32], y[32], w[32], h[32];
31507
31508 + pax_track_stack();
31509 +
31510 /* clear out memory */
31511 memset(&line_list[0], 0x00, sizeof(u32)*32);
31512 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31513 diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31514 --- linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
31515 +++ linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
31516 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
31517 u8 buf[HOST_LINK_BUF_SIZE];
31518 int i;
31519
31520 + pax_track_stack();
31521 +
31522 dprintk("%s\n", __func__);
31523
31524 /* check if we have space for a link buf in the rx_buffer */
31525 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
31526 unsigned long timeout;
31527 int written;
31528
31529 + pax_track_stack();
31530 +
31531 dprintk("%s\n", __func__);
31532
31533 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31534 diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c
31535 --- linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
31536 +++ linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
31537 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
31538 const struct dvb_device *template, void *priv, int type)
31539 {
31540 struct dvb_device *dvbdev;
31541 + /* cannot be const */
31542 struct file_operations *dvbdevfops;
31543 struct device *clsdev;
31544 int minor;
31545 diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c
31546 --- linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
31547 +++ linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
31548 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
31549
31550 u8 buf[260];
31551
31552 + pax_track_stack();
31553 +
31554 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31555 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
31556
31557 diff -urNp linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c
31558 --- linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
31559 +++ linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
31560 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
31561 u8 tudata[585];
31562 int i;
31563
31564 + pax_track_stack();
31565 +
31566 dprintk("Firmware is %zd bytes\n",fw->size);
31567
31568 /* Get eprom data */
31569 diff -urNp linux-2.6.32.43/drivers/media/radio/radio-cadet.c linux-2.6.32.43/drivers/media/radio/radio-cadet.c
31570 --- linux-2.6.32.43/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
31571 +++ linux-2.6.32.43/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
31572 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
31573 while (i < count && dev->rdsin != dev->rdsout)
31574 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
31575
31576 - if (copy_to_user(data, readbuf, i))
31577 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
31578 return -EFAULT;
31579 return i;
31580 }
31581 diff -urNp linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c
31582 --- linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
31583 +++ linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
31584 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
31585
31586 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
31587
31588 -static atomic_t cx18_instance = ATOMIC_INIT(0);
31589 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
31590
31591 /* Parameter declarations */
31592 static int cardtype[CX18_MAX_CARDS];
31593 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
31594 struct i2c_client c;
31595 u8 eedata[256];
31596
31597 + pax_track_stack();
31598 +
31599 memset(&c, 0, sizeof(c));
31600 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31601 c.adapter = &cx->i2c_adap[0];
31602 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
31603 struct cx18 *cx;
31604
31605 /* FIXME - module parameter arrays constrain max instances */
31606 - i = atomic_inc_return(&cx18_instance) - 1;
31607 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
31608 if (i >= CX18_MAX_CARDS) {
31609 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
31610 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
31611 diff -urNp linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c
31612 --- linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
31613 +++ linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
31614 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
31615 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
31616
31617 /* ivtv instance counter */
31618 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
31619 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
31620
31621 /* Parameter declarations */
31622 static int cardtype[IVTV_MAX_CARDS];
31623 diff -urNp linux-2.6.32.43/drivers/media/video/omap24xxcam.c linux-2.6.32.43/drivers/media/video/omap24xxcam.c
31624 --- linux-2.6.32.43/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
31625 +++ linux-2.6.32.43/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
31626 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
31627 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
31628
31629 do_gettimeofday(&vb->ts);
31630 - vb->field_count = atomic_add_return(2, &fh->field_count);
31631 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
31632 if (csr & csr_error) {
31633 vb->state = VIDEOBUF_ERROR;
31634 if (!atomic_read(&fh->cam->in_reset)) {
31635 diff -urNp linux-2.6.32.43/drivers/media/video/omap24xxcam.h linux-2.6.32.43/drivers/media/video/omap24xxcam.h
31636 --- linux-2.6.32.43/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
31637 +++ linux-2.6.32.43/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
31638 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
31639 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
31640 struct videobuf_queue vbq;
31641 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
31642 - atomic_t field_count; /* field counter for videobuf_buffer */
31643 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
31644 /* accessing cam here doesn't need serialisation: it's constant */
31645 struct omap24xxcam_device *cam;
31646 };
31647 diff -urNp linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31648 --- linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
31649 +++ linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
31650 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
31651 u8 *eeprom;
31652 struct tveeprom tvdata;
31653
31654 + pax_track_stack();
31655 +
31656 memset(&tvdata,0,sizeof(tvdata));
31657
31658 eeprom = pvr2_eeprom_fetch(hdw);
31659 diff -urNp linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c
31660 --- linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
31661 +++ linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
31662 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
31663 unsigned char localPAT[256];
31664 unsigned char localPMT[256];
31665
31666 + pax_track_stack();
31667 +
31668 /* Set video format - must be done first as it resets other settings */
31669 set_reg8(client, 0x41, h->video_format);
31670
31671 diff -urNp linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c
31672 --- linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
31673 +++ linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
31674 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
31675 wait_queue_head_t *q = 0;
31676 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31677
31678 + pax_track_stack();
31679 +
31680 /* While any outstand message on the bus exists... */
31681 do {
31682
31683 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
31684 u8 tmp[512];
31685 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31686
31687 + pax_track_stack();
31688 +
31689 while (loop) {
31690
31691 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
31692 diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c
31693 --- linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
31694 +++ linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
31695 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
31696 int error;
31697
31698 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31699 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31700 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31701
31702 cam->input = input_dev = input_allocate_device();
31703 if (!input_dev) {
31704 diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c
31705 --- linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
31706 +++ linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
31707 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
31708 int error;
31709
31710 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31711 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31712 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31713
31714 cam->input = input_dev = input_allocate_device();
31715 if (!input_dev) {
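
The konicawc.c and quickcam_messenger.c hunks swap strncat() for strlcat(). strncat()'s size argument bounds how many bytes may be appended, not the total size of the destination, so passing sizeof(cam->input_physname) does not prevent an overflow once the buffer is already mostly full; strlcat() takes the full destination size and truncates instead. A sketch with a minimal strlcat-style helper, again because strlcat() is not universally available:

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcat-alike: append bounded by the total destination size. */
    static size_t bounded_cat(char *dst, const char *src, size_t dstsize)
    {
        size_t dlen = strlen(dst);
        size_t slen = strlen(src);

        if (dlen < dstsize) {
            size_t room = dstsize - dlen - 1;
            size_t n = slen < room ? slen : room;

            memcpy(dst + dlen, src, n);
            dst[dlen + n] = '\0';
        }
        return dlen + slen;        /* length it tried to create */
    }

    int main(void)
    {
        char physname[24];

        snprintf(physname, sizeof(physname), "usb-0000:00:1d.0-1.2");  /* nearly full */
        bounded_cat(physname, "/input0", sizeof(physname));            /* truncates, never overruns */
        printf("%s\n", physname);
        return 0;
    }
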
31716 diff -urNp linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c
31717 --- linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
31718 +++ linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
31719 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
31720 unsigned char rv, gv, bv;
31721 static unsigned char *Y, *U, *V;
31722
31723 + pax_track_stack();
31724 +
31725 frame = usbvision->curFrame;
31726 imageSize = frame->frmwidth * frame->frmheight;
31727 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31728 diff -urNp linux-2.6.32.43/drivers/media/video/v4l2-device.c linux-2.6.32.43/drivers/media/video/v4l2-device.c
31729 --- linux-2.6.32.43/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
31730 +++ linux-2.6.32.43/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
31731 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
31732 EXPORT_SYMBOL_GPL(v4l2_device_register);
31733
31734 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
31735 - atomic_t *instance)
31736 + atomic_unchecked_t *instance)
31737 {
31738 - int num = atomic_inc_return(instance) - 1;
31739 + int num = atomic_inc_return_unchecked(instance) - 1;
31740 int len = strlen(basename);
31741
31742 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
31743 diff -urNp linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c
31744 --- linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
31745 +++ linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
31746 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
31747 {
31748 struct videobuf_queue q;
31749
31750 + pax_track_stack();
31751 +
31752 /* Required to make generic handler to call __videobuf_alloc */
31753 q.int_ops = &sg_ops;
31754
31755 diff -urNp linux-2.6.32.43/drivers/message/fusion/mptbase.c linux-2.6.32.43/drivers/message/fusion/mptbase.c
31756 --- linux-2.6.32.43/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31757 +++ linux-2.6.32.43/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31758 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31759 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31760 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31761
31762 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31763 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31764 + NULL, NULL);
31765 +#else
31766 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31767 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31768 +#endif
31769 +
31770 /*
31771 * Rounding UP to nearest 4-kB boundary here...
31772 */
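
The mptbase.c hunk makes the /proc dump of the request-frame addresses conditional on CONFIG_GRKERNSEC_HIDESYM: with the option enabled it prints NULL instead of the real kernel virtual and DMA addresses, so the driver's informational output no longer reveals kernel addresses to readers of /proc. A sketch of the same #ifdef shape; the config symbol comes from this patch, everything else below is made up for illustration:

    #include <stdio.h>

    /* #define CONFIG_GRKERNSEC_HIDESYM 1 */

    static void print_frames(void *frames, unsigned long frames_dma)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
        printf("  RequestFrames @ 0x%p (Dma @ 0x%p)\n", (void *)NULL, (void *)NULL);
    #else
        printf("  RequestFrames @ 0x%p (Dma @ 0x%p)\n", frames, (void *)frames_dma);
    #endif
    }

    int main(void)
    {
        static char fake_frame_pool[64];

        print_frames(fake_frame_pool, 0x12345000UL);
        return 0;
    }
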
31773 diff -urNp linux-2.6.32.43/drivers/message/fusion/mptsas.c linux-2.6.32.43/drivers/message/fusion/mptsas.c
31774 --- linux-2.6.32.43/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31775 +++ linux-2.6.32.43/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31776 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31777 return 0;
31778 }
31779
31780 +static inline void
31781 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31782 +{
31783 + if (phy_info->port_details) {
31784 + phy_info->port_details->rphy = rphy;
31785 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31786 + ioc->name, rphy));
31787 + }
31788 +
31789 + if (rphy) {
31790 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31791 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31792 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31793 + ioc->name, rphy, rphy->dev.release));
31794 + }
31795 +}
31796 +
31797 /* no mutex */
31798 static void
31799 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31800 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31801 return NULL;
31802 }
31803
31804 -static inline void
31805 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31806 -{
31807 - if (phy_info->port_details) {
31808 - phy_info->port_details->rphy = rphy;
31809 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31810 - ioc->name, rphy));
31811 - }
31812 -
31813 - if (rphy) {
31814 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31815 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31816 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31817 - ioc->name, rphy, rphy->dev.release));
31818 - }
31819 -}
31820 -
31821 static inline struct sas_port *
31822 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31823 {
31824 diff -urNp linux-2.6.32.43/drivers/message/fusion/mptscsih.c linux-2.6.32.43/drivers/message/fusion/mptscsih.c
31825 --- linux-2.6.32.43/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31826 +++ linux-2.6.32.43/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31827 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31828
31829 h = shost_priv(SChost);
31830
31831 - if (h) {
31832 - if (h->info_kbuf == NULL)
31833 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31834 - return h->info_kbuf;
31835 - h->info_kbuf[0] = '\0';
31836 + if (!h)
31837 + return NULL;
31838
31839 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31840 - h->info_kbuf[size-1] = '\0';
31841 - }
31842 + if (h->info_kbuf == NULL)
31843 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31844 + return h->info_kbuf;
31845 + h->info_kbuf[0] = '\0';
31846 +
31847 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31848 + h->info_kbuf[size-1] = '\0';
31849
31850 return h->info_kbuf;
31851 }
31852 diff -urNp linux-2.6.32.43/drivers/message/i2o/i2o_config.c linux-2.6.32.43/drivers/message/i2o/i2o_config.c
31853 --- linux-2.6.32.43/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31854 +++ linux-2.6.32.43/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31855 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31856 struct i2o_message *msg;
31857 unsigned int iop;
31858
31859 + pax_track_stack();
31860 +
31861 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31862 return -EFAULT;
31863
31864 diff -urNp linux-2.6.32.43/drivers/message/i2o/i2o_proc.c linux-2.6.32.43/drivers/message/i2o/i2o_proc.c
31865 --- linux-2.6.32.43/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31866 +++ linux-2.6.32.43/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31867 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31868 "Array Controller Device"
31869 };
31870
31871 -static char *chtostr(u8 * chars, int n)
31872 -{
31873 - char tmp[256];
31874 - tmp[0] = 0;
31875 - return strncat(tmp, (char *)chars, n);
31876 -}
31877 -
31878 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31879 char *group)
31880 {
31881 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31882
31883 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31884 seq_printf(seq, "%-#8x", ddm_table.module_id);
31885 - seq_printf(seq, "%-29s",
31886 - chtostr(ddm_table.module_name_version, 28));
31887 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31888 seq_printf(seq, "%9d ", ddm_table.data_size);
31889 seq_printf(seq, "%8d", ddm_table.code_size);
31890
31891 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31892
31893 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31894 seq_printf(seq, "%-#8x", dst->module_id);
31895 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31896 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31897 + seq_printf(seq, "%-.28s", dst->module_name_version);
31898 + seq_printf(seq, "%-.8s", dst->date);
31899 seq_printf(seq, "%8d ", dst->module_size);
31900 seq_printf(seq, "%8d ", dst->mpb_size);
31901 seq_printf(seq, "0x%04x", dst->module_flags);
31902 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31903 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31904 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31905 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31906 - seq_printf(seq, "Vendor info : %s\n",
31907 - chtostr((u8 *) (work32 + 2), 16));
31908 - seq_printf(seq, "Product info : %s\n",
31909 - chtostr((u8 *) (work32 + 6), 16));
31910 - seq_printf(seq, "Description : %s\n",
31911 - chtostr((u8 *) (work32 + 10), 16));
31912 - seq_printf(seq, "Product rev. : %s\n",
31913 - chtostr((u8 *) (work32 + 14), 8));
31914 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31915 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31916 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31917 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31918
31919 seq_printf(seq, "Serial number : ");
31920 print_serial_number(seq, (u8 *) (work32 + 16),
31921 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31922 }
31923
31924 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31925 - seq_printf(seq, "Module name : %s\n",
31926 - chtostr(result.module_name, 24));
31927 - seq_printf(seq, "Module revision : %s\n",
31928 - chtostr(result.module_rev, 8));
31929 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31930 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31931
31932 seq_printf(seq, "Serial number : ");
31933 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31934 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31935 return 0;
31936 }
31937
31938 - seq_printf(seq, "Device name : %s\n",
31939 - chtostr(result.device_name, 64));
31940 - seq_printf(seq, "Service name : %s\n",
31941 - chtostr(result.service_name, 64));
31942 - seq_printf(seq, "Physical name : %s\n",
31943 - chtostr(result.physical_location, 64));
31944 - seq_printf(seq, "Instance number : %s\n",
31945 - chtostr(result.instance_number, 4));
31946 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31947 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31948 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31949 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31950
31951 return 0;
31952 }
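
The i2o_proc.c hunks above remove the chtostr() helper, which strncat()ed fixed-width, not necessarily NUL-terminated fields into a 256-byte buffer on its own stack and returned a pointer to that storage, and instead bound every print with an explicit precision such as "%.28s". A minimal, hypothetical userspace sketch of the precision-bounded printing (field name and width are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char module_name_version[28];

	/* Fill the field completely; there is no terminating NUL. */
	memset(module_name_version, 'A', sizeof(module_name_version));

	/* The precision caps how many bytes printf may read, so the missing
	 * NUL is harmless and no temporary buffer is needed. */
	printf("%-.28s\n", module_name_version);
	return 0;
}
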
31953 diff -urNp linux-2.6.32.43/drivers/message/i2o/iop.c linux-2.6.32.43/drivers/message/i2o/iop.c
31954 --- linux-2.6.32.43/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31955 +++ linux-2.6.32.43/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31956 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31957
31958 spin_lock_irqsave(&c->context_list_lock, flags);
31959
31960 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31961 - atomic_inc(&c->context_list_counter);
31962 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31963 + atomic_inc_unchecked(&c->context_list_counter);
31964
31965 - entry->context = atomic_read(&c->context_list_counter);
31966 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31967
31968 list_add(&entry->list, &c->context_list);
31969
31970 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31971
31972 #if BITS_PER_LONG == 64
31973 spin_lock_init(&c->context_list_lock);
31974 - atomic_set(&c->context_list_counter, 0);
31975 + atomic_set_unchecked(&c->context_list_counter, 0);
31976 INIT_LIST_HEAD(&c->context_list);
31977 #endif
31978
31979 diff -urNp linux-2.6.32.43/drivers/mfd/wm8350-i2c.c linux-2.6.32.43/drivers/mfd/wm8350-i2c.c
31980 --- linux-2.6.32.43/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31981 +++ linux-2.6.32.43/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31982 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31983 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31984 int ret;
31985
31986 + pax_track_stack();
31987 +
31988 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31989 return -EINVAL;
31990
31991 diff -urNp linux-2.6.32.43/drivers/misc/kgdbts.c linux-2.6.32.43/drivers/misc/kgdbts.c
31992 --- linux-2.6.32.43/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31993 +++ linux-2.6.32.43/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31994 @@ -118,7 +118,7 @@
31995 } while (0)
31996 #define MAX_CONFIG_LEN 40
31997
31998 -static struct kgdb_io kgdbts_io_ops;
31999 +static const struct kgdb_io kgdbts_io_ops;
32000 static char get_buf[BUFMAX];
32001 static int get_buf_cnt;
32002 static char put_buf[BUFMAX];
32003 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32004 module_put(THIS_MODULE);
32005 }
32006
32007 -static struct kgdb_io kgdbts_io_ops = {
32008 +static const struct kgdb_io kgdbts_io_ops = {
32009 .name = "kgdbts",
32010 .read_char = kgdbts_get_char,
32011 .write_char = kgdbts_put_char,
32012 diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c
32013 --- linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32014 +++ linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32015 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32016
32017 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32018 {
32019 - atomic_long_inc(&mcs_op_statistics[op].count);
32020 - atomic_long_add(clks, &mcs_op_statistics[op].total);
32021 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32022 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32023 if (mcs_op_statistics[op].max < clks)
32024 mcs_op_statistics[op].max = clks;
32025 }
32026 diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c
32027 --- linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32028 +++ linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32029 @@ -32,9 +32,9 @@
32030
32031 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32032
32033 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32034 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32035 {
32036 - unsigned long val = atomic_long_read(v);
32037 + unsigned long val = atomic_long_read_unchecked(v);
32038
32039 if (val)
32040 seq_printf(s, "%16lu %s\n", val, id);
32041 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32042 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32043
32044 for (op = 0; op < mcsop_last; op++) {
32045 - count = atomic_long_read(&mcs_op_statistics[op].count);
32046 - total = atomic_long_read(&mcs_op_statistics[op].total);
32047 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32048 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32049 max = mcs_op_statistics[op].max;
32050 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32051 count ? total / count : 0, max);
32052 diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h
32053 --- linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32054 +++ linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32055 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32056 * GRU statistics.
32057 */
32058 struct gru_stats_s {
32059 - atomic_long_t vdata_alloc;
32060 - atomic_long_t vdata_free;
32061 - atomic_long_t gts_alloc;
32062 - atomic_long_t gts_free;
32063 - atomic_long_t vdata_double_alloc;
32064 - atomic_long_t gts_double_allocate;
32065 - atomic_long_t assign_context;
32066 - atomic_long_t assign_context_failed;
32067 - atomic_long_t free_context;
32068 - atomic_long_t load_user_context;
32069 - atomic_long_t load_kernel_context;
32070 - atomic_long_t lock_kernel_context;
32071 - atomic_long_t unlock_kernel_context;
32072 - atomic_long_t steal_user_context;
32073 - atomic_long_t steal_kernel_context;
32074 - atomic_long_t steal_context_failed;
32075 - atomic_long_t nopfn;
32076 - atomic_long_t break_cow;
32077 - atomic_long_t asid_new;
32078 - atomic_long_t asid_next;
32079 - atomic_long_t asid_wrap;
32080 - atomic_long_t asid_reuse;
32081 - atomic_long_t intr;
32082 - atomic_long_t intr_mm_lock_failed;
32083 - atomic_long_t call_os;
32084 - atomic_long_t call_os_offnode_reference;
32085 - atomic_long_t call_os_check_for_bug;
32086 - atomic_long_t call_os_wait_queue;
32087 - atomic_long_t user_flush_tlb;
32088 - atomic_long_t user_unload_context;
32089 - atomic_long_t user_exception;
32090 - atomic_long_t set_context_option;
32091 - atomic_long_t migrate_check;
32092 - atomic_long_t migrated_retarget;
32093 - atomic_long_t migrated_unload;
32094 - atomic_long_t migrated_unload_delay;
32095 - atomic_long_t migrated_nopfn_retarget;
32096 - atomic_long_t migrated_nopfn_unload;
32097 - atomic_long_t tlb_dropin;
32098 - atomic_long_t tlb_dropin_fail_no_asid;
32099 - atomic_long_t tlb_dropin_fail_upm;
32100 - atomic_long_t tlb_dropin_fail_invalid;
32101 - atomic_long_t tlb_dropin_fail_range_active;
32102 - atomic_long_t tlb_dropin_fail_idle;
32103 - atomic_long_t tlb_dropin_fail_fmm;
32104 - atomic_long_t tlb_dropin_fail_no_exception;
32105 - atomic_long_t tlb_dropin_fail_no_exception_war;
32106 - atomic_long_t tfh_stale_on_fault;
32107 - atomic_long_t mmu_invalidate_range;
32108 - atomic_long_t mmu_invalidate_page;
32109 - atomic_long_t mmu_clear_flush_young;
32110 - atomic_long_t flush_tlb;
32111 - atomic_long_t flush_tlb_gru;
32112 - atomic_long_t flush_tlb_gru_tgh;
32113 - atomic_long_t flush_tlb_gru_zero_asid;
32114 -
32115 - atomic_long_t copy_gpa;
32116 -
32117 - atomic_long_t mesq_receive;
32118 - atomic_long_t mesq_receive_none;
32119 - atomic_long_t mesq_send;
32120 - atomic_long_t mesq_send_failed;
32121 - atomic_long_t mesq_noop;
32122 - atomic_long_t mesq_send_unexpected_error;
32123 - atomic_long_t mesq_send_lb_overflow;
32124 - atomic_long_t mesq_send_qlimit_reached;
32125 - atomic_long_t mesq_send_amo_nacked;
32126 - atomic_long_t mesq_send_put_nacked;
32127 - atomic_long_t mesq_qf_not_full;
32128 - atomic_long_t mesq_qf_locked;
32129 - atomic_long_t mesq_qf_noop_not_full;
32130 - atomic_long_t mesq_qf_switch_head_failed;
32131 - atomic_long_t mesq_qf_unexpected_error;
32132 - atomic_long_t mesq_noop_unexpected_error;
32133 - atomic_long_t mesq_noop_lb_overflow;
32134 - atomic_long_t mesq_noop_qlimit_reached;
32135 - atomic_long_t mesq_noop_amo_nacked;
32136 - atomic_long_t mesq_noop_put_nacked;
32137 + atomic_long_unchecked_t vdata_alloc;
32138 + atomic_long_unchecked_t vdata_free;
32139 + atomic_long_unchecked_t gts_alloc;
32140 + atomic_long_unchecked_t gts_free;
32141 + atomic_long_unchecked_t vdata_double_alloc;
32142 + atomic_long_unchecked_t gts_double_allocate;
32143 + atomic_long_unchecked_t assign_context;
32144 + atomic_long_unchecked_t assign_context_failed;
32145 + atomic_long_unchecked_t free_context;
32146 + atomic_long_unchecked_t load_user_context;
32147 + atomic_long_unchecked_t load_kernel_context;
32148 + atomic_long_unchecked_t lock_kernel_context;
32149 + atomic_long_unchecked_t unlock_kernel_context;
32150 + atomic_long_unchecked_t steal_user_context;
32151 + atomic_long_unchecked_t steal_kernel_context;
32152 + atomic_long_unchecked_t steal_context_failed;
32153 + atomic_long_unchecked_t nopfn;
32154 + atomic_long_unchecked_t break_cow;
32155 + atomic_long_unchecked_t asid_new;
32156 + atomic_long_unchecked_t asid_next;
32157 + atomic_long_unchecked_t asid_wrap;
32158 + atomic_long_unchecked_t asid_reuse;
32159 + atomic_long_unchecked_t intr;
32160 + atomic_long_unchecked_t intr_mm_lock_failed;
32161 + atomic_long_unchecked_t call_os;
32162 + atomic_long_unchecked_t call_os_offnode_reference;
32163 + atomic_long_unchecked_t call_os_check_for_bug;
32164 + atomic_long_unchecked_t call_os_wait_queue;
32165 + atomic_long_unchecked_t user_flush_tlb;
32166 + atomic_long_unchecked_t user_unload_context;
32167 + atomic_long_unchecked_t user_exception;
32168 + atomic_long_unchecked_t set_context_option;
32169 + atomic_long_unchecked_t migrate_check;
32170 + atomic_long_unchecked_t migrated_retarget;
32171 + atomic_long_unchecked_t migrated_unload;
32172 + atomic_long_unchecked_t migrated_unload_delay;
32173 + atomic_long_unchecked_t migrated_nopfn_retarget;
32174 + atomic_long_unchecked_t migrated_nopfn_unload;
32175 + atomic_long_unchecked_t tlb_dropin;
32176 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32177 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32178 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32179 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32180 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32181 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32182 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32183 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
32184 + atomic_long_unchecked_t tfh_stale_on_fault;
32185 + atomic_long_unchecked_t mmu_invalidate_range;
32186 + atomic_long_unchecked_t mmu_invalidate_page;
32187 + atomic_long_unchecked_t mmu_clear_flush_young;
32188 + atomic_long_unchecked_t flush_tlb;
32189 + atomic_long_unchecked_t flush_tlb_gru;
32190 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32191 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32192 +
32193 + atomic_long_unchecked_t copy_gpa;
32194 +
32195 + atomic_long_unchecked_t mesq_receive;
32196 + atomic_long_unchecked_t mesq_receive_none;
32197 + atomic_long_unchecked_t mesq_send;
32198 + atomic_long_unchecked_t mesq_send_failed;
32199 + atomic_long_unchecked_t mesq_noop;
32200 + atomic_long_unchecked_t mesq_send_unexpected_error;
32201 + atomic_long_unchecked_t mesq_send_lb_overflow;
32202 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32203 + atomic_long_unchecked_t mesq_send_amo_nacked;
32204 + atomic_long_unchecked_t mesq_send_put_nacked;
32205 + atomic_long_unchecked_t mesq_qf_not_full;
32206 + atomic_long_unchecked_t mesq_qf_locked;
32207 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32208 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32209 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32210 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32211 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32212 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32213 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32214 + atomic_long_unchecked_t mesq_noop_put_nacked;
32215
32216 };
32217
32218 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
32219 cchop_deallocate, tghop_invalidate, mcsop_last};
32220
32221 struct mcs_op_statistic {
32222 - atomic_long_t count;
32223 - atomic_long_t total;
32224 + atomic_long_unchecked_t count;
32225 + atomic_long_unchecked_t total;
32226 unsigned long max;
32227 };
32228
32229 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
32230
32231 #define STAT(id) do { \
32232 if (gru_options & OPT_STATS) \
32233 - atomic_long_inc(&gru_stats.id); \
32234 + atomic_long_inc_unchecked(&gru_stats.id); \
32235 } while (0)
32236
32237 #ifdef CONFIG_SGI_GRU_DEBUG
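
The SGI GRU hunks above, like the earlier i2o iop.c hunk, move pure statistics counters from atomic_long_t to the atomic_long_unchecked_t variants introduced elsewhere in this patch, so PaX's reference-count overflow protection is only applied where wraparound actually matters. A loose, hypothetical sketch of that checked/unchecked split (greatly simplified; not the PaX implementation):

#include <limits.h>
#include <stdio.h>

struct checked_counter   { long v; };		/* refcounts: overflow is an error */
struct unchecked_counter { unsigned long v; };	/* statistics: wraparound is harmless */

static void checked_inc(struct checked_counter *c)
{
	if (c->v == LONG_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		return;
	}
	c->v++;
}

static void unchecked_inc(struct unchecked_counter *c)
{
	c->v++;
}

int main(void)
{
	struct checked_counter refs = { LONG_MAX };
	struct unchecked_counter stats = { 0 };

	checked_inc(&refs);	/* reports instead of wrapping */
	unchecked_inc(&stats);
	printf("stats = %lu\n", stats.v);
	return 0;
}
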
32238 diff -urNp linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c
32239 --- linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
32240 +++ linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
32241 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
32242 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32243 unsigned long timeo = jiffies + HZ;
32244
32245 + pax_track_stack();
32246 +
32247 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32248 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32249 goto sleep;
32250 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
32251 unsigned long initial_adr;
32252 int initial_len = len;
32253
32254 + pax_track_stack();
32255 +
32256 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32257 adr += chip->start;
32258 initial_adr = adr;
32259 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
32260 int retries = 3;
32261 int ret;
32262
32263 + pax_track_stack();
32264 +
32265 adr += chip->start;
32266
32267 retry:
32268 diff -urNp linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c
32269 --- linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
32270 +++ linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
32271 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
32272 unsigned long cmd_addr;
32273 struct cfi_private *cfi = map->fldrv_priv;
32274
32275 + pax_track_stack();
32276 +
32277 adr += chip->start;
32278
32279 /* Ensure cmd read/writes are aligned. */
32280 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
32281 DECLARE_WAITQUEUE(wait, current);
32282 int wbufsize, z;
32283
32284 + pax_track_stack();
32285 +
32286 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32287 if (adr & (map_bankwidth(map)-1))
32288 return -EINVAL;
32289 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
32290 DECLARE_WAITQUEUE(wait, current);
32291 int ret = 0;
32292
32293 + pax_track_stack();
32294 +
32295 adr += chip->start;
32296
32297 /* Let's determine this according to the interleave only once */
32298 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
32299 unsigned long timeo = jiffies + HZ;
32300 DECLARE_WAITQUEUE(wait, current);
32301
32302 + pax_track_stack();
32303 +
32304 adr += chip->start;
32305
32306 /* Let's determine this according to the interleave only once */
32307 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
32308 unsigned long timeo = jiffies + HZ;
32309 DECLARE_WAITQUEUE(wait, current);
32310
32311 + pax_track_stack();
32312 +
32313 adr += chip->start;
32314
32315 /* Let's determine this according to the interleave only once */
32316 diff -urNp linux-2.6.32.43/drivers/mtd/devices/doc2000.c linux-2.6.32.43/drivers/mtd/devices/doc2000.c
32317 --- linux-2.6.32.43/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
32318 +++ linux-2.6.32.43/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
32319 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
32320
32321 /* The ECC will not be calculated correctly if less than 512 is written */
32322 /* DBB-
32323 - if (len != 0x200 && eccbuf)
32324 + if (len != 0x200)
32325 printk(KERN_WARNING
32326 "ECC needs a full sector write (adr: %lx size %lx)\n",
32327 (long) to, (long) len);
32328 diff -urNp linux-2.6.32.43/drivers/mtd/devices/doc2001.c linux-2.6.32.43/drivers/mtd/devices/doc2001.c
32329 --- linux-2.6.32.43/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
32330 +++ linux-2.6.32.43/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
32331 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
32332 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32333
32334 /* Don't allow read past end of device */
32335 - if (from >= this->totlen)
32336 + if (from >= this->totlen || !len)
32337 return -EINVAL;
32338
32339 /* Don't allow a single read to cross a 512-byte block boundary */
32340 diff -urNp linux-2.6.32.43/drivers/mtd/ftl.c linux-2.6.32.43/drivers/mtd/ftl.c
32341 --- linux-2.6.32.43/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
32342 +++ linux-2.6.32.43/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
32343 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
32344 loff_t offset;
32345 uint16_t srcunitswap = cpu_to_le16(srcunit);
32346
32347 + pax_track_stack();
32348 +
32349 eun = &part->EUNInfo[srcunit];
32350 xfer = &part->XferInfo[xferunit];
32351 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32352 diff -urNp linux-2.6.32.43/drivers/mtd/inftlcore.c linux-2.6.32.43/drivers/mtd/inftlcore.c
32353 --- linux-2.6.32.43/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
32354 +++ linux-2.6.32.43/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
32355 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
32356 struct inftl_oob oob;
32357 size_t retlen;
32358
32359 + pax_track_stack();
32360 +
32361 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32362 "pending=%d)\n", inftl, thisVUC, pendingblock);
32363
32364 diff -urNp linux-2.6.32.43/drivers/mtd/inftlmount.c linux-2.6.32.43/drivers/mtd/inftlmount.c
32365 --- linux-2.6.32.43/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
32366 +++ linux-2.6.32.43/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
32367 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
32368 struct INFTLPartition *ip;
32369 size_t retlen;
32370
32371 + pax_track_stack();
32372 +
32373 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32374
32375 /*
32376 diff -urNp linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c
32377 --- linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
32378 +++ linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
32379 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
32380 {
32381 map_word pfow_val[4];
32382
32383 + pax_track_stack();
32384 +
32385 /* Check identification string */
32386 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32387 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
32388 diff -urNp linux-2.6.32.43/drivers/mtd/mtdchar.c linux-2.6.32.43/drivers/mtd/mtdchar.c
32389 --- linux-2.6.32.43/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
32390 +++ linux-2.6.32.43/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
32391 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
32392 u_long size;
32393 struct mtd_info_user info;
32394
32395 + pax_track_stack();
32396 +
32397 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32398
32399 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
32400 diff -urNp linux-2.6.32.43/drivers/mtd/nftlcore.c linux-2.6.32.43/drivers/mtd/nftlcore.c
32401 --- linux-2.6.32.43/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
32402 +++ linux-2.6.32.43/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
32403 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
32404 int inplace = 1;
32405 size_t retlen;
32406
32407 + pax_track_stack();
32408 +
32409 memset(BlockMap, 0xff, sizeof(BlockMap));
32410 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32411
32412 diff -urNp linux-2.6.32.43/drivers/mtd/nftlmount.c linux-2.6.32.43/drivers/mtd/nftlmount.c
32413 --- linux-2.6.32.43/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
32414 +++ linux-2.6.32.43/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
32415 @@ -23,6 +23,7 @@
32416 #include <asm/errno.h>
32417 #include <linux/delay.h>
32418 #include <linux/slab.h>
32419 +#include <linux/sched.h>
32420 #include <linux/mtd/mtd.h>
32421 #include <linux/mtd/nand.h>
32422 #include <linux/mtd/nftl.h>
32423 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
32424 struct mtd_info *mtd = nftl->mbd.mtd;
32425 unsigned int i;
32426
32427 + pax_track_stack();
32428 +
32429 /* Assume logical EraseSize == physical erasesize for starting the scan.
32430 We'll sort it out later if we find a MediaHeader which says otherwise */
32431 /* Actually, we won't. The new DiskOnChip driver has already scanned
32432 diff -urNp linux-2.6.32.43/drivers/mtd/ubi/build.c linux-2.6.32.43/drivers/mtd/ubi/build.c
32433 --- linux-2.6.32.43/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
32434 +++ linux-2.6.32.43/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
32435 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
32436 static int __init bytes_str_to_int(const char *str)
32437 {
32438 char *endp;
32439 - unsigned long result;
32440 + unsigned long result, scale = 1;
32441
32442 result = simple_strtoul(str, &endp, 0);
32443 if (str == endp || result >= INT_MAX) {
32444 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
32445
32446 switch (*endp) {
32447 case 'G':
32448 - result *= 1024;
32449 + scale *= 1024;
32450 case 'M':
32451 - result *= 1024;
32452 + scale *= 1024;
32453 case 'K':
32454 - result *= 1024;
32455 + scale *= 1024;
32456 if (endp[1] == 'i' && endp[2] == 'B')
32457 endp += 2;
32458 case '\0':
32459 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
32460 return -EINVAL;
32461 }
32462
32463 - return result;
32464 + if ((intoverflow_t)result*scale >= INT_MAX) {
32465 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32466 + str);
32467 + return -EINVAL;
32468 + }
32469 +
32470 + return result*scale;
32471 }
32472
32473 /**
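
The UBI build.c hunk above accumulates the K/M/G multiplier in a separate scale variable and rejects the module parameter when result * scale, evaluated in a wider type, would reach INT_MAX; the old code multiplied result in place and could overflow silently. A hypothetical userspace sketch of the same overflow-aware suffix parsing (simplified; the original's "KiB" suffix handling is omitted):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long long result, scale = 1;

	result = strtoull(str, &endp, 0);
	if (str == endp || result >= INT_MAX)
		return -EINVAL;

	switch (*endp) {
	case 'G':
		scale *= 1024;	/* fall through */
	case 'M':
		scale *= 1024;	/* fall through */
	case 'K':
		scale *= 1024;
		break;
	case '\0':
		break;
	default:
		return -EINVAL;
	}

	/* Check the product in a 64-bit type before narrowing to int. */
	if (result * scale >= INT_MAX)
		return -EINVAL;

	return (int)(result * scale);
}

int main(void)
{
	printf("%d\n", bytes_str_to_int("8M"));	/* 8388608 */
	printf("%d\n", bytes_str_to_int("8G"));	/* -EINVAL: would overflow int */
	return 0;
}
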
32474 diff -urNp linux-2.6.32.43/drivers/net/bnx2.c linux-2.6.32.43/drivers/net/bnx2.c
32475 --- linux-2.6.32.43/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
32476 +++ linux-2.6.32.43/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
32477 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
32478 int rc = 0;
32479 u32 magic, csum;
32480
32481 + pax_track_stack();
32482 +
32483 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
32484 goto test_nvram_done;
32485
32486 diff -urNp linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c
32487 --- linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
32488 +++ linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
32489 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
32490 int i, addr, ret;
32491 struct t3_vpd vpd;
32492
32493 + pax_track_stack();
32494 +
32495 /*
32496 * Card information is normally at VPD_BASE but some early cards had
32497 * it at 0.
32498 diff -urNp linux-2.6.32.43/drivers/net/e1000e/82571.c linux-2.6.32.43/drivers/net/e1000e/82571.c
32499 --- linux-2.6.32.43/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
32500 +++ linux-2.6.32.43/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
32501 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
32502 {
32503 struct e1000_hw *hw = &adapter->hw;
32504 struct e1000_mac_info *mac = &hw->mac;
32505 + /* cannot be const */
32506 struct e1000_mac_operations *func = &mac->ops;
32507 u32 swsm = 0;
32508 u32 swsm2 = 0;
32509 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
32510 temp = er32(ICRXDMTC);
32511 }
32512
32513 -static struct e1000_mac_operations e82571_mac_ops = {
32514 +static const struct e1000_mac_operations e82571_mac_ops = {
32515 /* .check_mng_mode: mac type dependent */
32516 /* .check_for_link: media type dependent */
32517 .id_led_init = e1000e_id_led_init,
32518 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
32519 .setup_led = e1000e_setup_led_generic,
32520 };
32521
32522 -static struct e1000_phy_operations e82_phy_ops_igp = {
32523 +static const struct e1000_phy_operations e82_phy_ops_igp = {
32524 .acquire_phy = e1000_get_hw_semaphore_82571,
32525 .check_reset_block = e1000e_check_reset_block_generic,
32526 .commit_phy = NULL,
32527 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
32528 .cfg_on_link_up = NULL,
32529 };
32530
32531 -static struct e1000_phy_operations e82_phy_ops_m88 = {
32532 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
32533 .acquire_phy = e1000_get_hw_semaphore_82571,
32534 .check_reset_block = e1000e_check_reset_block_generic,
32535 .commit_phy = e1000e_phy_sw_reset,
32536 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
32537 .cfg_on_link_up = NULL,
32538 };
32539
32540 -static struct e1000_phy_operations e82_phy_ops_bm = {
32541 +static const struct e1000_phy_operations e82_phy_ops_bm = {
32542 .acquire_phy = e1000_get_hw_semaphore_82571,
32543 .check_reset_block = e1000e_check_reset_block_generic,
32544 .commit_phy = e1000e_phy_sw_reset,
32545 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
32546 .cfg_on_link_up = NULL,
32547 };
32548
32549 -static struct e1000_nvm_operations e82571_nvm_ops = {
32550 +static const struct e1000_nvm_operations e82571_nvm_ops = {
32551 .acquire_nvm = e1000_acquire_nvm_82571,
32552 .read_nvm = e1000e_read_nvm_eerd,
32553 .release_nvm = e1000_release_nvm_82571,
32554 diff -urNp linux-2.6.32.43/drivers/net/e1000e/e1000.h linux-2.6.32.43/drivers/net/e1000e/e1000.h
32555 --- linux-2.6.32.43/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
32556 +++ linux-2.6.32.43/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
32557 @@ -375,9 +375,9 @@ struct e1000_info {
32558 u32 pba;
32559 u32 max_hw_frame_size;
32560 s32 (*get_variants)(struct e1000_adapter *);
32561 - struct e1000_mac_operations *mac_ops;
32562 - struct e1000_phy_operations *phy_ops;
32563 - struct e1000_nvm_operations *nvm_ops;
32564 + const struct e1000_mac_operations *mac_ops;
32565 + const struct e1000_phy_operations *phy_ops;
32566 + const struct e1000_nvm_operations *nvm_ops;
32567 };
32568
32569 /* hardware capability, feature, and workaround flags */
32570 diff -urNp linux-2.6.32.43/drivers/net/e1000e/es2lan.c linux-2.6.32.43/drivers/net/e1000e/es2lan.c
32571 --- linux-2.6.32.43/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
32572 +++ linux-2.6.32.43/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
32573 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
32574 {
32575 struct e1000_hw *hw = &adapter->hw;
32576 struct e1000_mac_info *mac = &hw->mac;
32577 + /* cannot be const */
32578 struct e1000_mac_operations *func = &mac->ops;
32579
32580 /* Set media type */
32581 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
32582 temp = er32(ICRXDMTC);
32583 }
32584
32585 -static struct e1000_mac_operations es2_mac_ops = {
32586 +static const struct e1000_mac_operations es2_mac_ops = {
32587 .id_led_init = e1000e_id_led_init,
32588 .check_mng_mode = e1000e_check_mng_mode_generic,
32589 /* check_for_link dependent on media type */
32590 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
32591 .setup_led = e1000e_setup_led_generic,
32592 };
32593
32594 -static struct e1000_phy_operations es2_phy_ops = {
32595 +static const struct e1000_phy_operations es2_phy_ops = {
32596 .acquire_phy = e1000_acquire_phy_80003es2lan,
32597 .check_reset_block = e1000e_check_reset_block_generic,
32598 .commit_phy = e1000e_phy_sw_reset,
32599 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
32600 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
32601 };
32602
32603 -static struct e1000_nvm_operations es2_nvm_ops = {
32604 +static const struct e1000_nvm_operations es2_nvm_ops = {
32605 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
32606 .read_nvm = e1000e_read_nvm_eerd,
32607 .release_nvm = e1000_release_nvm_80003es2lan,
32608 diff -urNp linux-2.6.32.43/drivers/net/e1000e/hw.h linux-2.6.32.43/drivers/net/e1000e/hw.h
32609 --- linux-2.6.32.43/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
32610 +++ linux-2.6.32.43/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
32611 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
32612
32613 /* Function pointers for the PHY. */
32614 struct e1000_phy_operations {
32615 - s32 (*acquire_phy)(struct e1000_hw *);
32616 - s32 (*check_polarity)(struct e1000_hw *);
32617 - s32 (*check_reset_block)(struct e1000_hw *);
32618 - s32 (*commit_phy)(struct e1000_hw *);
32619 - s32 (*force_speed_duplex)(struct e1000_hw *);
32620 - s32 (*get_cfg_done)(struct e1000_hw *hw);
32621 - s32 (*get_cable_length)(struct e1000_hw *);
32622 - s32 (*get_phy_info)(struct e1000_hw *);
32623 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
32624 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32625 - void (*release_phy)(struct e1000_hw *);
32626 - s32 (*reset_phy)(struct e1000_hw *);
32627 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
32628 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32629 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
32630 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32631 - s32 (*cfg_on_link_up)(struct e1000_hw *);
32632 + s32 (* acquire_phy)(struct e1000_hw *);
32633 + s32 (* check_polarity)(struct e1000_hw *);
32634 + s32 (* check_reset_block)(struct e1000_hw *);
32635 + s32 (* commit_phy)(struct e1000_hw *);
32636 + s32 (* force_speed_duplex)(struct e1000_hw *);
32637 + s32 (* get_cfg_done)(struct e1000_hw *hw);
32638 + s32 (* get_cable_length)(struct e1000_hw *);
32639 + s32 (* get_phy_info)(struct e1000_hw *);
32640 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
32641 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32642 + void (* release_phy)(struct e1000_hw *);
32643 + s32 (* reset_phy)(struct e1000_hw *);
32644 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
32645 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
32646 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
32647 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32648 + s32 (* cfg_on_link_up)(struct e1000_hw *);
32649 };
32650
32651 /* Function pointers for the NVM. */
32652 struct e1000_nvm_operations {
32653 - s32 (*acquire_nvm)(struct e1000_hw *);
32654 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32655 - void (*release_nvm)(struct e1000_hw *);
32656 - s32 (*update_nvm)(struct e1000_hw *);
32657 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
32658 - s32 (*validate_nvm)(struct e1000_hw *);
32659 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32660 + s32 (* const acquire_nvm)(struct e1000_hw *);
32661 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32662 + void (* const release_nvm)(struct e1000_hw *);
32663 + s32 (* const update_nvm)(struct e1000_hw *);
32664 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
32665 + s32 (* const validate_nvm)(struct e1000_hw *);
32666 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32667 };
32668
32669 struct e1000_mac_info {
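
The e1000e hunks above, like the igb, kgdbts and b43 debugfs hunks nearby, mark driver operation tables and their function pointers const, so the tables are emitted into read-only memory and a stray or attacker-controlled write can no longer retarget the driver through them. A small, hypothetical sketch of the pattern (names are illustrative):

#include <stdio.h>

struct nvm_operations {
	int  (*acquire)(void);
	void (*release)(void);
};

static int  demo_acquire(void) { puts("acquire"); return 0; }
static void demo_release(void) { puts("release"); }

/* const: the whole table of function pointers lands in .rodata */
static const struct nvm_operations demo_nvm_ops = {
	.acquire = demo_acquire,
	.release = demo_release,
};

int main(void)
{
	if (demo_nvm_ops.acquire() == 0)
		demo_nvm_ops.release();
	/* demo_nvm_ops.acquire = NULL;  -- rejected by the compiler:
	 * assignment of member of read-only object */
	return 0;
}
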
32670 diff -urNp linux-2.6.32.43/drivers/net/e1000e/ich8lan.c linux-2.6.32.43/drivers/net/e1000e/ich8lan.c
32671 --- linux-2.6.32.43/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
32672 +++ linux-2.6.32.43/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
32673 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
32674 }
32675 }
32676
32677 -static struct e1000_mac_operations ich8_mac_ops = {
32678 +static const struct e1000_mac_operations ich8_mac_ops = {
32679 .id_led_init = e1000e_id_led_init,
32680 .check_mng_mode = e1000_check_mng_mode_ich8lan,
32681 .check_for_link = e1000_check_for_copper_link_ich8lan,
32682 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
32683 /* id_led_init dependent on mac type */
32684 };
32685
32686 -static struct e1000_phy_operations ich8_phy_ops = {
32687 +static const struct e1000_phy_operations ich8_phy_ops = {
32688 .acquire_phy = e1000_acquire_swflag_ich8lan,
32689 .check_reset_block = e1000_check_reset_block_ich8lan,
32690 .commit_phy = NULL,
32691 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
32692 .write_phy_reg = e1000e_write_phy_reg_igp,
32693 };
32694
32695 -static struct e1000_nvm_operations ich8_nvm_ops = {
32696 +static const struct e1000_nvm_operations ich8_nvm_ops = {
32697 .acquire_nvm = e1000_acquire_nvm_ich8lan,
32698 .read_nvm = e1000_read_nvm_ich8lan,
32699 .release_nvm = e1000_release_nvm_ich8lan,
32700 diff -urNp linux-2.6.32.43/drivers/net/hamradio/6pack.c linux-2.6.32.43/drivers/net/hamradio/6pack.c
32701 --- linux-2.6.32.43/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
32702 +++ linux-2.6.32.43/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
32703 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
32704 unsigned char buf[512];
32705 int count1;
32706
32707 + pax_track_stack();
32708 +
32709 if (!count)
32710 return;
32711
32712 diff -urNp linux-2.6.32.43/drivers/net/ibmveth.c linux-2.6.32.43/drivers/net/ibmveth.c
32713 --- linux-2.6.32.43/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
32714 +++ linux-2.6.32.43/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
32715 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
32716 NULL,
32717 };
32718
32719 -static struct sysfs_ops veth_pool_ops = {
32720 +static const struct sysfs_ops veth_pool_ops = {
32721 .show = veth_pool_show,
32722 .store = veth_pool_store,
32723 };
32724 diff -urNp linux-2.6.32.43/drivers/net/igb/e1000_82575.c linux-2.6.32.43/drivers/net/igb/e1000_82575.c
32725 --- linux-2.6.32.43/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
32726 +++ linux-2.6.32.43/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
32727 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
32728 wr32(E1000_VT_CTL, vt_ctl);
32729 }
32730
32731 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
32732 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
32733 .reset_hw = igb_reset_hw_82575,
32734 .init_hw = igb_init_hw_82575,
32735 .check_for_link = igb_check_for_link_82575,
32736 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
32737 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
32738 };
32739
32740 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
32741 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
32742 .acquire = igb_acquire_phy_82575,
32743 .get_cfg_done = igb_get_cfg_done_82575,
32744 .release = igb_release_phy_82575,
32745 };
32746
32747 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32748 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32749 .acquire = igb_acquire_nvm_82575,
32750 .read = igb_read_nvm_eerd,
32751 .release = igb_release_nvm_82575,
32752 diff -urNp linux-2.6.32.43/drivers/net/igb/e1000_hw.h linux-2.6.32.43/drivers/net/igb/e1000_hw.h
32753 --- linux-2.6.32.43/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32754 +++ linux-2.6.32.43/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32755 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
32756 };
32757
32758 struct e1000_nvm_operations {
32759 - s32 (*acquire)(struct e1000_hw *);
32760 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32761 - void (*release)(struct e1000_hw *);
32762 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32763 + s32 (* const acquire)(struct e1000_hw *);
32764 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32765 + void (* const release)(struct e1000_hw *);
32766 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32767 };
32768
32769 struct e1000_info {
32770 s32 (*get_invariants)(struct e1000_hw *);
32771 - struct e1000_mac_operations *mac_ops;
32772 - struct e1000_phy_operations *phy_ops;
32773 - struct e1000_nvm_operations *nvm_ops;
32774 + const struct e1000_mac_operations *mac_ops;
32775 + const struct e1000_phy_operations *phy_ops;
32776 + const struct e1000_nvm_operations *nvm_ops;
32777 };
32778
32779 extern const struct e1000_info e1000_82575_info;
32780 diff -urNp linux-2.6.32.43/drivers/net/iseries_veth.c linux-2.6.32.43/drivers/net/iseries_veth.c
32781 --- linux-2.6.32.43/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32782 +++ linux-2.6.32.43/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32783 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32784 NULL
32785 };
32786
32787 -static struct sysfs_ops veth_cnx_sysfs_ops = {
32788 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
32789 .show = veth_cnx_attribute_show
32790 };
32791
32792 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32793 NULL
32794 };
32795
32796 -static struct sysfs_ops veth_port_sysfs_ops = {
32797 +static const struct sysfs_ops veth_port_sysfs_ops = {
32798 .show = veth_port_attribute_show
32799 };
32800
32801 diff -urNp linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c
32802 --- linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32803 +++ linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32804 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32805 u32 rctl;
32806 int i;
32807
32808 + pax_track_stack();
32809 +
32810 /* Check for Promiscuous and All Multicast modes */
32811
32812 rctl = IXGB_READ_REG(hw, RCTL);
32813 diff -urNp linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c
32814 --- linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32815 +++ linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32816 @@ -260,6 +260,9 @@ void __devinit
32817 ixgb_check_options(struct ixgb_adapter *adapter)
32818 {
32819 int bd = adapter->bd_number;
32820 +
32821 + pax_track_stack();
32822 +
32823 if (bd >= IXGB_MAX_NIC) {
32824 printk(KERN_NOTICE
32825 "Warning: no configuration for board #%i\n", bd);
32826 diff -urNp linux-2.6.32.43/drivers/net/mlx4/main.c linux-2.6.32.43/drivers/net/mlx4/main.c
32827 --- linux-2.6.32.43/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32828 +++ linux-2.6.32.43/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32829 @@ -38,6 +38,7 @@
32830 #include <linux/errno.h>
32831 #include <linux/pci.h>
32832 #include <linux/dma-mapping.h>
32833 +#include <linux/sched.h>
32834
32835 #include <linux/mlx4/device.h>
32836 #include <linux/mlx4/doorbell.h>
32837 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32838 u64 icm_size;
32839 int err;
32840
32841 + pax_track_stack();
32842 +
32843 err = mlx4_QUERY_FW(dev);
32844 if (err) {
32845 if (err == -EACCES)
32846 diff -urNp linux-2.6.32.43/drivers/net/niu.c linux-2.6.32.43/drivers/net/niu.c
32847 --- linux-2.6.32.43/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32848 +++ linux-2.6.32.43/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32849 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32850 int i, num_irqs, err;
32851 u8 first_ldg;
32852
32853 + pax_track_stack();
32854 +
32855 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32856 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32857 ldg_num_map[i] = first_ldg + i;
32858 diff -urNp linux-2.6.32.43/drivers/net/pcnet32.c linux-2.6.32.43/drivers/net/pcnet32.c
32859 --- linux-2.6.32.43/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32860 +++ linux-2.6.32.43/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32861 @@ -79,7 +79,7 @@ static int cards_found;
32862 /*
32863 * VLB I/O addresses
32864 */
32865 -static unsigned int pcnet32_portlist[] __initdata =
32866 +static unsigned int pcnet32_portlist[] __devinitdata =
32867 { 0x300, 0x320, 0x340, 0x360, 0 };
32868
32869 static int pcnet32_debug = 0;
32870 diff -urNp linux-2.6.32.43/drivers/net/tg3.h linux-2.6.32.43/drivers/net/tg3.h
32871 --- linux-2.6.32.43/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32872 +++ linux-2.6.32.43/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32873 @@ -95,6 +95,7 @@
32874 #define CHIPREV_ID_5750_A0 0x4000
32875 #define CHIPREV_ID_5750_A1 0x4001
32876 #define CHIPREV_ID_5750_A3 0x4003
32877 +#define CHIPREV_ID_5750_C1 0x4201
32878 #define CHIPREV_ID_5750_C2 0x4202
32879 #define CHIPREV_ID_5752_A0_HW 0x5000
32880 #define CHIPREV_ID_5752_A0 0x6000
32881 diff -urNp linux-2.6.32.43/drivers/net/tulip/de2104x.c linux-2.6.32.43/drivers/net/tulip/de2104x.c
32882 --- linux-2.6.32.43/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32883 +++ linux-2.6.32.43/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32884 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32885 struct de_srom_info_leaf *il;
32886 void *bufp;
32887
32888 + pax_track_stack();
32889 +
32890 /* download entire eeprom */
32891 for (i = 0; i < DE_EEPROM_WORDS; i++)
32892 ((__le16 *)ee_data)[i] =
32893 diff -urNp linux-2.6.32.43/drivers/net/tulip/de4x5.c linux-2.6.32.43/drivers/net/tulip/de4x5.c
32894 --- linux-2.6.32.43/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32895 +++ linux-2.6.32.43/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32896 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32897 for (i=0; i<ETH_ALEN; i++) {
32898 tmp.addr[i] = dev->dev_addr[i];
32899 }
32900 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32901 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32902 break;
32903
32904 case DE4X5_SET_HWADDR: /* Set the hardware address */
32905 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32906 spin_lock_irqsave(&lp->lock, flags);
32907 memcpy(&statbuf, &lp->pktStats, ioc->len);
32908 spin_unlock_irqrestore(&lp->lock, flags);
32909 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32910 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32911 return -EFAULT;
32912 break;
32913 }
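
The two de4x5.c ioctl hunks above validate the user-supplied ioc->len against the size of the kernel object before copy_to_user(), so an oversized length can no longer copy adjacent kernel stack memory out to user space. A minimal, hypothetical userspace sketch of checking a caller-controlled length before a copy (memcpy stands in for copy_to_user):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_out(void *dst, const void *src, size_t src_size, size_t req_len)
{
	if (req_len > src_size)		/* reject oversized requests */
		return -EINVAL;
	memcpy(dst, src, req_len);	/* copy_to_user() in the real driver */
	return 0;
}

int main(void)
{
	char hwaddr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	char out[64];

	printf("len 6:   %d\n", copy_out(out, hwaddr, sizeof(hwaddr), 6));
	printf("len 200: %d\n", copy_out(out, hwaddr, sizeof(hwaddr), 200));
	return 0;
}
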
32914 diff -urNp linux-2.6.32.43/drivers/net/usb/hso.c linux-2.6.32.43/drivers/net/usb/hso.c
32915 --- linux-2.6.32.43/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32916 +++ linux-2.6.32.43/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32917 @@ -71,7 +71,7 @@
32918 #include <asm/byteorder.h>
32919 #include <linux/serial_core.h>
32920 #include <linux/serial.h>
32921 -
32922 +#include <asm/local.h>
32923
32924 #define DRIVER_VERSION "1.2"
32925 #define MOD_AUTHOR "Option Wireless"
32926 @@ -258,7 +258,7 @@ struct hso_serial {
32927
32928 /* from usb_serial_port */
32929 struct tty_struct *tty;
32930 - int open_count;
32931 + local_t open_count;
32932 spinlock_t serial_lock;
32933
32934 int (*write_data) (struct hso_serial *serial);
32935 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32936 struct urb *urb;
32937
32938 urb = serial->rx_urb[0];
32939 - if (serial->open_count > 0) {
32940 + if (local_read(&serial->open_count) > 0) {
32941 count = put_rxbuf_data(urb, serial);
32942 if (count == -1)
32943 return;
32944 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32945 DUMP1(urb->transfer_buffer, urb->actual_length);
32946
32947 /* Anyone listening? */
32948 - if (serial->open_count == 0)
32949 + if (local_read(&serial->open_count) == 0)
32950 return;
32951
32952 if (status == 0) {
32953 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32954 spin_unlock_irq(&serial->serial_lock);
32955
32956 /* check for port already opened, if not set the termios */
32957 - serial->open_count++;
32958 - if (serial->open_count == 1) {
32959 + if (local_inc_return(&serial->open_count) == 1) {
32960 tty->low_latency = 1;
32961 serial->rx_state = RX_IDLE;
32962 /* Force default termio settings */
32963 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32964 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32965 if (result) {
32966 hso_stop_serial_device(serial->parent);
32967 - serial->open_count--;
32968 + local_dec(&serial->open_count);
32969 kref_put(&serial->parent->ref, hso_serial_ref_free);
32970 }
32971 } else {
32972 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32973
32974 /* reset the rts and dtr */
32975 /* do the actual close */
32976 - serial->open_count--;
32977 + local_dec(&serial->open_count);
32978
32979 - if (serial->open_count <= 0) {
32980 - serial->open_count = 0;
32981 + if (local_read(&serial->open_count) <= 0) {
32982 + local_set(&serial->open_count, 0);
32983 spin_lock_irq(&serial->serial_lock);
32984 if (serial->tty == tty) {
32985 serial->tty->driver_data = NULL;
32986 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32987
32988 /* the actual setup */
32989 spin_lock_irqsave(&serial->serial_lock, flags);
32990 - if (serial->open_count)
32991 + if (local_read(&serial->open_count))
32992 _hso_serial_set_termios(tty, old);
32993 else
32994 tty->termios = old;
32995 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32996 /* Start all serial ports */
32997 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32998 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32999 - if (dev2ser(serial_table[i])->open_count) {
33000 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33001 result =
33002 hso_start_serial_device(serial_table[i], GFP_NOIO);
33003 hso_kick_transmit(dev2ser(serial_table[i]));
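
The hso.c hunks above turn the plain int open_count into a local_t updated through local_inc_return()/local_dec()/local_read() from <asm/local.h>, so the open/close reference counting uses single atomic primitives instead of separate read-modify-write steps. A hypothetical userspace sketch of the same first-open/last-close pattern, with C11 atomics standing in for local_t (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void serial_open(void)
{
	/* Counterpart of local_inc_return(&open_count) == 1 */
	if (atomic_fetch_add(&open_count, 1) + 1 == 1)
		puts("first open: initialise the device");
}

static void serial_close(void)
{
	/* Counterpart of local_dec() followed by a local_read() <= 0 check */
	if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
		atomic_store(&open_count, 0);
		puts("last close: shut the device down");
	}
}

int main(void)
{
	serial_open();
	serial_open();
	serial_close();
	serial_close();
	return 0;
}
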
33004 diff -urNp linux-2.6.32.43/drivers/net/vxge/vxge-main.c linux-2.6.32.43/drivers/net/vxge/vxge-main.c
33005 --- linux-2.6.32.43/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
33006 +++ linux-2.6.32.43/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
33007 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
33008 struct sk_buff *completed[NR_SKB_COMPLETED];
33009 int more;
33010
33011 + pax_track_stack();
33012 +
33013 do {
33014 more = 0;
33015 skb_ptr = completed;
33016 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
33017 u8 mtable[256] = {0}; /* CPU to vpath mapping */
33018 int index;
33019
33020 + pax_track_stack();
33021 +
33022 /*
33023 * Filling
33024 * - itable with bucket numbers
33025 diff -urNp linux-2.6.32.43/drivers/net/wan/cycx_x25.c linux-2.6.32.43/drivers/net/wan/cycx_x25.c
33026 --- linux-2.6.32.43/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
33027 +++ linux-2.6.32.43/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
33028 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
33029 unsigned char hex[1024],
33030 * phex = hex;
33031
33032 + pax_track_stack();
33033 +
33034 if (len >= (sizeof(hex) / 2))
33035 len = (sizeof(hex) / 2) - 1;
33036
33037 diff -urNp linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c
33038 --- linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
33039 +++ linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
33040 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
33041 int do_autopm = 1;
33042 DECLARE_COMPLETION_ONSTACK(notif_completion);
33043
33044 + pax_track_stack();
33045 +
33046 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
33047 i2400m, ack, ack_size);
33048 BUG_ON(_ack == i2400m->bm_ack_buf);
33049 diff -urNp linux-2.6.32.43/drivers/net/wireless/airo.c linux-2.6.32.43/drivers/net/wireless/airo.c
33050 --- linux-2.6.32.43/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
33051 +++ linux-2.6.32.43/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
33052 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
33053 BSSListElement * loop_net;
33054 BSSListElement * tmp_net;
33055
33056 + pax_track_stack();
33057 +
33058 /* Blow away current list of scan results */
33059 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
33060 list_move_tail (&loop_net->list, &ai->network_free_list);
33061 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
33062 WepKeyRid wkr;
33063 int rc;
33064
33065 + pax_track_stack();
33066 +
33067 memset( &mySsid, 0, sizeof( mySsid ) );
33068 kfree (ai->flash);
33069 ai->flash = NULL;
33070 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
33071 __le32 *vals = stats.vals;
33072 int len;
33073
33074 + pax_track_stack();
33075 +
33076 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33077 return -ENOMEM;
33078 data = (struct proc_data *)file->private_data;
33079 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
33080 /* If doLoseSync is not 1, we won't do a Lose Sync */
33081 int doLoseSync = -1;
33082
33083 + pax_track_stack();
33084 +
33085 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33086 return -ENOMEM;
33087 data = (struct proc_data *)file->private_data;
33088 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
33089 int i;
33090 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
33091
33092 + pax_track_stack();
33093 +
33094 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
33095 if (!qual)
33096 return -ENOMEM;
33097 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
33098 CapabilityRid cap_rid;
33099 __le32 *vals = stats_rid.vals;
33100
33101 + pax_track_stack();
33102 +
33103 /* Get stats out of the card */
33104 clear_bit(JOB_WSTATS, &local->jobs);
33105 if (local->power.event) {
33106 diff -urNp linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c
33107 --- linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
33108 +++ linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
33109 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
33110 unsigned int v;
33111 u64 tsf;
33112
33113 + pax_track_stack();
33114 +
33115 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
33116 len += snprintf(buf+len, sizeof(buf)-len,
33117 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
33118 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
33119 unsigned int len = 0;
33120 unsigned int i;
33121
33122 + pax_track_stack();
33123 +
33124 len += snprintf(buf+len, sizeof(buf)-len,
33125 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
33126
33127 diff -urNp linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c
33128 --- linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
33129 +++ linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
33130 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
33131 char buf[512];
33132 unsigned int len = 0;
33133
33134 + pax_track_stack();
33135 +
33136 len += snprintf(buf + len, sizeof(buf) - len,
33137 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
33138 len += snprintf(buf + len, sizeof(buf) - len,
33139 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
33140 int i;
33141 u8 addr[ETH_ALEN];
33142
33143 + pax_track_stack();
33144 +
33145 len += snprintf(buf + len, sizeof(buf) - len,
33146 "primary: %s (%s chan=%d ht=%d)\n",
33147 wiphy_name(sc->pri_wiphy->hw->wiphy),
33148 diff -urNp linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c
33149 --- linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33150 +++ linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33151 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
33152 struct b43_debugfs_fops {
33153 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
33154 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
33155 - struct file_operations fops;
33156 + const struct file_operations fops;
33157 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
33158 size_t file_struct_offset;
33159 };
33160 diff -urNp linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c
33161 --- linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33162 +++ linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33163 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
33164 struct b43legacy_debugfs_fops {
33165 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
33166 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
33167 - struct file_operations fops;
33168 + const struct file_operations fops;
33169 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
33170 size_t file_struct_offset;
33171 /* Take wl->irq_lock before calling read/write? */
33172 diff -urNp linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c
33173 --- linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
33174 +++ linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
33175 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
33176 int err;
33177 DECLARE_SSID_BUF(ssid);
33178
33179 + pax_track_stack();
33180 +
33181 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
33182
33183 if (ssid_len)
33184 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
33185 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
33186 int err;
33187
33188 + pax_track_stack();
33189 +
33190 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
33191 idx, keylen, len);
33192
33193 diff -urNp linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c
33194 --- linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
33195 +++ linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
33196 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
33197 unsigned long flags;
33198 DECLARE_SSID_BUF(ssid);
33199
33200 + pax_track_stack();
33201 +
33202 LIBIPW_DEBUG_SCAN("'%s' (%pM"
33203 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
33204 print_ssid(ssid, info_element->data, info_element->len),
33205 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c
33206 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
33207 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
33208 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
33209 },
33210 };
33211
33212 -static struct iwl_ops iwl1000_ops = {
33213 +static const struct iwl_ops iwl1000_ops = {
33214 .ucode = &iwl5000_ucode,
33215 .lib = &iwl1000_lib,
33216 .hcmd = &iwl5000_hcmd,
33217 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c
33218 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
33219 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
33220 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
33221 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
33222 };
33223
33224 -static struct iwl_ops iwl3945_ops = {
33225 +static const struct iwl_ops iwl3945_ops = {
33226 .ucode = &iwl3945_ucode,
33227 .lib = &iwl3945_lib,
33228 .hcmd = &iwl3945_hcmd,
33229 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c
33230 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
33231 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
33232 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
33233 },
33234 };
33235
33236 -static struct iwl_ops iwl4965_ops = {
33237 +static const struct iwl_ops iwl4965_ops = {
33238 .ucode = &iwl4965_ucode,
33239 .lib = &iwl4965_lib,
33240 .hcmd = &iwl4965_hcmd,
33241 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c
33242 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
33243 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
33244 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
33245 },
33246 };
33247
33248 -struct iwl_ops iwl5000_ops = {
33249 +const struct iwl_ops iwl5000_ops = {
33250 .ucode = &iwl5000_ucode,
33251 .lib = &iwl5000_lib,
33252 .hcmd = &iwl5000_hcmd,
33253 .utils = &iwl5000_hcmd_utils,
33254 };
33255
33256 -static struct iwl_ops iwl5150_ops = {
33257 +static const struct iwl_ops iwl5150_ops = {
33258 .ucode = &iwl5000_ucode,
33259 .lib = &iwl5150_lib,
33260 .hcmd = &iwl5000_hcmd,
33261 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c
33262 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
33263 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
33264 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
33265 .calc_rssi = iwl5000_calc_rssi,
33266 };
33267
33268 -static struct iwl_ops iwl6000_ops = {
33269 +static const struct iwl_ops iwl6000_ops = {
33270 .ucode = &iwl5000_ucode,
33271 .lib = &iwl6000_lib,
33272 .hcmd = &iwl5000_hcmd,
33273 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
33274 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
33275 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
33276 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
33277 u8 active_index = 0;
33278 s32 tpt = 0;
33279
33280 + pax_track_stack();
33281 +
33282 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
33283
33284 if (!ieee80211_is_data(hdr->frame_control) ||
33285 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
33286 u8 valid_tx_ant = 0;
33287 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
33288
33289 + pax_track_stack();
33290 +
33291 /* Override starting rate (index 0) if needed for debug purposes */
33292 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
33293
33294 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c
33295 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
33296 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
33297 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
33298 int pos = 0;
33299 const size_t bufsz = sizeof(buf);
33300
33301 + pax_track_stack();
33302 +
33303 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
33304 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
33305 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
33306 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
33307 const size_t bufsz = sizeof(buf);
33308 ssize_t ret;
33309
33310 + pax_track_stack();
33311 +
33312 for (i = 0; i < AC_NUM; i++) {
33313 pos += scnprintf(buf + pos, bufsz - pos,
33314 "\tcw_min\tcw_max\taifsn\ttxop\n");
33315 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h
33316 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
33317 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
33318 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
33319 #endif
33320
33321 #else
33322 -#define IWL_DEBUG(__priv, level, fmt, args...)
33323 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33324 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33325 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33326 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33327 void *p, u32 len)
33328 {}
33329 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h
33330 --- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
33331 +++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
33332 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
33333
33334 /* shared structures from iwl-5000.c */
33335 extern struct iwl_mod_params iwl50_mod_params;
33336 -extern struct iwl_ops iwl5000_ops;
33337 +extern const struct iwl_ops iwl5000_ops;
33338 extern struct iwl_ucode_ops iwl5000_ucode;
33339 extern struct iwl_lib_ops iwl5000_lib;
33340 extern struct iwl_hcmd_ops iwl5000_hcmd;
33341 diff -urNp linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c
33342 --- linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33343 +++ linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
33344 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33345 int buf_len = 512;
33346 size_t len = 0;
33347
33348 + pax_track_stack();
33349 +
33350 if (*ppos != 0)
33351 return 0;
33352 if (count < sizeof(buf))
33353 diff -urNp linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c
33354 --- linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33355 +++ linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33356 @@ -708,7 +708,7 @@ out_unlock:
33357 struct lbs_debugfs_files {
33358 const char *name;
33359 int perm;
33360 - struct file_operations fops;
33361 + const struct file_operations fops;
33362 };
33363
33364 static const struct lbs_debugfs_files debugfs_files[] = {
33365 diff -urNp linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c
33366 --- linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
33367 +++ linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
33368 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
33369
33370 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
33371
33372 - if (rts_threshold < 0 || rts_threshold > 2347)
33373 + if (rts_threshold > 2347)
33374 rts_threshold = 2347;
33375
33376 tmp = cpu_to_le32(rts_threshold);
33377 diff -urNp linux-2.6.32.43/drivers/oprofile/buffer_sync.c linux-2.6.32.43/drivers/oprofile/buffer_sync.c
33378 --- linux-2.6.32.43/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
33379 +++ linux-2.6.32.43/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
33380 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
33381 if (cookie == NO_COOKIE)
33382 offset = pc;
33383 if (cookie == INVALID_COOKIE) {
33384 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33385 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33386 offset = pc;
33387 }
33388 if (cookie != last_cookie) {
33389 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
33390 /* add userspace sample */
33391
33392 if (!mm) {
33393 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33394 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33395 return 0;
33396 }
33397
33398 cookie = lookup_dcookie(mm, s->eip, &offset);
33399
33400 if (cookie == INVALID_COOKIE) {
33401 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33402 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33403 return 0;
33404 }
33405
33406 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
33407 /* ignore backtraces if failed to add a sample */
33408 if (state == sb_bt_start) {
33409 state = sb_bt_ignore;
33410 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33411 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33412 }
33413 }
33414 release_mm(mm);
33415 diff -urNp linux-2.6.32.43/drivers/oprofile/event_buffer.c linux-2.6.32.43/drivers/oprofile/event_buffer.c
33416 --- linux-2.6.32.43/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
33417 +++ linux-2.6.32.43/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
33418 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33419 }
33420
33421 if (buffer_pos == buffer_size) {
33422 - atomic_inc(&oprofile_stats.event_lost_overflow);
33423 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33424 return;
33425 }
33426
33427 diff -urNp linux-2.6.32.43/drivers/oprofile/oprof.c linux-2.6.32.43/drivers/oprofile/oprof.c
33428 --- linux-2.6.32.43/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
33429 +++ linux-2.6.32.43/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
33430 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33431 if (oprofile_ops.switch_events())
33432 return;
33433
33434 - atomic_inc(&oprofile_stats.multiplex_counter);
33435 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33436 start_switch_worker();
33437 }
33438
33439 diff -urNp linux-2.6.32.43/drivers/oprofile/oprofilefs.c linux-2.6.32.43/drivers/oprofile/oprofilefs.c
33440 --- linux-2.6.32.43/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
33441 +++ linux-2.6.32.43/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
33442 @@ -187,7 +187,7 @@ static const struct file_operations atom
33443
33444
33445 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33446 - char const *name, atomic_t *val)
33447 + char const *name, atomic_unchecked_t *val)
33448 {
33449 struct dentry *d = __oprofilefs_create_file(sb, root, name,
33450 &atomic_ro_fops, 0444);
33451 diff -urNp linux-2.6.32.43/drivers/oprofile/oprofile_stats.c linux-2.6.32.43/drivers/oprofile/oprofile_stats.c
33452 --- linux-2.6.32.43/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
33453 +++ linux-2.6.32.43/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
33454 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33455 cpu_buf->sample_invalid_eip = 0;
33456 }
33457
33458 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33459 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33460 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33461 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33462 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33463 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33464 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33465 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33466 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33467 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33468 }
33469
33470
33471 diff -urNp linux-2.6.32.43/drivers/oprofile/oprofile_stats.h linux-2.6.32.43/drivers/oprofile/oprofile_stats.h
33472 --- linux-2.6.32.43/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
33473 +++ linux-2.6.32.43/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
33474 @@ -13,11 +13,11 @@
33475 #include <asm/atomic.h>
33476
33477 struct oprofile_stat_struct {
33478 - atomic_t sample_lost_no_mm;
33479 - atomic_t sample_lost_no_mapping;
33480 - atomic_t bt_lost_no_mapping;
33481 - atomic_t event_lost_overflow;
33482 - atomic_t multiplex_counter;
33483 + atomic_unchecked_t sample_lost_no_mm;
33484 + atomic_unchecked_t sample_lost_no_mapping;
33485 + atomic_unchecked_t bt_lost_no_mapping;
33486 + atomic_unchecked_t event_lost_overflow;
33487 + atomic_unchecked_t multiplex_counter;
33488 };
33489
33490 extern struct oprofile_stat_struct oprofile_stats;
33491 diff -urNp linux-2.6.32.43/drivers/parisc/pdc_stable.c linux-2.6.32.43/drivers/parisc/pdc_stable.c
33492 --- linux-2.6.32.43/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
33493 +++ linux-2.6.32.43/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
33494 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
33495 return ret;
33496 }
33497
33498 -static struct sysfs_ops pdcspath_attr_ops = {
33499 +static const struct sysfs_ops pdcspath_attr_ops = {
33500 .show = pdcspath_attr_show,
33501 .store = pdcspath_attr_store,
33502 };
33503 diff -urNp linux-2.6.32.43/drivers/parport/procfs.c linux-2.6.32.43/drivers/parport/procfs.c
33504 --- linux-2.6.32.43/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
33505 +++ linux-2.6.32.43/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
33506 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33507
33508 *ppos += len;
33509
33510 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33511 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33512 }
33513
33514 #ifdef CONFIG_PARPORT_1284
33515 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33516
33517 *ppos += len;
33518
33519 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33520 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33521 }
33522 #endif /* IEEE1284.3 support. */
33523
33524 diff -urNp linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c
33525 --- linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
33526 +++ linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
33527 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
33528 }
33529
33530
33531 -static struct acpi_dock_ops acpiphp_dock_ops = {
33532 +static const struct acpi_dock_ops acpiphp_dock_ops = {
33533 .handler = handle_hotplug_event_func,
33534 };
33535
33536 diff -urNp linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c
33537 --- linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
33538 +++ linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
33539 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33540
33541 void compaq_nvram_init (void __iomem *rom_start)
33542 {
33543 +
33544 +#ifndef CONFIG_PAX_KERNEXEC
33545 if (rom_start) {
33546 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33547 }
33548 +#endif
33549 +
33550 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33551
33552 /* initialize our int15 lock */
33553 diff -urNp linux-2.6.32.43/drivers/pci/hotplug/fakephp.c linux-2.6.32.43/drivers/pci/hotplug/fakephp.c
33554 --- linux-2.6.32.43/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
33555 +++ linux-2.6.32.43/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
33556 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
33557 }
33558
33559 static struct kobj_type legacy_ktype = {
33560 - .sysfs_ops = &(struct sysfs_ops){
33561 + .sysfs_ops = &(const struct sysfs_ops){
33562 .store = legacy_store, .show = legacy_show
33563 },
33564 .release = &legacy_release,
33565 diff -urNp linux-2.6.32.43/drivers/pci/intel-iommu.c linux-2.6.32.43/drivers/pci/intel-iommu.c
33566 --- linux-2.6.32.43/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
33567 +++ linux-2.6.32.43/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
33568 @@ -2643,7 +2643,7 @@ error:
33569 return 0;
33570 }
33571
33572 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
33573 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
33574 unsigned long offset, size_t size,
33575 enum dma_data_direction dir,
33576 struct dma_attrs *attrs)
33577 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
33578 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
33579 }
33580
33581 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33582 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33583 size_t size, enum dma_data_direction dir,
33584 struct dma_attrs *attrs)
33585 {
33586 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
33587 }
33588 }
33589
33590 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
33591 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
33592 dma_addr_t *dma_handle, gfp_t flags)
33593 {
33594 void *vaddr;
33595 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
33596 return NULL;
33597 }
33598
33599 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33600 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33601 dma_addr_t dma_handle)
33602 {
33603 int order;
33604 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
33605 free_pages((unsigned long)vaddr, order);
33606 }
33607
33608 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33609 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33610 int nelems, enum dma_data_direction dir,
33611 struct dma_attrs *attrs)
33612 {
33613 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
33614 return nelems;
33615 }
33616
33617 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33618 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33619 enum dma_data_direction dir, struct dma_attrs *attrs)
33620 {
33621 int i;
33622 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
33623 return nelems;
33624 }
33625
33626 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33627 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33628 {
33629 return !dma_addr;
33630 }
33631
33632 -struct dma_map_ops intel_dma_ops = {
33633 +const struct dma_map_ops intel_dma_ops = {
33634 .alloc_coherent = intel_alloc_coherent,
33635 .free_coherent = intel_free_coherent,
33636 .map_sg = intel_map_sg,
33637 diff -urNp linux-2.6.32.43/drivers/pci/pcie/aspm.c linux-2.6.32.43/drivers/pci/pcie/aspm.c
33638 --- linux-2.6.32.43/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
33639 +++ linux-2.6.32.43/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
33640 @@ -27,9 +27,9 @@
33641 #define MODULE_PARAM_PREFIX "pcie_aspm."
33642
33643 /* Note: those are not register definitions */
33644 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33645 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33646 -#define ASPM_STATE_L1 (4) /* L1 state */
33647 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33648 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33649 +#define ASPM_STATE_L1 (4U) /* L1 state */
33650 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33651 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33652
33653 diff -urNp linux-2.6.32.43/drivers/pci/probe.c linux-2.6.32.43/drivers/pci/probe.c
33654 --- linux-2.6.32.43/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
33655 +++ linux-2.6.32.43/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
33656 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
33657 return ret;
33658 }
33659
33660 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
33661 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
33662 struct device_attribute *attr,
33663 char *buf)
33664 {
33665 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
33666 }
33667
33668 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
33669 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
33670 struct device_attribute *attr,
33671 char *buf)
33672 {
33673 diff -urNp linux-2.6.32.43/drivers/pci/proc.c linux-2.6.32.43/drivers/pci/proc.c
33674 --- linux-2.6.32.43/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
33675 +++ linux-2.6.32.43/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
33676 @@ -480,7 +480,16 @@ static const struct file_operations proc
33677 static int __init pci_proc_init(void)
33678 {
33679 struct pci_dev *dev = NULL;
33680 +
33681 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33682 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33683 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33684 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33685 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33686 +#endif
33687 +#else
33688 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33689 +#endif
33690 proc_create("devices", 0, proc_bus_pci_dir,
33691 &proc_bus_pci_dev_operations);
33692 proc_initialized = 1;
33693 diff -urNp linux-2.6.32.43/drivers/pci/slot.c linux-2.6.32.43/drivers/pci/slot.c
33694 --- linux-2.6.32.43/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
33695 +++ linux-2.6.32.43/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
33696 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
33697 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
33698 }
33699
33700 -static struct sysfs_ops pci_slot_sysfs_ops = {
33701 +static const struct sysfs_ops pci_slot_sysfs_ops = {
33702 .show = pci_slot_attr_show,
33703 .store = pci_slot_attr_store,
33704 };
33705 diff -urNp linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c
33706 --- linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
33707 +++ linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
33708 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
33709 return -EFAULT;
33710 }
33711 }
33712 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33713 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33714 if (!buf)
33715 return -ENOMEM;
33716
33717 diff -urNp linux-2.6.32.43/drivers/platform/x86/acer-wmi.c linux-2.6.32.43/drivers/platform/x86/acer-wmi.c
33718 --- linux-2.6.32.43/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
33719 +++ linux-2.6.32.43/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
33720 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
33721 return 0;
33722 }
33723
33724 -static struct backlight_ops acer_bl_ops = {
33725 +static const struct backlight_ops acer_bl_ops = {
33726 .get_brightness = read_brightness,
33727 .update_status = update_bl_status,
33728 };
33729 diff -urNp linux-2.6.32.43/drivers/platform/x86/asus_acpi.c linux-2.6.32.43/drivers/platform/x86/asus_acpi.c
33730 --- linux-2.6.32.43/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
33731 +++ linux-2.6.32.43/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
33732 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
33733 return 0;
33734 }
33735
33736 -static struct backlight_ops asus_backlight_data = {
33737 +static const struct backlight_ops asus_backlight_data = {
33738 .get_brightness = read_brightness,
33739 .update_status = set_brightness_status,
33740 };
33741 diff -urNp linux-2.6.32.43/drivers/platform/x86/asus-laptop.c linux-2.6.32.43/drivers/platform/x86/asus-laptop.c
33742 --- linux-2.6.32.43/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
33743 +++ linux-2.6.32.43/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
33744 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
33745 */
33746 static int read_brightness(struct backlight_device *bd);
33747 static int update_bl_status(struct backlight_device *bd);
33748 -static struct backlight_ops asusbl_ops = {
33749 +static const struct backlight_ops asusbl_ops = {
33750 .get_brightness = read_brightness,
33751 .update_status = update_bl_status,
33752 };
33753 diff -urNp linux-2.6.32.43/drivers/platform/x86/compal-laptop.c linux-2.6.32.43/drivers/platform/x86/compal-laptop.c
33754 --- linux-2.6.32.43/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33755 +++ linux-2.6.32.43/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33756 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33757 return set_lcd_level(b->props.brightness);
33758 }
33759
33760 -static struct backlight_ops compalbl_ops = {
33761 +static const struct backlight_ops compalbl_ops = {
33762 .get_brightness = bl_get_brightness,
33763 .update_status = bl_update_status,
33764 };
33765 diff -urNp linux-2.6.32.43/drivers/platform/x86/dell-laptop.c linux-2.6.32.43/drivers/platform/x86/dell-laptop.c
33766 --- linux-2.6.32.43/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33767 +++ linux-2.6.32.43/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33768 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33769 return buffer.output[1];
33770 }
33771
33772 -static struct backlight_ops dell_ops = {
33773 +static const struct backlight_ops dell_ops = {
33774 .get_brightness = dell_get_intensity,
33775 .update_status = dell_send_intensity,
33776 };
33777 diff -urNp linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c
33778 --- linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33779 +++ linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33780 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33781 */
33782 static int read_brightness(struct backlight_device *bd);
33783 static int update_bl_status(struct backlight_device *bd);
33784 -static struct backlight_ops eeepcbl_ops = {
33785 +static const struct backlight_ops eeepcbl_ops = {
33786 .get_brightness = read_brightness,
33787 .update_status = update_bl_status,
33788 };
33789 diff -urNp linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c
33790 --- linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33791 +++ linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33792 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33793 return ret;
33794 }
33795
33796 -static struct backlight_ops fujitsubl_ops = {
33797 +static const struct backlight_ops fujitsubl_ops = {
33798 .get_brightness = bl_get_brightness,
33799 .update_status = bl_update_status,
33800 };
33801 diff -urNp linux-2.6.32.43/drivers/platform/x86/msi-laptop.c linux-2.6.32.43/drivers/platform/x86/msi-laptop.c
33802 --- linux-2.6.32.43/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33803 +++ linux-2.6.32.43/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33804 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33805 return set_lcd_level(b->props.brightness);
33806 }
33807
33808 -static struct backlight_ops msibl_ops = {
33809 +static const struct backlight_ops msibl_ops = {
33810 .get_brightness = bl_get_brightness,
33811 .update_status = bl_update_status,
33812 };
33813 diff -urNp linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c
33814 --- linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33815 +++ linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33816 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33817 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33818 }
33819
33820 -static struct backlight_ops pcc_backlight_ops = {
33821 +static const struct backlight_ops pcc_backlight_ops = {
33822 .get_brightness = bl_get,
33823 .update_status = bl_set_status,
33824 };
33825 diff -urNp linux-2.6.32.43/drivers/platform/x86/sony-laptop.c linux-2.6.32.43/drivers/platform/x86/sony-laptop.c
33826 --- linux-2.6.32.43/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33827 +++ linux-2.6.32.43/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33828 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33829 }
33830
33831 static struct backlight_device *sony_backlight_device;
33832 -static struct backlight_ops sony_backlight_ops = {
33833 +static const struct backlight_ops sony_backlight_ops = {
33834 .update_status = sony_backlight_update_status,
33835 .get_brightness = sony_backlight_get_brightness,
33836 };
33837 diff -urNp linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c
33838 --- linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33839 +++ linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33840 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33841 BACKLIGHT_UPDATE_HOTKEY);
33842 }
33843
33844 -static struct backlight_ops ibm_backlight_data = {
33845 +static const struct backlight_ops ibm_backlight_data = {
33846 .get_brightness = brightness_get,
33847 .update_status = brightness_update_status,
33848 };
33849 diff -urNp linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c
33850 --- linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33851 +++ linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33852 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33853 return AE_OK;
33854 }
33855
33856 -static struct backlight_ops toshiba_backlight_data = {
33857 +static const struct backlight_ops toshiba_backlight_data = {
33858 .get_brightness = get_lcd,
33859 .update_status = set_lcd_status,
33860 };
33861 diff -urNp linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c
33862 --- linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33863 +++ linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33864 @@ -60,7 +60,7 @@ do { \
33865 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33866 } while(0)
33867
33868 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33869 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33870 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33871
33872 /*
33873 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33874
33875 cpu = get_cpu();
33876 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33877 +
33878 + pax_open_kernel();
33879 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33880 + pax_close_kernel();
33881
33882 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33883 spin_lock_irqsave(&pnp_bios_lock, flags);
33884 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33885 :"memory");
33886 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33887
33888 + pax_open_kernel();
33889 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33890 + pax_close_kernel();
33891 +
33892 put_cpu();
33893
33894 /* If we get here and this is set then the PnP BIOS faulted on us. */
33895 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33896 return status;
33897 }
33898
33899 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33900 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33901 {
33902 int i;
33903
33904 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33905 pnp_bios_callpoint.offset = header->fields.pm16offset;
33906 pnp_bios_callpoint.segment = PNP_CS16;
33907
33908 + pax_open_kernel();
33909 +
33910 for_each_possible_cpu(i) {
33911 struct desc_struct *gdt = get_cpu_gdt_table(i);
33912 if (!gdt)
33913 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33914 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33915 (unsigned long)__va(header->fields.pm16dseg));
33916 }
33917 +
33918 + pax_close_kernel();
33919 }
33920 diff -urNp linux-2.6.32.43/drivers/pnp/resource.c linux-2.6.32.43/drivers/pnp/resource.c
33921 --- linux-2.6.32.43/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33922 +++ linux-2.6.32.43/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33923 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33924 return 1;
33925
33926 /* check if the resource is valid */
33927 - if (*irq < 0 || *irq > 15)
33928 + if (*irq > 15)
33929 return 0;
33930
33931 /* check if the resource is reserved */
33932 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33933 return 1;
33934
33935 /* check if the resource is valid */
33936 - if (*dma < 0 || *dma == 4 || *dma > 7)
33937 + if (*dma == 4 || *dma > 7)
33938 return 0;
33939
33940 /* check if the resource is reserved */
33941 diff -urNp linux-2.6.32.43/drivers/rtc/rtc-dev.c linux-2.6.32.43/drivers/rtc/rtc-dev.c
33942 --- linux-2.6.32.43/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33943 +++ linux-2.6.32.43/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33944 @@ -14,6 +14,7 @@
33945 #include <linux/module.h>
33946 #include <linux/rtc.h>
33947 #include <linux/sched.h>
33948 +#include <linux/grsecurity.h>
33949 #include "rtc-core.h"
33950
33951 static dev_t rtc_devt;
33952 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33953 if (copy_from_user(&tm, uarg, sizeof(tm)))
33954 return -EFAULT;
33955
33956 + gr_log_timechange();
33957 +
33958 return rtc_set_time(rtc, &tm);
33959
33960 case RTC_PIE_ON:
33961 diff -urNp linux-2.6.32.43/drivers/s390/cio/qdio_perf.c linux-2.6.32.43/drivers/s390/cio/qdio_perf.c
33962 --- linux-2.6.32.43/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33963 +++ linux-2.6.32.43/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33964 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33965 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33966 {
33967 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33968 - (long)atomic_long_read(&perf_stats.qdio_int));
33969 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33970 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33971 - (long)atomic_long_read(&perf_stats.pci_int));
33972 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33973 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33974 - (long)atomic_long_read(&perf_stats.thin_int));
33975 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33976 seq_printf(m, "\n");
33977 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33978 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33979 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33980 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33981 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33982 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33983 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33984 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33985 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33986 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33987 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33988 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33989 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33990 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33991 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33992 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33993 seq_printf(m, "\n");
33994 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33995 - (long)atomic_long_read(&perf_stats.siga_in));
33996 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33997 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33998 - (long)atomic_long_read(&perf_stats.siga_out));
33999 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
34000 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
34001 - (long)atomic_long_read(&perf_stats.siga_sync));
34002 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
34003 seq_printf(m, "\n");
34004 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
34005 - (long)atomic_long_read(&perf_stats.inbound_handler));
34006 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
34007 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
34008 - (long)atomic_long_read(&perf_stats.outbound_handler));
34009 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
34010 seq_printf(m, "\n");
34011 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
34012 - (long)atomic_long_read(&perf_stats.fast_requeue));
34013 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
34014 seq_printf(m, "Number of outbound target full condition\t: %li\n",
34015 - (long)atomic_long_read(&perf_stats.outbound_target_full));
34016 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
34017 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
34018 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
34019 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
34020 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
34021 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
34022 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
34023 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
34024 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
34025 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
34026 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
34027 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
34028 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
34029 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
34030 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
34031 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
34032 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
34033 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
34034 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
34035 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
34036 seq_printf(m, "\n");
34037 return 0;
34038 }
34039 diff -urNp linux-2.6.32.43/drivers/s390/cio/qdio_perf.h linux-2.6.32.43/drivers/s390/cio/qdio_perf.h
34040 --- linux-2.6.32.43/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
34041 +++ linux-2.6.32.43/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
34042 @@ -13,46 +13,46 @@
34043
34044 struct qdio_perf_stats {
34045 /* interrupt handler calls */
34046 - atomic_long_t qdio_int;
34047 - atomic_long_t pci_int;
34048 - atomic_long_t thin_int;
34049 + atomic_long_unchecked_t qdio_int;
34050 + atomic_long_unchecked_t pci_int;
34051 + atomic_long_unchecked_t thin_int;
34052
34053 /* tasklet runs */
34054 - atomic_long_t tasklet_inbound;
34055 - atomic_long_t tasklet_outbound;
34056 - atomic_long_t tasklet_thinint;
34057 - atomic_long_t tasklet_thinint_loop;
34058 - atomic_long_t thinint_inbound;
34059 - atomic_long_t thinint_inbound_loop;
34060 - atomic_long_t thinint_inbound_loop2;
34061 + atomic_long_unchecked_t tasklet_inbound;
34062 + atomic_long_unchecked_t tasklet_outbound;
34063 + atomic_long_unchecked_t tasklet_thinint;
34064 + atomic_long_unchecked_t tasklet_thinint_loop;
34065 + atomic_long_unchecked_t thinint_inbound;
34066 + atomic_long_unchecked_t thinint_inbound_loop;
34067 + atomic_long_unchecked_t thinint_inbound_loop2;
34068
34069 /* signal adapter calls */
34070 - atomic_long_t siga_out;
34071 - atomic_long_t siga_in;
34072 - atomic_long_t siga_sync;
34073 + atomic_long_unchecked_t siga_out;
34074 + atomic_long_unchecked_t siga_in;
34075 + atomic_long_unchecked_t siga_sync;
34076
34077 /* misc */
34078 - atomic_long_t inbound_handler;
34079 - atomic_long_t outbound_handler;
34080 - atomic_long_t fast_requeue;
34081 - atomic_long_t outbound_target_full;
34082 + atomic_long_unchecked_t inbound_handler;
34083 + atomic_long_unchecked_t outbound_handler;
34084 + atomic_long_unchecked_t fast_requeue;
34085 + atomic_long_unchecked_t outbound_target_full;
34086
34087 /* for debugging */
34088 - atomic_long_t debug_tl_out_timer;
34089 - atomic_long_t debug_stop_polling;
34090 - atomic_long_t debug_eqbs_all;
34091 - atomic_long_t debug_eqbs_incomplete;
34092 - atomic_long_t debug_sqbs_all;
34093 - atomic_long_t debug_sqbs_incomplete;
34094 + atomic_long_unchecked_t debug_tl_out_timer;
34095 + atomic_long_unchecked_t debug_stop_polling;
34096 + atomic_long_unchecked_t debug_eqbs_all;
34097 + atomic_long_unchecked_t debug_eqbs_incomplete;
34098 + atomic_long_unchecked_t debug_sqbs_all;
34099 + atomic_long_unchecked_t debug_sqbs_incomplete;
34100 };
34101
34102 extern struct qdio_perf_stats perf_stats;
34103 extern int qdio_performance_stats;
34104
34105 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
34106 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
34107 {
34108 if (qdio_performance_stats)
34109 - atomic_long_inc(count);
34110 + atomic_long_inc_unchecked(count);
34111 }
34112
34113 int qdio_setup_perf_stats(void);
34114 diff -urNp linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c
34115 --- linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
34116 +++ linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
34117 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
34118 u32 actual_fibsize64, actual_fibsize = 0;
34119 int i;
34120
34121 + pax_track_stack();
34122
34123 if (dev->in_reset) {
34124 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
34125 diff -urNp linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c
34126 --- linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
34127 +++ linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
34128 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
34129 flash_error_table[i].reason);
34130 }
34131
34132 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
34133 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
34134 asd_show_update_bios, asd_store_update_bios);
34135
34136 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
34137 diff -urNp linux-2.6.32.43/drivers/scsi/BusLogic.c linux-2.6.32.43/drivers/scsi/BusLogic.c
34138 --- linux-2.6.32.43/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
34139 +++ linux-2.6.32.43/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
34140 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
34141 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
34142 *PrototypeHostAdapter)
34143 {
34144 + pax_track_stack();
34145 +
34146 /*
34147 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
34148 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
34149 diff -urNp linux-2.6.32.43/drivers/scsi/dpt_i2o.c linux-2.6.32.43/drivers/scsi/dpt_i2o.c
34150 --- linux-2.6.32.43/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
34151 +++ linux-2.6.32.43/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
34152 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
34153 dma_addr_t addr;
34154 ulong flags = 0;
34155
34156 + pax_track_stack();
34157 +
34158 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
34159 // get user msg size in u32s
34160 if(get_user(size, &user_msg[0])){
34161 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
34162 s32 rcode;
34163 dma_addr_t addr;
34164
34165 + pax_track_stack();
34166 +
34167 memset(msg, 0 , sizeof(msg));
34168 len = scsi_bufflen(cmd);
34169 direction = 0x00000000;
34170 diff -urNp linux-2.6.32.43/drivers/scsi/eata.c linux-2.6.32.43/drivers/scsi/eata.c
34171 --- linux-2.6.32.43/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
34172 +++ linux-2.6.32.43/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
34173 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
34174 struct hostdata *ha;
34175 char name[16];
34176
34177 + pax_track_stack();
34178 +
34179 sprintf(name, "%s%d", driver_name, j);
34180
34181 if (!request_region(port_base, REGION_SIZE, driver_name)) {
34182 diff -urNp linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c
34183 --- linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
34184 +++ linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
34185 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
34186 size_t rlen;
34187 size_t dlen;
34188
34189 + pax_track_stack();
34190 +
34191 fiph = (struct fip_header *)skb->data;
34192 sub = fiph->fip_subcode;
34193 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
34194 diff -urNp linux-2.6.32.43/drivers/scsi/gdth.c linux-2.6.32.43/drivers/scsi/gdth.c
34195 --- linux-2.6.32.43/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
34196 +++ linux-2.6.32.43/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
34197 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
34198 ulong flags;
34199 gdth_ha_str *ha;
34200
34201 + pax_track_stack();
34202 +
34203 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
34204 return -EFAULT;
34205 ha = gdth_find_ha(ldrv.ionode);
34206 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
34207 gdth_ha_str *ha;
34208 int rval;
34209
34210 + pax_track_stack();
34211 +
34212 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
34213 res.number >= MAX_HDRIVES)
34214 return -EFAULT;
34215 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
34216 gdth_ha_str *ha;
34217 int rval;
34218
34219 + pax_track_stack();
34220 +
34221 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
34222 return -EFAULT;
34223 ha = gdth_find_ha(gen.ionode);
34224 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
34225 int i;
34226 gdth_cmd_str gdtcmd;
34227 char cmnd[MAX_COMMAND_SIZE];
34228 +
34229 + pax_track_stack();
34230 +
34231 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
34232
34233 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
34234 diff -urNp linux-2.6.32.43/drivers/scsi/gdth_proc.c linux-2.6.32.43/drivers/scsi/gdth_proc.c
34235 --- linux-2.6.32.43/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
34236 +++ linux-2.6.32.43/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
34237 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
34238 ulong64 paddr;
34239
34240 char cmnd[MAX_COMMAND_SIZE];
34241 +
34242 + pax_track_stack();
34243 +
34244 memset(cmnd, 0xff, 12);
34245 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
34246
34247 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
34248 gdth_hget_str *phg;
34249 char cmnd[MAX_COMMAND_SIZE];
34250
34251 + pax_track_stack();
34252 +
34253 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
34254 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
34255 if (!gdtcmd || !estr)
34256 diff -urNp linux-2.6.32.43/drivers/scsi/hosts.c linux-2.6.32.43/drivers/scsi/hosts.c
34257 --- linux-2.6.32.43/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
34258 +++ linux-2.6.32.43/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
34259 @@ -40,7 +40,7 @@
34260 #include "scsi_logging.h"
34261
34262
34263 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34264 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34265
34266
34267 static void scsi_host_cls_release(struct device *dev)
34268 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
34269 * subtract one because we increment first then return, but we need to
34270 * know what the next host number was before increment
34271 */
34272 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34273 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34274 shost->dma_channel = 0xff;
34275
34276 /* These three are default values which can be overridden */
34277 diff -urNp linux-2.6.32.43/drivers/scsi/ipr.c linux-2.6.32.43/drivers/scsi/ipr.c
34278 --- linux-2.6.32.43/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
34279 +++ linux-2.6.32.43/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
34280 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
34281 return true;
34282 }
34283
34284 -static struct ata_port_operations ipr_sata_ops = {
34285 +static const struct ata_port_operations ipr_sata_ops = {
34286 .phy_reset = ipr_ata_phy_reset,
34287 .hardreset = ipr_sata_reset,
34288 .post_internal_cmd = ipr_ata_post_internal,
34289 diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c
34290 --- linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
34291 +++ linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
34292 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
34293 * all together if not used XXX
34294 */
34295 struct {
34296 - atomic_t no_free_exch;
34297 - atomic_t no_free_exch_xid;
34298 - atomic_t xid_not_found;
34299 - atomic_t xid_busy;
34300 - atomic_t seq_not_found;
34301 - atomic_t non_bls_resp;
34302 + atomic_unchecked_t no_free_exch;
34303 + atomic_unchecked_t no_free_exch_xid;
34304 + atomic_unchecked_t xid_not_found;
34305 + atomic_unchecked_t xid_busy;
34306 + atomic_unchecked_t seq_not_found;
34307 + atomic_unchecked_t non_bls_resp;
34308 } stats;
34309 };
34310 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
34311 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
34312 /* allocate memory for exchange */
34313 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34314 if (!ep) {
34315 - atomic_inc(&mp->stats.no_free_exch);
34316 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34317 goto out;
34318 }
34319 memset(ep, 0, sizeof(*ep));
34320 @@ -557,7 +557,7 @@ out:
34321 return ep;
34322 err:
34323 spin_unlock_bh(&pool->lock);
34324 - atomic_inc(&mp->stats.no_free_exch_xid);
34325 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34326 mempool_free(ep, mp->ep_pool);
34327 return NULL;
34328 }
34329 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34330 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34331 ep = fc_exch_find(mp, xid);
34332 if (!ep) {
34333 - atomic_inc(&mp->stats.xid_not_found);
34334 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34335 reject = FC_RJT_OX_ID;
34336 goto out;
34337 }
34338 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34339 ep = fc_exch_find(mp, xid);
34340 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34341 if (ep) {
34342 - atomic_inc(&mp->stats.xid_busy);
34343 + atomic_inc_unchecked(&mp->stats.xid_busy);
34344 reject = FC_RJT_RX_ID;
34345 goto rel;
34346 }
34347 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34348 }
34349 xid = ep->xid; /* get our XID */
34350 } else if (!ep) {
34351 - atomic_inc(&mp->stats.xid_not_found);
34352 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34353 reject = FC_RJT_RX_ID; /* XID not found */
34354 goto out;
34355 }
34356 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34357 } else {
34358 sp = &ep->seq;
34359 if (sp->id != fh->fh_seq_id) {
34360 - atomic_inc(&mp->stats.seq_not_found);
34361 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34362 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
34363 goto rel;
34364 }
34365 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
34366
34367 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34368 if (!ep) {
34369 - atomic_inc(&mp->stats.xid_not_found);
34370 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34371 goto out;
34372 }
34373 if (ep->esb_stat & ESB_ST_COMPLETE) {
34374 - atomic_inc(&mp->stats.xid_not_found);
34375 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34376 goto out;
34377 }
34378 if (ep->rxid == FC_XID_UNKNOWN)
34379 ep->rxid = ntohs(fh->fh_rx_id);
34380 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34381 - atomic_inc(&mp->stats.xid_not_found);
34382 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34383 goto rel;
34384 }
34385 if (ep->did != ntoh24(fh->fh_s_id) &&
34386 ep->did != FC_FID_FLOGI) {
34387 - atomic_inc(&mp->stats.xid_not_found);
34388 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34389 goto rel;
34390 }
34391 sof = fr_sof(fp);
34392 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
34393 } else {
34394 sp = &ep->seq;
34395 if (sp->id != fh->fh_seq_id) {
34396 - atomic_inc(&mp->stats.seq_not_found);
34397 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34398 goto rel;
34399 }
34400 }
34401 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
34402 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34403
34404 if (!sp)
34405 - atomic_inc(&mp->stats.xid_not_found);
34406 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34407 else
34408 - atomic_inc(&mp->stats.non_bls_resp);
34409 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34410
34411 fc_frame_free(fp);
34412 }
34413 diff -urNp linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c
34414 --- linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
34415 +++ linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
34416 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
34417 }
34418 }
34419
34420 -static struct ata_port_operations sas_sata_ops = {
34421 +static const struct ata_port_operations sas_sata_ops = {
34422 .phy_reset = sas_ata_phy_reset,
34423 .post_internal_cmd = sas_ata_post_internal,
34424 .qc_defer = ata_std_qc_defer,
34425 diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c
34426 --- linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
34427 +++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
34428 @@ -124,7 +124,7 @@ struct lpfc_debug {
34429 int len;
34430 };
34431
34432 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34433 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34434 static unsigned long lpfc_debugfs_start_time = 0L;
34435
34436 /**
34437 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34438 lpfc_debugfs_enable = 0;
34439
34440 len = 0;
34441 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34442 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34443 (lpfc_debugfs_max_disc_trc - 1);
34444 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34445 dtp = vport->disc_trc + i;
34446 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34447 lpfc_debugfs_enable = 0;
34448
34449 len = 0;
34450 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34451 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34452 (lpfc_debugfs_max_slow_ring_trc - 1);
34453 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34454 dtp = phba->slow_ring_trc + i;
34455 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
34456 uint32_t *ptr;
34457 char buffer[1024];
34458
34459 + pax_track_stack();
34460 +
34461 off = 0;
34462 spin_lock_irq(&phba->hbalock);
34463
34464 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34465 !vport || !vport->disc_trc)
34466 return;
34467
34468 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34469 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34470 (lpfc_debugfs_max_disc_trc - 1);
34471 dtp = vport->disc_trc + index;
34472 dtp->fmt = fmt;
34473 dtp->data1 = data1;
34474 dtp->data2 = data2;
34475 dtp->data3 = data3;
34476 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34477 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34478 dtp->jif = jiffies;
34479 #endif
34480 return;
34481 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34482 !phba || !phba->slow_ring_trc)
34483 return;
34484
34485 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34486 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34487 (lpfc_debugfs_max_slow_ring_trc - 1);
34488 dtp = phba->slow_ring_trc + index;
34489 dtp->fmt = fmt;
34490 dtp->data1 = data1;
34491 dtp->data2 = data2;
34492 dtp->data3 = data3;
34493 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34494 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34495 dtp->jif = jiffies;
34496 #endif
34497 return;
34498 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34499 "slow_ring buffer\n");
34500 goto debug_failed;
34501 }
34502 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34503 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34504 memset(phba->slow_ring_trc, 0,
34505 (sizeof(struct lpfc_debugfs_trc) *
34506 lpfc_debugfs_max_slow_ring_trc));
34507 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34508 "buffer\n");
34509 goto debug_failed;
34510 }
34511 - atomic_set(&vport->disc_trc_cnt, 0);
34512 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34513
34514 snprintf(name, sizeof(name), "discovery_trace");
34515 vport->debug_disc_trc =
34516 diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h
34517 --- linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
34518 +++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
34519 @@ -400,7 +400,7 @@ struct lpfc_vport {
34520 struct dentry *debug_nodelist;
34521 struct dentry *vport_debugfs_root;
34522 struct lpfc_debugfs_trc *disc_trc;
34523 - atomic_t disc_trc_cnt;
34524 + atomic_unchecked_t disc_trc_cnt;
34525 #endif
34526 uint8_t stat_data_enabled;
34527 uint8_t stat_data_blocked;
34528 @@ -725,8 +725,8 @@ struct lpfc_hba {
34529 struct timer_list fabric_block_timer;
34530 unsigned long bit_flags;
34531 #define FABRIC_COMANDS_BLOCKED 0
34532 - atomic_t num_rsrc_err;
34533 - atomic_t num_cmd_success;
34534 + atomic_unchecked_t num_rsrc_err;
34535 + atomic_unchecked_t num_cmd_success;
34536 unsigned long last_rsrc_error_time;
34537 unsigned long last_ramp_down_time;
34538 unsigned long last_ramp_up_time;
34539 @@ -740,7 +740,7 @@ struct lpfc_hba {
34540 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34541 struct dentry *debug_slow_ring_trc;
34542 struct lpfc_debugfs_trc *slow_ring_trc;
34543 - atomic_t slow_ring_trc_cnt;
34544 + atomic_unchecked_t slow_ring_trc_cnt;
34545 #endif
34546
34547 /* Used for deferred freeing of ELS data buffers */
34548 diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c
34549 --- linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
34550 +++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
34551 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34552 uint32_t evt_posted;
34553
34554 spin_lock_irqsave(&phba->hbalock, flags);
34555 - atomic_inc(&phba->num_rsrc_err);
34556 + atomic_inc_unchecked(&phba->num_rsrc_err);
34557 phba->last_rsrc_error_time = jiffies;
34558
34559 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34560 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34561 unsigned long flags;
34562 struct lpfc_hba *phba = vport->phba;
34563 uint32_t evt_posted;
34564 - atomic_inc(&phba->num_cmd_success);
34565 + atomic_inc_unchecked(&phba->num_cmd_success);
34566
34567 if (vport->cfg_lun_queue_depth <= queue_depth)
34568 return;
34569 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34570 int i;
34571 struct lpfc_rport_data *rdata;
34572
34573 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34574 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34575 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34576 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34577
34578 vports = lpfc_create_vport_work_array(phba);
34579 if (vports != NULL)
34580 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34581 }
34582 }
34583 lpfc_destroy_vport_work_array(phba, vports);
34584 - atomic_set(&phba->num_rsrc_err, 0);
34585 - atomic_set(&phba->num_cmd_success, 0);
34586 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34587 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34588 }
34589
34590 /**
34591 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34592 }
34593 }
34594 lpfc_destroy_vport_work_array(phba, vports);
34595 - atomic_set(&phba->num_rsrc_err, 0);
34596 - atomic_set(&phba->num_cmd_success, 0);
34597 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34598 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34599 }
34600
34601 /**
34602 diff -urNp linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c
34603 --- linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
34604 +++ linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
34605 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34606 int rval;
34607 int i;
34608
34609 + pax_track_stack();
34610 +
34611 // Allocate memory for the base list of scb for management module.
34612 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34613
34614 diff -urNp linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c
34615 --- linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
34616 +++ linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
34617 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
34618 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34619 int ret;
34620
34621 + pax_track_stack();
34622 +
34623 or = osd_start_request(od, GFP_KERNEL);
34624 if (!or)
34625 return -ENOMEM;
34626 diff -urNp linux-2.6.32.43/drivers/scsi/pmcraid.c linux-2.6.32.43/drivers/scsi/pmcraid.c
34627 --- linux-2.6.32.43/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
34628 +++ linux-2.6.32.43/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
34629 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
34630 res->scsi_dev = scsi_dev;
34631 scsi_dev->hostdata = res;
34632 res->change_detected = 0;
34633 - atomic_set(&res->read_failures, 0);
34634 - atomic_set(&res->write_failures, 0);
34635 + atomic_set_unchecked(&res->read_failures, 0);
34636 + atomic_set_unchecked(&res->write_failures, 0);
34637 rc = 0;
34638 }
34639 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34640 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
34641
34642 /* If this was a SCSI read/write command keep count of errors */
34643 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34644 - atomic_inc(&res->read_failures);
34645 + atomic_inc_unchecked(&res->read_failures);
34646 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34647 - atomic_inc(&res->write_failures);
34648 + atomic_inc_unchecked(&res->write_failures);
34649
34650 if (!RES_IS_GSCSI(res->cfg_entry) &&
34651 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34652 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
34653
34654 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34655 /* add resources only after host is added into system */
34656 - if (!atomic_read(&pinstance->expose_resources))
34657 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34658 return;
34659
34660 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
34661 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
34662 init_waitqueue_head(&pinstance->reset_wait_q);
34663
34664 atomic_set(&pinstance->outstanding_cmds, 0);
34665 - atomic_set(&pinstance->expose_resources, 0);
34666 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34667
34668 INIT_LIST_HEAD(&pinstance->free_res_q);
34669 INIT_LIST_HEAD(&pinstance->used_res_q);
34670 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
34671 /* Schedule worker thread to handle CCN and take care of adding and
34672 * removing devices to OS
34673 */
34674 - atomic_set(&pinstance->expose_resources, 1);
34675 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34676 schedule_work(&pinstance->worker_q);
34677 return rc;
34678
34679 diff -urNp linux-2.6.32.43/drivers/scsi/pmcraid.h linux-2.6.32.43/drivers/scsi/pmcraid.h
34680 --- linux-2.6.32.43/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
34681 +++ linux-2.6.32.43/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
34682 @@ -690,7 +690,7 @@ struct pmcraid_instance {
34683 atomic_t outstanding_cmds;
34684
34685 /* should add/delete resources to mid-layer now ?*/
34686 - atomic_t expose_resources;
34687 + atomic_unchecked_t expose_resources;
34688
34689 /* Tasklet to handle deferred processing */
34690 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
34691 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
34692 struct list_head queue; /* link to "to be exposed" resources */
34693 struct pmcraid_config_table_entry cfg_entry;
34694 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34695 - atomic_t read_failures; /* count of failed READ commands */
34696 - atomic_t write_failures; /* count of failed WRITE commands */
34697 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34698 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34699
34700 /* To indicate add/delete/modify during CCN */
34701 u8 change_detected;
34702 diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h
34703 --- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
34704 +++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
34705 @@ -240,7 +240,7 @@ struct ddb_entry {
34706 atomic_t retry_relogin_timer; /* Min Time between relogins
34707 * (4000 only) */
34708 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34709 - atomic_t relogin_retry_count; /* Num of times relogin has been
34710 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34711 * retried */
34712
34713 uint16_t port;
34714 diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c
34715 --- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
34716 +++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
34717 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
34718 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
34719 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34720 atomic_set(&ddb_entry->relogin_timer, 0);
34721 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34722 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34723 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34724 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34725 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34726 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
34727 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34728 atomic_set(&ddb_entry->port_down_timer,
34729 ha->port_down_retry_count);
34730 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34731 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34732 atomic_set(&ddb_entry->relogin_timer, 0);
34733 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34734 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
34735 diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c
34736 --- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
34737 +++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
34738 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
34739 ddb_entry->fw_ddb_device_state ==
34740 DDB_DS_SESSION_FAILED) {
34741 /* Reset retry relogin timer */
34742 - atomic_inc(&ddb_entry->relogin_retry_count);
34743 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34744 DEBUG2(printk("scsi%ld: index[%d] relogin"
34745 " timed out-retrying"
34746 " relogin (%d)\n",
34747 ha->host_no,
34748 ddb_entry->fw_ddb_index,
34749 - atomic_read(&ddb_entry->
34750 + atomic_read_unchecked(&ddb_entry->
34751 relogin_retry_count))
34752 );
34753 start_dpc++;
34754 diff -urNp linux-2.6.32.43/drivers/scsi/scsi.c linux-2.6.32.43/drivers/scsi/scsi.c
34755 --- linux-2.6.32.43/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34756 +++ linux-2.6.32.43/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34757 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34758 unsigned long timeout;
34759 int rtn = 0;
34760
34761 - atomic_inc(&cmd->device->iorequest_cnt);
34762 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34763
34764 /* check if the device is still usable */
34765 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34766 diff -urNp linux-2.6.32.43/drivers/scsi/scsi_debug.c linux-2.6.32.43/drivers/scsi/scsi_debug.c
34767 --- linux-2.6.32.43/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34768 +++ linux-2.6.32.43/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34769 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34770 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34771 unsigned char *cmd = (unsigned char *)scp->cmnd;
34772
34773 + pax_track_stack();
34774 +
34775 if ((errsts = check_readiness(scp, 1, devip)))
34776 return errsts;
34777 memset(arr, 0, sizeof(arr));
34778 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34779 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34780 unsigned char *cmd = (unsigned char *)scp->cmnd;
34781
34782 + pax_track_stack();
34783 +
34784 if ((errsts = check_readiness(scp, 1, devip)))
34785 return errsts;
34786 memset(arr, 0, sizeof(arr));
34787 diff -urNp linux-2.6.32.43/drivers/scsi/scsi_lib.c linux-2.6.32.43/drivers/scsi/scsi_lib.c
34788 --- linux-2.6.32.43/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34789 +++ linux-2.6.32.43/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34790 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34791
34792 scsi_init_cmd_errh(cmd);
34793 cmd->result = DID_NO_CONNECT << 16;
34794 - atomic_inc(&cmd->device->iorequest_cnt);
34795 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34796
34797 /*
34798 * SCSI request completion path will do scsi_device_unbusy(),
34799 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34800 */
34801 cmd->serial_number = 0;
34802
34803 - atomic_inc(&cmd->device->iodone_cnt);
34804 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34805 if (cmd->result)
34806 - atomic_inc(&cmd->device->ioerr_cnt);
34807 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34808
34809 disposition = scsi_decide_disposition(cmd);
34810 if (disposition != SUCCESS &&
34811 diff -urNp linux-2.6.32.43/drivers/scsi/scsi_sysfs.c linux-2.6.32.43/drivers/scsi/scsi_sysfs.c
34812 --- linux-2.6.32.43/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
34813 +++ linux-2.6.32.43/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
34814 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
34815 char *buf) \
34816 { \
34817 struct scsi_device *sdev = to_scsi_device(dev); \
34818 - unsigned long long count = atomic_read(&sdev->field); \
34819 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34820 return snprintf(buf, 20, "0x%llx\n", count); \
34821 } \
34822 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34823 diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c
34824 --- linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34825 +++ linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34826 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34827 * Netlink Infrastructure
34828 */
34829
34830 -static atomic_t fc_event_seq;
34831 +static atomic_unchecked_t fc_event_seq;
34832
34833 /**
34834 * fc_get_event_number - Obtain the next sequential FC event number
34835 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34836 u32
34837 fc_get_event_number(void)
34838 {
34839 - return atomic_add_return(1, &fc_event_seq);
34840 + return atomic_add_return_unchecked(1, &fc_event_seq);
34841 }
34842 EXPORT_SYMBOL(fc_get_event_number);
34843
34844 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34845 {
34846 int error;
34847
34848 - atomic_set(&fc_event_seq, 0);
34849 + atomic_set_unchecked(&fc_event_seq, 0);
34850
34851 error = transport_class_register(&fc_host_class);
34852 if (error)
34853 diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c
34854 --- linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34855 +++ linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34856 @@ -81,7 +81,7 @@ struct iscsi_internal {
34857 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34858 };
34859
34860 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34861 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34862 static struct workqueue_struct *iscsi_eh_timer_workq;
34863
34864 /*
34865 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34866 int err;
34867
34868 ihost = shost->shost_data;
34869 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34870 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34871
34872 if (id == ISCSI_MAX_TARGET) {
34873 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34874 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34875 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34876 ISCSI_TRANSPORT_VERSION);
34877
34878 - atomic_set(&iscsi_session_nr, 0);
34879 + atomic_set_unchecked(&iscsi_session_nr, 0);
34880
34881 err = class_register(&iscsi_transport_class);
34882 if (err)
34883 diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c
34884 --- linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34885 +++ linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34886 @@ -33,7 +33,7 @@
34887 #include "scsi_transport_srp_internal.h"
34888
34889 struct srp_host_attrs {
34890 - atomic_t next_port_id;
34891 + atomic_unchecked_t next_port_id;
34892 };
34893 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34894
34895 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34896 struct Scsi_Host *shost = dev_to_shost(dev);
34897 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34898
34899 - atomic_set(&srp_host->next_port_id, 0);
34900 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34901 return 0;
34902 }
34903
34904 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34905 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34906 rport->roles = ids->roles;
34907
34908 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34909 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34910 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34911
34912 transport_setup_device(&rport->dev);
34913 diff -urNp linux-2.6.32.43/drivers/scsi/sg.c linux-2.6.32.43/drivers/scsi/sg.c
34914 --- linux-2.6.32.43/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34915 +++ linux-2.6.32.43/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34916 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34917 const struct file_operations * fops;
34918 };
34919
34920 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34921 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34922 {"allow_dio", &adio_fops},
34923 {"debug", &debug_fops},
34924 {"def_reserved_size", &dressz_fops},
34925 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34926 {
34927 int k, mask;
34928 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34929 - struct sg_proc_leaf * leaf;
34930 + const struct sg_proc_leaf * leaf;
34931
34932 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34933 if (!sg_proc_sgp)
34934 diff -urNp linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c
34935 --- linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34936 +++ linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34937 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34938 int do_iounmap = 0;
34939 int do_disable_device = 1;
34940
34941 + pax_track_stack();
34942 +
34943 memset(&sym_dev, 0, sizeof(sym_dev));
34944 memset(&nvram, 0, sizeof(nvram));
34945 sym_dev.pdev = pdev;
34946 diff -urNp linux-2.6.32.43/drivers/serial/kgdboc.c linux-2.6.32.43/drivers/serial/kgdboc.c
34947 --- linux-2.6.32.43/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34948 +++ linux-2.6.32.43/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34949 @@ -18,7 +18,7 @@
34950
34951 #define MAX_CONFIG_LEN 40
34952
34953 -static struct kgdb_io kgdboc_io_ops;
34954 +static const struct kgdb_io kgdboc_io_ops;
34955
34956 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34957 static int configured = -1;
34958 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34959 module_put(THIS_MODULE);
34960 }
34961
34962 -static struct kgdb_io kgdboc_io_ops = {
34963 +static const struct kgdb_io kgdboc_io_ops = {
34964 .name = "kgdboc",
34965 .read_char = kgdboc_get_char,
34966 .write_char = kgdboc_put_char,
34967 diff -urNp linux-2.6.32.43/drivers/spi/spi.c linux-2.6.32.43/drivers/spi/spi.c
34968 --- linux-2.6.32.43/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34969 +++ linux-2.6.32.43/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34970 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34971 EXPORT_SYMBOL_GPL(spi_sync);
34972
34973 /* portable code must never pass more than 32 bytes */
34974 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34975 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34976
34977 static u8 *buf;
34978
34979 diff -urNp linux-2.6.32.43/drivers/staging/android/binder.c linux-2.6.32.43/drivers/staging/android/binder.c
34980 --- linux-2.6.32.43/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34981 +++ linux-2.6.32.43/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34982 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34983 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34984 }
34985
34986 -static struct vm_operations_struct binder_vm_ops = {
34987 +static const struct vm_operations_struct binder_vm_ops = {
34988 .open = binder_vma_open,
34989 .close = binder_vma_close,
34990 };
34991 diff -urNp linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c
34992 --- linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34993 +++ linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34994 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34995 return VM_FAULT_NOPAGE;
34996 }
34997
34998 -static struct vm_operations_struct b3dfg_vm_ops = {
34999 +static const struct vm_operations_struct b3dfg_vm_ops = {
35000 .fault = b3dfg_vma_fault,
35001 };
35002
35003 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
35004 return r;
35005 }
35006
35007 -static struct file_operations b3dfg_fops = {
35008 +static const struct file_operations b3dfg_fops = {
35009 .owner = THIS_MODULE,
35010 .open = b3dfg_open,
35011 .release = b3dfg_release,
35012 diff -urNp linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c
35013 --- linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
35014 +++ linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
35015 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
35016 mutex_unlock(&dev->mutex);
35017 }
35018
35019 -static struct vm_operations_struct comedi_vm_ops = {
35020 +static const struct vm_operations_struct comedi_vm_ops = {
35021 .close = comedi_unmap,
35022 };
35023
35024 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c
35025 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
35026 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
35027 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
35028 static dev_t adsp_devno;
35029 static struct class *adsp_class;
35030
35031 -static struct file_operations adsp_fops = {
35032 +static const struct file_operations adsp_fops = {
35033 .owner = THIS_MODULE,
35034 .open = adsp_open,
35035 .unlocked_ioctl = adsp_ioctl,
35036 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c
35037 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
35038 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
35039 @@ -1022,7 +1022,7 @@ done:
35040 return rc;
35041 }
35042
35043 -static struct file_operations audio_aac_fops = {
35044 +static const struct file_operations audio_aac_fops = {
35045 .owner = THIS_MODULE,
35046 .open = audio_open,
35047 .release = audio_release,
35048 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c
35049 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
35050 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
35051 @@ -833,7 +833,7 @@ done:
35052 return rc;
35053 }
35054
35055 -static struct file_operations audio_amrnb_fops = {
35056 +static const struct file_operations audio_amrnb_fops = {
35057 .owner = THIS_MODULE,
35058 .open = audamrnb_open,
35059 .release = audamrnb_release,
35060 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c
35061 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
35062 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
35063 @@ -805,7 +805,7 @@ dma_fail:
35064 return rc;
35065 }
35066
35067 -static struct file_operations audio_evrc_fops = {
35068 +static const struct file_operations audio_evrc_fops = {
35069 .owner = THIS_MODULE,
35070 .open = audevrc_open,
35071 .release = audevrc_release,
35072 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c
35073 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
35074 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
35075 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
35076 return 0;
35077 }
35078
35079 -static struct file_operations audio_fops = {
35080 +static const struct file_operations audio_fops = {
35081 .owner = THIS_MODULE,
35082 .open = audio_in_open,
35083 .release = audio_in_release,
35084 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
35085 .unlocked_ioctl = audio_in_ioctl,
35086 };
35087
35088 -static struct file_operations audpre_fops = {
35089 +static const struct file_operations audpre_fops = {
35090 .owner = THIS_MODULE,
35091 .open = audpre_open,
35092 .unlocked_ioctl = audpre_ioctl,
35093 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c
35094 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
35095 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
35096 @@ -941,7 +941,7 @@ done:
35097 return rc;
35098 }
35099
35100 -static struct file_operations audio_mp3_fops = {
35101 +static const struct file_operations audio_mp3_fops = {
35102 .owner = THIS_MODULE,
35103 .open = audio_open,
35104 .release = audio_release,
35105 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c
35106 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
35107 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
35108 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
35109 return 0;
35110 }
35111
35112 -static struct file_operations audio_fops = {
35113 +static const struct file_operations audio_fops = {
35114 .owner = THIS_MODULE,
35115 .open = audio_open,
35116 .release = audio_release,
35117 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
35118 .unlocked_ioctl = audio_ioctl,
35119 };
35120
35121 -static struct file_operations audpp_fops = {
35122 +static const struct file_operations audpp_fops = {
35123 .owner = THIS_MODULE,
35124 .open = audpp_open,
35125 .unlocked_ioctl = audpp_ioctl,
35126 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c
35127 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
35128 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
35129 @@ -816,7 +816,7 @@ err:
35130 return rc;
35131 }
35132
35133 -static struct file_operations audio_qcelp_fops = {
35134 +static const struct file_operations audio_qcelp_fops = {
35135 .owner = THIS_MODULE,
35136 .open = audqcelp_open,
35137 .release = audqcelp_release,
35138 diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c
35139 --- linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
35140 +++ linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
35141 @@ -242,7 +242,7 @@ err:
35142 return rc;
35143 }
35144
35145 -static struct file_operations snd_fops = {
35146 +static const struct file_operations snd_fops = {
35147 .owner = THIS_MODULE,
35148 .open = snd_open,
35149 .release = snd_release,
35150 diff -urNp linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c
35151 --- linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
35152 +++ linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
35153 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
35154 return 0;
35155 }
35156
35157 -static struct file_operations qmi_fops = {
35158 +static const struct file_operations qmi_fops = {
35159 .owner = THIS_MODULE,
35160 .read = qmi_read,
35161 .write = qmi_write,
35162 diff -urNp linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c
35163 --- linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
35164 +++ linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
35165 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
35166 return rc;
35167 }
35168
35169 -static struct file_operations rpcrouter_server_fops = {
35170 +static const struct file_operations rpcrouter_server_fops = {
35171 .owner = THIS_MODULE,
35172 .open = rpcrouter_open,
35173 .release = rpcrouter_release,
35174 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
35175 .unlocked_ioctl = rpcrouter_ioctl,
35176 };
35177
35178 -static struct file_operations rpcrouter_router_fops = {
35179 +static const struct file_operations rpcrouter_router_fops = {
35180 .owner = THIS_MODULE,
35181 .open = rpcrouter_open,
35182 .release = rpcrouter_release,
35183 diff -urNp linux-2.6.32.43/drivers/staging/dst/dcore.c linux-2.6.32.43/drivers/staging/dst/dcore.c
35184 --- linux-2.6.32.43/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
35185 +++ linux-2.6.32.43/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
35186 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
35187 return 0;
35188 }
35189
35190 -static struct block_device_operations dst_blk_ops = {
35191 +static const struct block_device_operations dst_blk_ops = {
35192 .open = dst_bdev_open,
35193 .release = dst_bdev_release,
35194 .owner = THIS_MODULE,
35195 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
35196 n->size = ctl->size;
35197
35198 atomic_set(&n->refcnt, 1);
35199 - atomic_long_set(&n->gen, 0);
35200 + atomic_long_set_unchecked(&n->gen, 0);
35201 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
35202
35203 err = dst_node_sysfs_init(n);
35204 diff -urNp linux-2.6.32.43/drivers/staging/dst/trans.c linux-2.6.32.43/drivers/staging/dst/trans.c
35205 --- linux-2.6.32.43/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
35206 +++ linux-2.6.32.43/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
35207 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
35208 t->error = 0;
35209 t->retries = 0;
35210 atomic_set(&t->refcnt, 1);
35211 - t->gen = atomic_long_inc_return(&n->gen);
35212 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
35213
35214 t->enc = bio_data_dir(bio);
35215 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
35216 diff -urNp linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c
35217 --- linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
35218 +++ linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
35219 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
35220 struct net_device_stats *stats = &etdev->net_stats;
35221
35222 if (pMpTcb->Flags & fMP_DEST_BROAD)
35223 - atomic_inc(&etdev->Stats.brdcstxmt);
35224 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
35225 else if (pMpTcb->Flags & fMP_DEST_MULTI)
35226 - atomic_inc(&etdev->Stats.multixmt);
35227 + atomic_inc_unchecked(&etdev->Stats.multixmt);
35228 else
35229 - atomic_inc(&etdev->Stats.unixmt);
35230 + atomic_inc_unchecked(&etdev->Stats.unixmt);
35231
35232 if (pMpTcb->Packet) {
35233 stats->tx_bytes += pMpTcb->Packet->len;
35234 diff -urNp linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h
35235 --- linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
35236 +++ linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
35237 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
35238 * operations
35239 */
35240 u32 unircv; /* # multicast packets received */
35241 - atomic_t unixmt; /* # multicast packets for Tx */
35242 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
35243 u32 multircv; /* # multicast packets received */
35244 - atomic_t multixmt; /* # multicast packets for Tx */
35245 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
35246 u32 brdcstrcv; /* # broadcast packets received */
35247 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
35248 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
35249 u32 norcvbuf; /* # Rx packets discarded */
35250 u32 noxmtbuf; /* # Tx packets discarded */
35251
35252 diff -urNp linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c
35253 --- linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
35254 +++ linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
35255 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
35256 return 0;
35257 }
35258
35259 -static struct vm_operations_struct go7007_vm_ops = {
35260 +static const struct vm_operations_struct go7007_vm_ops = {
35261 .open = go7007_vm_open,
35262 .close = go7007_vm_close,
35263 .fault = go7007_vm_fault,
35264 diff -urNp linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c
35265 --- linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
35266 +++ linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
35267 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
35268 /* The one and only one */
35269 static struct blkvsc_driver_context g_blkvsc_drv;
35270
35271 -static struct block_device_operations block_ops = {
35272 +static const struct block_device_operations block_ops = {
35273 .owner = THIS_MODULE,
35274 .open = blkvsc_open,
35275 .release = blkvsc_release,
35276 diff -urNp linux-2.6.32.43/drivers/staging/hv/Channel.c linux-2.6.32.43/drivers/staging/hv/Channel.c
35277 --- linux-2.6.32.43/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
35278 +++ linux-2.6.32.43/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
35279 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
35280
35281 DPRINT_ENTER(VMBUS);
35282
35283 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
35284 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
35285 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
35286 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
35287
35288 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
35289 ASSERT(msgInfo != NULL);
35290 diff -urNp linux-2.6.32.43/drivers/staging/hv/Hv.c linux-2.6.32.43/drivers/staging/hv/Hv.c
35291 --- linux-2.6.32.43/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
35292 +++ linux-2.6.32.43/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
35293 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
35294 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
35295 u32 outputAddressHi = outputAddress >> 32;
35296 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
35297 - volatile void *hypercallPage = gHvContext.HypercallPage;
35298 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
35299
35300 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
35301 Control, Input, Output);
35302 diff -urNp linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c
35303 --- linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
35304 +++ linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
35305 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
35306 to_device_context(root_device_obj);
35307 struct device_context *child_device_ctx =
35308 to_device_context(child_device_obj);
35309 - static atomic_t device_num = ATOMIC_INIT(0);
35310 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
35311
35312 DPRINT_ENTER(VMBUS_DRV);
35313
35314 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
35315
35316 /* Set the device name. Otherwise, device_register() will fail. */
35317 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
35318 - atomic_inc_return(&device_num));
35319 + atomic_inc_return_unchecked(&device_num));
35320
35321 /* The new device belongs to this bus */
35322 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
35323 diff -urNp linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h
35324 --- linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
35325 +++ linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
35326 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
35327 struct VMBUS_CONNECTION {
35328 enum VMBUS_CONNECT_STATE ConnectState;
35329
35330 - atomic_t NextGpadlHandle;
35331 + atomic_unchecked_t NextGpadlHandle;
35332
35333 /*
35334 * Represents channel interrupts. Each bit position represents a
35335 diff -urNp linux-2.6.32.43/drivers/staging/octeon/ethernet.c linux-2.6.32.43/drivers/staging/octeon/ethernet.c
35336 --- linux-2.6.32.43/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
35337 +++ linux-2.6.32.43/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
35338 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
35339 * since the RX tasklet also increments it.
35340 */
35341 #ifdef CONFIG_64BIT
35342 - atomic64_add(rx_status.dropped_packets,
35343 - (atomic64_t *)&priv->stats.rx_dropped);
35344 + atomic64_add_unchecked(rx_status.dropped_packets,
35345 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35346 #else
35347 - atomic_add(rx_status.dropped_packets,
35348 - (atomic_t *)&priv->stats.rx_dropped);
35349 + atomic_add_unchecked(rx_status.dropped_packets,
35350 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35351 #endif
35352 }
35353
35354 diff -urNp linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c
35355 --- linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
35356 +++ linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
35357 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
35358 /* Increment RX stats for virtual ports */
35359 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35360 #ifdef CONFIG_64BIT
35361 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35362 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35363 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35364 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35365 #else
35366 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35367 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35368 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35369 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35370 #endif
35371 }
35372 netif_receive_skb(skb);
35373 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
35374 dev->name);
35375 */
35376 #ifdef CONFIG_64BIT
35377 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34378 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35379 #else
35380 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34381 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35382 #endif
35383 dev_kfree_skb_irq(skb);
35384 }
35385 diff -urNp linux-2.6.32.43/drivers/staging/panel/panel.c linux-2.6.32.43/drivers/staging/panel/panel.c
35386 --- linux-2.6.32.43/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
35387 +++ linux-2.6.32.43/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
35388 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
35389 return 0;
35390 }
35391
35392 -static struct file_operations lcd_fops = {
35393 +static const struct file_operations lcd_fops = {
35394 .write = lcd_write,
35395 .open = lcd_open,
35396 .release = lcd_release,
35397 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
35398 return 0;
35399 }
35400
35401 -static struct file_operations keypad_fops = {
35402 +static const struct file_operations keypad_fops = {
35403 .read = keypad_read, /* read */
35404 .open = keypad_open, /* open */
35405 .release = keypad_release, /* close */
35406 diff -urNp linux-2.6.32.43/drivers/staging/phison/phison.c linux-2.6.32.43/drivers/staging/phison/phison.c
35407 --- linux-2.6.32.43/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
35408 +++ linux-2.6.32.43/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
35409 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
35410 ATA_BMDMA_SHT(DRV_NAME),
35411 };
35412
35413 -static struct ata_port_operations phison_ops = {
35414 +static const struct ata_port_operations phison_ops = {
35415 .inherits = &ata_bmdma_port_ops,
35416 .prereset = phison_pre_reset,
35417 };
35418 diff -urNp linux-2.6.32.43/drivers/staging/poch/poch.c linux-2.6.32.43/drivers/staging/poch/poch.c
35419 --- linux-2.6.32.43/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
35420 +++ linux-2.6.32.43/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
35421 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
35422 return 0;
35423 }
35424
35425 -static struct file_operations poch_fops = {
35426 +static const struct file_operations poch_fops = {
35427 .owner = THIS_MODULE,
35428 .open = poch_open,
35429 .release = poch_release,
35430 diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/inode.c linux-2.6.32.43/drivers/staging/pohmelfs/inode.c
35431 --- linux-2.6.32.43/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
35432 +++ linux-2.6.32.43/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
35433 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
35434 mutex_init(&psb->mcache_lock);
35435 psb->mcache_root = RB_ROOT;
35436 psb->mcache_timeout = msecs_to_jiffies(5000);
35437 - atomic_long_set(&psb->mcache_gen, 0);
35438 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35439
35440 psb->trans_max_pages = 100;
35441
35442 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
35443 INIT_LIST_HEAD(&psb->crypto_ready_list);
35444 INIT_LIST_HEAD(&psb->crypto_active_list);
35445
35446 - atomic_set(&psb->trans_gen, 1);
35447 + atomic_set_unchecked(&psb->trans_gen, 1);
35448 atomic_long_set(&psb->total_inodes, 0);
35449
35450 mutex_init(&psb->state_lock);
35451 diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c
35452 --- linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
35453 +++ linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
35454 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35455 m->data = data;
35456 m->start = start;
35457 m->size = size;
35458 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35459 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35460
35461 mutex_lock(&psb->mcache_lock);
35462 err = pohmelfs_mcache_insert(psb, m);
35463 diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h
35464 --- linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
35465 +++ linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
35466 @@ -570,14 +570,14 @@ struct pohmelfs_config;
35467 struct pohmelfs_sb {
35468 struct rb_root mcache_root;
35469 struct mutex mcache_lock;
35470 - atomic_long_t mcache_gen;
35471 + atomic_long_unchecked_t mcache_gen;
35472 unsigned long mcache_timeout;
35473
35474 unsigned int idx;
35475
35476 unsigned int trans_retries;
35477
35478 - atomic_t trans_gen;
35479 + atomic_unchecked_t trans_gen;
35480
35481 unsigned int crypto_attached_size;
35482 unsigned int crypto_align_size;
35483 diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/trans.c linux-2.6.32.43/drivers/staging/pohmelfs/trans.c
35484 --- linux-2.6.32.43/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
35485 +++ linux-2.6.32.43/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
35486 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35487 int err;
35488 struct netfs_cmd *cmd = t->iovec.iov_base;
35489
35490 - t->gen = atomic_inc_return(&psb->trans_gen);
35491 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35492
35493 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35494 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35495 diff -urNp linux-2.6.32.43/drivers/staging/sep/sep_driver.c linux-2.6.32.43/drivers/staging/sep/sep_driver.c
35496 --- linux-2.6.32.43/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
35497 +++ linux-2.6.32.43/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
35498 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
35499 static dev_t sep_devno;
35500
35501 /* the files operations structure of the driver */
35502 -static struct file_operations sep_file_operations = {
35503 +static const struct file_operations sep_file_operations = {
35504 .owner = THIS_MODULE,
35505 .ioctl = sep_ioctl,
35506 .poll = sep_poll,
35507 diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci.h linux-2.6.32.43/drivers/staging/usbip/vhci.h
35508 --- linux-2.6.32.43/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
35509 +++ linux-2.6.32.43/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
35510 @@ -92,7 +92,7 @@ struct vhci_hcd {
35511 unsigned resuming:1;
35512 unsigned long re_timeout;
35513
35514 - atomic_t seqnum;
35515 + atomic_unchecked_t seqnum;
35516
35517 /*
35518 * NOTE:
35519 diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c
35520 --- linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
35521 +++ linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
35522 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
35523 return;
35524 }
35525
35526 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35527 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35528 if (priv->seqnum == 0xffff)
35529 usbip_uinfo("seqnum max\n");
35530
35531 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
35532 return -ENOMEM;
35533 }
35534
35535 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35536 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35537 if (unlink->seqnum == 0xffff)
35538 usbip_uinfo("seqnum max\n");
35539
35540 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
35541 vdev->rhport = rhport;
35542 }
35543
35544 - atomic_set(&vhci->seqnum, 0);
35545 + atomic_set_unchecked(&vhci->seqnum, 0);
35546 spin_lock_init(&vhci->lock);
35547
35548
35549 diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c
35550 --- linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
35551 +++ linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
35552 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
35553 usbip_uerr("cannot find a urb of seqnum %u\n",
35554 pdu->base.seqnum);
35555 usbip_uinfo("max seqnum %d\n",
35556 - atomic_read(&the_controller->seqnum));
35557 + atomic_read_unchecked(&the_controller->seqnum));
35558 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35559 return;
35560 }
35561 diff -urNp linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c
35562 --- linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
35563 +++ linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
35564 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
35565 static int __init vme_user_probe(struct device *, int, int);
35566 static int __exit vme_user_remove(struct device *, int, int);
35567
35568 -static struct file_operations vme_user_fops = {
35569 +static const struct file_operations vme_user_fops = {
35570 .open = vme_user_open,
35571 .release = vme_user_release,
35572 .read = vme_user_read,
35573 diff -urNp linux-2.6.32.43/drivers/telephony/ixj.c linux-2.6.32.43/drivers/telephony/ixj.c
35574 --- linux-2.6.32.43/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
35575 +++ linux-2.6.32.43/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
35576 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35577 bool mContinue;
35578 char *pIn, *pOut;
35579
35580 + pax_track_stack();
35581 +
35582 if (!SCI_Prepare(j))
35583 return 0;
35584
35585 diff -urNp linux-2.6.32.43/drivers/uio/uio.c linux-2.6.32.43/drivers/uio/uio.c
35586 --- linux-2.6.32.43/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
35587 +++ linux-2.6.32.43/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
35588 @@ -23,6 +23,7 @@
35589 #include <linux/string.h>
35590 #include <linux/kobject.h>
35591 #include <linux/uio_driver.h>
35592 +#include <asm/local.h>
35593
35594 #define UIO_MAX_DEVICES 255
35595
35596 @@ -30,10 +31,10 @@ struct uio_device {
35597 struct module *owner;
35598 struct device *dev;
35599 int minor;
35600 - atomic_t event;
35601 + atomic_unchecked_t event;
35602 struct fasync_struct *async_queue;
35603 wait_queue_head_t wait;
35604 - int vma_count;
35605 + local_t vma_count;
35606 struct uio_info *info;
35607 struct kobject *map_dir;
35608 struct kobject *portio_dir;
35609 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
35610 return entry->show(mem, buf);
35611 }
35612
35613 -static struct sysfs_ops map_sysfs_ops = {
35614 +static const struct sysfs_ops map_sysfs_ops = {
35615 .show = map_type_show,
35616 };
35617
35618 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
35619 return entry->show(port, buf);
35620 }
35621
35622 -static struct sysfs_ops portio_sysfs_ops = {
35623 +static const struct sysfs_ops portio_sysfs_ops = {
35624 .show = portio_type_show,
35625 };
35626
35627 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
35628 struct uio_device *idev = dev_get_drvdata(dev);
35629 if (idev)
35630 return sprintf(buf, "%u\n",
35631 - (unsigned int)atomic_read(&idev->event));
35632 + (unsigned int)atomic_read_unchecked(&idev->event));
35633 else
35634 return -ENODEV;
35635 }
35636 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
35637 {
35638 struct uio_device *idev = info->uio_dev;
35639
35640 - atomic_inc(&idev->event);
35641 + atomic_inc_unchecked(&idev->event);
35642 wake_up_interruptible(&idev->wait);
35643 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35644 }
35645 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
35646 }
35647
35648 listener->dev = idev;
35649 - listener->event_count = atomic_read(&idev->event);
35650 + listener->event_count = atomic_read_unchecked(&idev->event);
35651 filep->private_data = listener;
35652
35653 if (idev->info->open) {
35654 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
35655 return -EIO;
35656
35657 poll_wait(filep, &idev->wait, wait);
35658 - if (listener->event_count != atomic_read(&idev->event))
35659 + if (listener->event_count != atomic_read_unchecked(&idev->event))
35660 return POLLIN | POLLRDNORM;
35661 return 0;
35662 }
35663 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
35664 do {
35665 set_current_state(TASK_INTERRUPTIBLE);
35666
35667 - event_count = atomic_read(&idev->event);
35668 + event_count = atomic_read_unchecked(&idev->event);
35669 if (event_count != listener->event_count) {
35670 if (copy_to_user(buf, &event_count, count))
35671 retval = -EFAULT;
35672 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
35673 static void uio_vma_open(struct vm_area_struct *vma)
35674 {
35675 struct uio_device *idev = vma->vm_private_data;
35676 - idev->vma_count++;
35677 + local_inc(&idev->vma_count);
35678 }
35679
35680 static void uio_vma_close(struct vm_area_struct *vma)
35681 {
35682 struct uio_device *idev = vma->vm_private_data;
35683 - idev->vma_count--;
35684 + local_dec(&idev->vma_count);
35685 }
35686
35687 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35688 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
35689 idev->owner = owner;
35690 idev->info = info;
35691 init_waitqueue_head(&idev->wait);
35692 - atomic_set(&idev->event, 0);
35693 + atomic_set_unchecked(&idev->event, 0);
35694
35695 ret = uio_get_minor(idev);
35696 if (ret)
35697 diff -urNp linux-2.6.32.43/drivers/usb/atm/usbatm.c linux-2.6.32.43/drivers/usb/atm/usbatm.c
35698 --- linux-2.6.32.43/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
35699 +++ linux-2.6.32.43/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
35700 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
35701 if (printk_ratelimit())
35702 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35703 __func__, vpi, vci);
35704 - atomic_inc(&vcc->stats->rx_err);
35705 + atomic_inc_unchecked(&vcc->stats->rx_err);
35706 return;
35707 }
35708
35709 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
35710 if (length > ATM_MAX_AAL5_PDU) {
35711 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35712 __func__, length, vcc);
35713 - atomic_inc(&vcc->stats->rx_err);
35714 + atomic_inc_unchecked(&vcc->stats->rx_err);
35715 goto out;
35716 }
35717
35718 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
35719 if (sarb->len < pdu_length) {
35720 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35721 __func__, pdu_length, sarb->len, vcc);
35722 - atomic_inc(&vcc->stats->rx_err);
35723 + atomic_inc_unchecked(&vcc->stats->rx_err);
35724 goto out;
35725 }
35726
35727 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35728 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35729 __func__, vcc);
35730 - atomic_inc(&vcc->stats->rx_err);
35731 + atomic_inc_unchecked(&vcc->stats->rx_err);
35732 goto out;
35733 }
35734
35735 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
35736 if (printk_ratelimit())
35737 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35738 __func__, length);
35739 - atomic_inc(&vcc->stats->rx_drop);
35740 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35741 goto out;
35742 }
35743
35744 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
35745
35746 vcc->push(vcc, skb);
35747
35748 - atomic_inc(&vcc->stats->rx);
35749 + atomic_inc_unchecked(&vcc->stats->rx);
35750 out:
35751 skb_trim(sarb, 0);
35752 }
35753 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35754 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35755
35756 usbatm_pop(vcc, skb);
35757 - atomic_inc(&vcc->stats->tx);
35758 + atomic_inc_unchecked(&vcc->stats->tx);
35759
35760 skb = skb_dequeue(&instance->sndqueue);
35761 }
35762 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35763 if (!left--)
35764 return sprintf(page,
35765 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35766 - atomic_read(&atm_dev->stats.aal5.tx),
35767 - atomic_read(&atm_dev->stats.aal5.tx_err),
35768 - atomic_read(&atm_dev->stats.aal5.rx),
35769 - atomic_read(&atm_dev->stats.aal5.rx_err),
35770 - atomic_read(&atm_dev->stats.aal5.rx_drop));
35771 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35772 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35773 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35774 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35775 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35776
35777 if (!left--) {
35778 if (instance->disconnected)
35779 diff -urNp linux-2.6.32.43/drivers/usb/class/cdc-wdm.c linux-2.6.32.43/drivers/usb/class/cdc-wdm.c
35780 --- linux-2.6.32.43/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35781 +++ linux-2.6.32.43/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35782 @@ -314,7 +314,7 @@ static ssize_t wdm_write
35783 if (r < 0)
35784 goto outnp;
35785
35786 - if (!file->f_flags && O_NONBLOCK)
35787 + if (!(file->f_flags & O_NONBLOCK))
35788 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35789 &desc->flags));
35790 else
35791 diff -urNp linux-2.6.32.43/drivers/usb/core/hcd.c linux-2.6.32.43/drivers/usb/core/hcd.c
35792 --- linux-2.6.32.43/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35793 +++ linux-2.6.32.43/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35794 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35795
35796 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35797
35798 -struct usb_mon_operations *mon_ops;
35799 +const struct usb_mon_operations *mon_ops;
35800
35801 /*
35802 * The registration is unlocked.
35803 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35804 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35805 */
35806
35807 -int usb_mon_register (struct usb_mon_operations *ops)
35808 +int usb_mon_register (const struct usb_mon_operations *ops)
35809 {
35810
35811 if (mon_ops)
35812 diff -urNp linux-2.6.32.43/drivers/usb/core/hcd.h linux-2.6.32.43/drivers/usb/core/hcd.h
35813 --- linux-2.6.32.43/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35814 +++ linux-2.6.32.43/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35815 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35816 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35817
35818 struct usb_mon_operations {
35819 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35820 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35821 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35822 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35823 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35824 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35825 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35826 };
35827
35828 -extern struct usb_mon_operations *mon_ops;
35829 +extern const struct usb_mon_operations *mon_ops;
35830
35831 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35832 {
35833 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35834 (*mon_ops->urb_complete)(bus, urb, status);
35835 }
35836
35837 -int usb_mon_register(struct usb_mon_operations *ops);
35838 +int usb_mon_register(const struct usb_mon_operations *ops);
35839 void usb_mon_deregister(void);
35840
35841 #else
35842 diff -urNp linux-2.6.32.43/drivers/usb/core/message.c linux-2.6.32.43/drivers/usb/core/message.c
35843 --- linux-2.6.32.43/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35844 +++ linux-2.6.32.43/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35845 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35846 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35847 if (buf) {
35848 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35849 - if (len > 0) {
35850 - smallbuf = kmalloc(++len, GFP_NOIO);
35851 + if (len++ > 0) {
35852 + smallbuf = kmalloc(len, GFP_NOIO);
35853 if (!smallbuf)
35854 return buf;
35855 memcpy(smallbuf, buf, len);
35856 diff -urNp linux-2.6.32.43/drivers/usb/misc/appledisplay.c linux-2.6.32.43/drivers/usb/misc/appledisplay.c
35857 --- linux-2.6.32.43/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35858 +++ linux-2.6.32.43/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35859 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35860 return pdata->msgdata[1];
35861 }
35862
35863 -static struct backlight_ops appledisplay_bl_data = {
35864 +static const struct backlight_ops appledisplay_bl_data = {
35865 .get_brightness = appledisplay_bl_get_brightness,
35866 .update_status = appledisplay_bl_update_status,
35867 };
35868 diff -urNp linux-2.6.32.43/drivers/usb/mon/mon_main.c linux-2.6.32.43/drivers/usb/mon/mon_main.c
35869 --- linux-2.6.32.43/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35870 +++ linux-2.6.32.43/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35871 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35872 /*
35873 * Ops
35874 */
35875 -static struct usb_mon_operations mon_ops_0 = {
35876 +static const struct usb_mon_operations mon_ops_0 = {
35877 .urb_submit = mon_submit,
35878 .urb_submit_error = mon_submit_error,
35879 .urb_complete = mon_complete,
35880 diff -urNp linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h
35881 --- linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35882 +++ linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35883 @@ -192,7 +192,7 @@ struct wahc {
35884 struct list_head xfer_delayed_list;
35885 spinlock_t xfer_list_lock;
35886 struct work_struct xfer_work;
35887 - atomic_t xfer_id_count;
35888 + atomic_unchecked_t xfer_id_count;
35889 };
35890
35891
35892 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35893 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35894 spin_lock_init(&wa->xfer_list_lock);
35895 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35896 - atomic_set(&wa->xfer_id_count, 1);
35897 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35898 }
35899
35900 /**
35901 diff -urNp linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c
35902 --- linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35903 +++ linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35904 @@ -293,7 +293,7 @@ out:
35905 */
35906 static void wa_xfer_id_init(struct wa_xfer *xfer)
35907 {
35908 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35909 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35910 }
35911
35912 /*
35913 diff -urNp linux-2.6.32.43/drivers/uwb/wlp/messages.c linux-2.6.32.43/drivers/uwb/wlp/messages.c
35914 --- linux-2.6.32.43/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35915 +++ linux-2.6.32.43/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35916 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35917 size_t len = skb->len;
35918 size_t used;
35919 ssize_t result;
35920 - struct wlp_nonce enonce, rnonce;
35921 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35922 enum wlp_assc_error assc_err;
35923 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35924 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35925 diff -urNp linux-2.6.32.43/drivers/uwb/wlp/sysfs.c linux-2.6.32.43/drivers/uwb/wlp/sysfs.c
35926 --- linux-2.6.32.43/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35927 +++ linux-2.6.32.43/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35928 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35929 return ret;
35930 }
35931
35932 -static
35933 -struct sysfs_ops wss_sysfs_ops = {
35934 +static const struct sysfs_ops wss_sysfs_ops = {
35935 .show = wlp_wss_attr_show,
35936 .store = wlp_wss_attr_store,
35937 };
35938 diff -urNp linux-2.6.32.43/drivers/video/atmel_lcdfb.c linux-2.6.32.43/drivers/video/atmel_lcdfb.c
35939 --- linux-2.6.32.43/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35940 +++ linux-2.6.32.43/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35941 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35942 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35943 }
35944
35945 -static struct backlight_ops atmel_lcdc_bl_ops = {
35946 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35947 .update_status = atmel_bl_update_status,
35948 .get_brightness = atmel_bl_get_brightness,
35949 };
35950 diff -urNp linux-2.6.32.43/drivers/video/aty/aty128fb.c linux-2.6.32.43/drivers/video/aty/aty128fb.c
35951 --- linux-2.6.32.43/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35952 +++ linux-2.6.32.43/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35953 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35954 return bd->props.brightness;
35955 }
35956
35957 -static struct backlight_ops aty128_bl_data = {
35958 +static const struct backlight_ops aty128_bl_data = {
35959 .get_brightness = aty128_bl_get_brightness,
35960 .update_status = aty128_bl_update_status,
35961 };
35962 diff -urNp linux-2.6.32.43/drivers/video/aty/atyfb_base.c linux-2.6.32.43/drivers/video/aty/atyfb_base.c
35963 --- linux-2.6.32.43/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35964 +++ linux-2.6.32.43/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35965 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35966 return bd->props.brightness;
35967 }
35968
35969 -static struct backlight_ops aty_bl_data = {
35970 +static const struct backlight_ops aty_bl_data = {
35971 .get_brightness = aty_bl_get_brightness,
35972 .update_status = aty_bl_update_status,
35973 };
35974 diff -urNp linux-2.6.32.43/drivers/video/aty/radeon_backlight.c linux-2.6.32.43/drivers/video/aty/radeon_backlight.c
35975 --- linux-2.6.32.43/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35976 +++ linux-2.6.32.43/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35977 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35978 return bd->props.brightness;
35979 }
35980
35981 -static struct backlight_ops radeon_bl_data = {
35982 +static const struct backlight_ops radeon_bl_data = {
35983 .get_brightness = radeon_bl_get_brightness,
35984 .update_status = radeon_bl_update_status,
35985 };
35986 diff -urNp linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c
35987 --- linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35988 +++ linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35989 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35990 return error ? data->current_brightness : reg_val;
35991 }
35992
35993 -static struct backlight_ops adp5520_bl_ops = {
35994 +static const struct backlight_ops adp5520_bl_ops = {
35995 .update_status = adp5520_bl_update_status,
35996 .get_brightness = adp5520_bl_get_brightness,
35997 };
35998 diff -urNp linux-2.6.32.43/drivers/video/backlight/adx_bl.c linux-2.6.32.43/drivers/video/backlight/adx_bl.c
35999 --- linux-2.6.32.43/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
36000 +++ linux-2.6.32.43/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
36001 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
36002 return 1;
36003 }
36004
36005 -static struct backlight_ops adx_backlight_ops = {
36006 +static const struct backlight_ops adx_backlight_ops = {
36007 .options = 0,
36008 .update_status = adx_backlight_update_status,
36009 .get_brightness = adx_backlight_get_brightness,
36010 diff -urNp linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c
36011 --- linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
36012 +++ linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
36013 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
36014 return pwm_channel_enable(&pwmbl->pwmc);
36015 }
36016
36017 -static struct backlight_ops atmel_pwm_bl_ops = {
36018 +static const struct backlight_ops atmel_pwm_bl_ops = {
36019 .get_brightness = atmel_pwm_bl_get_intensity,
36020 .update_status = atmel_pwm_bl_set_intensity,
36021 };
36022 diff -urNp linux-2.6.32.43/drivers/video/backlight/backlight.c linux-2.6.32.43/drivers/video/backlight/backlight.c
36023 --- linux-2.6.32.43/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
36024 +++ linux-2.6.32.43/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
36025 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
36026 * ERR_PTR() or a pointer to the newly allocated device.
36027 */
36028 struct backlight_device *backlight_device_register(const char *name,
36029 - struct device *parent, void *devdata, struct backlight_ops *ops)
36030 + struct device *parent, void *devdata, const struct backlight_ops *ops)
36031 {
36032 struct backlight_device *new_bd;
36033 int rc;
36034 diff -urNp linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c
36035 --- linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
36036 +++ linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
36037 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
36038 }
36039 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
36040
36041 -static struct backlight_ops corgi_bl_ops = {
36042 +static const struct backlight_ops corgi_bl_ops = {
36043 .get_brightness = corgi_bl_get_intensity,
36044 .update_status = corgi_bl_update_status,
36045 };
36046 diff -urNp linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c
36047 --- linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
36048 +++ linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
36049 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
36050 return intensity;
36051 }
36052
36053 -static struct backlight_ops cr_backlight_ops = {
36054 +static const struct backlight_ops cr_backlight_ops = {
36055 .get_brightness = cr_backlight_get_intensity,
36056 .update_status = cr_backlight_set_intensity,
36057 };
36058 diff -urNp linux-2.6.32.43/drivers/video/backlight/da903x_bl.c linux-2.6.32.43/drivers/video/backlight/da903x_bl.c
36059 --- linux-2.6.32.43/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
36060 +++ linux-2.6.32.43/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
36061 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
36062 return data->current_brightness;
36063 }
36064
36065 -static struct backlight_ops da903x_backlight_ops = {
36066 +static const struct backlight_ops da903x_backlight_ops = {
36067 .update_status = da903x_backlight_update_status,
36068 .get_brightness = da903x_backlight_get_brightness,
36069 };
36070 diff -urNp linux-2.6.32.43/drivers/video/backlight/generic_bl.c linux-2.6.32.43/drivers/video/backlight/generic_bl.c
36071 --- linux-2.6.32.43/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
36072 +++ linux-2.6.32.43/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
36073 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
36074 }
36075 EXPORT_SYMBOL(corgibl_limit_intensity);
36076
36077 -static struct backlight_ops genericbl_ops = {
36078 +static const struct backlight_ops genericbl_ops = {
36079 .options = BL_CORE_SUSPENDRESUME,
36080 .get_brightness = genericbl_get_intensity,
36081 .update_status = genericbl_send_intensity,
36082 diff -urNp linux-2.6.32.43/drivers/video/backlight/hp680_bl.c linux-2.6.32.43/drivers/video/backlight/hp680_bl.c
36083 --- linux-2.6.32.43/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
36084 +++ linux-2.6.32.43/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
36085 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
36086 return current_intensity;
36087 }
36088
36089 -static struct backlight_ops hp680bl_ops = {
36090 +static const struct backlight_ops hp680bl_ops = {
36091 .get_brightness = hp680bl_get_intensity,
36092 .update_status = hp680bl_set_intensity,
36093 };
36094 diff -urNp linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c
36095 --- linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
36096 +++ linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
36097 @@ -93,7 +93,7 @@ out:
36098 return ret;
36099 }
36100
36101 -static struct backlight_ops jornada_bl_ops = {
36102 +static const struct backlight_ops jornada_bl_ops = {
36103 .get_brightness = jornada_bl_get_brightness,
36104 .update_status = jornada_bl_update_status,
36105 .options = BL_CORE_SUSPENDRESUME,
36106 diff -urNp linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c
36107 --- linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
36108 +++ linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
36109 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
36110 return kb3886bl_intensity;
36111 }
36112
36113 -static struct backlight_ops kb3886bl_ops = {
36114 +static const struct backlight_ops kb3886bl_ops = {
36115 .get_brightness = kb3886bl_get_intensity,
36116 .update_status = kb3886bl_send_intensity,
36117 };
36118 diff -urNp linux-2.6.32.43/drivers/video/backlight/locomolcd.c linux-2.6.32.43/drivers/video/backlight/locomolcd.c
36119 --- linux-2.6.32.43/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
36120 +++ linux-2.6.32.43/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
36121 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
36122 return current_intensity;
36123 }
36124
36125 -static struct backlight_ops locomobl_data = {
36126 +static const struct backlight_ops locomobl_data = {
36127 .get_brightness = locomolcd_get_intensity,
36128 .update_status = locomolcd_set_intensity,
36129 };
36130 diff -urNp linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c
36131 --- linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
36132 +++ linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
36133 @@ -33,7 +33,7 @@ struct dmi_match_data {
36134 unsigned long iostart;
36135 unsigned long iolen;
36136 /* Backlight operations structure. */
36137 - struct backlight_ops backlight_ops;
36138 + const struct backlight_ops backlight_ops;
36139 };
36140
36141 /* Module parameters. */
36142 diff -urNp linux-2.6.32.43/drivers/video/backlight/omap1_bl.c linux-2.6.32.43/drivers/video/backlight/omap1_bl.c
36143 --- linux-2.6.32.43/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
36144 +++ linux-2.6.32.43/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
36145 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
36146 return bl->current_intensity;
36147 }
36148
36149 -static struct backlight_ops omapbl_ops = {
36150 +static const struct backlight_ops omapbl_ops = {
36151 .get_brightness = omapbl_get_intensity,
36152 .update_status = omapbl_update_status,
36153 };
36154 diff -urNp linux-2.6.32.43/drivers/video/backlight/progear_bl.c linux-2.6.32.43/drivers/video/backlight/progear_bl.c
36155 --- linux-2.6.32.43/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
36156 +++ linux-2.6.32.43/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
36157 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
36158 return intensity - HW_LEVEL_MIN;
36159 }
36160
36161 -static struct backlight_ops progearbl_ops = {
36162 +static const struct backlight_ops progearbl_ops = {
36163 .get_brightness = progearbl_get_intensity,
36164 .update_status = progearbl_set_intensity,
36165 };
36166 diff -urNp linux-2.6.32.43/drivers/video/backlight/pwm_bl.c linux-2.6.32.43/drivers/video/backlight/pwm_bl.c
36167 --- linux-2.6.32.43/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
36168 +++ linux-2.6.32.43/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
36169 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
36170 return bl->props.brightness;
36171 }
36172
36173 -static struct backlight_ops pwm_backlight_ops = {
36174 +static const struct backlight_ops pwm_backlight_ops = {
36175 .update_status = pwm_backlight_update_status,
36176 .get_brightness = pwm_backlight_get_brightness,
36177 };
36178 diff -urNp linux-2.6.32.43/drivers/video/backlight/tosa_bl.c linux-2.6.32.43/drivers/video/backlight/tosa_bl.c
36179 --- linux-2.6.32.43/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
36180 +++ linux-2.6.32.43/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
36181 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
36182 return props->brightness;
36183 }
36184
36185 -static struct backlight_ops bl_ops = {
36186 +static const struct backlight_ops bl_ops = {
36187 .get_brightness = tosa_bl_get_brightness,
36188 .update_status = tosa_bl_update_status,
36189 };
36190 diff -urNp linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c
36191 --- linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
36192 +++ linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
36193 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
36194 return data->current_brightness;
36195 }
36196
36197 -static struct backlight_ops wm831x_backlight_ops = {
36198 +static const struct backlight_ops wm831x_backlight_ops = {
36199 .options = BL_CORE_SUSPENDRESUME,
36200 .update_status = wm831x_backlight_update_status,
36201 .get_brightness = wm831x_backlight_get_brightness,
36202 diff -urNp linux-2.6.32.43/drivers/video/bf54x-lq043fb.c linux-2.6.32.43/drivers/video/bf54x-lq043fb.c
36203 --- linux-2.6.32.43/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
36204 +++ linux-2.6.32.43/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
36205 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
36206 return 0;
36207 }
36208
36209 -static struct backlight_ops bfin_lq043fb_bl_ops = {
36210 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
36211 .get_brightness = bl_get_brightness,
36212 };
36213
36214 diff -urNp linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c
36215 --- linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
36216 +++ linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
36217 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
36218 return 0;
36219 }
36220
36221 -static struct backlight_ops bfin_lq043fb_bl_ops = {
36222 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
36223 .get_brightness = bl_get_brightness,
36224 };
36225
36226 diff -urNp linux-2.6.32.43/drivers/video/fbcmap.c linux-2.6.32.43/drivers/video/fbcmap.c
36227 --- linux-2.6.32.43/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
36228 +++ linux-2.6.32.43/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
36229 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36230 rc = -ENODEV;
36231 goto out;
36232 }
36233 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36234 - !info->fbops->fb_setcmap)) {
36235 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36236 rc = -EINVAL;
36237 goto out1;
36238 }
36239 diff -urNp linux-2.6.32.43/drivers/video/fbmem.c linux-2.6.32.43/drivers/video/fbmem.c
36240 --- linux-2.6.32.43/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
36241 +++ linux-2.6.32.43/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
36242 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
36243 image->dx += image->width + 8;
36244 }
36245 } else if (rotate == FB_ROTATE_UD) {
36246 - for (x = 0; x < num && image->dx >= 0; x++) {
36247 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36248 info->fbops->fb_imageblit(info, image);
36249 image->dx -= image->width + 8;
36250 }
36251 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
36252 image->dy += image->height + 8;
36253 }
36254 } else if (rotate == FB_ROTATE_CCW) {
36255 - for (x = 0; x < num && image->dy >= 0; x++) {
36256 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36257 info->fbops->fb_imageblit(info, image);
36258 image->dy -= image->height + 8;
36259 }
36260 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
36261 int flags = info->flags;
36262 int ret = 0;
36263
36264 + pax_track_stack();
36265 +
36266 if (var->activate & FB_ACTIVATE_INV_MODE) {
36267 struct fb_videomode mode1, mode2;
36268
36269 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
36270 void __user *argp = (void __user *)arg;
36271 long ret = 0;
36272
36273 + pax_track_stack();
36274 +
36275 switch (cmd) {
36276 case FBIOGET_VSCREENINFO:
36277 if (!lock_fb_info(info))
36278 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
36279 return -EFAULT;
36280 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36281 return -EINVAL;
36282 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36283 + if (con2fb.framebuffer >= FB_MAX)
36284 return -EINVAL;
36285 if (!registered_fb[con2fb.framebuffer])
36286 request_module("fb%d", con2fb.framebuffer);
36287 diff -urNp linux-2.6.32.43/drivers/video/i810/i810_accel.c linux-2.6.32.43/drivers/video/i810/i810_accel.c
36288 --- linux-2.6.32.43/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
36289 +++ linux-2.6.32.43/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
36290 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36291 }
36292 }
36293 printk("ringbuffer lockup!!!\n");
36294 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36295 i810_report_error(mmio);
36296 par->dev_flags |= LOCKUP;
36297 info->pixmap.scan_align = 1;
36298 diff -urNp linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c
36299 --- linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
36300 +++ linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
36301 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
36302 return bd->props.brightness;
36303 }
36304
36305 -static struct backlight_ops nvidia_bl_ops = {
36306 +static const struct backlight_ops nvidia_bl_ops = {
36307 .get_brightness = nvidia_bl_get_brightness,
36308 .update_status = nvidia_bl_update_status,
36309 };
36310 diff -urNp linux-2.6.32.43/drivers/video/riva/fbdev.c linux-2.6.32.43/drivers/video/riva/fbdev.c
36311 --- linux-2.6.32.43/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
36312 +++ linux-2.6.32.43/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
36313 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
36314 return bd->props.brightness;
36315 }
36316
36317 -static struct backlight_ops riva_bl_ops = {
36318 +static const struct backlight_ops riva_bl_ops = {
36319 .get_brightness = riva_bl_get_brightness,
36320 .update_status = riva_bl_update_status,
36321 };
36322 diff -urNp linux-2.6.32.43/drivers/video/uvesafb.c linux-2.6.32.43/drivers/video/uvesafb.c
36323 --- linux-2.6.32.43/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
36324 +++ linux-2.6.32.43/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
36325 @@ -18,6 +18,7 @@
36326 #include <linux/fb.h>
36327 #include <linux/io.h>
36328 #include <linux/mutex.h>
36329 +#include <linux/moduleloader.h>
36330 #include <video/edid.h>
36331 #include <video/uvesafb.h>
36332 #ifdef CONFIG_X86
36333 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
36334 NULL,
36335 };
36336
36337 - return call_usermodehelper(v86d_path, argv, envp, 1);
36338 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36339 }
36340
36341 /*
36342 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
36343 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36344 par->pmi_setpal = par->ypan = 0;
36345 } else {
36346 +
36347 +#ifdef CONFIG_PAX_KERNEXEC
36348 +#ifdef CONFIG_MODULES
36349 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36350 +#endif
36351 + if (!par->pmi_code) {
36352 + par->pmi_setpal = par->ypan = 0;
36353 + return 0;
36354 + }
36355 +#endif
36356 +
36357 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36358 + task->t.regs.edi);
36359 +
36360 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36361 + pax_open_kernel();
36362 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36363 + pax_close_kernel();
36364 +
36365 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36366 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36367 +#else
36368 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36369 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36370 +#endif
36371 +
36372 printk(KERN_INFO "uvesafb: protected mode interface info at "
36373 "%04x:%04x\n",
36374 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36375 @@ -1799,6 +1822,11 @@ out:
36376 if (par->vbe_modes)
36377 kfree(par->vbe_modes);
36378
36379 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36380 + if (par->pmi_code)
36381 + module_free_exec(NULL, par->pmi_code);
36382 +#endif
36383 +
36384 framebuffer_release(info);
36385 return err;
36386 }
36387 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
36388 kfree(par->vbe_state_orig);
36389 if (par->vbe_state_saved)
36390 kfree(par->vbe_state_saved);
36391 +
36392 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36393 + if (par->pmi_code)
36394 + module_free_exec(NULL, par->pmi_code);
36395 +#endif
36396 +
36397 }
36398
36399 framebuffer_release(info);
36400 diff -urNp linux-2.6.32.43/drivers/video/vesafb.c linux-2.6.32.43/drivers/video/vesafb.c
36401 --- linux-2.6.32.43/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
36402 +++ linux-2.6.32.43/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
36403 @@ -9,6 +9,7 @@
36404 */
36405
36406 #include <linux/module.h>
36407 +#include <linux/moduleloader.h>
36408 #include <linux/kernel.h>
36409 #include <linux/errno.h>
36410 #include <linux/string.h>
36411 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
36412 static int vram_total __initdata; /* Set total amount of memory */
36413 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
36414 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
36415 -static void (*pmi_start)(void) __read_mostly;
36416 -static void (*pmi_pal) (void) __read_mostly;
36417 +static void (*pmi_start)(void) __read_only;
36418 +static void (*pmi_pal) (void) __read_only;
36419 static int depth __read_mostly;
36420 static int vga_compat __read_mostly;
36421 /* --------------------------------------------------------------------- */
36422 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36423 unsigned int size_vmode;
36424 unsigned int size_remap;
36425 unsigned int size_total;
36426 + void *pmi_code = NULL;
36427
36428 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36429 return -ENODEV;
36430 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36431 size_remap = size_total;
36432 vesafb_fix.smem_len = size_remap;
36433
36434 -#ifndef __i386__
36435 - screen_info.vesapm_seg = 0;
36436 -#endif
36437 -
36438 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36439 printk(KERN_WARNING
36440 "vesafb: cannot reserve video memory at 0x%lx\n",
36441 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
36442 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36443 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36444
36445 +#ifdef __i386__
36446 +
36447 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36448 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
36449 + if (!pmi_code)
36450 +#elif !defined(CONFIG_PAX_KERNEXEC)
36451 + if (0)
36452 +#endif
36453 +
36454 +#endif
36455 + screen_info.vesapm_seg = 0;
36456 +
36457 if (screen_info.vesapm_seg) {
36458 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36459 - screen_info.vesapm_seg,screen_info.vesapm_off);
36460 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36461 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36462 }
36463
36464 if (screen_info.vesapm_seg < 0xc000)
36465 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
36466
36467 if (ypan || pmi_setpal) {
36468 unsigned short *pmi_base;
36469 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36470 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36471 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
36472 +
36473 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36474 +
36475 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36476 + pax_open_kernel();
36477 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36478 +#else
36479 + pmi_code = pmi_base;
36480 +#endif
36481 +
36482 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36483 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
36484 +
36485 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36486 + pmi_start = ktva_ktla(pmi_start);
36487 + pmi_pal = ktva_ktla(pmi_pal);
36488 + pax_close_kernel();
36489 +#endif
36490 +
36491 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36492 if (pmi_base[3]) {
36493 printk(KERN_INFO "vesafb: pmi: ports = ");
36494 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
36495 info->node, info->fix.id);
36496 return 0;
36497 err:
36498 +
36499 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36500 + module_free_exec(NULL, pmi_code);
36501 +#endif
36502 +
36503 if (info->screen_base)
36504 iounmap(info->screen_base);
36505 framebuffer_release(info);
36506 diff -urNp linux-2.6.32.43/drivers/xen/sys-hypervisor.c linux-2.6.32.43/drivers/xen/sys-hypervisor.c
36507 --- linux-2.6.32.43/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
36508 +++ linux-2.6.32.43/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
36509 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
36510 return 0;
36511 }
36512
36513 -static struct sysfs_ops hyp_sysfs_ops = {
36514 +static const struct sysfs_ops hyp_sysfs_ops = {
36515 .show = hyp_sysfs_show,
36516 .store = hyp_sysfs_store,
36517 };
36518 diff -urNp linux-2.6.32.43/fs/9p/vfs_inode.c linux-2.6.32.43/fs/9p/vfs_inode.c
36519 --- linux-2.6.32.43/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
36520 +++ linux-2.6.32.43/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
36521 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
36522 static void
36523 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36524 {
36525 - char *s = nd_get_link(nd);
36526 + const char *s = nd_get_link(nd);
36527
36528 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36529 IS_ERR(s) ? "<error>" : s);
36530 diff -urNp linux-2.6.32.43/fs/aio.c linux-2.6.32.43/fs/aio.c
36531 --- linux-2.6.32.43/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
36532 +++ linux-2.6.32.43/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
36533 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
36534 size += sizeof(struct io_event) * nr_events;
36535 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36536
36537 - if (nr_pages < 0)
36538 + if (nr_pages <= 0)
36539 return -EINVAL;
36540
36541 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36542 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
36543 struct aio_timeout to;
36544 int retry = 0;
36545
36546 + pax_track_stack();
36547 +
36548 /* needed to zero any padding within an entry (there shouldn't be
36549 * any, but C is fun!
36550 */
36551 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
36552 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
36553 {
36554 ssize_t ret;
36555 + struct iovec iovstack;
36556
36557 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
36558 kiocb->ki_nbytes, 1,
36559 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
36560 + &iovstack, &kiocb->ki_iovec);
36561 if (ret < 0)
36562 goto out;
36563
36564 + if (kiocb->ki_iovec == &iovstack) {
36565 + kiocb->ki_inline_vec = iovstack;
36566 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
36567 + }
36568 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36569 kiocb->ki_cur_seg = 0;
36570 /* ki_nbytes/left now reflect bytes instead of segs */
36571 diff -urNp linux-2.6.32.43/fs/attr.c linux-2.6.32.43/fs/attr.c
36572 --- linux-2.6.32.43/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
36573 +++ linux-2.6.32.43/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
36574 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
36575 unsigned long limit;
36576
36577 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
36578 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36579 if (limit != RLIM_INFINITY && offset > limit)
36580 goto out_sig;
36581 if (offset > inode->i_sb->s_maxbytes)
36582 diff -urNp linux-2.6.32.43/fs/autofs/root.c linux-2.6.32.43/fs/autofs/root.c
36583 --- linux-2.6.32.43/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
36584 +++ linux-2.6.32.43/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
36585 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
36586 set_bit(n,sbi->symlink_bitmap);
36587 sl = &sbi->symlink[n];
36588 sl->len = strlen(symname);
36589 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
36590 + slsize = sl->len+1;
36591 + sl->data = kmalloc(slsize, GFP_KERNEL);
36592 if (!sl->data) {
36593 clear_bit(n,sbi->symlink_bitmap);
36594 unlock_kernel();
36595 diff -urNp linux-2.6.32.43/fs/autofs4/symlink.c linux-2.6.32.43/fs/autofs4/symlink.c
36596 --- linux-2.6.32.43/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
36597 +++ linux-2.6.32.43/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
36598 @@ -15,7 +15,7 @@
36599 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
36600 {
36601 struct autofs_info *ino = autofs4_dentry_ino(dentry);
36602 - nd_set_link(nd, (char *)ino->u.symlink);
36603 + nd_set_link(nd, ino->u.symlink);
36604 return NULL;
36605 }
36606
36607 diff -urNp linux-2.6.32.43/fs/befs/linuxvfs.c linux-2.6.32.43/fs/befs/linuxvfs.c
36608 --- linux-2.6.32.43/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
36609 +++ linux-2.6.32.43/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
36610 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
36611 {
36612 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36613 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36614 - char *link = nd_get_link(nd);
36615 + const char *link = nd_get_link(nd);
36616 if (!IS_ERR(link))
36617 kfree(link);
36618 }
36619 diff -urNp linux-2.6.32.43/fs/binfmt_aout.c linux-2.6.32.43/fs/binfmt_aout.c
36620 --- linux-2.6.32.43/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
36621 +++ linux-2.6.32.43/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
36622 @@ -16,6 +16,7 @@
36623 #include <linux/string.h>
36624 #include <linux/fs.h>
36625 #include <linux/file.h>
36626 +#include <linux/security.h>
36627 #include <linux/stat.h>
36628 #include <linux/fcntl.h>
36629 #include <linux/ptrace.h>
36630 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
36631 #endif
36632 # define START_STACK(u) (u.start_stack)
36633
36634 + memset(&dump, 0, sizeof(dump));
36635 +
36636 fs = get_fs();
36637 set_fs(KERNEL_DS);
36638 has_dumped = 1;
36639 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
36640
36641 /* If the size of the dump file exceeds the rlimit, then see what would happen
36642 if we wrote the stack, but not the data area. */
36643 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36644 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
36645 dump.u_dsize = 0;
36646
36647 /* Make sure we have enough room to write the stack and data areas. */
36648 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36649 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
36650 dump.u_ssize = 0;
36651
36652 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
36653 dump_size = dump.u_ssize << PAGE_SHIFT;
36654 DUMP_WRITE(dump_start,dump_size);
36655 }
36656 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
36657 - set_fs(KERNEL_DS);
36658 - DUMP_WRITE(current,sizeof(*current));
36659 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
36660 end_coredump:
36661 set_fs(fs);
36662 return has_dumped;
36663 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
36664 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
36665 if (rlim >= RLIM_INFINITY)
36666 rlim = ~0;
36667 +
36668 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36669 if (ex.a_data + ex.a_bss > rlim)
36670 return -ENOMEM;
36671
36672 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
36673 install_exec_creds(bprm);
36674 current->flags &= ~PF_FORKNOEXEC;
36675
36676 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36677 + current->mm->pax_flags = 0UL;
36678 +#endif
36679 +
36680 +#ifdef CONFIG_PAX_PAGEEXEC
36681 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36682 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36683 +
36684 +#ifdef CONFIG_PAX_EMUTRAMP
36685 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36686 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36687 +#endif
36688 +
36689 +#ifdef CONFIG_PAX_MPROTECT
36690 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36691 + current->mm->pax_flags |= MF_PAX_MPROTECT;
36692 +#endif
36693 +
36694 + }
36695 +#endif
36696 +
36697 if (N_MAGIC(ex) == OMAGIC) {
36698 unsigned long text_addr, map_size;
36699 loff_t pos;
36700 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
36701
36702 down_write(&current->mm->mmap_sem);
36703 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36704 - PROT_READ | PROT_WRITE | PROT_EXEC,
36705 + PROT_READ | PROT_WRITE,
36706 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36707 fd_offset + ex.a_text);
36708 up_write(&current->mm->mmap_sem);
36709 diff -urNp linux-2.6.32.43/fs/binfmt_elf.c linux-2.6.32.43/fs/binfmt_elf.c
36710 --- linux-2.6.32.43/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
36711 +++ linux-2.6.32.43/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
36712 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
36713 #define elf_core_dump NULL
36714 #endif
36715
36716 +#ifdef CONFIG_PAX_MPROTECT
36717 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36718 +#endif
36719 +
36720 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36721 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36722 #else
36723 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
36724 .load_binary = load_elf_binary,
36725 .load_shlib = load_elf_library,
36726 .core_dump = elf_core_dump,
36727 +
36728 +#ifdef CONFIG_PAX_MPROTECT
36729 + .handle_mprotect= elf_handle_mprotect,
36730 +#endif
36731 +
36732 .min_coredump = ELF_EXEC_PAGESIZE,
36733 .hasvdso = 1
36734 };
36735 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36736
36737 static int set_brk(unsigned long start, unsigned long end)
36738 {
36739 + unsigned long e = end;
36740 +
36741 start = ELF_PAGEALIGN(start);
36742 end = ELF_PAGEALIGN(end);
36743 if (end > start) {
36744 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36745 if (BAD_ADDR(addr))
36746 return addr;
36747 }
36748 - current->mm->start_brk = current->mm->brk = end;
36749 + current->mm->start_brk = current->mm->brk = e;
36750 return 0;
36751 }
36752
36753 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36754 elf_addr_t __user *u_rand_bytes;
36755 const char *k_platform = ELF_PLATFORM;
36756 const char *k_base_platform = ELF_BASE_PLATFORM;
36757 - unsigned char k_rand_bytes[16];
36758 + u32 k_rand_bytes[4];
36759 int items;
36760 elf_addr_t *elf_info;
36761 int ei_index = 0;
36762 const struct cred *cred = current_cred();
36763 struct vm_area_struct *vma;
36764 + unsigned long saved_auxv[AT_VECTOR_SIZE];
36765 +
36766 + pax_track_stack();
36767
36768 /*
36769 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36770 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36771 * Generate 16 random bytes for userspace PRNG seeding.
36772 */
36773 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36774 - u_rand_bytes = (elf_addr_t __user *)
36775 - STACK_ALLOC(p, sizeof(k_rand_bytes));
36776 + srandom32(k_rand_bytes[0] ^ random32());
36777 + srandom32(k_rand_bytes[1] ^ random32());
36778 + srandom32(k_rand_bytes[2] ^ random32());
36779 + srandom32(k_rand_bytes[3] ^ random32());
36780 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
36781 + u_rand_bytes = (elf_addr_t __user *) p;
36782 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36783 return -EFAULT;
36784
36785 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36786 return -EFAULT;
36787 current->mm->env_end = p;
36788
36789 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36790 +
36791 /* Put the elf_info on the stack in the right place. */
36792 sp = (elf_addr_t __user *)envp + 1;
36793 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36794 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36795 return -EFAULT;
36796 return 0;
36797 }
36798 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36799 {
36800 struct elf_phdr *elf_phdata;
36801 struct elf_phdr *eppnt;
36802 - unsigned long load_addr = 0;
36803 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36804 int load_addr_set = 0;
36805 unsigned long last_bss = 0, elf_bss = 0;
36806 - unsigned long error = ~0UL;
36807 + unsigned long error = -EINVAL;
36808 unsigned long total_size;
36809 int retval, i, size;
36810
36811 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36812 goto out_close;
36813 }
36814
36815 +#ifdef CONFIG_PAX_SEGMEXEC
36816 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36817 + pax_task_size = SEGMEXEC_TASK_SIZE;
36818 +#endif
36819 +
36820 eppnt = elf_phdata;
36821 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36822 if (eppnt->p_type == PT_LOAD) {
36823 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36824 k = load_addr + eppnt->p_vaddr;
36825 if (BAD_ADDR(k) ||
36826 eppnt->p_filesz > eppnt->p_memsz ||
36827 - eppnt->p_memsz > TASK_SIZE ||
36828 - TASK_SIZE - eppnt->p_memsz < k) {
36829 + eppnt->p_memsz > pax_task_size ||
36830 + pax_task_size - eppnt->p_memsz < k) {
36831 error = -ENOMEM;
36832 goto out_close;
36833 }
36834 @@ -532,6 +557,194 @@ out:
36835 return error;
36836 }
36837
36838 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36839 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36840 +{
36841 + unsigned long pax_flags = 0UL;
36842 +
36843 +#ifdef CONFIG_PAX_PAGEEXEC
36844 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36845 + pax_flags |= MF_PAX_PAGEEXEC;
36846 +#endif
36847 +
36848 +#ifdef CONFIG_PAX_SEGMEXEC
36849 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36850 + pax_flags |= MF_PAX_SEGMEXEC;
36851 +#endif
36852 +
36853 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36854 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36855 + if (nx_enabled)
36856 + pax_flags &= ~MF_PAX_SEGMEXEC;
36857 + else
36858 + pax_flags &= ~MF_PAX_PAGEEXEC;
36859 + }
36860 +#endif
36861 +
36862 +#ifdef CONFIG_PAX_EMUTRAMP
36863 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36864 + pax_flags |= MF_PAX_EMUTRAMP;
36865 +#endif
36866 +
36867 +#ifdef CONFIG_PAX_MPROTECT
36868 + if (elf_phdata->p_flags & PF_MPROTECT)
36869 + pax_flags |= MF_PAX_MPROTECT;
36870 +#endif
36871 +
36872 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36873 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36874 + pax_flags |= MF_PAX_RANDMMAP;
36875 +#endif
36876 +
36877 + return pax_flags;
36878 +}
36879 +#endif
36880 +
36881 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36882 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36883 +{
36884 + unsigned long pax_flags = 0UL;
36885 +
36886 +#ifdef CONFIG_PAX_PAGEEXEC
36887 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36888 + pax_flags |= MF_PAX_PAGEEXEC;
36889 +#endif
36890 +
36891 +#ifdef CONFIG_PAX_SEGMEXEC
36892 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36893 + pax_flags |= MF_PAX_SEGMEXEC;
36894 +#endif
36895 +
36896 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36897 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36898 + if (nx_enabled)
36899 + pax_flags &= ~MF_PAX_SEGMEXEC;
36900 + else
36901 + pax_flags &= ~MF_PAX_PAGEEXEC;
36902 + }
36903 +#endif
36904 +
36905 +#ifdef CONFIG_PAX_EMUTRAMP
36906 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36907 + pax_flags |= MF_PAX_EMUTRAMP;
36908 +#endif
36909 +
36910 +#ifdef CONFIG_PAX_MPROTECT
36911 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36912 + pax_flags |= MF_PAX_MPROTECT;
36913 +#endif
36914 +
36915 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36916 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36917 + pax_flags |= MF_PAX_RANDMMAP;
36918 +#endif
36919 +
36920 + return pax_flags;
36921 +}
36922 +#endif
36923 +
36924 +#ifdef CONFIG_PAX_EI_PAX
36925 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36926 +{
36927 + unsigned long pax_flags = 0UL;
36928 +
36929 +#ifdef CONFIG_PAX_PAGEEXEC
36930 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36931 + pax_flags |= MF_PAX_PAGEEXEC;
36932 +#endif
36933 +
36934 +#ifdef CONFIG_PAX_SEGMEXEC
36935 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36936 + pax_flags |= MF_PAX_SEGMEXEC;
36937 +#endif
36938 +
36939 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36940 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36941 + if (nx_enabled)
36942 + pax_flags &= ~MF_PAX_SEGMEXEC;
36943 + else
36944 + pax_flags &= ~MF_PAX_PAGEEXEC;
36945 + }
36946 +#endif
36947 +
36948 +#ifdef CONFIG_PAX_EMUTRAMP
36949 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36950 + pax_flags |= MF_PAX_EMUTRAMP;
36951 +#endif
36952 +
36953 +#ifdef CONFIG_PAX_MPROTECT
36954 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36955 + pax_flags |= MF_PAX_MPROTECT;
36956 +#endif
36957 +
36958 +#ifdef CONFIG_PAX_ASLR
36959 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36960 + pax_flags |= MF_PAX_RANDMMAP;
36961 +#endif
36962 +
36963 + return pax_flags;
36964 +}
36965 +#endif
36966 +
36967 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36968 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36969 +{
36970 + unsigned long pax_flags = 0UL;
36971 +
36972 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36973 + unsigned long i;
36974 + int found_flags = 0;
36975 +#endif
36976 +
36977 +#ifdef CONFIG_PAX_EI_PAX
36978 + pax_flags = pax_parse_ei_pax(elf_ex);
36979 +#endif
36980 +
36981 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36982 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36983 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36984 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36985 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36986 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36987 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36988 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36989 + return -EINVAL;
36990 +
36991 +#ifdef CONFIG_PAX_SOFTMODE
36992 + if (pax_softmode)
36993 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36994 + else
36995 +#endif
36996 +
36997 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36998 + found_flags = 1;
36999 + break;
37000 + }
37001 +#endif
37002 +
37003 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37004 + if (found_flags == 0) {
37005 + struct elf_phdr phdr;
37006 + memset(&phdr, 0, sizeof(phdr));
37007 + phdr.p_flags = PF_NOEMUTRAMP;
37008 +#ifdef CONFIG_PAX_SOFTMODE
37009 + if (pax_softmode)
37010 + pax_flags = pax_parse_softmode(&phdr);
37011 + else
37012 +#endif
37013 + pax_flags = pax_parse_hardmode(&phdr);
37014 + }
37015 +#endif
37016 +
37017 +
37018 + if (0 > pax_check_flags(&pax_flags))
37019 + return -EINVAL;
37020 +
37021 + current->mm->pax_flags = pax_flags;
37022 + return 0;
37023 +}
37024 +#endif
37025 +
37026 /*
37027 * These are the functions used to load ELF style executables and shared
37028 * libraries. There is no binary dependent code anywhere else.
37029 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
37030 {
37031 unsigned int random_variable = 0;
37032
37033 +#ifdef CONFIG_PAX_RANDUSTACK
37034 + if (randomize_va_space)
37035 + return stack_top - current->mm->delta_stack;
37036 +#endif
37037 +
37038 if ((current->flags & PF_RANDOMIZE) &&
37039 !(current->personality & ADDR_NO_RANDOMIZE)) {
37040 random_variable = get_random_int() & STACK_RND_MASK;
37041 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
37042 unsigned long load_addr = 0, load_bias = 0;
37043 int load_addr_set = 0;
37044 char * elf_interpreter = NULL;
37045 - unsigned long error;
37046 + unsigned long error = 0;
37047 struct elf_phdr *elf_ppnt, *elf_phdata;
37048 unsigned long elf_bss, elf_brk;
37049 int retval, i;
37050 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
37051 unsigned long start_code, end_code, start_data, end_data;
37052 unsigned long reloc_func_desc = 0;
37053 int executable_stack = EXSTACK_DEFAULT;
37054 - unsigned long def_flags = 0;
37055 struct {
37056 struct elfhdr elf_ex;
37057 struct elfhdr interp_elf_ex;
37058 } *loc;
37059 + unsigned long pax_task_size = TASK_SIZE;
37060
37061 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37062 if (!loc) {
37063 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
37064
37065 /* OK, This is the point of no return */
37066 current->flags &= ~PF_FORKNOEXEC;
37067 - current->mm->def_flags = def_flags;
37068 +
37069 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37070 + current->mm->pax_flags = 0UL;
37071 +#endif
37072 +
37073 +#ifdef CONFIG_PAX_DLRESOLVE
37074 + current->mm->call_dl_resolve = 0UL;
37075 +#endif
37076 +
37077 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37078 + current->mm->call_syscall = 0UL;
37079 +#endif
37080 +
37081 +#ifdef CONFIG_PAX_ASLR
37082 + current->mm->delta_mmap = 0UL;
37083 + current->mm->delta_stack = 0UL;
37084 +#endif
37085 +
37086 + current->mm->def_flags = 0;
37087 +
37088 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37089 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37090 + send_sig(SIGKILL, current, 0);
37091 + goto out_free_dentry;
37092 + }
37093 +#endif
37094 +
37095 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37096 + pax_set_initial_flags(bprm);
37097 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37098 + if (pax_set_initial_flags_func)
37099 + (pax_set_initial_flags_func)(bprm);
37100 +#endif
37101 +
37102 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37103 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
37104 + current->mm->context.user_cs_limit = PAGE_SIZE;
37105 + current->mm->def_flags |= VM_PAGEEXEC;
37106 + }
37107 +#endif
37108 +
37109 +#ifdef CONFIG_PAX_SEGMEXEC
37110 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37111 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37112 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37113 + pax_task_size = SEGMEXEC_TASK_SIZE;
37114 + }
37115 +#endif
37116 +
37117 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37118 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37119 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37120 + put_cpu();
37121 + }
37122 +#endif
37123
37124 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37125 may depend on the personality. */
37126 SET_PERSONALITY(loc->elf_ex);
37127 +
37128 +#ifdef CONFIG_PAX_ASLR
37129 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37130 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37131 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37132 + }
37133 +#endif
37134 +
37135 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37136 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37137 + executable_stack = EXSTACK_DISABLE_X;
37138 + current->personality &= ~READ_IMPLIES_EXEC;
37139 + } else
37140 +#endif
37141 +
37142 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37143 current->personality |= READ_IMPLIES_EXEC;
37144
37145 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
37146 #else
37147 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37148 #endif
37149 +
37150 +#ifdef CONFIG_PAX_RANDMMAP
37151 + /* PaX: randomize base address at the default exe base if requested */
37152 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37153 +#ifdef CONFIG_SPARC64
37154 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37155 +#else
37156 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37157 +#endif
37158 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37159 + elf_flags |= MAP_FIXED;
37160 + }
37161 +#endif
37162 +
37163 }
37164
37165 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37166 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
37167 * allowed task size. Note that p_filesz must always be
37168 * <= p_memsz so it is only necessary to check p_memsz.
37169 */
37170 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37171 - elf_ppnt->p_memsz > TASK_SIZE ||
37172 - TASK_SIZE - elf_ppnt->p_memsz < k) {
37173 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37174 + elf_ppnt->p_memsz > pax_task_size ||
37175 + pax_task_size - elf_ppnt->p_memsz < k) {
37176 /* set_brk can never work. Avoid overflows. */
37177 send_sig(SIGKILL, current, 0);
37178 retval = -EINVAL;
37179 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
37180 start_data += load_bias;
37181 end_data += load_bias;
37182
37183 +#ifdef CONFIG_PAX_RANDMMAP
37184 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37185 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37186 +#endif
37187 +
37188 /* Calling set_brk effectively mmaps the pages that we need
37189 * for the bss and break sections. We must do this before
37190 * mapping in the interpreter, to make sure it doesn't wind
37191 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
37192 goto out_free_dentry;
37193 }
37194 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37195 - send_sig(SIGSEGV, current, 0);
37196 - retval = -EFAULT; /* Nobody gets to see this, but.. */
37197 - goto out_free_dentry;
37198 + /*
37199 + * This bss-zeroing can fail if the ELF
37200 + * file specifies odd protections. So
37201 + * we don't check the return value
37202 + */
37203 }
37204
37205 if (elf_interpreter) {
37206 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
37207 unsigned long n = off;
37208 if (n > PAGE_SIZE)
37209 n = PAGE_SIZE;
37210 - if (!dump_write(file, buf, n))
37211 + if (!dump_write(file, buf, n)) {
37212 + free_page((unsigned long)buf);
37213 return 0;
37214 + }
37215 off -= n;
37216 }
37217 free_page((unsigned long)buf);
37218 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
37219 * Decide what to dump of a segment, part, all or none.
37220 */
37221 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37222 - unsigned long mm_flags)
37223 + unsigned long mm_flags, long signr)
37224 {
37225 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37226
37227 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
37228 if (vma->vm_file == NULL)
37229 return 0;
37230
37231 - if (FILTER(MAPPED_PRIVATE))
37232 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37233 goto whole;
37234
37235 /*
37236 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
37237 #undef DUMP_WRITE
37238
37239 #define DUMP_WRITE(addr, nr) \
37240 + do { \
37241 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
37242 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
37243 - goto end_coredump;
37244 + goto end_coredump; \
37245 + } while (0);
37246
37247 static void fill_elf_header(struct elfhdr *elf, int segs,
37248 u16 machine, u32 flags, u8 osabi)
37249 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
37250 {
37251 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37252 int i = 0;
37253 - do
37254 + do {
37255 i += 2;
37256 - while (auxv[i - 2] != AT_NULL);
37257 + } while (auxv[i - 2] != AT_NULL);
37258 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37259 }
37260
37261 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
37262 phdr.p_offset = offset;
37263 phdr.p_vaddr = vma->vm_start;
37264 phdr.p_paddr = 0;
37265 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
37266 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
37267 phdr.p_memsz = vma->vm_end - vma->vm_start;
37268 offset += phdr.p_filesz;
37269 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37270 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
37271 unsigned long addr;
37272 unsigned long end;
37273
37274 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
37275 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
37276
37277 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37278 struct page *page;
37279 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
37280 page = get_dump_page(addr);
37281 if (page) {
37282 void *kaddr = kmap(page);
37283 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37284 stop = ((size += PAGE_SIZE) > limit) ||
37285 !dump_write(file, kaddr, PAGE_SIZE);
37286 kunmap(page);
37287 @@ -2042,6 +2356,97 @@ out:
37288
37289 #endif /* USE_ELF_CORE_DUMP */
37290
37291 +#ifdef CONFIG_PAX_MPROTECT
37292 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
37293 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37294 + * we'll remove VM_MAYWRITE for good on RELRO segments.
37295 + *
37296 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37297 + * basis because we want to allow the common case and not the special ones.
37298 + */
37299 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37300 +{
37301 + struct elfhdr elf_h;
37302 + struct elf_phdr elf_p;
37303 + unsigned long i;
37304 + unsigned long oldflags;
37305 + bool is_textrel_rw, is_textrel_rx, is_relro;
37306 +
37307 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37308 + return;
37309 +
37310 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37311 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37312 +
37313 +#ifdef CONFIG_PAX_ELFRELOCS
37314 + /* possible TEXTREL */
37315 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37316 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37317 +#else
37318 + is_textrel_rw = false;
37319 + is_textrel_rx = false;
37320 +#endif
37321 +
37322 + /* possible RELRO */
37323 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37324 +
37325 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37326 + return;
37327 +
37328 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37329 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37330 +
37331 +#ifdef CONFIG_PAX_ETEXECRELOCS
37332 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37333 +#else
37334 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37335 +#endif
37336 +
37337 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37338 + !elf_check_arch(&elf_h) ||
37339 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37340 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37341 + return;
37342 +
37343 + for (i = 0UL; i < elf_h.e_phnum; i++) {
37344 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37345 + return;
37346 + switch (elf_p.p_type) {
37347 + case PT_DYNAMIC:
37348 + if (!is_textrel_rw && !is_textrel_rx)
37349 + continue;
37350 + i = 0UL;
37351 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37352 + elf_dyn dyn;
37353 +
37354 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37355 + return;
37356 + if (dyn.d_tag == DT_NULL)
37357 + return;
37358 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37359 + gr_log_textrel(vma);
37360 + if (is_textrel_rw)
37361 + vma->vm_flags |= VM_MAYWRITE;
37362 + else
37363 +					/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
37364 + vma->vm_flags &= ~VM_MAYWRITE;
37365 + return;
37366 + }
37367 + i++;
37368 + }
37369 + return;
37370 +
37371 + case PT_GNU_RELRO:
37372 + if (!is_relro)
37373 + continue;
37374 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37375 + vma->vm_flags &= ~VM_MAYWRITE;
37376 + return;
37377 + }
37378 + }
37379 +}
37380 +#endif
37381 +
37382 static int __init init_elf_binfmt(void)
37383 {
37384 return register_binfmt(&elf_format);
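The binfmt_elf.c changes above read per-executable PaX flags either from reserved e_ident bytes (EI_PAX) or from a PT_PAX_FLAGS program header, with soft mode treating the marks as opt-in and hard mode as opt-out. For illustration only, here is a minimal userspace sketch that dumps the PT_PAX_FLAGS word of a native 64-bit ELF file; the PT_PAX_FLAGS value (0x65041580) and the MPROTECT bit positions are assumptions taken from PaX's ELF header additions, not from standard <elf.h>, and no endianness or ELF-class handling is attempted.

/* pax_flags_dump.c - print the PT_PAX_FLAGS word of a native 64-bit ELF file.
 * Build: cc -o pax_flags_dump pax_flags_dump.c
 * PT_PAX_FLAGS and the PF_* bits below are assumed PaX-specific values;
 * they are not defined by the standard ELF headers. */
#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS 0x65041580U   /* assumed PaX-specific segment type */
#endif
#define PF_MPROTECT   (1U << 8)    /* assumed PaX bit: enable MPROTECT  */
#define PF_NOMPROTECT (1U << 9)    /* assumed PaX bit: disable MPROTECT */

int main(int argc, char **argv)
{
	FILE *f;
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f || fread(&eh, sizeof(eh), 1, f) != 1) {
		perror(argv[1]);
		return 1;
	}
	if (fseek(f, (long)eh.e_phoff, SEEK_SET) != 0)
		return 1;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			return 1;
		if (ph.p_type != PT_PAX_FLAGS)
			continue;
		printf("PT_PAX_FLAGS p_flags = %#x (MPROTECT %s)\n",
		       (unsigned)ph.p_flags,
		       (ph.p_flags & PF_NOMPROTECT) ? "off" :
		       (ph.p_flags & PF_MPROTECT)   ? "on"  : "default");
		return 0;
	}
	printf("no PT_PAX_FLAGS program header\n");
	return 0;
}

This only reports what the marking tools wrote; the kernel-side precedence between EI_PAX, PT_PAX_FLAGS and softmode is what pax_parse_elf_flags() above implements.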
37385 diff -urNp linux-2.6.32.43/fs/binfmt_flat.c linux-2.6.32.43/fs/binfmt_flat.c
37386 --- linux-2.6.32.43/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
37387 +++ linux-2.6.32.43/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
37388 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
37389 realdatastart = (unsigned long) -ENOMEM;
37390 printk("Unable to allocate RAM for process data, errno %d\n",
37391 (int)-realdatastart);
37392 + down_write(&current->mm->mmap_sem);
37393 do_munmap(current->mm, textpos, text_len);
37394 + up_write(&current->mm->mmap_sem);
37395 ret = realdatastart;
37396 goto err;
37397 }
37398 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
37399 }
37400 if (IS_ERR_VALUE(result)) {
37401 printk("Unable to read data+bss, errno %d\n", (int)-result);
37402 + down_write(&current->mm->mmap_sem);
37403 do_munmap(current->mm, textpos, text_len);
37404 do_munmap(current->mm, realdatastart, data_len + extra);
37405 + up_write(&current->mm->mmap_sem);
37406 ret = result;
37407 goto err;
37408 }
37409 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
37410 }
37411 if (IS_ERR_VALUE(result)) {
37412 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37413 + down_write(&current->mm->mmap_sem);
37414 do_munmap(current->mm, textpos, text_len + data_len + extra +
37415 MAX_SHARED_LIBS * sizeof(unsigned long));
37416 + up_write(&current->mm->mmap_sem);
37417 ret = result;
37418 goto err;
37419 }
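The binfmt_flat.c hunks wrap the error-path do_munmap() calls in down_write()/up_write() on current->mm->mmap_sem, because do_munmap() expects its caller to already hold the mmap semaphore for writing. A rough userspace analogue of that discipline is sketched below; the pthread rwlock merely stands in for mmap_sem and unmap_region() is an invented placeholder, so this is an illustration of the locking shape, not of the kernel API.

/* Build: cc -pthread -o unmap_lock unmap_lock.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

static void unmap_region(unsigned long start, unsigned long len)
{
	/* placeholder: caller must hold map_lock for writing */
	printf("unmapping %#lx..%#lx\n", start, start + len);
}

static void error_path_cleanup(unsigned long textpos, unsigned long text_len)
{
	pthread_rwlock_wrlock(&map_lock);   /* like down_write(&mm->mmap_sem) */
	unmap_region(textpos, text_len);
	pthread_rwlock_unlock(&map_lock);   /* like up_write(&mm->mmap_sem)   */
}

int main(void)
{
	error_path_cleanup(0x10000, 0x2000);
	return 0;
}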
37420 diff -urNp linux-2.6.32.43/fs/bio.c linux-2.6.32.43/fs/bio.c
37421 --- linux-2.6.32.43/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
37422 +++ linux-2.6.32.43/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
37423 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
37424
37425 i = 0;
37426 while (i < bio_slab_nr) {
37427 - struct bio_slab *bslab = &bio_slabs[i];
37428 + bslab = &bio_slabs[i];
37429
37430 if (!bslab->slab && entry == -1)
37431 entry = i;
37432 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
37433 const int read = bio_data_dir(bio) == READ;
37434 struct bio_map_data *bmd = bio->bi_private;
37435 int i;
37436 - char *p = bmd->sgvecs[0].iov_base;
37437 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
37438
37439 __bio_for_each_segment(bvec, bio, i, 0) {
37440 char *addr = page_address(bvec->bv_page);
37441 diff -urNp linux-2.6.32.43/fs/block_dev.c linux-2.6.32.43/fs/block_dev.c
37442 --- linux-2.6.32.43/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
37443 +++ linux-2.6.32.43/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
37444 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
37445 else if (bdev->bd_contains == bdev)
37446 res = 0; /* is a whole device which isn't held */
37447
37448 - else if (bdev->bd_contains->bd_holder == bd_claim)
37449 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
37450 res = 0; /* is a partition of a device that is being partitioned */
37451 else if (bdev->bd_contains->bd_holder != NULL)
37452 res = -EBUSY; /* is a partition of a held device */
37453 diff -urNp linux-2.6.32.43/fs/btrfs/ctree.c linux-2.6.32.43/fs/btrfs/ctree.c
37454 --- linux-2.6.32.43/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
37455 +++ linux-2.6.32.43/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
37456 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
37457 free_extent_buffer(buf);
37458 add_root_to_dirty_list(root);
37459 } else {
37460 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37461 - parent_start = parent->start;
37462 - else
37463 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37464 + if (parent)
37465 + parent_start = parent->start;
37466 + else
37467 + parent_start = 0;
37468 + } else
37469 parent_start = 0;
37470
37471 WARN_ON(trans->transid != btrfs_header_generation(parent));
37472 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
37473
37474 ret = 0;
37475 if (slot == 0) {
37476 - struct btrfs_disk_key disk_key;
37477 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
37478 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
37479 }
37480 diff -urNp linux-2.6.32.43/fs/btrfs/disk-io.c linux-2.6.32.43/fs/btrfs/disk-io.c
37481 --- linux-2.6.32.43/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
37482 +++ linux-2.6.32.43/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
37483 @@ -39,7 +39,7 @@
37484 #include "tree-log.h"
37485 #include "free-space-cache.h"
37486
37487 -static struct extent_io_ops btree_extent_io_ops;
37488 +static const struct extent_io_ops btree_extent_io_ops;
37489 static void end_workqueue_fn(struct btrfs_work *work);
37490 static void free_fs_root(struct btrfs_root *root);
37491
37492 @@ -2607,7 +2607,7 @@ out:
37493 return 0;
37494 }
37495
37496 -static struct extent_io_ops btree_extent_io_ops = {
37497 +static const struct extent_io_ops btree_extent_io_ops = {
37498 .write_cache_pages_lock_hook = btree_lock_page_hook,
37499 .readpage_end_io_hook = btree_readpage_end_io_hook,
37500 .submit_bio_hook = btree_submit_bio_hook,
37501 diff -urNp linux-2.6.32.43/fs/btrfs/extent_io.h linux-2.6.32.43/fs/btrfs/extent_io.h
37502 --- linux-2.6.32.43/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
37503 +++ linux-2.6.32.43/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
37504 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
37505 struct bio *bio, int mirror_num,
37506 unsigned long bio_flags);
37507 struct extent_io_ops {
37508 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
37509 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
37510 u64 start, u64 end, int *page_started,
37511 unsigned long *nr_written);
37512 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
37513 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
37514 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
37515 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
37516 extent_submit_bio_hook_t *submit_bio_hook;
37517 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
37518 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
37519 size_t size, struct bio *bio,
37520 unsigned long bio_flags);
37521 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
37522 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
37523 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
37524 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
37525 u64 start, u64 end,
37526 struct extent_state *state);
37527 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
37528 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
37529 u64 start, u64 end,
37530 struct extent_state *state);
37531 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37532 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37533 struct extent_state *state);
37534 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37535 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37536 struct extent_state *state, int uptodate);
37537 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
37538 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
37539 unsigned long old, unsigned long bits);
37540 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
37541 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
37542 unsigned long bits);
37543 - int (*merge_extent_hook)(struct inode *inode,
37544 + int (* const merge_extent_hook)(struct inode *inode,
37545 struct extent_state *new,
37546 struct extent_state *other);
37547 - int (*split_extent_hook)(struct inode *inode,
37548 + int (* const split_extent_hook)(struct inode *inode,
37549 struct extent_state *orig, u64 split);
37550 - int (*write_cache_pages_lock_hook)(struct page *page);
37551 + int (* const write_cache_pages_lock_hook)(struct page *page);
37552 };
37553
37554 struct extent_io_tree {
37555 @@ -88,7 +88,7 @@ struct extent_io_tree {
37556 u64 dirty_bytes;
37557 spinlock_t lock;
37558 spinlock_t buffer_lock;
37559 - struct extent_io_ops *ops;
37560 + const struct extent_io_ops *ops;
37561 };
37562
37563 struct extent_state {
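The extent_io.h change const-qualifies every function pointer in struct extent_io_ops, and the matching disk-io.c/inode.c hunks declare the instances const, so the whole ops table can be placed in read-only data and cannot be retargeted at runtime. A minimal standalone illustration of the pattern follows; the struct and function names are invented for the example.

#include <stdio.h>

struct ops {
	int (* const read)(int);
	int (* const write)(int);
};

static int my_read(int x)  { return x + 1; }
static int my_write(int x) { return x - 1; }

/* The instance itself is const too, so it can live in .rodata. */
static const struct ops my_ops = {
	.read  = my_read,
	.write = my_write,
};

int main(void)
{
	printf("%d %d\n", my_ops.read(1), my_ops.write(1));
	/* my_ops.read = my_write;  <-- would not compile: the pointer is const */
	return 0;
}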
37564 diff -urNp linux-2.6.32.43/fs/btrfs/extent-tree.c linux-2.6.32.43/fs/btrfs/extent-tree.c
37565 --- linux-2.6.32.43/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
37566 +++ linux-2.6.32.43/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
37567 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
37568 u64 group_start = group->key.objectid;
37569 new_extents = kmalloc(sizeof(*new_extents),
37570 GFP_NOFS);
37571 + if (!new_extents) {
37572 + ret = -ENOMEM;
37573 + goto out;
37574 + }
37575 nr_extents = 1;
37576 ret = get_new_locations(reloc_inode,
37577 extent_key,
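The extent-tree.c hunk adds the missing NULL check after kmalloc(GFP_NOFS) and fails with -ENOMEM instead of dereferencing a failed allocation; the ctree.c and relocation.c hunks above guard similar NULL-pointer paths. The same defensive shape in plain C, as a sketch with invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct extent { unsigned long start, len; };

static int relocate_one(struct extent **out)
{
	struct extent *new_extents = malloc(sizeof(*new_extents));

	if (!new_extents)          /* the check the patch adds after kmalloc() */
		return -ENOMEM;
	memset(new_extents, 0, sizeof(*new_extents));
	*out = new_extents;
	return 0;
}

int main(void)
{
	struct extent *e;
	int ret = relocate_one(&e);

	if (ret)
		fprintf(stderr, "relocate_one: %s\n", strerror(-ret));
	else
		free(e);
	return 0;
}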
37578 diff -urNp linux-2.6.32.43/fs/btrfs/free-space-cache.c linux-2.6.32.43/fs/btrfs/free-space-cache.c
37579 --- linux-2.6.32.43/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
37580 +++ linux-2.6.32.43/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
37581 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
37582
37583 while(1) {
37584 if (entry->bytes < bytes || entry->offset < min_start) {
37585 - struct rb_node *node;
37586 -
37587 node = rb_next(&entry->offset_index);
37588 if (!node)
37589 break;
37590 @@ -1226,7 +1224,7 @@ again:
37591 */
37592 while (entry->bitmap || found_bitmap ||
37593 (!entry->bitmap && entry->bytes < min_bytes)) {
37594 - struct rb_node *node = rb_next(&entry->offset_index);
37595 + node = rb_next(&entry->offset_index);
37596
37597 if (entry->bitmap && entry->bytes > bytes + empty_size) {
37598 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
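The bio.c and free-space-cache.c hunks above drop inner re-declarations (`struct bio_slab *bslab`, `struct rb_node *node`) so the loop assignments target a variable declared once in the enclosing scope rather than a short-lived shadowing copy. As a sketch of the classic pitfall such shadowed declarations invite (this is the general pattern, not the exact btrfs/bio code path):

#include <stdio.h>

int main(void)
{
	int best = -1;
	int i;

	for (i = 0; i < 5; i++) {
		/* BUG: this "int best" shadows the outer variable, so the value
		 * computed in the loop never escapes it; the fix is plain `best = i;`. */
		int best = i;
		(void)best;
	}
	printf("best = %d\n", best);   /* still -1 because of the shadow */
	return 0;
}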
37599 diff -urNp linux-2.6.32.43/fs/btrfs/inode.c linux-2.6.32.43/fs/btrfs/inode.c
37600 --- linux-2.6.32.43/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37601 +++ linux-2.6.32.43/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
37602 @@ -63,7 +63,7 @@ static const struct inode_operations btr
37603 static const struct address_space_operations btrfs_aops;
37604 static const struct address_space_operations btrfs_symlink_aops;
37605 static const struct file_operations btrfs_dir_file_operations;
37606 -static struct extent_io_ops btrfs_extent_io_ops;
37607 +static const struct extent_io_ops btrfs_extent_io_ops;
37608
37609 static struct kmem_cache *btrfs_inode_cachep;
37610 struct kmem_cache *btrfs_trans_handle_cachep;
37611 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
37612 1, 0, NULL, GFP_NOFS);
37613 while (start < end) {
37614 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
37615 + BUG_ON(!async_cow);
37616 async_cow->inode = inode;
37617 async_cow->root = root;
37618 async_cow->locked_page = locked_page;
37619 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
37620 inline_size = btrfs_file_extent_inline_item_len(leaf,
37621 btrfs_item_nr(leaf, path->slots[0]));
37622 tmp = kmalloc(inline_size, GFP_NOFS);
37623 + if (!tmp)
37624 + return -ENOMEM;
37625 ptr = btrfs_file_extent_inline_start(item);
37626
37627 read_extent_buffer(leaf, tmp, ptr, inline_size);
37628 @@ -5410,7 +5413,7 @@ fail:
37629 return -ENOMEM;
37630 }
37631
37632 -static int btrfs_getattr(struct vfsmount *mnt,
37633 +int btrfs_getattr(struct vfsmount *mnt,
37634 struct dentry *dentry, struct kstat *stat)
37635 {
37636 struct inode *inode = dentry->d_inode;
37637 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
37638 return 0;
37639 }
37640
37641 +EXPORT_SYMBOL(btrfs_getattr);
37642 +
37643 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
37644 +{
37645 + return BTRFS_I(inode)->root->anon_super.s_dev;
37646 +}
37647 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37648 +
37649 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
37650 struct inode *new_dir, struct dentry *new_dentry)
37651 {
37652 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
37653 .fsync = btrfs_sync_file,
37654 };
37655
37656 -static struct extent_io_ops btrfs_extent_io_ops = {
37657 +static const struct extent_io_ops btrfs_extent_io_ops = {
37658 .fill_delalloc = run_delalloc_range,
37659 .submit_bio_hook = btrfs_submit_bio_hook,
37660 .merge_bio_hook = btrfs_merge_bio_hook,
37661 diff -urNp linux-2.6.32.43/fs/btrfs/relocation.c linux-2.6.32.43/fs/btrfs/relocation.c
37662 --- linux-2.6.32.43/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
37663 +++ linux-2.6.32.43/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
37664 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
37665 }
37666 spin_unlock(&rc->reloc_root_tree.lock);
37667
37668 - BUG_ON((struct btrfs_root *)node->data != root);
37669 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
37670
37671 if (!del) {
37672 spin_lock(&rc->reloc_root_tree.lock);
37673 diff -urNp linux-2.6.32.43/fs/btrfs/sysfs.c linux-2.6.32.43/fs/btrfs/sysfs.c
37674 --- linux-2.6.32.43/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37675 +++ linux-2.6.32.43/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37676 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
37677 complete(&root->kobj_unregister);
37678 }
37679
37680 -static struct sysfs_ops btrfs_super_attr_ops = {
37681 +static const struct sysfs_ops btrfs_super_attr_ops = {
37682 .show = btrfs_super_attr_show,
37683 .store = btrfs_super_attr_store,
37684 };
37685
37686 -static struct sysfs_ops btrfs_root_attr_ops = {
37687 +static const struct sysfs_ops btrfs_root_attr_ops = {
37688 .show = btrfs_root_attr_show,
37689 .store = btrfs_root_attr_store,
37690 };
37691 diff -urNp linux-2.6.32.43/fs/buffer.c linux-2.6.32.43/fs/buffer.c
37692 --- linux-2.6.32.43/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
37693 +++ linux-2.6.32.43/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
37694 @@ -25,6 +25,7 @@
37695 #include <linux/percpu.h>
37696 #include <linux/slab.h>
37697 #include <linux/capability.h>
37698 +#include <linux/security.h>
37699 #include <linux/blkdev.h>
37700 #include <linux/file.h>
37701 #include <linux/quotaops.h>
37702 diff -urNp linux-2.6.32.43/fs/cachefiles/bind.c linux-2.6.32.43/fs/cachefiles/bind.c
37703 --- linux-2.6.32.43/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
37704 +++ linux-2.6.32.43/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
37705 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37706 args);
37707
37708 /* start by checking things over */
37709 - ASSERT(cache->fstop_percent >= 0 &&
37710 - cache->fstop_percent < cache->fcull_percent &&
37711 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
37712 cache->fcull_percent < cache->frun_percent &&
37713 cache->frun_percent < 100);
37714
37715 - ASSERT(cache->bstop_percent >= 0 &&
37716 - cache->bstop_percent < cache->bcull_percent &&
37717 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
37718 cache->bcull_percent < cache->brun_percent &&
37719 cache->brun_percent < 100);
37720
37721 diff -urNp linux-2.6.32.43/fs/cachefiles/daemon.c linux-2.6.32.43/fs/cachefiles/daemon.c
37722 --- linux-2.6.32.43/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
37723 +++ linux-2.6.32.43/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
37724 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
37725 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37726 return -EIO;
37727
37728 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
37729 + if (datalen > PAGE_SIZE - 1)
37730 return -EOPNOTSUPP;
37731
37732 /* drag the command string into the kernel so we can parse it */
37733 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
37734 if (args[0] != '%' || args[1] != '\0')
37735 return -EINVAL;
37736
37737 - if (fstop < 0 || fstop >= cache->fcull_percent)
37738 + if (fstop >= cache->fcull_percent)
37739 return cachefiles_daemon_range_error(cache, args);
37740
37741 cache->fstop_percent = fstop;
37742 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
37743 if (args[0] != '%' || args[1] != '\0')
37744 return -EINVAL;
37745
37746 - if (bstop < 0 || bstop >= cache->bcull_percent)
37747 + if (bstop >= cache->bcull_percent)
37748 return cachefiles_daemon_range_error(cache, args);
37749
37750 cache->bstop_percent = bstop;
37751 diff -urNp linux-2.6.32.43/fs/cachefiles/internal.h linux-2.6.32.43/fs/cachefiles/internal.h
37752 --- linux-2.6.32.43/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37753 +++ linux-2.6.32.43/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37754 @@ -56,7 +56,7 @@ struct cachefiles_cache {
37755 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37756 struct rb_root active_nodes; /* active nodes (can't be culled) */
37757 rwlock_t active_lock; /* lock for active_nodes */
37758 - atomic_t gravecounter; /* graveyard uniquifier */
37759 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37760 unsigned frun_percent; /* when to stop culling (% files) */
37761 unsigned fcull_percent; /* when to start culling (% files) */
37762 unsigned fstop_percent; /* when to stop allocating (% files) */
37763 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37764 * proc.c
37765 */
37766 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37767 -extern atomic_t cachefiles_lookup_histogram[HZ];
37768 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37769 -extern atomic_t cachefiles_create_histogram[HZ];
37770 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37771 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37772 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37773
37774 extern int __init cachefiles_proc_init(void);
37775 extern void cachefiles_proc_cleanup(void);
37776 static inline
37777 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37778 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37779 {
37780 unsigned long jif = jiffies - start_jif;
37781 if (jif >= HZ)
37782 jif = HZ - 1;
37783 - atomic_inc(&histogram[jif]);
37784 + atomic_inc_unchecked(&histogram[jif]);
37785 }
37786
37787 #else
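The cachefiles hunks (and the CIFS and Coda ones further down) convert pure statistics counters from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, ordinary atomic_t operations are overflow-checked to catch reference-count bugs, so counters that are allowed to wrap (histogram buckets, per-tcon stats, epoch counters, the graveyard uniquifier) are moved to the *_unchecked variants to avoid false positives. A rough userspace sketch of the split, using GCC/Clang __atomic builtins purely for illustration; the kernel types and the overflow trap itself are not reproduced here.

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

/* "unchecked": plain wrapping increment, fine for statistics */
static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static atomic_unchecked_t lookup_histogram[10];

int main(void)
{
	atomic_inc_unchecked(&lookup_histogram[3]);
	atomic_inc_unchecked(&lookup_histogram[3]);
	printf("bucket 3 = %d\n", atomic_read_unchecked(&lookup_histogram[3]));
	return 0;
}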
37788 diff -urNp linux-2.6.32.43/fs/cachefiles/namei.c linux-2.6.32.43/fs/cachefiles/namei.c
37789 --- linux-2.6.32.43/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37790 +++ linux-2.6.32.43/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37791 @@ -250,7 +250,7 @@ try_again:
37792 /* first step is to make up a grave dentry in the graveyard */
37793 sprintf(nbuffer, "%08x%08x",
37794 (uint32_t) get_seconds(),
37795 - (uint32_t) atomic_inc_return(&cache->gravecounter));
37796 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37797
37798 /* do the multiway lock magic */
37799 trap = lock_rename(cache->graveyard, dir);
37800 diff -urNp linux-2.6.32.43/fs/cachefiles/proc.c linux-2.6.32.43/fs/cachefiles/proc.c
37801 --- linux-2.6.32.43/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37802 +++ linux-2.6.32.43/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37803 @@ -14,9 +14,9 @@
37804 #include <linux/seq_file.h>
37805 #include "internal.h"
37806
37807 -atomic_t cachefiles_lookup_histogram[HZ];
37808 -atomic_t cachefiles_mkdir_histogram[HZ];
37809 -atomic_t cachefiles_create_histogram[HZ];
37810 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37811 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37812 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37813
37814 /*
37815 * display the latency histogram
37816 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37817 return 0;
37818 default:
37819 index = (unsigned long) v - 3;
37820 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37821 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37822 - z = atomic_read(&cachefiles_create_histogram[index]);
37823 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37824 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37825 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37826 if (x == 0 && y == 0 && z == 0)
37827 return 0;
37828
37829 diff -urNp linux-2.6.32.43/fs/cachefiles/rdwr.c linux-2.6.32.43/fs/cachefiles/rdwr.c
37830 --- linux-2.6.32.43/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37831 +++ linux-2.6.32.43/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37832 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37833 old_fs = get_fs();
37834 set_fs(KERNEL_DS);
37835 ret = file->f_op->write(
37836 - file, (const void __user *) data, len, &pos);
37837 + file, (__force const void __user *) data, len, &pos);
37838 set_fs(old_fs);
37839 kunmap(page);
37840 if (ret != len)
37841 diff -urNp linux-2.6.32.43/fs/cifs/cifs_debug.c linux-2.6.32.43/fs/cifs/cifs_debug.c
37842 --- linux-2.6.32.43/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37843 +++ linux-2.6.32.43/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37844 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37845 tcon = list_entry(tmp3,
37846 struct cifsTconInfo,
37847 tcon_list);
37848 - atomic_set(&tcon->num_smbs_sent, 0);
37849 - atomic_set(&tcon->num_writes, 0);
37850 - atomic_set(&tcon->num_reads, 0);
37851 - atomic_set(&tcon->num_oplock_brks, 0);
37852 - atomic_set(&tcon->num_opens, 0);
37853 - atomic_set(&tcon->num_posixopens, 0);
37854 - atomic_set(&tcon->num_posixmkdirs, 0);
37855 - atomic_set(&tcon->num_closes, 0);
37856 - atomic_set(&tcon->num_deletes, 0);
37857 - atomic_set(&tcon->num_mkdirs, 0);
37858 - atomic_set(&tcon->num_rmdirs, 0);
37859 - atomic_set(&tcon->num_renames, 0);
37860 - atomic_set(&tcon->num_t2renames, 0);
37861 - atomic_set(&tcon->num_ffirst, 0);
37862 - atomic_set(&tcon->num_fnext, 0);
37863 - atomic_set(&tcon->num_fclose, 0);
37864 - atomic_set(&tcon->num_hardlinks, 0);
37865 - atomic_set(&tcon->num_symlinks, 0);
37866 - atomic_set(&tcon->num_locks, 0);
37867 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37868 + atomic_set_unchecked(&tcon->num_writes, 0);
37869 + atomic_set_unchecked(&tcon->num_reads, 0);
37870 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37871 + atomic_set_unchecked(&tcon->num_opens, 0);
37872 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37873 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37874 + atomic_set_unchecked(&tcon->num_closes, 0);
37875 + atomic_set_unchecked(&tcon->num_deletes, 0);
37876 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37877 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37878 + atomic_set_unchecked(&tcon->num_renames, 0);
37879 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37880 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37881 + atomic_set_unchecked(&tcon->num_fnext, 0);
37882 + atomic_set_unchecked(&tcon->num_fclose, 0);
37883 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37884 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37885 + atomic_set_unchecked(&tcon->num_locks, 0);
37886 }
37887 }
37888 }
37889 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37890 if (tcon->need_reconnect)
37891 seq_puts(m, "\tDISCONNECTED ");
37892 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37893 - atomic_read(&tcon->num_smbs_sent),
37894 - atomic_read(&tcon->num_oplock_brks));
37895 + atomic_read_unchecked(&tcon->num_smbs_sent),
37896 + atomic_read_unchecked(&tcon->num_oplock_brks));
37897 seq_printf(m, "\nReads: %d Bytes: %lld",
37898 - atomic_read(&tcon->num_reads),
37899 + atomic_read_unchecked(&tcon->num_reads),
37900 (long long)(tcon->bytes_read));
37901 seq_printf(m, "\nWrites: %d Bytes: %lld",
37902 - atomic_read(&tcon->num_writes),
37903 + atomic_read_unchecked(&tcon->num_writes),
37904 (long long)(tcon->bytes_written));
37905 seq_printf(m, "\nFlushes: %d",
37906 - atomic_read(&tcon->num_flushes));
37907 + atomic_read_unchecked(&tcon->num_flushes));
37908 seq_printf(m, "\nLocks: %d HardLinks: %d "
37909 "Symlinks: %d",
37910 - atomic_read(&tcon->num_locks),
37911 - atomic_read(&tcon->num_hardlinks),
37912 - atomic_read(&tcon->num_symlinks));
37913 + atomic_read_unchecked(&tcon->num_locks),
37914 + atomic_read_unchecked(&tcon->num_hardlinks),
37915 + atomic_read_unchecked(&tcon->num_symlinks));
37916 seq_printf(m, "\nOpens: %d Closes: %d "
37917 "Deletes: %d",
37918 - atomic_read(&tcon->num_opens),
37919 - atomic_read(&tcon->num_closes),
37920 - atomic_read(&tcon->num_deletes));
37921 + atomic_read_unchecked(&tcon->num_opens),
37922 + atomic_read_unchecked(&tcon->num_closes),
37923 + atomic_read_unchecked(&tcon->num_deletes));
37924 seq_printf(m, "\nPosix Opens: %d "
37925 "Posix Mkdirs: %d",
37926 - atomic_read(&tcon->num_posixopens),
37927 - atomic_read(&tcon->num_posixmkdirs));
37928 + atomic_read_unchecked(&tcon->num_posixopens),
37929 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37930 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37931 - atomic_read(&tcon->num_mkdirs),
37932 - atomic_read(&tcon->num_rmdirs));
37933 + atomic_read_unchecked(&tcon->num_mkdirs),
37934 + atomic_read_unchecked(&tcon->num_rmdirs));
37935 seq_printf(m, "\nRenames: %d T2 Renames %d",
37936 - atomic_read(&tcon->num_renames),
37937 - atomic_read(&tcon->num_t2renames));
37938 + atomic_read_unchecked(&tcon->num_renames),
37939 + atomic_read_unchecked(&tcon->num_t2renames));
37940 seq_printf(m, "\nFindFirst: %d FNext %d "
37941 "FClose %d",
37942 - atomic_read(&tcon->num_ffirst),
37943 - atomic_read(&tcon->num_fnext),
37944 - atomic_read(&tcon->num_fclose));
37945 + atomic_read_unchecked(&tcon->num_ffirst),
37946 + atomic_read_unchecked(&tcon->num_fnext),
37947 + atomic_read_unchecked(&tcon->num_fclose));
37948 }
37949 }
37950 }
37951 diff -urNp linux-2.6.32.43/fs/cifs/cifsglob.h linux-2.6.32.43/fs/cifs/cifsglob.h
37952 --- linux-2.6.32.43/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37953 +++ linux-2.6.32.43/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37954 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37955 __u16 Flags; /* optional support bits */
37956 enum statusEnum tidStatus;
37957 #ifdef CONFIG_CIFS_STATS
37958 - atomic_t num_smbs_sent;
37959 - atomic_t num_writes;
37960 - atomic_t num_reads;
37961 - atomic_t num_flushes;
37962 - atomic_t num_oplock_brks;
37963 - atomic_t num_opens;
37964 - atomic_t num_closes;
37965 - atomic_t num_deletes;
37966 - atomic_t num_mkdirs;
37967 - atomic_t num_posixopens;
37968 - atomic_t num_posixmkdirs;
37969 - atomic_t num_rmdirs;
37970 - atomic_t num_renames;
37971 - atomic_t num_t2renames;
37972 - atomic_t num_ffirst;
37973 - atomic_t num_fnext;
37974 - atomic_t num_fclose;
37975 - atomic_t num_hardlinks;
37976 - atomic_t num_symlinks;
37977 - atomic_t num_locks;
37978 - atomic_t num_acl_get;
37979 - atomic_t num_acl_set;
37980 + atomic_unchecked_t num_smbs_sent;
37981 + atomic_unchecked_t num_writes;
37982 + atomic_unchecked_t num_reads;
37983 + atomic_unchecked_t num_flushes;
37984 + atomic_unchecked_t num_oplock_brks;
37985 + atomic_unchecked_t num_opens;
37986 + atomic_unchecked_t num_closes;
37987 + atomic_unchecked_t num_deletes;
37988 + atomic_unchecked_t num_mkdirs;
37989 + atomic_unchecked_t num_posixopens;
37990 + atomic_unchecked_t num_posixmkdirs;
37991 + atomic_unchecked_t num_rmdirs;
37992 + atomic_unchecked_t num_renames;
37993 + atomic_unchecked_t num_t2renames;
37994 + atomic_unchecked_t num_ffirst;
37995 + atomic_unchecked_t num_fnext;
37996 + atomic_unchecked_t num_fclose;
37997 + atomic_unchecked_t num_hardlinks;
37998 + atomic_unchecked_t num_symlinks;
37999 + atomic_unchecked_t num_locks;
38000 + atomic_unchecked_t num_acl_get;
38001 + atomic_unchecked_t num_acl_set;
38002 #ifdef CONFIG_CIFS_STATS2
38003 unsigned long long time_writes;
38004 unsigned long long time_reads;
38005 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
38006 }
38007
38008 #ifdef CONFIG_CIFS_STATS
38009 -#define cifs_stats_inc atomic_inc
38010 +#define cifs_stats_inc atomic_inc_unchecked
38011
38012 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
38013 unsigned int bytes)
38014 diff -urNp linux-2.6.32.43/fs/cifs/link.c linux-2.6.32.43/fs/cifs/link.c
38015 --- linux-2.6.32.43/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
38016 +++ linux-2.6.32.43/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
38017 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
38018
38019 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
38020 {
38021 - char *p = nd_get_link(nd);
38022 + const char *p = nd_get_link(nd);
38023 if (!IS_ERR(p))
38024 kfree(p);
38025 }
38026 diff -urNp linux-2.6.32.43/fs/coda/cache.c linux-2.6.32.43/fs/coda/cache.c
38027 --- linux-2.6.32.43/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
38028 +++ linux-2.6.32.43/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
38029 @@ -24,14 +24,14 @@
38030 #include <linux/coda_fs_i.h>
38031 #include <linux/coda_cache.h>
38032
38033 -static atomic_t permission_epoch = ATOMIC_INIT(0);
38034 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38035
38036 /* replace or extend an acl cache hit */
38037 void coda_cache_enter(struct inode *inode, int mask)
38038 {
38039 struct coda_inode_info *cii = ITOC(inode);
38040
38041 - cii->c_cached_epoch = atomic_read(&permission_epoch);
38042 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38043 if (cii->c_uid != current_fsuid()) {
38044 cii->c_uid = current_fsuid();
38045 cii->c_cached_perm = mask;
38046 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
38047 void coda_cache_clear_inode(struct inode *inode)
38048 {
38049 struct coda_inode_info *cii = ITOC(inode);
38050 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38051 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38052 }
38053
38054 /* remove all acl caches */
38055 void coda_cache_clear_all(struct super_block *sb)
38056 {
38057 - atomic_inc(&permission_epoch);
38058 + atomic_inc_unchecked(&permission_epoch);
38059 }
38060
38061
38062 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
38063
38064 hit = (mask & cii->c_cached_perm) == mask &&
38065 cii->c_uid == current_fsuid() &&
38066 - cii->c_cached_epoch == atomic_read(&permission_epoch);
38067 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38068
38069 return hit;
38070 }
38071 diff -urNp linux-2.6.32.43/fs/compat_binfmt_elf.c linux-2.6.32.43/fs/compat_binfmt_elf.c
38072 --- linux-2.6.32.43/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38073 +++ linux-2.6.32.43/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
38074 @@ -29,10 +29,12 @@
38075 #undef elfhdr
38076 #undef elf_phdr
38077 #undef elf_note
38078 +#undef elf_dyn
38079 #undef elf_addr_t
38080 #define elfhdr elf32_hdr
38081 #define elf_phdr elf32_phdr
38082 #define elf_note elf32_note
38083 +#define elf_dyn Elf32_Dyn
38084 #define elf_addr_t Elf32_Addr
38085
38086 /*
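compat_binfmt_elf.c redefines elf_dyn to Elf32_Dyn so that elf_handle_mprotect() above, which walks PT_DYNAMIC looking for DT_TEXTREL or DT_FLAGS with DF_TEXTREL before granting VM_MAYWRITE, also builds correctly for 32-bit compat binaries. Below is a standalone sketch of the same dynamic-tag scan done in userspace on an already-parsed dynamic array; it assumes a 64-bit ELF layout and uses only standard <elf.h> constants, with a fabricated two-entry array for demonstration.

#include <elf.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Returns true if the dynamic section declares text relocations,
 * mirroring the DT_TEXTREL / (DT_FLAGS & DF_TEXTREL) test in the patch. */
static bool has_textrel(const Elf64_Dyn *dyn, size_t count)
{
	size_t i;

	for (i = 0; i < count && dyn[i].d_tag != DT_NULL; i++) {
		if (dyn[i].d_tag == DT_TEXTREL)
			return true;
		if (dyn[i].d_tag == DT_FLAGS && (dyn[i].d_un.d_val & DF_TEXTREL))
			return true;
	}
	return false;
}

int main(void)
{
	/* tiny fabricated dynamic section, for demonstration only */
	Elf64_Dyn demo[] = {
		{ .d_tag = DT_FLAGS, .d_un.d_val = DF_TEXTREL },
		{ .d_tag = DT_NULL },
	};

	printf("textrel: %s\n", has_textrel(demo, 2) ? "yes" : "no");
	return 0;
}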
38087 diff -urNp linux-2.6.32.43/fs/compat.c linux-2.6.32.43/fs/compat.c
38088 --- linux-2.6.32.43/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
38089 +++ linux-2.6.32.43/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
38090 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
38091
38092 struct compat_readdir_callback {
38093 struct compat_old_linux_dirent __user *dirent;
38094 + struct file * file;
38095 int result;
38096 };
38097
38098 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
38099 buf->result = -EOVERFLOW;
38100 return -EOVERFLOW;
38101 }
38102 +
38103 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38104 + return 0;
38105 +
38106 buf->result++;
38107 dirent = buf->dirent;
38108 if (!access_ok(VERIFY_WRITE, dirent,
38109 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
38110
38111 buf.result = 0;
38112 buf.dirent = dirent;
38113 + buf.file = file;
38114
38115 error = vfs_readdir(file, compat_fillonedir, &buf);
38116 if (buf.result)
38117 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
38118 struct compat_getdents_callback {
38119 struct compat_linux_dirent __user *current_dir;
38120 struct compat_linux_dirent __user *previous;
38121 + struct file * file;
38122 int count;
38123 int error;
38124 };
38125 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
38126 buf->error = -EOVERFLOW;
38127 return -EOVERFLOW;
38128 }
38129 +
38130 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38131 + return 0;
38132 +
38133 dirent = buf->previous;
38134 if (dirent) {
38135 if (__put_user(offset, &dirent->d_off))
38136 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
38137 buf.previous = NULL;
38138 buf.count = count;
38139 buf.error = 0;
38140 + buf.file = file;
38141
38142 error = vfs_readdir(file, compat_filldir, &buf);
38143 if (error >= 0)
38144 @@ -987,6 +999,7 @@ out:
38145 struct compat_getdents_callback64 {
38146 struct linux_dirent64 __user *current_dir;
38147 struct linux_dirent64 __user *previous;
38148 + struct file * file;
38149 int count;
38150 int error;
38151 };
38152 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
38153 buf->error = -EINVAL; /* only used if we fail.. */
38154 if (reclen > buf->count)
38155 return -EINVAL;
38156 +
38157 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38158 + return 0;
38159 +
38160 dirent = buf->previous;
38161
38162 if (dirent) {
38163 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
38164 buf.previous = NULL;
38165 buf.count = count;
38166 buf.error = 0;
38167 + buf.file = file;
38168
38169 error = vfs_readdir(file, compat_filldir64, &buf);
38170 if (error >= 0)
38171 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
38172 * verify all the pointers
38173 */
38174 ret = -EINVAL;
38175 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
38176 + if (nr_segs > UIO_MAXIOV)
38177 goto out;
38178 if (!file->f_op)
38179 goto out;
38180 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
38181 compat_uptr_t __user *envp,
38182 struct pt_regs * regs)
38183 {
38184 +#ifdef CONFIG_GRKERNSEC
38185 + struct file *old_exec_file;
38186 + struct acl_subject_label *old_acl;
38187 + struct rlimit old_rlim[RLIM_NLIMITS];
38188 +#endif
38189 struct linux_binprm *bprm;
38190 struct file *file;
38191 struct files_struct *displaced;
38192 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
38193 bprm->filename = filename;
38194 bprm->interp = filename;
38195
38196 + if (gr_process_user_ban()) {
38197 + retval = -EPERM;
38198 + goto out_file;
38199 + }
38200 +
38201 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38202 + retval = -EAGAIN;
38203 + if (gr_handle_nproc())
38204 + goto out_file;
38205 + retval = -EACCES;
38206 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
38207 + goto out_file;
38208 +
38209 retval = bprm_mm_init(bprm);
38210 if (retval)
38211 goto out_file;
38212 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
38213 if (retval < 0)
38214 goto out;
38215
38216 + if (!gr_tpe_allow(file)) {
38217 + retval = -EACCES;
38218 + goto out;
38219 + }
38220 +
38221 + if (gr_check_crash_exec(file)) {
38222 + retval = -EACCES;
38223 + goto out;
38224 + }
38225 +
38226 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38227 +
38228 + gr_handle_exec_args_compat(bprm, argv);
38229 +
38230 +#ifdef CONFIG_GRKERNSEC
38231 + old_acl = current->acl;
38232 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38233 + old_exec_file = current->exec_file;
38234 + get_file(file);
38235 + current->exec_file = file;
38236 +#endif
38237 +
38238 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38239 + bprm->unsafe & LSM_UNSAFE_SHARE);
38240 + if (retval < 0)
38241 + goto out_fail;
38242 +
38243 retval = search_binary_handler(bprm, regs);
38244 if (retval < 0)
38245 - goto out;
38246 + goto out_fail;
38247 +#ifdef CONFIG_GRKERNSEC
38248 + if (old_exec_file)
38249 + fput(old_exec_file);
38250 +#endif
38251
38252 /* execve succeeded */
38253 current->fs->in_exec = 0;
38254 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
38255 put_files_struct(displaced);
38256 return retval;
38257
38258 +out_fail:
38259 +#ifdef CONFIG_GRKERNSEC
38260 + current->acl = old_acl;
38261 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38262 + fput(current->exec_file);
38263 + current->exec_file = old_exec_file;
38264 +#endif
38265 +
38266 out:
38267 if (bprm->mm) {
38268 acct_arg_size(bprm, 0);
38269 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
38270 struct fdtable *fdt;
38271 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38272
38273 + pax_track_stack();
38274 +
38275 if (n < 0)
38276 goto out_nofds;
38277
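In compat_do_readv_writev() the patch drops the `nr_segs <= 0` half of the check: nr_segs is unsigned, so the comparison can only ever catch zero, and a "negative" count from userspace has already wrapped to a huge value that the UIO_MAXIOV bound rejects. A two-line illustration of why `<= 0` is dead weight for unsigned types (plain userspace C, just to show the wrap):

#include <stdio.h>

int main(void)
{
	unsigned long nr_segs = (unsigned long)-1; /* a "negative" count from userspace */

	/* The value wraps to ULONG_MAX, so nr_segs <= 0 is false; only the
	 * upper-bound check against something like UIO_MAXIOV rejects it. */
	printf("nr_segs = %lu, (nr_segs <= 0) = %d\n", nr_segs, nr_segs <= 0);
	return 0;
}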
38278 diff -urNp linux-2.6.32.43/fs/compat_ioctl.c linux-2.6.32.43/fs/compat_ioctl.c
38279 --- linux-2.6.32.43/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
38280 +++ linux-2.6.32.43/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
38281 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
38282 up = (struct compat_video_spu_palette __user *) arg;
38283 err = get_user(palp, &up->palette);
38284 err |= get_user(length, &up->length);
38285 + if (err)
38286 + return -EFAULT;
38287
38288 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38289 err = put_user(compat_ptr(palp), &up_native->palette);
38290 diff -urNp linux-2.6.32.43/fs/configfs/dir.c linux-2.6.32.43/fs/configfs/dir.c
38291 --- linux-2.6.32.43/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
38292 +++ linux-2.6.32.43/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
38293 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
38294 }
38295 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38296 struct configfs_dirent *next;
38297 - const char * name;
38298 + const unsigned char * name;
38299 + char d_name[sizeof(next->s_dentry->d_iname)];
38300 int len;
38301
38302 next = list_entry(p, struct configfs_dirent,
38303 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
38304 continue;
38305
38306 name = configfs_get_name(next);
38307 - len = strlen(name);
38308 + if (next->s_dentry && name == next->s_dentry->d_iname) {
38309 + len = next->s_dentry->d_name.len;
38310 + memcpy(d_name, name, len);
38311 + name = d_name;
38312 + } else
38313 + len = strlen(name);
38314 if (next->s_dentry)
38315 ino = next->s_dentry->d_inode->i_ino;
38316 else
38317 diff -urNp linux-2.6.32.43/fs/dcache.c linux-2.6.32.43/fs/dcache.c
38318 --- linux-2.6.32.43/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
38319 +++ linux-2.6.32.43/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
38320 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
38321
38322 static struct kmem_cache *dentry_cache __read_mostly;
38323
38324 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
38325 -
38326 /*
38327 * This is the single most critical data structure when it comes
38328 * to the dcache: the hashtable for lookups. Somebody should try
38329 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
38330 mempages -= reserve;
38331
38332 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38333 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38334 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38335
38336 dcache_init();
38337 inode_init();
38338 diff -urNp linux-2.6.32.43/fs/dlm/lockspace.c linux-2.6.32.43/fs/dlm/lockspace.c
38339 --- linux-2.6.32.43/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
38340 +++ linux-2.6.32.43/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
38341 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
38342 kfree(ls);
38343 }
38344
38345 -static struct sysfs_ops dlm_attr_ops = {
38346 +static const struct sysfs_ops dlm_attr_ops = {
38347 .show = dlm_attr_show,
38348 .store = dlm_attr_store,
38349 };
38350 diff -urNp linux-2.6.32.43/fs/ecryptfs/inode.c linux-2.6.32.43/fs/ecryptfs/inode.c
38351 --- linux-2.6.32.43/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
38352 +++ linux-2.6.32.43/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
38353 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
38354 old_fs = get_fs();
38355 set_fs(get_ds());
38356 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38357 - (char __user *)lower_buf,
38358 + (__force char __user *)lower_buf,
38359 lower_bufsiz);
38360 set_fs(old_fs);
38361 if (rc < 0)
38362 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
38363 }
38364 old_fs = get_fs();
38365 set_fs(get_ds());
38366 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38367 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38368 set_fs(old_fs);
38369 if (rc < 0)
38370 goto out_free;
38371 diff -urNp linux-2.6.32.43/fs/exec.c linux-2.6.32.43/fs/exec.c
38372 --- linux-2.6.32.43/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
38373 +++ linux-2.6.32.43/fs/exec.c 2011-07-06 19:53:33.000000000 -0400
38374 @@ -56,12 +56,24 @@
38375 #include <linux/fsnotify.h>
38376 #include <linux/fs_struct.h>
38377 #include <linux/pipe_fs_i.h>
38378 +#include <linux/random.h>
38379 +#include <linux/seq_file.h>
38380 +
38381 +#ifdef CONFIG_PAX_REFCOUNT
38382 +#include <linux/kallsyms.h>
38383 +#include <linux/kdebug.h>
38384 +#endif
38385
38386 #include <asm/uaccess.h>
38387 #include <asm/mmu_context.h>
38388 #include <asm/tlb.h>
38389 #include "internal.h"
38390
38391 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38392 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38393 +EXPORT_SYMBOL(pax_set_initial_flags_func);
38394 +#endif
38395 +
38396 int core_uses_pid;
38397 char core_pattern[CORENAME_MAX_SIZE] = "core";
38398 unsigned int core_pipe_limit;
38399 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38400 goto out;
38401
38402 file = do_filp_open(AT_FDCWD, tmp,
38403 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38404 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38405 MAY_READ | MAY_EXEC | MAY_OPEN);
38406 putname(tmp);
38407 error = PTR_ERR(file);
38408 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
38409 int write)
38410 {
38411 struct page *page;
38412 - int ret;
38413
38414 -#ifdef CONFIG_STACK_GROWSUP
38415 - if (write) {
38416 - ret = expand_stack_downwards(bprm->vma, pos);
38417 - if (ret < 0)
38418 - return NULL;
38419 - }
38420 -#endif
38421 - ret = get_user_pages(current, bprm->mm, pos,
38422 - 1, write, 1, &page, NULL);
38423 - if (ret <= 0)
38424 + if (0 > expand_stack_downwards(bprm->vma, pos))
38425 + return NULL;
38426 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38427 return NULL;
38428
38429 if (write) {
38430 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
38431 vma->vm_end = STACK_TOP_MAX;
38432 vma->vm_start = vma->vm_end - PAGE_SIZE;
38433 vma->vm_flags = VM_STACK_FLAGS;
38434 +
38435 +#ifdef CONFIG_PAX_SEGMEXEC
38436 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38437 +#endif
38438 +
38439 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38440
38441 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
38442 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
38443 mm->stack_vm = mm->total_vm = 1;
38444 up_write(&mm->mmap_sem);
38445 bprm->p = vma->vm_end - sizeof(void *);
38446 +
38447 +#ifdef CONFIG_PAX_RANDUSTACK
38448 + if (randomize_va_space)
38449 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38450 +#endif
38451 +
38452 return 0;
38453 err:
38454 up_write(&mm->mmap_sem);
38455 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
38456 int r;
38457 mm_segment_t oldfs = get_fs();
38458 set_fs(KERNEL_DS);
38459 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
38460 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
38461 set_fs(oldfs);
38462 return r;
38463 }
38464 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
38465 unsigned long new_end = old_end - shift;
38466 struct mmu_gather *tlb;
38467
38468 - BUG_ON(new_start > new_end);
38469 + if (new_start >= new_end || new_start < mmap_min_addr)
38470 + return -ENOMEM;
38471
38472 /*
38473 * ensure there are no vmas between where we want to go
38474 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
38475 if (vma != find_vma(mm, new_start))
38476 return -EFAULT;
38477
38478 +#ifdef CONFIG_PAX_SEGMEXEC
38479 + BUG_ON(pax_find_mirror_vma(vma));
38480 +#endif
38481 +
38482 /*
38483 * cover the whole range: [new_start, old_end)
38484 */
38485 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
38486 stack_top = arch_align_stack(stack_top);
38487 stack_top = PAGE_ALIGN(stack_top);
38488
38489 - if (unlikely(stack_top < mmap_min_addr) ||
38490 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38491 - return -ENOMEM;
38492 -
38493 stack_shift = vma->vm_end - stack_top;
38494
38495 bprm->p -= stack_shift;
38496 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
38497 bprm->exec -= stack_shift;
38498
38499 down_write(&mm->mmap_sem);
38500 +
38501 + /* Move stack pages down in memory. */
38502 + if (stack_shift) {
38503 + ret = shift_arg_pages(vma, stack_shift);
38504 + if (ret)
38505 + goto out_unlock;
38506 + }
38507 +
38508 vm_flags = VM_STACK_FLAGS;
38509
38510 /*
38511 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
38512 vm_flags &= ~VM_EXEC;
38513 vm_flags |= mm->def_flags;
38514
38515 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38516 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38517 + vm_flags &= ~VM_EXEC;
38518 +
38519 +#ifdef CONFIG_PAX_MPROTECT
38520 + if (mm->pax_flags & MF_PAX_MPROTECT)
38521 + vm_flags &= ~VM_MAYEXEC;
38522 +#endif
38523 +
38524 + }
38525 +#endif
38526 +
38527 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
38528 vm_flags);
38529 if (ret)
38530 goto out_unlock;
38531 BUG_ON(prev != vma);
38532
38533 - /* Move stack pages down in memory. */
38534 - if (stack_shift) {
38535 - ret = shift_arg_pages(vma, stack_shift);
38536 - if (ret)
38537 - goto out_unlock;
38538 - }
38539 -
38540 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
38541 stack_size = vma->vm_end - vma->vm_start;
38542 /*
38543 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
38544 int err;
38545
38546 file = do_filp_open(AT_FDCWD, name,
38547 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38548 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38549 MAY_EXEC | MAY_OPEN);
38550 if (IS_ERR(file))
38551 goto out;
38552 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
38553 old_fs = get_fs();
38554 set_fs(get_ds());
38555 /* The cast to a user pointer is valid due to the set_fs() */
38556 - result = vfs_read(file, (void __user *)addr, count, &pos);
38557 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
38558 set_fs(old_fs);
38559 return result;
38560 }
38561 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
38562 }
38563 rcu_read_unlock();
38564
38565 - if (p->fs->users > n_fs) {
38566 + if (atomic_read(&p->fs->users) > n_fs) {
38567 bprm->unsafe |= LSM_UNSAFE_SHARE;
38568 } else {
38569 res = -EAGAIN;
38570 @@ -1347,6 +1376,11 @@ int do_execve(char * filename,
38571 char __user *__user *envp,
38572 struct pt_regs * regs)
38573 {
38574 +#ifdef CONFIG_GRKERNSEC
38575 + struct file *old_exec_file;
38576 + struct acl_subject_label *old_acl;
38577 + struct rlimit old_rlim[RLIM_NLIMITS];
38578 +#endif
38579 struct linux_binprm *bprm;
38580 struct file *file;
38581 struct files_struct *displaced;
38582 @@ -1383,6 +1417,23 @@ int do_execve(char * filename,
38583 bprm->filename = filename;
38584 bprm->interp = filename;
38585
38586 + if (gr_process_user_ban()) {
38587 + retval = -EPERM;
38588 + goto out_file;
38589 + }
38590 +
38591 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38592 +
38593 + if (gr_handle_nproc()) {
38594 + retval = -EAGAIN;
38595 + goto out_file;
38596 + }
38597 +
38598 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38599 + retval = -EACCES;
38600 + goto out_file;
38601 + }
38602 +
38603 retval = bprm_mm_init(bprm);
38604 if (retval)
38605 goto out_file;
38606 @@ -1412,10 +1463,41 @@ int do_execve(char * filename,
38607 if (retval < 0)
38608 goto out;
38609
38610 + if (!gr_tpe_allow(file)) {
38611 + retval = -EACCES;
38612 + goto out;
38613 + }
38614 +
38615 + if (gr_check_crash_exec(file)) {
38616 + retval = -EACCES;
38617 + goto out;
38618 + }
38619 +
38620 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38621 +
38622 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
38623 +
38624 +#ifdef CONFIG_GRKERNSEC
38625 + old_acl = current->acl;
38626 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38627 + old_exec_file = current->exec_file;
38628 + get_file(file);
38629 + current->exec_file = file;
38630 +#endif
38631 +
38632 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38633 + bprm->unsafe & LSM_UNSAFE_SHARE);
38634 + if (retval < 0)
38635 + goto out_fail;
38636 +
38637 current->flags &= ~PF_KTHREAD;
38638 retval = search_binary_handler(bprm,regs);
38639 if (retval < 0)
38640 - goto out;
38641 + goto out_fail;
38642 +#ifdef CONFIG_GRKERNSEC
38643 + if (old_exec_file)
38644 + fput(old_exec_file);
38645 +#endif
38646
38647 /* execve succeeded */
38648 current->fs->in_exec = 0;
38649 @@ -1426,6 +1508,14 @@ int do_execve(char * filename,
38650 put_files_struct(displaced);
38651 return retval;
38652
38653 +out_fail:
38654 +#ifdef CONFIG_GRKERNSEC
38655 + current->acl = old_acl;
38656 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38657 + fput(current->exec_file);
38658 + current->exec_file = old_exec_file;
38659 +#endif
38660 +
38661 out:
38662 if (bprm->mm) {
38663 acct_arg_size(bprm, 0);
38664 @@ -1591,6 +1681,220 @@ out:
38665 return ispipe;
38666 }
38667
38668 +int pax_check_flags(unsigned long *flags)
38669 +{
38670 + int retval = 0;
38671 +
38672 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38673 + if (*flags & MF_PAX_SEGMEXEC)
38674 + {
38675 + *flags &= ~MF_PAX_SEGMEXEC;
38676 + retval = -EINVAL;
38677 + }
38678 +#endif
38679 +
38680 + if ((*flags & MF_PAX_PAGEEXEC)
38681 +
38682 +#ifdef CONFIG_PAX_PAGEEXEC
38683 + && (*flags & MF_PAX_SEGMEXEC)
38684 +#endif
38685 +
38686 + )
38687 + {
38688 + *flags &= ~MF_PAX_PAGEEXEC;
38689 + retval = -EINVAL;
38690 + }
38691 +
38692 + if ((*flags & MF_PAX_MPROTECT)
38693 +
38694 +#ifdef CONFIG_PAX_MPROTECT
38695 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38696 +#endif
38697 +
38698 + )
38699 + {
38700 + *flags &= ~MF_PAX_MPROTECT;
38701 + retval = -EINVAL;
38702 + }
38703 +
38704 + if ((*flags & MF_PAX_EMUTRAMP)
38705 +
38706 +#ifdef CONFIG_PAX_EMUTRAMP
38707 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38708 +#endif
38709 +
38710 + )
38711 + {
38712 + *flags &= ~MF_PAX_EMUTRAMP;
38713 + retval = -EINVAL;
38714 + }
38715 +
38716 + return retval;
38717 +}
38718 +
38719 +EXPORT_SYMBOL(pax_check_flags);
38720 +
38721 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38722 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38723 +{
38724 + struct task_struct *tsk = current;
38725 + struct mm_struct *mm = current->mm;
38726 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38727 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38728 + char *path_exec = NULL;
38729 + char *path_fault = NULL;
38730 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
38731 +
38732 + if (buffer_exec && buffer_fault) {
38733 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38734 +
38735 + down_read(&mm->mmap_sem);
38736 + vma = mm->mmap;
38737 + while (vma && (!vma_exec || !vma_fault)) {
38738 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38739 + vma_exec = vma;
38740 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38741 + vma_fault = vma;
38742 + vma = vma->vm_next;
38743 + }
38744 + if (vma_exec) {
38745 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38746 + if (IS_ERR(path_exec))
38747 + path_exec = "<path too long>";
38748 + else {
38749 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38750 + if (path_exec) {
38751 + *path_exec = 0;
38752 + path_exec = buffer_exec;
38753 + } else
38754 + path_exec = "<path too long>";
38755 + }
38756 + }
38757 + if (vma_fault) {
38758 + start = vma_fault->vm_start;
38759 + end = vma_fault->vm_end;
38760 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38761 + if (vma_fault->vm_file) {
38762 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38763 + if (IS_ERR(path_fault))
38764 + path_fault = "<path too long>";
38765 + else {
38766 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38767 + if (path_fault) {
38768 + *path_fault = 0;
38769 + path_fault = buffer_fault;
38770 + } else
38771 + path_fault = "<path too long>";
38772 + }
38773 + } else
38774 + path_fault = "<anonymous mapping>";
38775 + }
38776 + up_read(&mm->mmap_sem);
38777 + }
38778 + if (tsk->signal->curr_ip)
38779 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38780 + else
38781 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38782 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38783 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38784 + task_uid(tsk), task_euid(tsk), pc, sp);
38785 + free_page((unsigned long)buffer_exec);
38786 + free_page((unsigned long)buffer_fault);
38787 + pax_report_insns(pc, sp);
38788 + do_coredump(SIGKILL, SIGKILL, regs);
38789 +}
38790 +#endif
38791 +
38792 +#ifdef CONFIG_PAX_REFCOUNT
38793 +void pax_report_refcount_overflow(struct pt_regs *regs)
38794 +{
38795 + if (current->signal->curr_ip)
38796 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38797 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38798 + else
38799 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38800 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38801 + print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
38802 + show_regs(regs);
38803 + force_sig_specific(SIGKILL, current);
38804 +}
38805 +#endif
38806 +
38807 +#ifdef CONFIG_PAX_USERCOPY
38808 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38809 +int object_is_on_stack(const void *obj, unsigned long len)
38810 +{
38811 + const void * const stack = task_stack_page(current);
38812 + const void * const stackend = stack + THREAD_SIZE;
38813 +
38814 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38815 + const void *frame = NULL;
38816 + const void *oldframe;
38817 +#endif
38818 +
38819 + if (obj + len < obj)
38820 + return -1;
38821 +
38822 + if (obj + len <= stack || stackend <= obj)
38823 + return 0;
38824 +
38825 + if (obj < stack || stackend < obj + len)
38826 + return -1;
38827 +
38828 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38829 + oldframe = __builtin_frame_address(1);
38830 + if (oldframe)
38831 + frame = __builtin_frame_address(2);
38832 + /*
38833 + low ----------------------------------------------> high
38834 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38835 + ^----------------^
38836 + allow copies only within here
38837 + */
38838 + while (stack <= frame && frame < stackend) {
38839 + /* if obj + len extends past the last frame, this
38840 + check won't pass and the next frame will be 0,
38841 + causing us to bail out and correctly report
38842 + the copy as invalid
38843 + */
38844 + if (obj + len <= frame)
38845 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38846 + oldframe = frame;
38847 + frame = *(const void * const *)frame;
38848 + }
38849 + return -1;
38850 +#else
38851 + return 1;
38852 +#endif
38853 +}
38854 +
38855 +
38856 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38857 +{
38858 + if (current->signal->curr_ip)
38859 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38860 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38861 + else
38862 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38863 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38864 +
38865 + dump_stack();
38866 + gr_handle_kernel_exploit();
38867 + do_group_exit(SIGKILL);
38868 +}
38869 +#endif
38870 +
38871 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38872 +void pax_track_stack(void)
38873 +{
38874 + unsigned long sp = (unsigned long)&sp;
38875 + if (sp < current_thread_info()->lowest_stack &&
38876 + sp > (unsigned long)task_stack_page(current))
38877 + current_thread_info()->lowest_stack = sp;
38878 +}
38879 +EXPORT_SYMBOL(pax_track_stack);
38880 +#endif
38881 +
38882 static int zap_process(struct task_struct *start)
38883 {
38884 struct task_struct *t;
38885 @@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
38886 pipe = file->f_path.dentry->d_inode->i_pipe;
38887
38888 pipe_lock(pipe);
38889 - pipe->readers++;
38890 - pipe->writers--;
38891 + atomic_inc(&pipe->readers);
38892 + atomic_dec(&pipe->writers);
38893
38894 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38895 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38896 wake_up_interruptible_sync(&pipe->wait);
38897 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38898 pipe_wait(pipe);
38899 }
38900
38901 - pipe->readers--;
38902 - pipe->writers++;
38903 + atomic_dec(&pipe->readers);
38904 + atomic_inc(&pipe->writers);
38905 pipe_unlock(pipe);
38906
38907 }
38908 @@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
38909 char **helper_argv = NULL;
38910 int helper_argc = 0;
38911 int dump_count = 0;
38912 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38913 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38914
38915 audit_core_dumps(signr);
38916
38917 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38918 + gr_handle_brute_attach(current, mm->flags);
38919 +
38920 binfmt = mm->binfmt;
38921 if (!binfmt || !binfmt->core_dump)
38922 goto fail;
38923 @@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
38924 */
38925 clear_thread_flag(TIF_SIGPENDING);
38926
38927 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38928 +
38929 /*
38930 * lock_kernel() because format_corename() is controlled by sysctl, which
38931 * uses lock_kernel()
38932 @@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
38933 goto fail_unlock;
38934 }
38935
38936 - dump_count = atomic_inc_return(&core_dump_count);
38937 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38938 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38939 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38940 task_tgid_vnr(current), current->comm);
38941 @@ -1972,7 +2281,7 @@ close_fail:
38942 filp_close(file, NULL);
38943 fail_dropcount:
38944 if (dump_count)
38945 - atomic_dec(&core_dump_count);
38946 + atomic_dec_unchecked(&core_dump_count);
38947 fail_unlock:
38948 if (helper_argv)
38949 argv_free(helper_argv);
38950 diff -urNp linux-2.6.32.43/fs/ext2/balloc.c linux-2.6.32.43/fs/ext2/balloc.c
38951 --- linux-2.6.32.43/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38952 +++ linux-2.6.32.43/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38953 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38954
38955 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38956 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38957 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38958 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38959 sbi->s_resuid != current_fsuid() &&
38960 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38961 return 0;
38962 diff -urNp linux-2.6.32.43/fs/ext3/balloc.c linux-2.6.32.43/fs/ext3/balloc.c
38963 --- linux-2.6.32.43/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38964 +++ linux-2.6.32.43/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38965 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38966
38967 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38968 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38969 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38970 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38971 sbi->s_resuid != current_fsuid() &&
38972 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38973 return 0;
38974 diff -urNp linux-2.6.32.43/fs/ext4/balloc.c linux-2.6.32.43/fs/ext4/balloc.c
38975 --- linux-2.6.32.43/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38976 +++ linux-2.6.32.43/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38977 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38978 /* Hm, nope. Are (enough) root reserved blocks available? */
38979 if (sbi->s_resuid == current_fsuid() ||
38980 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38981 - capable(CAP_SYS_RESOURCE)) {
38982 + capable_nolog(CAP_SYS_RESOURCE)) {
38983 if (free_blocks >= (nblocks + dirty_blocks))
38984 return 1;
38985 }
38986 diff -urNp linux-2.6.32.43/fs/ext4/ext4.h linux-2.6.32.43/fs/ext4/ext4.h
38987 --- linux-2.6.32.43/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38988 +++ linux-2.6.32.43/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38989 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38990
38991 /* stats for buddy allocator */
38992 spinlock_t s_mb_pa_lock;
38993 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38994 - atomic_t s_bal_success; /* we found long enough chunks */
38995 - atomic_t s_bal_allocated; /* in blocks */
38996 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38997 - atomic_t s_bal_goals; /* goal hits */
38998 - atomic_t s_bal_breaks; /* too long searches */
38999 - atomic_t s_bal_2orders; /* 2^order hits */
39000 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
39001 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
39002 + atomic_unchecked_t s_bal_allocated; /* in blocks */
39003 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
39004 + atomic_unchecked_t s_bal_goals; /* goal hits */
39005 + atomic_unchecked_t s_bal_breaks; /* too long searches */
39006 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
39007 spinlock_t s_bal_lock;
39008 unsigned long s_mb_buddies_generated;
39009 unsigned long long s_mb_generation_time;
39010 - atomic_t s_mb_lost_chunks;
39011 - atomic_t s_mb_preallocated;
39012 - atomic_t s_mb_discarded;
39013 + atomic_unchecked_t s_mb_lost_chunks;
39014 + atomic_unchecked_t s_mb_preallocated;
39015 + atomic_unchecked_t s_mb_discarded;
39016 atomic_t s_lock_busy;
39017
39018 /* locality groups */
39019 diff -urNp linux-2.6.32.43/fs/ext4/mballoc.c linux-2.6.32.43/fs/ext4/mballoc.c
39020 --- linux-2.6.32.43/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
39021 +++ linux-2.6.32.43/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
39022 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
39023 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
39024
39025 if (EXT4_SB(sb)->s_mb_stats)
39026 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39027 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39028
39029 break;
39030 }
39031 @@ -2131,7 +2131,7 @@ repeat:
39032 ac->ac_status = AC_STATUS_CONTINUE;
39033 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39034 cr = 3;
39035 - atomic_inc(&sbi->s_mb_lost_chunks);
39036 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39037 goto repeat;
39038 }
39039 }
39040 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
39041 ext4_grpblk_t counters[16];
39042 } sg;
39043
39044 + pax_track_stack();
39045 +
39046 group--;
39047 if (group == 0)
39048 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39049 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
39050 if (sbi->s_mb_stats) {
39051 printk(KERN_INFO
39052 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39053 - atomic_read(&sbi->s_bal_allocated),
39054 - atomic_read(&sbi->s_bal_reqs),
39055 - atomic_read(&sbi->s_bal_success));
39056 + atomic_read_unchecked(&sbi->s_bal_allocated),
39057 + atomic_read_unchecked(&sbi->s_bal_reqs),
39058 + atomic_read_unchecked(&sbi->s_bal_success));
39059 printk(KERN_INFO
39060 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39061 "%u 2^N hits, %u breaks, %u lost\n",
39062 - atomic_read(&sbi->s_bal_ex_scanned),
39063 - atomic_read(&sbi->s_bal_goals),
39064 - atomic_read(&sbi->s_bal_2orders),
39065 - atomic_read(&sbi->s_bal_breaks),
39066 - atomic_read(&sbi->s_mb_lost_chunks));
39067 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39068 + atomic_read_unchecked(&sbi->s_bal_goals),
39069 + atomic_read_unchecked(&sbi->s_bal_2orders),
39070 + atomic_read_unchecked(&sbi->s_bal_breaks),
39071 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39072 printk(KERN_INFO
39073 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39074 sbi->s_mb_buddies_generated++,
39075 sbi->s_mb_generation_time);
39076 printk(KERN_INFO
39077 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39078 - atomic_read(&sbi->s_mb_preallocated),
39079 - atomic_read(&sbi->s_mb_discarded));
39080 + atomic_read_unchecked(&sbi->s_mb_preallocated),
39081 + atomic_read_unchecked(&sbi->s_mb_discarded));
39082 }
39083
39084 free_percpu(sbi->s_locality_groups);
39085 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
39086 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39087
39088 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39089 - atomic_inc(&sbi->s_bal_reqs);
39090 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39091 + atomic_inc_unchecked(&sbi->s_bal_reqs);
39092 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39093 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
39094 - atomic_inc(&sbi->s_bal_success);
39095 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39096 + atomic_inc_unchecked(&sbi->s_bal_success);
39097 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39098 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39099 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39100 - atomic_inc(&sbi->s_bal_goals);
39101 + atomic_inc_unchecked(&sbi->s_bal_goals);
39102 if (ac->ac_found > sbi->s_mb_max_to_scan)
39103 - atomic_inc(&sbi->s_bal_breaks);
39104 + atomic_inc_unchecked(&sbi->s_bal_breaks);
39105 }
39106
39107 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39108 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39109 trace_ext4_mb_new_inode_pa(ac, pa);
39110
39111 ext4_mb_use_inode_pa(ac, pa);
39112 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39113 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39114
39115 ei = EXT4_I(ac->ac_inode);
39116 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39117 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39118 trace_ext4_mb_new_group_pa(ac, pa);
39119
39120 ext4_mb_use_group_pa(ac, pa);
39121 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39122 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39123
39124 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39125 lg = ac->ac_lg;
39126 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39127 * from the bitmap and continue.
39128 */
39129 }
39130 - atomic_add(free, &sbi->s_mb_discarded);
39131 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
39132
39133 return err;
39134 }
39135 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39136 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39137 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39138 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39139 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39140 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39141
39142 if (ac) {
39143 ac->ac_sb = sb;
39144 diff -urNp linux-2.6.32.43/fs/ext4/super.c linux-2.6.32.43/fs/ext4/super.c
39145 --- linux-2.6.32.43/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
39146 +++ linux-2.6.32.43/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
39147 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
39148 }
39149
39150
39151 -static struct sysfs_ops ext4_attr_ops = {
39152 +static const struct sysfs_ops ext4_attr_ops = {
39153 .show = ext4_attr_show,
39154 .store = ext4_attr_store,
39155 };
39156 diff -urNp linux-2.6.32.43/fs/fcntl.c linux-2.6.32.43/fs/fcntl.c
39157 --- linux-2.6.32.43/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
39158 +++ linux-2.6.32.43/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
39159 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
39160 if (err)
39161 return err;
39162
39163 + if (gr_handle_chroot_fowner(pid, type))
39164 + return -ENOENT;
39165 + if (gr_check_protected_task_fowner(pid, type))
39166 + return -EACCES;
39167 +
39168 f_modown(filp, pid, type, force);
39169 return 0;
39170 }
39171 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
39172 switch (cmd) {
39173 case F_DUPFD:
39174 case F_DUPFD_CLOEXEC:
39175 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39176 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39177 break;
39178 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39179 diff -urNp linux-2.6.32.43/fs/fifo.c linux-2.6.32.43/fs/fifo.c
39180 --- linux-2.6.32.43/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
39181 +++ linux-2.6.32.43/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
39182 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
39183 */
39184 filp->f_op = &read_pipefifo_fops;
39185 pipe->r_counter++;
39186 - if (pipe->readers++ == 0)
39187 + if (atomic_inc_return(&pipe->readers) == 1)
39188 wake_up_partner(inode);
39189
39190 - if (!pipe->writers) {
39191 + if (!atomic_read(&pipe->writers)) {
39192 if ((filp->f_flags & O_NONBLOCK)) {
39193 /* suppress POLLHUP until we have
39194 * seen a writer */
39195 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
39196 * errno=ENXIO when there is no process reading the FIFO.
39197 */
39198 ret = -ENXIO;
39199 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39200 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39201 goto err;
39202
39203 filp->f_op = &write_pipefifo_fops;
39204 pipe->w_counter++;
39205 - if (!pipe->writers++)
39206 + if (atomic_inc_return(&pipe->writers) == 1)
39207 wake_up_partner(inode);
39208
39209 - if (!pipe->readers) {
39210 + if (!atomic_read(&pipe->readers)) {
39211 wait_for_partner(inode, &pipe->r_counter);
39212 if (signal_pending(current))
39213 goto err_wr;
39214 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
39215 */
39216 filp->f_op = &rdwr_pipefifo_fops;
39217
39218 - pipe->readers++;
39219 - pipe->writers++;
39220 + atomic_inc(&pipe->readers);
39221 + atomic_inc(&pipe->writers);
39222 pipe->r_counter++;
39223 pipe->w_counter++;
39224 - if (pipe->readers == 1 || pipe->writers == 1)
39225 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39226 wake_up_partner(inode);
39227 break;
39228
39229 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
39230 return 0;
39231
39232 err_rd:
39233 - if (!--pipe->readers)
39234 + if (atomic_dec_and_test(&pipe->readers))
39235 wake_up_interruptible(&pipe->wait);
39236 ret = -ERESTARTSYS;
39237 goto err;
39238
39239 err_wr:
39240 - if (!--pipe->writers)
39241 + if (atomic_dec_and_test(&pipe->writers))
39242 wake_up_interruptible(&pipe->wait);
39243 ret = -ERESTARTSYS;
39244 goto err;
39245
39246 err:
39247 - if (!pipe->readers && !pipe->writers)
39248 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39249 free_pipe_info(inode);
39250
39251 err_nocleanup:
39252 diff -urNp linux-2.6.32.43/fs/file.c linux-2.6.32.43/fs/file.c
39253 --- linux-2.6.32.43/fs/file.c 2011-03-27 14:31:47.000000000 -0400
39254 +++ linux-2.6.32.43/fs/file.c 2011-04-17 15:56:46.000000000 -0400
39255 @@ -14,6 +14,7 @@
39256 #include <linux/slab.h>
39257 #include <linux/vmalloc.h>
39258 #include <linux/file.h>
39259 +#include <linux/security.h>
39260 #include <linux/fdtable.h>
39261 #include <linux/bitops.h>
39262 #include <linux/interrupt.h>
39263 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
39264 * N.B. For clone tasks sharing a files structure, this test
39265 * will limit the total number of files that can be opened.
39266 */
39267 +
39268 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39269 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39270 return -EMFILE;
39271
39272 diff -urNp linux-2.6.32.43/fs/filesystems.c linux-2.6.32.43/fs/filesystems.c
39273 --- linux-2.6.32.43/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
39274 +++ linux-2.6.32.43/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
39275 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
39276 int len = dot ? dot - name : strlen(name);
39277
39278 fs = __get_fs_type(name, len);
39279 +
39280 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
39281 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39282 +#else
39283 if (!fs && (request_module("%.*s", len, name) == 0))
39284 +#endif
39285 fs = __get_fs_type(name, len);
39286
39287 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39288 diff -urNp linux-2.6.32.43/fs/fscache/cookie.c linux-2.6.32.43/fs/fscache/cookie.c
39289 --- linux-2.6.32.43/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
39290 +++ linux-2.6.32.43/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
39291 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39292 parent ? (char *) parent->def->name : "<no-parent>",
39293 def->name, netfs_data);
39294
39295 - fscache_stat(&fscache_n_acquires);
39296 + fscache_stat_unchecked(&fscache_n_acquires);
39297
39298 /* if there's no parent cookie, then we don't create one here either */
39299 if (!parent) {
39300 - fscache_stat(&fscache_n_acquires_null);
39301 + fscache_stat_unchecked(&fscache_n_acquires_null);
39302 _leave(" [no parent]");
39303 return NULL;
39304 }
39305 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39306 /* allocate and initialise a cookie */
39307 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39308 if (!cookie) {
39309 - fscache_stat(&fscache_n_acquires_oom);
39310 + fscache_stat_unchecked(&fscache_n_acquires_oom);
39311 _leave(" [ENOMEM]");
39312 return NULL;
39313 }
39314 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39315
39316 switch (cookie->def->type) {
39317 case FSCACHE_COOKIE_TYPE_INDEX:
39318 - fscache_stat(&fscache_n_cookie_index);
39319 + fscache_stat_unchecked(&fscache_n_cookie_index);
39320 break;
39321 case FSCACHE_COOKIE_TYPE_DATAFILE:
39322 - fscache_stat(&fscache_n_cookie_data);
39323 + fscache_stat_unchecked(&fscache_n_cookie_data);
39324 break;
39325 default:
39326 - fscache_stat(&fscache_n_cookie_special);
39327 + fscache_stat_unchecked(&fscache_n_cookie_special);
39328 break;
39329 }
39330
39331 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39332 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39333 atomic_dec(&parent->n_children);
39334 __fscache_cookie_put(cookie);
39335 - fscache_stat(&fscache_n_acquires_nobufs);
39336 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39337 _leave(" = NULL");
39338 return NULL;
39339 }
39340 }
39341
39342 - fscache_stat(&fscache_n_acquires_ok);
39343 + fscache_stat_unchecked(&fscache_n_acquires_ok);
39344 _leave(" = %p", cookie);
39345 return cookie;
39346 }
39347 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39348 cache = fscache_select_cache_for_object(cookie->parent);
39349 if (!cache) {
39350 up_read(&fscache_addremove_sem);
39351 - fscache_stat(&fscache_n_acquires_no_cache);
39352 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39353 _leave(" = -ENOMEDIUM [no cache]");
39354 return -ENOMEDIUM;
39355 }
39356 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39357 object = cache->ops->alloc_object(cache, cookie);
39358 fscache_stat_d(&fscache_n_cop_alloc_object);
39359 if (IS_ERR(object)) {
39360 - fscache_stat(&fscache_n_object_no_alloc);
39361 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
39362 ret = PTR_ERR(object);
39363 goto error;
39364 }
39365
39366 - fscache_stat(&fscache_n_object_alloc);
39367 + fscache_stat_unchecked(&fscache_n_object_alloc);
39368
39369 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39370
39371 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39372 struct fscache_object *object;
39373 struct hlist_node *_p;
39374
39375 - fscache_stat(&fscache_n_updates);
39376 + fscache_stat_unchecked(&fscache_n_updates);
39377
39378 if (!cookie) {
39379 - fscache_stat(&fscache_n_updates_null);
39380 + fscache_stat_unchecked(&fscache_n_updates_null);
39381 _leave(" [no cookie]");
39382 return;
39383 }
39384 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39385 struct fscache_object *object;
39386 unsigned long event;
39387
39388 - fscache_stat(&fscache_n_relinquishes);
39389 + fscache_stat_unchecked(&fscache_n_relinquishes);
39390 if (retire)
39391 - fscache_stat(&fscache_n_relinquishes_retire);
39392 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39393
39394 if (!cookie) {
39395 - fscache_stat(&fscache_n_relinquishes_null);
39396 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
39397 _leave(" [no cookie]");
39398 return;
39399 }
39400 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39401
39402 /* wait for the cookie to finish being instantiated (or to fail) */
39403 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39404 - fscache_stat(&fscache_n_relinquishes_waitcrt);
39405 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39406 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39407 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39408 }
39409 diff -urNp linux-2.6.32.43/fs/fscache/internal.h linux-2.6.32.43/fs/fscache/internal.h
39410 --- linux-2.6.32.43/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
39411 +++ linux-2.6.32.43/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
39412 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
39413 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39414 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39415
39416 -extern atomic_t fscache_n_op_pend;
39417 -extern atomic_t fscache_n_op_run;
39418 -extern atomic_t fscache_n_op_enqueue;
39419 -extern atomic_t fscache_n_op_deferred_release;
39420 -extern atomic_t fscache_n_op_release;
39421 -extern atomic_t fscache_n_op_gc;
39422 -extern atomic_t fscache_n_op_cancelled;
39423 -extern atomic_t fscache_n_op_rejected;
39424 -
39425 -extern atomic_t fscache_n_attr_changed;
39426 -extern atomic_t fscache_n_attr_changed_ok;
39427 -extern atomic_t fscache_n_attr_changed_nobufs;
39428 -extern atomic_t fscache_n_attr_changed_nomem;
39429 -extern atomic_t fscache_n_attr_changed_calls;
39430 -
39431 -extern atomic_t fscache_n_allocs;
39432 -extern atomic_t fscache_n_allocs_ok;
39433 -extern atomic_t fscache_n_allocs_wait;
39434 -extern atomic_t fscache_n_allocs_nobufs;
39435 -extern atomic_t fscache_n_allocs_intr;
39436 -extern atomic_t fscache_n_allocs_object_dead;
39437 -extern atomic_t fscache_n_alloc_ops;
39438 -extern atomic_t fscache_n_alloc_op_waits;
39439 -
39440 -extern atomic_t fscache_n_retrievals;
39441 -extern atomic_t fscache_n_retrievals_ok;
39442 -extern atomic_t fscache_n_retrievals_wait;
39443 -extern atomic_t fscache_n_retrievals_nodata;
39444 -extern atomic_t fscache_n_retrievals_nobufs;
39445 -extern atomic_t fscache_n_retrievals_intr;
39446 -extern atomic_t fscache_n_retrievals_nomem;
39447 -extern atomic_t fscache_n_retrievals_object_dead;
39448 -extern atomic_t fscache_n_retrieval_ops;
39449 -extern atomic_t fscache_n_retrieval_op_waits;
39450 -
39451 -extern atomic_t fscache_n_stores;
39452 -extern atomic_t fscache_n_stores_ok;
39453 -extern atomic_t fscache_n_stores_again;
39454 -extern atomic_t fscache_n_stores_nobufs;
39455 -extern atomic_t fscache_n_stores_oom;
39456 -extern atomic_t fscache_n_store_ops;
39457 -extern atomic_t fscache_n_store_calls;
39458 -extern atomic_t fscache_n_store_pages;
39459 -extern atomic_t fscache_n_store_radix_deletes;
39460 -extern atomic_t fscache_n_store_pages_over_limit;
39461 -
39462 -extern atomic_t fscache_n_store_vmscan_not_storing;
39463 -extern atomic_t fscache_n_store_vmscan_gone;
39464 -extern atomic_t fscache_n_store_vmscan_busy;
39465 -extern atomic_t fscache_n_store_vmscan_cancelled;
39466 -
39467 -extern atomic_t fscache_n_marks;
39468 -extern atomic_t fscache_n_uncaches;
39469 -
39470 -extern atomic_t fscache_n_acquires;
39471 -extern atomic_t fscache_n_acquires_null;
39472 -extern atomic_t fscache_n_acquires_no_cache;
39473 -extern atomic_t fscache_n_acquires_ok;
39474 -extern atomic_t fscache_n_acquires_nobufs;
39475 -extern atomic_t fscache_n_acquires_oom;
39476 -
39477 -extern atomic_t fscache_n_updates;
39478 -extern atomic_t fscache_n_updates_null;
39479 -extern atomic_t fscache_n_updates_run;
39480 -
39481 -extern atomic_t fscache_n_relinquishes;
39482 -extern atomic_t fscache_n_relinquishes_null;
39483 -extern atomic_t fscache_n_relinquishes_waitcrt;
39484 -extern atomic_t fscache_n_relinquishes_retire;
39485 -
39486 -extern atomic_t fscache_n_cookie_index;
39487 -extern atomic_t fscache_n_cookie_data;
39488 -extern atomic_t fscache_n_cookie_special;
39489 -
39490 -extern atomic_t fscache_n_object_alloc;
39491 -extern atomic_t fscache_n_object_no_alloc;
39492 -extern atomic_t fscache_n_object_lookups;
39493 -extern atomic_t fscache_n_object_lookups_negative;
39494 -extern atomic_t fscache_n_object_lookups_positive;
39495 -extern atomic_t fscache_n_object_lookups_timed_out;
39496 -extern atomic_t fscache_n_object_created;
39497 -extern atomic_t fscache_n_object_avail;
39498 -extern atomic_t fscache_n_object_dead;
39499 -
39500 -extern atomic_t fscache_n_checkaux_none;
39501 -extern atomic_t fscache_n_checkaux_okay;
39502 -extern atomic_t fscache_n_checkaux_update;
39503 -extern atomic_t fscache_n_checkaux_obsolete;
39504 +extern atomic_unchecked_t fscache_n_op_pend;
39505 +extern atomic_unchecked_t fscache_n_op_run;
39506 +extern atomic_unchecked_t fscache_n_op_enqueue;
39507 +extern atomic_unchecked_t fscache_n_op_deferred_release;
39508 +extern atomic_unchecked_t fscache_n_op_release;
39509 +extern atomic_unchecked_t fscache_n_op_gc;
39510 +extern atomic_unchecked_t fscache_n_op_cancelled;
39511 +extern atomic_unchecked_t fscache_n_op_rejected;
39512 +
39513 +extern atomic_unchecked_t fscache_n_attr_changed;
39514 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
39515 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39516 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39517 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
39518 +
39519 +extern atomic_unchecked_t fscache_n_allocs;
39520 +extern atomic_unchecked_t fscache_n_allocs_ok;
39521 +extern atomic_unchecked_t fscache_n_allocs_wait;
39522 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
39523 +extern atomic_unchecked_t fscache_n_allocs_intr;
39524 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
39525 +extern atomic_unchecked_t fscache_n_alloc_ops;
39526 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
39527 +
39528 +extern atomic_unchecked_t fscache_n_retrievals;
39529 +extern atomic_unchecked_t fscache_n_retrievals_ok;
39530 +extern atomic_unchecked_t fscache_n_retrievals_wait;
39531 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
39532 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39533 +extern atomic_unchecked_t fscache_n_retrievals_intr;
39534 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
39535 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39536 +extern atomic_unchecked_t fscache_n_retrieval_ops;
39537 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39538 +
39539 +extern atomic_unchecked_t fscache_n_stores;
39540 +extern atomic_unchecked_t fscache_n_stores_ok;
39541 +extern atomic_unchecked_t fscache_n_stores_again;
39542 +extern atomic_unchecked_t fscache_n_stores_nobufs;
39543 +extern atomic_unchecked_t fscache_n_stores_oom;
39544 +extern atomic_unchecked_t fscache_n_store_ops;
39545 +extern atomic_unchecked_t fscache_n_store_calls;
39546 +extern atomic_unchecked_t fscache_n_store_pages;
39547 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
39548 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39549 +
39550 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39551 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39552 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39553 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39554 +
39555 +extern atomic_unchecked_t fscache_n_marks;
39556 +extern atomic_unchecked_t fscache_n_uncaches;
39557 +
39558 +extern atomic_unchecked_t fscache_n_acquires;
39559 +extern atomic_unchecked_t fscache_n_acquires_null;
39560 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
39561 +extern atomic_unchecked_t fscache_n_acquires_ok;
39562 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
39563 +extern atomic_unchecked_t fscache_n_acquires_oom;
39564 +
39565 +extern atomic_unchecked_t fscache_n_updates;
39566 +extern atomic_unchecked_t fscache_n_updates_null;
39567 +extern atomic_unchecked_t fscache_n_updates_run;
39568 +
39569 +extern atomic_unchecked_t fscache_n_relinquishes;
39570 +extern atomic_unchecked_t fscache_n_relinquishes_null;
39571 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39572 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
39573 +
39574 +extern atomic_unchecked_t fscache_n_cookie_index;
39575 +extern atomic_unchecked_t fscache_n_cookie_data;
39576 +extern atomic_unchecked_t fscache_n_cookie_special;
39577 +
39578 +extern atomic_unchecked_t fscache_n_object_alloc;
39579 +extern atomic_unchecked_t fscache_n_object_no_alloc;
39580 +extern atomic_unchecked_t fscache_n_object_lookups;
39581 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
39582 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
39583 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39584 +extern atomic_unchecked_t fscache_n_object_created;
39585 +extern atomic_unchecked_t fscache_n_object_avail;
39586 +extern atomic_unchecked_t fscache_n_object_dead;
39587 +
39588 +extern atomic_unchecked_t fscache_n_checkaux_none;
39589 +extern atomic_unchecked_t fscache_n_checkaux_okay;
39590 +extern atomic_unchecked_t fscache_n_checkaux_update;
39591 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39592
39593 extern atomic_t fscache_n_cop_alloc_object;
39594 extern atomic_t fscache_n_cop_lookup_object;
39595 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
39596 atomic_inc(stat);
39597 }
39598
39599 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39600 +{
39601 + atomic_inc_unchecked(stat);
39602 +}
39603 +
39604 static inline void fscache_stat_d(atomic_t *stat)
39605 {
39606 atomic_dec(stat);
39607 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
39608
39609 #define __fscache_stat(stat) (NULL)
39610 #define fscache_stat(stat) do {} while (0)
39611 +#define fscache_stat_unchecked(stat) do {} while (0)
39612 #define fscache_stat_d(stat) do {} while (0)
39613 #endif
39614
39615 diff -urNp linux-2.6.32.43/fs/fscache/object.c linux-2.6.32.43/fs/fscache/object.c
39616 --- linux-2.6.32.43/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
39617 +++ linux-2.6.32.43/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
39618 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
39619 /* update the object metadata on disk */
39620 case FSCACHE_OBJECT_UPDATING:
39621 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39622 - fscache_stat(&fscache_n_updates_run);
39623 + fscache_stat_unchecked(&fscache_n_updates_run);
39624 fscache_stat(&fscache_n_cop_update_object);
39625 object->cache->ops->update_object(object);
39626 fscache_stat_d(&fscache_n_cop_update_object);
39627 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
39628 spin_lock(&object->lock);
39629 object->state = FSCACHE_OBJECT_DEAD;
39630 spin_unlock(&object->lock);
39631 - fscache_stat(&fscache_n_object_dead);
39632 + fscache_stat_unchecked(&fscache_n_object_dead);
39633 goto terminal_transit;
39634
39635 /* handle the parent cache of this object being withdrawn from
39636 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
39637 spin_lock(&object->lock);
39638 object->state = FSCACHE_OBJECT_DEAD;
39639 spin_unlock(&object->lock);
39640 - fscache_stat(&fscache_n_object_dead);
39641 + fscache_stat_unchecked(&fscache_n_object_dead);
39642 goto terminal_transit;
39643
39644 /* complain about the object being woken up once it is
39645 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
39646 parent->cookie->def->name, cookie->def->name,
39647 object->cache->tag->name);
39648
39649 - fscache_stat(&fscache_n_object_lookups);
39650 + fscache_stat_unchecked(&fscache_n_object_lookups);
39651 fscache_stat(&fscache_n_cop_lookup_object);
39652 ret = object->cache->ops->lookup_object(object);
39653 fscache_stat_d(&fscache_n_cop_lookup_object);
39654 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
39655 if (ret == -ETIMEDOUT) {
39656 /* probably stuck behind another object, so move this one to
39657 * the back of the queue */
39658 - fscache_stat(&fscache_n_object_lookups_timed_out);
39659 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39660 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39661 }
39662
39663 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
39664
39665 spin_lock(&object->lock);
39666 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39667 - fscache_stat(&fscache_n_object_lookups_negative);
39668 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39669
39670 /* transit here to allow write requests to begin stacking up
39671 * and read requests to begin returning ENODATA */
39672 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
39673 * result, in which case there may be data available */
39674 spin_lock(&object->lock);
39675 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39676 - fscache_stat(&fscache_n_object_lookups_positive);
39677 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39678
39679 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39680
39681 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
39682 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39683 } else {
39684 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39685 - fscache_stat(&fscache_n_object_created);
39686 + fscache_stat_unchecked(&fscache_n_object_created);
39687
39688 object->state = FSCACHE_OBJECT_AVAILABLE;
39689 spin_unlock(&object->lock);
39690 @@ -633,7 +633,7 @@ static void fscache_object_available(str
39691 fscache_enqueue_dependents(object);
39692
39693 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39694 - fscache_stat(&fscache_n_object_avail);
39695 + fscache_stat_unchecked(&fscache_n_object_avail);
39696
39697 _leave("");
39698 }
39699 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39700 enum fscache_checkaux result;
39701
39702 if (!object->cookie->def->check_aux) {
39703 - fscache_stat(&fscache_n_checkaux_none);
39704 + fscache_stat_unchecked(&fscache_n_checkaux_none);
39705 return FSCACHE_CHECKAUX_OKAY;
39706 }
39707
39708 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39709 switch (result) {
39710 /* entry okay as is */
39711 case FSCACHE_CHECKAUX_OKAY:
39712 - fscache_stat(&fscache_n_checkaux_okay);
39713 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
39714 break;
39715
39716 /* entry requires update */
39717 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39718 - fscache_stat(&fscache_n_checkaux_update);
39719 + fscache_stat_unchecked(&fscache_n_checkaux_update);
39720 break;
39721
39722 /* entry requires deletion */
39723 case FSCACHE_CHECKAUX_OBSOLETE:
39724 - fscache_stat(&fscache_n_checkaux_obsolete);
39725 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39726 break;
39727
39728 default:
39729 diff -urNp linux-2.6.32.43/fs/fscache/operation.c linux-2.6.32.43/fs/fscache/operation.c
39730 --- linux-2.6.32.43/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
39731 +++ linux-2.6.32.43/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
39732 @@ -16,7 +16,7 @@
39733 #include <linux/seq_file.h>
39734 #include "internal.h"
39735
39736 -atomic_t fscache_op_debug_id;
39737 +atomic_unchecked_t fscache_op_debug_id;
39738 EXPORT_SYMBOL(fscache_op_debug_id);
39739
39740 /**
39741 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
39742 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39743 ASSERTCMP(atomic_read(&op->usage), >, 0);
39744
39745 - fscache_stat(&fscache_n_op_enqueue);
39746 + fscache_stat_unchecked(&fscache_n_op_enqueue);
39747 switch (op->flags & FSCACHE_OP_TYPE) {
39748 case FSCACHE_OP_FAST:
39749 _debug("queue fast");
39750 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39751 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39752 if (op->processor)
39753 fscache_enqueue_operation(op);
39754 - fscache_stat(&fscache_n_op_run);
39755 + fscache_stat_unchecked(&fscache_n_op_run);
39756 }
39757
39758 /*
39759 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39760 if (object->n_ops > 0) {
39761 atomic_inc(&op->usage);
39762 list_add_tail(&op->pend_link, &object->pending_ops);
39763 - fscache_stat(&fscache_n_op_pend);
39764 + fscache_stat_unchecked(&fscache_n_op_pend);
39765 } else if (!list_empty(&object->pending_ops)) {
39766 atomic_inc(&op->usage);
39767 list_add_tail(&op->pend_link, &object->pending_ops);
39768 - fscache_stat(&fscache_n_op_pend);
39769 + fscache_stat_unchecked(&fscache_n_op_pend);
39770 fscache_start_operations(object);
39771 } else {
39772 ASSERTCMP(object->n_in_progress, ==, 0);
39773 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39774 object->n_exclusive++; /* reads and writes must wait */
39775 atomic_inc(&op->usage);
39776 list_add_tail(&op->pend_link, &object->pending_ops);
39777 - fscache_stat(&fscache_n_op_pend);
39778 + fscache_stat_unchecked(&fscache_n_op_pend);
39779 ret = 0;
39780 } else {
39781 /* not allowed to submit ops in any other state */
39782 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39783 if (object->n_exclusive > 0) {
39784 atomic_inc(&op->usage);
39785 list_add_tail(&op->pend_link, &object->pending_ops);
39786 - fscache_stat(&fscache_n_op_pend);
39787 + fscache_stat_unchecked(&fscache_n_op_pend);
39788 } else if (!list_empty(&object->pending_ops)) {
39789 atomic_inc(&op->usage);
39790 list_add_tail(&op->pend_link, &object->pending_ops);
39791 - fscache_stat(&fscache_n_op_pend);
39792 + fscache_stat_unchecked(&fscache_n_op_pend);
39793 fscache_start_operations(object);
39794 } else {
39795 ASSERTCMP(object->n_exclusive, ==, 0);
39796 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39797 object->n_ops++;
39798 atomic_inc(&op->usage);
39799 list_add_tail(&op->pend_link, &object->pending_ops);
39800 - fscache_stat(&fscache_n_op_pend);
39801 + fscache_stat_unchecked(&fscache_n_op_pend);
39802 ret = 0;
39803 } else if (object->state == FSCACHE_OBJECT_DYING ||
39804 object->state == FSCACHE_OBJECT_LC_DYING ||
39805 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39806 - fscache_stat(&fscache_n_op_rejected);
39807 + fscache_stat_unchecked(&fscache_n_op_rejected);
39808 ret = -ENOBUFS;
39809 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39810 fscache_report_unexpected_submission(object, op, ostate);
39811 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39812
39813 ret = -EBUSY;
39814 if (!list_empty(&op->pend_link)) {
39815 - fscache_stat(&fscache_n_op_cancelled);
39816 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39817 list_del_init(&op->pend_link);
39818 object->n_ops--;
39819 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39820 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39821 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39822 BUG();
39823
39824 - fscache_stat(&fscache_n_op_release);
39825 + fscache_stat_unchecked(&fscache_n_op_release);
39826
39827 if (op->release) {
39828 op->release(op);
39829 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39830 * lock, and defer it otherwise */
39831 if (!spin_trylock(&object->lock)) {
39832 _debug("defer put");
39833 - fscache_stat(&fscache_n_op_deferred_release);
39834 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39835
39836 cache = object->cache;
39837 spin_lock(&cache->op_gc_list_lock);
39838 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39839
39840 _debug("GC DEFERRED REL OBJ%x OP%x",
39841 object->debug_id, op->debug_id);
39842 - fscache_stat(&fscache_n_op_gc);
39843 + fscache_stat_unchecked(&fscache_n_op_gc);
39844
39845 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39846
39847 diff -urNp linux-2.6.32.43/fs/fscache/page.c linux-2.6.32.43/fs/fscache/page.c
39848 --- linux-2.6.32.43/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39849 +++ linux-2.6.32.43/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39850 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39851 val = radix_tree_lookup(&cookie->stores, page->index);
39852 if (!val) {
39853 rcu_read_unlock();
39854 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39855 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39856 __fscache_uncache_page(cookie, page);
39857 return true;
39858 }
39859 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39860 spin_unlock(&cookie->stores_lock);
39861
39862 if (xpage) {
39863 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39864 - fscache_stat(&fscache_n_store_radix_deletes);
39865 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39866 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39867 ASSERTCMP(xpage, ==, page);
39868 } else {
39869 - fscache_stat(&fscache_n_store_vmscan_gone);
39870 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39871 }
39872
39873 wake_up_bit(&cookie->flags, 0);
39874 @@ -106,7 +106,7 @@ page_busy:
39875 /* we might want to wait here, but that could deadlock the allocator as
39876 * the slow-work threads writing to the cache may all end up sleeping
39877 * on memory allocation */
39878 - fscache_stat(&fscache_n_store_vmscan_busy);
39879 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39880 return false;
39881 }
39882 EXPORT_SYMBOL(__fscache_maybe_release_page);
39883 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39884 FSCACHE_COOKIE_STORING_TAG);
39885 if (!radix_tree_tag_get(&cookie->stores, page->index,
39886 FSCACHE_COOKIE_PENDING_TAG)) {
39887 - fscache_stat(&fscache_n_store_radix_deletes);
39888 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39889 xpage = radix_tree_delete(&cookie->stores, page->index);
39890 }
39891 spin_unlock(&cookie->stores_lock);
39892 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39893
39894 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39895
39896 - fscache_stat(&fscache_n_attr_changed_calls);
39897 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39898
39899 if (fscache_object_is_active(object)) {
39900 fscache_set_op_state(op, "CallFS");
39901 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39902
39903 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39904
39905 - fscache_stat(&fscache_n_attr_changed);
39906 + fscache_stat_unchecked(&fscache_n_attr_changed);
39907
39908 op = kzalloc(sizeof(*op), GFP_KERNEL);
39909 if (!op) {
39910 - fscache_stat(&fscache_n_attr_changed_nomem);
39911 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39912 _leave(" = -ENOMEM");
39913 return -ENOMEM;
39914 }
39915 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39916 if (fscache_submit_exclusive_op(object, op) < 0)
39917 goto nobufs;
39918 spin_unlock(&cookie->lock);
39919 - fscache_stat(&fscache_n_attr_changed_ok);
39920 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39921 fscache_put_operation(op);
39922 _leave(" = 0");
39923 return 0;
39924 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39925 nobufs:
39926 spin_unlock(&cookie->lock);
39927 kfree(op);
39928 - fscache_stat(&fscache_n_attr_changed_nobufs);
39929 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39930 _leave(" = %d", -ENOBUFS);
39931 return -ENOBUFS;
39932 }
39933 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39934 /* allocate a retrieval operation and attempt to submit it */
39935 op = kzalloc(sizeof(*op), GFP_NOIO);
39936 if (!op) {
39937 - fscache_stat(&fscache_n_retrievals_nomem);
39938 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39939 return NULL;
39940 }
39941
39942 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39943 return 0;
39944 }
39945
39946 - fscache_stat(&fscache_n_retrievals_wait);
39947 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39948
39949 jif = jiffies;
39950 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39951 fscache_wait_bit_interruptible,
39952 TASK_INTERRUPTIBLE) != 0) {
39953 - fscache_stat(&fscache_n_retrievals_intr);
39954 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39955 _leave(" = -ERESTARTSYS");
39956 return -ERESTARTSYS;
39957 }
39958 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39959 */
39960 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39961 struct fscache_retrieval *op,
39962 - atomic_t *stat_op_waits,
39963 - atomic_t *stat_object_dead)
39964 + atomic_unchecked_t *stat_op_waits,
39965 + atomic_unchecked_t *stat_object_dead)
39966 {
39967 int ret;
39968
39969 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39970 goto check_if_dead;
39971
39972 _debug(">>> WT");
39973 - fscache_stat(stat_op_waits);
39974 + fscache_stat_unchecked(stat_op_waits);
39975 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39976 fscache_wait_bit_interruptible,
39977 TASK_INTERRUPTIBLE) < 0) {
39978 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39979
39980 check_if_dead:
39981 if (unlikely(fscache_object_is_dead(object))) {
39982 - fscache_stat(stat_object_dead);
39983 + fscache_stat_unchecked(stat_object_dead);
39984 return -ENOBUFS;
39985 }
39986 return 0;
39987 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39988
39989 _enter("%p,%p,,,", cookie, page);
39990
39991 - fscache_stat(&fscache_n_retrievals);
39992 + fscache_stat_unchecked(&fscache_n_retrievals);
39993
39994 if (hlist_empty(&cookie->backing_objects))
39995 goto nobufs;
39996 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39997 goto nobufs_unlock;
39998 spin_unlock(&cookie->lock);
39999
40000 - fscache_stat(&fscache_n_retrieval_ops);
40001 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
40002
40003 /* pin the netfs read context in case we need to do the actual netfs
40004 * read because we've encountered a cache read failure */
40005 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
40006
40007 error:
40008 if (ret == -ENOMEM)
40009 - fscache_stat(&fscache_n_retrievals_nomem);
40010 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40011 else if (ret == -ERESTARTSYS)
40012 - fscache_stat(&fscache_n_retrievals_intr);
40013 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
40014 else if (ret == -ENODATA)
40015 - fscache_stat(&fscache_n_retrievals_nodata);
40016 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40017 else if (ret < 0)
40018 - fscache_stat(&fscache_n_retrievals_nobufs);
40019 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40020 else
40021 - fscache_stat(&fscache_n_retrievals_ok);
40022 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
40023
40024 fscache_put_retrieval(op);
40025 _leave(" = %d", ret);
40026 @@ -453,7 +453,7 @@ nobufs_unlock:
40027 spin_unlock(&cookie->lock);
40028 kfree(op);
40029 nobufs:
40030 - fscache_stat(&fscache_n_retrievals_nobufs);
40031 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40032 _leave(" = -ENOBUFS");
40033 return -ENOBUFS;
40034 }
40035 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
40036
40037 _enter("%p,,%d,,,", cookie, *nr_pages);
40038
40039 - fscache_stat(&fscache_n_retrievals);
40040 + fscache_stat_unchecked(&fscache_n_retrievals);
40041
40042 if (hlist_empty(&cookie->backing_objects))
40043 goto nobufs;
40044 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
40045 goto nobufs_unlock;
40046 spin_unlock(&cookie->lock);
40047
40048 - fscache_stat(&fscache_n_retrieval_ops);
40049 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
40050
40051 /* pin the netfs read context in case we need to do the actual netfs
40052 * read because we've encountered a cache read failure */
40053 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
40054
40055 error:
40056 if (ret == -ENOMEM)
40057 - fscache_stat(&fscache_n_retrievals_nomem);
40058 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40059 else if (ret == -ERESTARTSYS)
40060 - fscache_stat(&fscache_n_retrievals_intr);
40061 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
40062 else if (ret == -ENODATA)
40063 - fscache_stat(&fscache_n_retrievals_nodata);
40064 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40065 else if (ret < 0)
40066 - fscache_stat(&fscache_n_retrievals_nobufs);
40067 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40068 else
40069 - fscache_stat(&fscache_n_retrievals_ok);
40070 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
40071
40072 fscache_put_retrieval(op);
40073 _leave(" = %d", ret);
40074 @@ -570,7 +570,7 @@ nobufs_unlock:
40075 spin_unlock(&cookie->lock);
40076 kfree(op);
40077 nobufs:
40078 - fscache_stat(&fscache_n_retrievals_nobufs);
40079 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40080 _leave(" = -ENOBUFS");
40081 return -ENOBUFS;
40082 }
40083 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
40084
40085 _enter("%p,%p,,,", cookie, page);
40086
40087 - fscache_stat(&fscache_n_allocs);
40088 + fscache_stat_unchecked(&fscache_n_allocs);
40089
40090 if (hlist_empty(&cookie->backing_objects))
40091 goto nobufs;
40092 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
40093 goto nobufs_unlock;
40094 spin_unlock(&cookie->lock);
40095
40096 - fscache_stat(&fscache_n_alloc_ops);
40097 + fscache_stat_unchecked(&fscache_n_alloc_ops);
40098
40099 ret = fscache_wait_for_retrieval_activation(
40100 object, op,
40101 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
40102
40103 error:
40104 if (ret == -ERESTARTSYS)
40105 - fscache_stat(&fscache_n_allocs_intr);
40106 + fscache_stat_unchecked(&fscache_n_allocs_intr);
40107 else if (ret < 0)
40108 - fscache_stat(&fscache_n_allocs_nobufs);
40109 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40110 else
40111 - fscache_stat(&fscache_n_allocs_ok);
40112 + fscache_stat_unchecked(&fscache_n_allocs_ok);
40113
40114 fscache_put_retrieval(op);
40115 _leave(" = %d", ret);
40116 @@ -651,7 +651,7 @@ nobufs_unlock:
40117 spin_unlock(&cookie->lock);
40118 kfree(op);
40119 nobufs:
40120 - fscache_stat(&fscache_n_allocs_nobufs);
40121 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40122 _leave(" = -ENOBUFS");
40123 return -ENOBUFS;
40124 }
40125 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
40126
40127 spin_lock(&cookie->stores_lock);
40128
40129 - fscache_stat(&fscache_n_store_calls);
40130 + fscache_stat_unchecked(&fscache_n_store_calls);
40131
40132 /* find a page to store */
40133 page = NULL;
40134 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
40135 page = results[0];
40136 _debug("gang %d [%lx]", n, page->index);
40137 if (page->index > op->store_limit) {
40138 - fscache_stat(&fscache_n_store_pages_over_limit);
40139 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40140 goto superseded;
40141 }
40142
40143 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
40144
40145 if (page) {
40146 fscache_set_op_state(&op->op, "Store");
40147 - fscache_stat(&fscache_n_store_pages);
40148 + fscache_stat_unchecked(&fscache_n_store_pages);
40149 fscache_stat(&fscache_n_cop_write_page);
40150 ret = object->cache->ops->write_page(op, page);
40151 fscache_stat_d(&fscache_n_cop_write_page);
40152 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
40153 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40154 ASSERT(PageFsCache(page));
40155
40156 - fscache_stat(&fscache_n_stores);
40157 + fscache_stat_unchecked(&fscache_n_stores);
40158
40159 op = kzalloc(sizeof(*op), GFP_NOIO);
40160 if (!op)
40161 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
40162 spin_unlock(&cookie->stores_lock);
40163 spin_unlock(&object->lock);
40164
40165 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40166 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40167 op->store_limit = object->store_limit;
40168
40169 if (fscache_submit_op(object, &op->op) < 0)
40170 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
40171
40172 spin_unlock(&cookie->lock);
40173 radix_tree_preload_end();
40174 - fscache_stat(&fscache_n_store_ops);
40175 - fscache_stat(&fscache_n_stores_ok);
40176 + fscache_stat_unchecked(&fscache_n_store_ops);
40177 + fscache_stat_unchecked(&fscache_n_stores_ok);
40178
40179 /* the slow work queue now carries its own ref on the object */
40180 fscache_put_operation(&op->op);
40181 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
40182 return 0;
40183
40184 already_queued:
40185 - fscache_stat(&fscache_n_stores_again);
40186 + fscache_stat_unchecked(&fscache_n_stores_again);
40187 already_pending:
40188 spin_unlock(&cookie->stores_lock);
40189 spin_unlock(&object->lock);
40190 spin_unlock(&cookie->lock);
40191 radix_tree_preload_end();
40192 kfree(op);
40193 - fscache_stat(&fscache_n_stores_ok);
40194 + fscache_stat_unchecked(&fscache_n_stores_ok);
40195 _leave(" = 0");
40196 return 0;
40197
40198 @@ -886,14 +886,14 @@ nobufs:
40199 spin_unlock(&cookie->lock);
40200 radix_tree_preload_end();
40201 kfree(op);
40202 - fscache_stat(&fscache_n_stores_nobufs);
40203 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
40204 _leave(" = -ENOBUFS");
40205 return -ENOBUFS;
40206
40207 nomem_free:
40208 kfree(op);
40209 nomem:
40210 - fscache_stat(&fscache_n_stores_oom);
40211 + fscache_stat_unchecked(&fscache_n_stores_oom);
40212 _leave(" = -ENOMEM");
40213 return -ENOMEM;
40214 }
40215 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
40216 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40217 ASSERTCMP(page, !=, NULL);
40218
40219 - fscache_stat(&fscache_n_uncaches);
40220 + fscache_stat_unchecked(&fscache_n_uncaches);
40221
40222 /* cache withdrawal may beat us to it */
40223 if (!PageFsCache(page))
40224 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
40225 unsigned long loop;
40226
40227 #ifdef CONFIG_FSCACHE_STATS
40228 - atomic_add(pagevec->nr, &fscache_n_marks);
40229 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40230 #endif
40231
40232 for (loop = 0; loop < pagevec->nr; loop++) {
40233 diff -urNp linux-2.6.32.43/fs/fscache/stats.c linux-2.6.32.43/fs/fscache/stats.c
40234 --- linux-2.6.32.43/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
40235 +++ linux-2.6.32.43/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
40236 @@ -18,95 +18,95 @@
40237 /*
40238 * operation counters
40239 */
40240 -atomic_t fscache_n_op_pend;
40241 -atomic_t fscache_n_op_run;
40242 -atomic_t fscache_n_op_enqueue;
40243 -atomic_t fscache_n_op_requeue;
40244 -atomic_t fscache_n_op_deferred_release;
40245 -atomic_t fscache_n_op_release;
40246 -atomic_t fscache_n_op_gc;
40247 -atomic_t fscache_n_op_cancelled;
40248 -atomic_t fscache_n_op_rejected;
40249 -
40250 -atomic_t fscache_n_attr_changed;
40251 -atomic_t fscache_n_attr_changed_ok;
40252 -atomic_t fscache_n_attr_changed_nobufs;
40253 -atomic_t fscache_n_attr_changed_nomem;
40254 -atomic_t fscache_n_attr_changed_calls;
40255 -
40256 -atomic_t fscache_n_allocs;
40257 -atomic_t fscache_n_allocs_ok;
40258 -atomic_t fscache_n_allocs_wait;
40259 -atomic_t fscache_n_allocs_nobufs;
40260 -atomic_t fscache_n_allocs_intr;
40261 -atomic_t fscache_n_allocs_object_dead;
40262 -atomic_t fscache_n_alloc_ops;
40263 -atomic_t fscache_n_alloc_op_waits;
40264 -
40265 -atomic_t fscache_n_retrievals;
40266 -atomic_t fscache_n_retrievals_ok;
40267 -atomic_t fscache_n_retrievals_wait;
40268 -atomic_t fscache_n_retrievals_nodata;
40269 -atomic_t fscache_n_retrievals_nobufs;
40270 -atomic_t fscache_n_retrievals_intr;
40271 -atomic_t fscache_n_retrievals_nomem;
40272 -atomic_t fscache_n_retrievals_object_dead;
40273 -atomic_t fscache_n_retrieval_ops;
40274 -atomic_t fscache_n_retrieval_op_waits;
40275 -
40276 -atomic_t fscache_n_stores;
40277 -atomic_t fscache_n_stores_ok;
40278 -atomic_t fscache_n_stores_again;
40279 -atomic_t fscache_n_stores_nobufs;
40280 -atomic_t fscache_n_stores_oom;
40281 -atomic_t fscache_n_store_ops;
40282 -atomic_t fscache_n_store_calls;
40283 -atomic_t fscache_n_store_pages;
40284 -atomic_t fscache_n_store_radix_deletes;
40285 -atomic_t fscache_n_store_pages_over_limit;
40286 -
40287 -atomic_t fscache_n_store_vmscan_not_storing;
40288 -atomic_t fscache_n_store_vmscan_gone;
40289 -atomic_t fscache_n_store_vmscan_busy;
40290 -atomic_t fscache_n_store_vmscan_cancelled;
40291 -
40292 -atomic_t fscache_n_marks;
40293 -atomic_t fscache_n_uncaches;
40294 -
40295 -atomic_t fscache_n_acquires;
40296 -atomic_t fscache_n_acquires_null;
40297 -atomic_t fscache_n_acquires_no_cache;
40298 -atomic_t fscache_n_acquires_ok;
40299 -atomic_t fscache_n_acquires_nobufs;
40300 -atomic_t fscache_n_acquires_oom;
40301 -
40302 -atomic_t fscache_n_updates;
40303 -atomic_t fscache_n_updates_null;
40304 -atomic_t fscache_n_updates_run;
40305 -
40306 -atomic_t fscache_n_relinquishes;
40307 -atomic_t fscache_n_relinquishes_null;
40308 -atomic_t fscache_n_relinquishes_waitcrt;
40309 -atomic_t fscache_n_relinquishes_retire;
40310 -
40311 -atomic_t fscache_n_cookie_index;
40312 -atomic_t fscache_n_cookie_data;
40313 -atomic_t fscache_n_cookie_special;
40314 -
40315 -atomic_t fscache_n_object_alloc;
40316 -atomic_t fscache_n_object_no_alloc;
40317 -atomic_t fscache_n_object_lookups;
40318 -atomic_t fscache_n_object_lookups_negative;
40319 -atomic_t fscache_n_object_lookups_positive;
40320 -atomic_t fscache_n_object_lookups_timed_out;
40321 -atomic_t fscache_n_object_created;
40322 -atomic_t fscache_n_object_avail;
40323 -atomic_t fscache_n_object_dead;
40324 -
40325 -atomic_t fscache_n_checkaux_none;
40326 -atomic_t fscache_n_checkaux_okay;
40327 -atomic_t fscache_n_checkaux_update;
40328 -atomic_t fscache_n_checkaux_obsolete;
40329 +atomic_unchecked_t fscache_n_op_pend;
40330 +atomic_unchecked_t fscache_n_op_run;
40331 +atomic_unchecked_t fscache_n_op_enqueue;
40332 +atomic_unchecked_t fscache_n_op_requeue;
40333 +atomic_unchecked_t fscache_n_op_deferred_release;
40334 +atomic_unchecked_t fscache_n_op_release;
40335 +atomic_unchecked_t fscache_n_op_gc;
40336 +atomic_unchecked_t fscache_n_op_cancelled;
40337 +atomic_unchecked_t fscache_n_op_rejected;
40338 +
40339 +atomic_unchecked_t fscache_n_attr_changed;
40340 +atomic_unchecked_t fscache_n_attr_changed_ok;
40341 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
40342 +atomic_unchecked_t fscache_n_attr_changed_nomem;
40343 +atomic_unchecked_t fscache_n_attr_changed_calls;
40344 +
40345 +atomic_unchecked_t fscache_n_allocs;
40346 +atomic_unchecked_t fscache_n_allocs_ok;
40347 +atomic_unchecked_t fscache_n_allocs_wait;
40348 +atomic_unchecked_t fscache_n_allocs_nobufs;
40349 +atomic_unchecked_t fscache_n_allocs_intr;
40350 +atomic_unchecked_t fscache_n_allocs_object_dead;
40351 +atomic_unchecked_t fscache_n_alloc_ops;
40352 +atomic_unchecked_t fscache_n_alloc_op_waits;
40353 +
40354 +atomic_unchecked_t fscache_n_retrievals;
40355 +atomic_unchecked_t fscache_n_retrievals_ok;
40356 +atomic_unchecked_t fscache_n_retrievals_wait;
40357 +atomic_unchecked_t fscache_n_retrievals_nodata;
40358 +atomic_unchecked_t fscache_n_retrievals_nobufs;
40359 +atomic_unchecked_t fscache_n_retrievals_intr;
40360 +atomic_unchecked_t fscache_n_retrievals_nomem;
40361 +atomic_unchecked_t fscache_n_retrievals_object_dead;
40362 +atomic_unchecked_t fscache_n_retrieval_ops;
40363 +atomic_unchecked_t fscache_n_retrieval_op_waits;
40364 +
40365 +atomic_unchecked_t fscache_n_stores;
40366 +atomic_unchecked_t fscache_n_stores_ok;
40367 +atomic_unchecked_t fscache_n_stores_again;
40368 +atomic_unchecked_t fscache_n_stores_nobufs;
40369 +atomic_unchecked_t fscache_n_stores_oom;
40370 +atomic_unchecked_t fscache_n_store_ops;
40371 +atomic_unchecked_t fscache_n_store_calls;
40372 +atomic_unchecked_t fscache_n_store_pages;
40373 +atomic_unchecked_t fscache_n_store_radix_deletes;
40374 +atomic_unchecked_t fscache_n_store_pages_over_limit;
40375 +
40376 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40377 +atomic_unchecked_t fscache_n_store_vmscan_gone;
40378 +atomic_unchecked_t fscache_n_store_vmscan_busy;
40379 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40380 +
40381 +atomic_unchecked_t fscache_n_marks;
40382 +atomic_unchecked_t fscache_n_uncaches;
40383 +
40384 +atomic_unchecked_t fscache_n_acquires;
40385 +atomic_unchecked_t fscache_n_acquires_null;
40386 +atomic_unchecked_t fscache_n_acquires_no_cache;
40387 +atomic_unchecked_t fscache_n_acquires_ok;
40388 +atomic_unchecked_t fscache_n_acquires_nobufs;
40389 +atomic_unchecked_t fscache_n_acquires_oom;
40390 +
40391 +atomic_unchecked_t fscache_n_updates;
40392 +atomic_unchecked_t fscache_n_updates_null;
40393 +atomic_unchecked_t fscache_n_updates_run;
40394 +
40395 +atomic_unchecked_t fscache_n_relinquishes;
40396 +atomic_unchecked_t fscache_n_relinquishes_null;
40397 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40398 +atomic_unchecked_t fscache_n_relinquishes_retire;
40399 +
40400 +atomic_unchecked_t fscache_n_cookie_index;
40401 +atomic_unchecked_t fscache_n_cookie_data;
40402 +atomic_unchecked_t fscache_n_cookie_special;
40403 +
40404 +atomic_unchecked_t fscache_n_object_alloc;
40405 +atomic_unchecked_t fscache_n_object_no_alloc;
40406 +atomic_unchecked_t fscache_n_object_lookups;
40407 +atomic_unchecked_t fscache_n_object_lookups_negative;
40408 +atomic_unchecked_t fscache_n_object_lookups_positive;
40409 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
40410 +atomic_unchecked_t fscache_n_object_created;
40411 +atomic_unchecked_t fscache_n_object_avail;
40412 +atomic_unchecked_t fscache_n_object_dead;
40413 +
40414 +atomic_unchecked_t fscache_n_checkaux_none;
40415 +atomic_unchecked_t fscache_n_checkaux_okay;
40416 +atomic_unchecked_t fscache_n_checkaux_update;
40417 +atomic_unchecked_t fscache_n_checkaux_obsolete;
40418
40419 atomic_t fscache_n_cop_alloc_object;
40420 atomic_t fscache_n_cop_lookup_object;
40421 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40422 seq_puts(m, "FS-Cache statistics\n");
40423
40424 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40425 - atomic_read(&fscache_n_cookie_index),
40426 - atomic_read(&fscache_n_cookie_data),
40427 - atomic_read(&fscache_n_cookie_special));
40428 + atomic_read_unchecked(&fscache_n_cookie_index),
40429 + atomic_read_unchecked(&fscache_n_cookie_data),
40430 + atomic_read_unchecked(&fscache_n_cookie_special));
40431
40432 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40433 - atomic_read(&fscache_n_object_alloc),
40434 - atomic_read(&fscache_n_object_no_alloc),
40435 - atomic_read(&fscache_n_object_avail),
40436 - atomic_read(&fscache_n_object_dead));
40437 + atomic_read_unchecked(&fscache_n_object_alloc),
40438 + atomic_read_unchecked(&fscache_n_object_no_alloc),
40439 + atomic_read_unchecked(&fscache_n_object_avail),
40440 + atomic_read_unchecked(&fscache_n_object_dead));
40441 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40442 - atomic_read(&fscache_n_checkaux_none),
40443 - atomic_read(&fscache_n_checkaux_okay),
40444 - atomic_read(&fscache_n_checkaux_update),
40445 - atomic_read(&fscache_n_checkaux_obsolete));
40446 + atomic_read_unchecked(&fscache_n_checkaux_none),
40447 + atomic_read_unchecked(&fscache_n_checkaux_okay),
40448 + atomic_read_unchecked(&fscache_n_checkaux_update),
40449 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40450
40451 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40452 - atomic_read(&fscache_n_marks),
40453 - atomic_read(&fscache_n_uncaches));
40454 + atomic_read_unchecked(&fscache_n_marks),
40455 + atomic_read_unchecked(&fscache_n_uncaches));
40456
40457 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40458 " oom=%u\n",
40459 - atomic_read(&fscache_n_acquires),
40460 - atomic_read(&fscache_n_acquires_null),
40461 - atomic_read(&fscache_n_acquires_no_cache),
40462 - atomic_read(&fscache_n_acquires_ok),
40463 - atomic_read(&fscache_n_acquires_nobufs),
40464 - atomic_read(&fscache_n_acquires_oom));
40465 + atomic_read_unchecked(&fscache_n_acquires),
40466 + atomic_read_unchecked(&fscache_n_acquires_null),
40467 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
40468 + atomic_read_unchecked(&fscache_n_acquires_ok),
40469 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
40470 + atomic_read_unchecked(&fscache_n_acquires_oom));
40471
40472 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40473 - atomic_read(&fscache_n_object_lookups),
40474 - atomic_read(&fscache_n_object_lookups_negative),
40475 - atomic_read(&fscache_n_object_lookups_positive),
40476 - atomic_read(&fscache_n_object_lookups_timed_out),
40477 - atomic_read(&fscache_n_object_created));
40478 + atomic_read_unchecked(&fscache_n_object_lookups),
40479 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
40480 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
40481 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
40482 + atomic_read_unchecked(&fscache_n_object_created));
40483
40484 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40485 - atomic_read(&fscache_n_updates),
40486 - atomic_read(&fscache_n_updates_null),
40487 - atomic_read(&fscache_n_updates_run));
40488 + atomic_read_unchecked(&fscache_n_updates),
40489 + atomic_read_unchecked(&fscache_n_updates_null),
40490 + atomic_read_unchecked(&fscache_n_updates_run));
40491
40492 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40493 - atomic_read(&fscache_n_relinquishes),
40494 - atomic_read(&fscache_n_relinquishes_null),
40495 - atomic_read(&fscache_n_relinquishes_waitcrt),
40496 - atomic_read(&fscache_n_relinquishes_retire));
40497 + atomic_read_unchecked(&fscache_n_relinquishes),
40498 + atomic_read_unchecked(&fscache_n_relinquishes_null),
40499 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40500 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
40501
40502 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40503 - atomic_read(&fscache_n_attr_changed),
40504 - atomic_read(&fscache_n_attr_changed_ok),
40505 - atomic_read(&fscache_n_attr_changed_nobufs),
40506 - atomic_read(&fscache_n_attr_changed_nomem),
40507 - atomic_read(&fscache_n_attr_changed_calls));
40508 + atomic_read_unchecked(&fscache_n_attr_changed),
40509 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
40510 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40511 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40512 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
40513
40514 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40515 - atomic_read(&fscache_n_allocs),
40516 - atomic_read(&fscache_n_allocs_ok),
40517 - atomic_read(&fscache_n_allocs_wait),
40518 - atomic_read(&fscache_n_allocs_nobufs),
40519 - atomic_read(&fscache_n_allocs_intr));
40520 + atomic_read_unchecked(&fscache_n_allocs),
40521 + atomic_read_unchecked(&fscache_n_allocs_ok),
40522 + atomic_read_unchecked(&fscache_n_allocs_wait),
40523 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
40524 + atomic_read_unchecked(&fscache_n_allocs_intr));
40525 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40526 - atomic_read(&fscache_n_alloc_ops),
40527 - atomic_read(&fscache_n_alloc_op_waits),
40528 - atomic_read(&fscache_n_allocs_object_dead));
40529 + atomic_read_unchecked(&fscache_n_alloc_ops),
40530 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
40531 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
40532
40533 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40534 " int=%u oom=%u\n",
40535 - atomic_read(&fscache_n_retrievals),
40536 - atomic_read(&fscache_n_retrievals_ok),
40537 - atomic_read(&fscache_n_retrievals_wait),
40538 - atomic_read(&fscache_n_retrievals_nodata),
40539 - atomic_read(&fscache_n_retrievals_nobufs),
40540 - atomic_read(&fscache_n_retrievals_intr),
40541 - atomic_read(&fscache_n_retrievals_nomem));
40542 + atomic_read_unchecked(&fscache_n_retrievals),
40543 + atomic_read_unchecked(&fscache_n_retrievals_ok),
40544 + atomic_read_unchecked(&fscache_n_retrievals_wait),
40545 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
40546 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40547 + atomic_read_unchecked(&fscache_n_retrievals_intr),
40548 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
40549 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40550 - atomic_read(&fscache_n_retrieval_ops),
40551 - atomic_read(&fscache_n_retrieval_op_waits),
40552 - atomic_read(&fscache_n_retrievals_object_dead));
40553 + atomic_read_unchecked(&fscache_n_retrieval_ops),
40554 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40555 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40556
40557 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40558 - atomic_read(&fscache_n_stores),
40559 - atomic_read(&fscache_n_stores_ok),
40560 - atomic_read(&fscache_n_stores_again),
40561 - atomic_read(&fscache_n_stores_nobufs),
40562 - atomic_read(&fscache_n_stores_oom));
40563 + atomic_read_unchecked(&fscache_n_stores),
40564 + atomic_read_unchecked(&fscache_n_stores_ok),
40565 + atomic_read_unchecked(&fscache_n_stores_again),
40566 + atomic_read_unchecked(&fscache_n_stores_nobufs),
40567 + atomic_read_unchecked(&fscache_n_stores_oom));
40568 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40569 - atomic_read(&fscache_n_store_ops),
40570 - atomic_read(&fscache_n_store_calls),
40571 - atomic_read(&fscache_n_store_pages),
40572 - atomic_read(&fscache_n_store_radix_deletes),
40573 - atomic_read(&fscache_n_store_pages_over_limit));
40574 + atomic_read_unchecked(&fscache_n_store_ops),
40575 + atomic_read_unchecked(&fscache_n_store_calls),
40576 + atomic_read_unchecked(&fscache_n_store_pages),
40577 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
40578 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40579
40580 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40581 - atomic_read(&fscache_n_store_vmscan_not_storing),
40582 - atomic_read(&fscache_n_store_vmscan_gone),
40583 - atomic_read(&fscache_n_store_vmscan_busy),
40584 - atomic_read(&fscache_n_store_vmscan_cancelled));
40585 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40586 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40587 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40588 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40589
40590 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40591 - atomic_read(&fscache_n_op_pend),
40592 - atomic_read(&fscache_n_op_run),
40593 - atomic_read(&fscache_n_op_enqueue),
40594 - atomic_read(&fscache_n_op_cancelled),
40595 - atomic_read(&fscache_n_op_rejected));
40596 + atomic_read_unchecked(&fscache_n_op_pend),
40597 + atomic_read_unchecked(&fscache_n_op_run),
40598 + atomic_read_unchecked(&fscache_n_op_enqueue),
40599 + atomic_read_unchecked(&fscache_n_op_cancelled),
40600 + atomic_read_unchecked(&fscache_n_op_rejected));
40601 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40602 - atomic_read(&fscache_n_op_deferred_release),
40603 - atomic_read(&fscache_n_op_release),
40604 - atomic_read(&fscache_n_op_gc));
40605 + atomic_read_unchecked(&fscache_n_op_deferred_release),
40606 + atomic_read_unchecked(&fscache_n_op_release),
40607 + atomic_read_unchecked(&fscache_n_op_gc));
40608
40609 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40610 atomic_read(&fscache_n_cop_alloc_object),
40611 diff -urNp linux-2.6.32.43/fs/fs_struct.c linux-2.6.32.43/fs/fs_struct.c
40612 --- linux-2.6.32.43/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
40613 +++ linux-2.6.32.43/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
40614 @@ -4,6 +4,7 @@
40615 #include <linux/path.h>
40616 #include <linux/slab.h>
40617 #include <linux/fs_struct.h>
40618 +#include <linux/grsecurity.h>
40619
40620 /*
40621 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
40622 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
40623 old_root = fs->root;
40624 fs->root = *path;
40625 path_get(path);
40626 + gr_set_chroot_entries(current, path);
40627 write_unlock(&fs->lock);
40628 if (old_root.dentry)
40629 path_put(&old_root);
40630 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
40631 && fs->root.mnt == old_root->mnt) {
40632 path_get(new_root);
40633 fs->root = *new_root;
40634 + gr_set_chroot_entries(p, new_root);
40635 count++;
40636 }
40637 if (fs->pwd.dentry == old_root->dentry
40638 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
40639 task_lock(tsk);
40640 write_lock(&fs->lock);
40641 tsk->fs = NULL;
40642 - kill = !--fs->users;
40643 + gr_clear_chroot_entries(tsk);
40644 + kill = !atomic_dec_return(&fs->users);
40645 write_unlock(&fs->lock);
40646 task_unlock(tsk);
40647 if (kill)
40648 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
40649 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40650 /* We don't need to lock fs - think why ;-) */
40651 if (fs) {
40652 - fs->users = 1;
40653 + atomic_set(&fs->users, 1);
40654 fs->in_exec = 0;
40655 rwlock_init(&fs->lock);
40656 fs->umask = old->umask;
40657 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
40658
40659 task_lock(current);
40660 write_lock(&fs->lock);
40661 - kill = !--fs->users;
40662 + kill = !atomic_dec_return(&fs->users);
40663 current->fs = new_fs;
40664 + gr_set_chroot_entries(current, &new_fs->root);
40665 write_unlock(&fs->lock);
40666 task_unlock(current);
40667
40668 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
40669
40670 /* to be mentioned only in INIT_TASK */
40671 struct fs_struct init_fs = {
40672 - .users = 1,
40673 + .users = ATOMIC_INIT(1),
40674 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
40675 .umask = 0022,
40676 };
40677 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
40678 task_lock(current);
40679
40680 write_lock(&init_fs.lock);
40681 - init_fs.users++;
40682 + atomic_inc(&init_fs.users);
40683 write_unlock(&init_fs.lock);
40684
40685 write_lock(&fs->lock);
40686 current->fs = &init_fs;
40687 - kill = !--fs->users;
40688 + gr_set_chroot_entries(current, &current->fs->root);
40689 + kill = !atomic_dec_return(&fs->users);
40690 write_unlock(&fs->lock);
40691
40692 task_unlock(current);
40693 diff -urNp linux-2.6.32.43/fs/fuse/cuse.c linux-2.6.32.43/fs/fuse/cuse.c
40694 --- linux-2.6.32.43/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
40695 +++ linux-2.6.32.43/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
40696 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
40697 return rc;
40698 }
40699
40700 -static struct file_operations cuse_channel_fops; /* initialized during init */
40701 -
40702 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
40703 + .owner = THIS_MODULE,
40704 + .llseek = no_llseek,
40705 + .read = do_sync_read,
40706 + .aio_read = fuse_dev_read,
40707 + .write = do_sync_write,
40708 + .aio_write = fuse_dev_write,
40709 + .poll = fuse_dev_poll,
40710 + .open = cuse_channel_open,
40711 + .release = cuse_channel_release,
40712 + .fasync = fuse_dev_fasync,
40713 +};
40714
40715 /**************************************************************************
40716 * Misc stuff and module initializatiion
40717 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
40718 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
40719 INIT_LIST_HEAD(&cuse_conntbl[i]);
40720
40721 - /* inherit and extend fuse_dev_operations */
40722 - cuse_channel_fops = fuse_dev_operations;
40723 - cuse_channel_fops.owner = THIS_MODULE;
40724 - cuse_channel_fops.open = cuse_channel_open;
40725 - cuse_channel_fops.release = cuse_channel_release;
40726 -
40727 cuse_class = class_create(THIS_MODULE, "cuse");
40728 if (IS_ERR(cuse_class))
40729 return PTR_ERR(cuse_class);
40730 diff -urNp linux-2.6.32.43/fs/fuse/dev.c linux-2.6.32.43/fs/fuse/dev.c
40731 --- linux-2.6.32.43/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
40732 +++ linux-2.6.32.43/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
40733 @@ -745,7 +745,7 @@ __releases(&fc->lock)
40734 * request_end(). Otherwise add it to the processing list, and set
40735 * the 'sent' flag.
40736 */
40737 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40738 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40739 unsigned long nr_segs, loff_t pos)
40740 {
40741 int err;
40742 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
40743 spin_unlock(&fc->lock);
40744 return err;
40745 }
40746 +EXPORT_SYMBOL_GPL(fuse_dev_read);
40747
40748 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40749 struct fuse_copy_state *cs)
40750 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40751 {
40752 struct fuse_notify_inval_entry_out outarg;
40753 int err = -EINVAL;
40754 - char buf[FUSE_NAME_MAX+1];
40755 + char *buf = NULL;
40756 struct qstr name;
40757
40758 if (size < sizeof(outarg))
40759 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40760 if (outarg.namelen > FUSE_NAME_MAX)
40761 goto err;
40762
40763 + err = -ENOMEM;
40764 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40765 + if (!buf)
40766 + goto err;
40767 +
40768 name.name = buf;
40769 name.len = outarg.namelen;
40770 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40771 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40772
40773 down_read(&fc->killsb);
40774 err = -ENOENT;
40775 - if (!fc->sb)
40776 - goto err_unlock;
40777 -
40778 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40779 -
40780 -err_unlock:
40781 + if (fc->sb)
40782 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40783 up_read(&fc->killsb);
40784 + kfree(buf);
40785 return err;
40786
40787 err:
40788 fuse_copy_finish(cs);
40789 + kfree(buf);
40790 return err;
40791 }
40792
40793 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40794 * it from the list and copy the rest of the buffer to the request.
40795 * The request is finished by calling request_end()
40796 */
40797 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40798 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40799 unsigned long nr_segs, loff_t pos)
40800 {
40801 int err;
40802 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40803 fuse_copy_finish(&cs);
40804 return err;
40805 }
40806 +EXPORT_SYMBOL_GPL(fuse_dev_write);
40807
40808 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40809 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40810 {
40811 unsigned mask = POLLOUT | POLLWRNORM;
40812 struct fuse_conn *fc = fuse_get_conn(file);
40813 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40814
40815 return mask;
40816 }
40817 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
40818
40819 /*
40820 * Abort all requests on the given list (pending or processing)
40821 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40822 }
40823 EXPORT_SYMBOL_GPL(fuse_dev_release);
40824
40825 -static int fuse_dev_fasync(int fd, struct file *file, int on)
40826 +int fuse_dev_fasync(int fd, struct file *file, int on)
40827 {
40828 struct fuse_conn *fc = fuse_get_conn(file);
40829 if (!fc)
40830 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40831 /* No locking - fasync_helper does its own locking */
40832 return fasync_helper(fd, file, on, &fc->fasync);
40833 }
40834 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40835
40836 const struct file_operations fuse_dev_operations = {
40837 .owner = THIS_MODULE,
40838 diff -urNp linux-2.6.32.43/fs/fuse/dir.c linux-2.6.32.43/fs/fuse/dir.c
40839 --- linux-2.6.32.43/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40840 +++ linux-2.6.32.43/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40841 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40842 return link;
40843 }
40844
40845 -static void free_link(char *link)
40846 +static void free_link(const char *link)
40847 {
40848 if (!IS_ERR(link))
40849 free_page((unsigned long) link);
40850 diff -urNp linux-2.6.32.43/fs/fuse/fuse_i.h linux-2.6.32.43/fs/fuse/fuse_i.h
40851 --- linux-2.6.32.43/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40852 +++ linux-2.6.32.43/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40853 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40854
40855 extern const struct dentry_operations fuse_dentry_operations;
40856
40857 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40858 + unsigned long nr_segs, loff_t pos);
40859 +
40860 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40861 + unsigned long nr_segs, loff_t pos);
40862 +
40863 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40864 +
40865 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40866 +
40867 /**
40868 * Inode to nodeid comparison.
40869 */
40870 diff -urNp linux-2.6.32.43/fs/gfs2/ops_inode.c linux-2.6.32.43/fs/gfs2/ops_inode.c
40871 --- linux-2.6.32.43/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40872 +++ linux-2.6.32.43/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40873 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40874 unsigned int x;
40875 int error;
40876
40877 + pax_track_stack();
40878 +
40879 if (ndentry->d_inode) {
40880 nip = GFS2_I(ndentry->d_inode);
40881 if (ip == nip)
40882 diff -urNp linux-2.6.32.43/fs/gfs2/sys.c linux-2.6.32.43/fs/gfs2/sys.c
40883 --- linux-2.6.32.43/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40884 +++ linux-2.6.32.43/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40885 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40886 return a->store ? a->store(sdp, buf, len) : len;
40887 }
40888
40889 -static struct sysfs_ops gfs2_attr_ops = {
40890 +static const struct sysfs_ops gfs2_attr_ops = {
40891 .show = gfs2_attr_show,
40892 .store = gfs2_attr_store,
40893 };
40894 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40895 return 0;
40896 }
40897
40898 -static struct kset_uevent_ops gfs2_uevent_ops = {
40899 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40900 .uevent = gfs2_uevent,
40901 };
40902
40903 diff -urNp linux-2.6.32.43/fs/hfsplus/catalog.c linux-2.6.32.43/fs/hfsplus/catalog.c
40904 --- linux-2.6.32.43/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40905 +++ linux-2.6.32.43/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40906 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40907 int err;
40908 u16 type;
40909
40910 + pax_track_stack();
40911 +
40912 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40913 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40914 if (err)
40915 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40916 int entry_size;
40917 int err;
40918
40919 + pax_track_stack();
40920 +
40921 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40922 sb = dir->i_sb;
40923 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40924 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40925 int entry_size, type;
40926 int err = 0;
40927
40928 + pax_track_stack();
40929 +
40930 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40931 dst_dir->i_ino, dst_name->name);
40932 sb = src_dir->i_sb;
40933 diff -urNp linux-2.6.32.43/fs/hfsplus/dir.c linux-2.6.32.43/fs/hfsplus/dir.c
40934 --- linux-2.6.32.43/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40935 +++ linux-2.6.32.43/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40936 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40937 struct hfsplus_readdir_data *rd;
40938 u16 type;
40939
40940 + pax_track_stack();
40941 +
40942 if (filp->f_pos >= inode->i_size)
40943 return 0;
40944
40945 diff -urNp linux-2.6.32.43/fs/hfsplus/inode.c linux-2.6.32.43/fs/hfsplus/inode.c
40946 --- linux-2.6.32.43/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40947 +++ linux-2.6.32.43/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40948 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40949 int res = 0;
40950 u16 type;
40951
40952 + pax_track_stack();
40953 +
40954 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40955
40956 HFSPLUS_I(inode).dev = 0;
40957 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40958 struct hfs_find_data fd;
40959 hfsplus_cat_entry entry;
40960
40961 + pax_track_stack();
40962 +
40963 if (HFSPLUS_IS_RSRC(inode))
40964 main_inode = HFSPLUS_I(inode).rsrc_inode;
40965
40966 diff -urNp linux-2.6.32.43/fs/hfsplus/ioctl.c linux-2.6.32.43/fs/hfsplus/ioctl.c
40967 --- linux-2.6.32.43/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40968 +++ linux-2.6.32.43/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40969 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40970 struct hfsplus_cat_file *file;
40971 int res;
40972
40973 + pax_track_stack();
40974 +
40975 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40976 return -EOPNOTSUPP;
40977
40978 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40979 struct hfsplus_cat_file *file;
40980 ssize_t res = 0;
40981
40982 + pax_track_stack();
40983 +
40984 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40985 return -EOPNOTSUPP;
40986
40987 diff -urNp linux-2.6.32.43/fs/hfsplus/super.c linux-2.6.32.43/fs/hfsplus/super.c
40988 --- linux-2.6.32.43/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40989 +++ linux-2.6.32.43/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40990 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40991 struct nls_table *nls = NULL;
40992 int err = -EINVAL;
40993
40994 + pax_track_stack();
40995 +
40996 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40997 if (!sbi)
40998 return -ENOMEM;
40999 diff -urNp linux-2.6.32.43/fs/hugetlbfs/inode.c linux-2.6.32.43/fs/hugetlbfs/inode.c
41000 --- linux-2.6.32.43/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41001 +++ linux-2.6.32.43/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
41002 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
41003 .kill_sb = kill_litter_super,
41004 };
41005
41006 -static struct vfsmount *hugetlbfs_vfsmount;
41007 +struct vfsmount *hugetlbfs_vfsmount;
41008
41009 static int can_do_hugetlb_shm(void)
41010 {
41011 diff -urNp linux-2.6.32.43/fs/ioctl.c linux-2.6.32.43/fs/ioctl.c
41012 --- linux-2.6.32.43/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41013 +++ linux-2.6.32.43/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
41014 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
41015 u64 phys, u64 len, u32 flags)
41016 {
41017 struct fiemap_extent extent;
41018 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
41019 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
41020
41021 /* only count the extents */
41022 if (fieinfo->fi_extents_max == 0) {
41023 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
41024
41025 fieinfo.fi_flags = fiemap.fm_flags;
41026 fieinfo.fi_extents_max = fiemap.fm_extent_count;
41027 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
41028 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
41029
41030 if (fiemap.fm_extent_count != 0 &&
41031 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
41032 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
41033 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
41034 fiemap.fm_flags = fieinfo.fi_flags;
41035 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
41036 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
41037 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
41038 error = -EFAULT;
41039
41040 return error;
41041 diff -urNp linux-2.6.32.43/fs/jbd/checkpoint.c linux-2.6.32.43/fs/jbd/checkpoint.c
41042 --- linux-2.6.32.43/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
41043 +++ linux-2.6.32.43/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
41044 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
41045 tid_t this_tid;
41046 int result;
41047
41048 + pax_track_stack();
41049 +
41050 jbd_debug(1, "Start checkpoint\n");
41051
41052 /*
41053 diff -urNp linux-2.6.32.43/fs/jffs2/compr_rtime.c linux-2.6.32.43/fs/jffs2/compr_rtime.c
41054 --- linux-2.6.32.43/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
41055 +++ linux-2.6.32.43/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
41056 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
41057 int outpos = 0;
41058 int pos=0;
41059
41060 + pax_track_stack();
41061 +
41062 memset(positions,0,sizeof(positions));
41063
41064 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
41065 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
41066 int outpos = 0;
41067 int pos=0;
41068
41069 + pax_track_stack();
41070 +
41071 memset(positions,0,sizeof(positions));
41072
41073 while (outpos<destlen) {
41074 diff -urNp linux-2.6.32.43/fs/jffs2/compr_rubin.c linux-2.6.32.43/fs/jffs2/compr_rubin.c
41075 --- linux-2.6.32.43/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
41076 +++ linux-2.6.32.43/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
41077 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
41078 int ret;
41079 uint32_t mysrclen, mydstlen;
41080
41081 + pax_track_stack();
41082 +
41083 mysrclen = *sourcelen;
41084 mydstlen = *dstlen - 8;
41085
41086 diff -urNp linux-2.6.32.43/fs/jffs2/erase.c linux-2.6.32.43/fs/jffs2/erase.c
41087 --- linux-2.6.32.43/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
41088 +++ linux-2.6.32.43/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
41089 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
41090 struct jffs2_unknown_node marker = {
41091 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
41092 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41093 - .totlen = cpu_to_je32(c->cleanmarker_size)
41094 + .totlen = cpu_to_je32(c->cleanmarker_size),
41095 + .hdr_crc = cpu_to_je32(0)
41096 };
41097
41098 jffs2_prealloc_raw_node_refs(c, jeb, 1);
41099 diff -urNp linux-2.6.32.43/fs/jffs2/wbuf.c linux-2.6.32.43/fs/jffs2/wbuf.c
41100 --- linux-2.6.32.43/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
41101 +++ linux-2.6.32.43/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
41102 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
41103 {
41104 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
41105 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41106 - .totlen = constant_cpu_to_je32(8)
41107 + .totlen = constant_cpu_to_je32(8),
41108 + .hdr_crc = constant_cpu_to_je32(0)
41109 };
41110
41111 /*
41112 diff -urNp linux-2.6.32.43/fs/jffs2/xattr.c linux-2.6.32.43/fs/jffs2/xattr.c
41113 --- linux-2.6.32.43/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
41114 +++ linux-2.6.32.43/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
41115 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
41116
41117 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
41118
41119 + pax_track_stack();
41120 +
41121 /* Phase.1 : Merge same xref */
41122 for (i=0; i < XREF_TMPHASH_SIZE; i++)
41123 xref_tmphash[i] = NULL;
41124 diff -urNp linux-2.6.32.43/fs/jfs/super.c linux-2.6.32.43/fs/jfs/super.c
41125 --- linux-2.6.32.43/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
41126 +++ linux-2.6.32.43/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
41127 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
41128
41129 jfs_inode_cachep =
41130 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
41131 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
41132 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
41133 init_once);
41134 if (jfs_inode_cachep == NULL)
41135 return -ENOMEM;
41136 diff -urNp linux-2.6.32.43/fs/Kconfig.binfmt linux-2.6.32.43/fs/Kconfig.binfmt
41137 --- linux-2.6.32.43/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
41138 +++ linux-2.6.32.43/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
41139 @@ -86,7 +86,7 @@ config HAVE_AOUT
41140
41141 config BINFMT_AOUT
41142 tristate "Kernel support for a.out and ECOFF binaries"
41143 - depends on HAVE_AOUT
41144 + depends on HAVE_AOUT && BROKEN
41145 ---help---
41146 A.out (Assembler.OUTput) is a set of formats for libraries and
41147 executables used in the earliest versions of UNIX. Linux used
41148 diff -urNp linux-2.6.32.43/fs/libfs.c linux-2.6.32.43/fs/libfs.c
41149 --- linux-2.6.32.43/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
41150 +++ linux-2.6.32.43/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
41151 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
41152
41153 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
41154 struct dentry *next;
41155 + char d_name[sizeof(next->d_iname)];
41156 + const unsigned char *name;
41157 +
41158 next = list_entry(p, struct dentry, d_u.d_child);
41159 if (d_unhashed(next) || !next->d_inode)
41160 continue;
41161
41162 spin_unlock(&dcache_lock);
41163 - if (filldir(dirent, next->d_name.name,
41164 + name = next->d_name.name;
41165 + if (name == next->d_iname) {
41166 + memcpy(d_name, name, next->d_name.len);
41167 + name = d_name;
41168 + }
41169 + if (filldir(dirent, name,
41170 next->d_name.len, filp->f_pos,
41171 next->d_inode->i_ino,
41172 dt_type(next->d_inode)) < 0)
41173 diff -urNp linux-2.6.32.43/fs/lockd/clntproc.c linux-2.6.32.43/fs/lockd/clntproc.c
41174 --- linux-2.6.32.43/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
41175 +++ linux-2.6.32.43/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
41176 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41177 /*
41178 * Cookie counter for NLM requests
41179 */
41180 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41181 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41182
41183 void nlmclnt_next_cookie(struct nlm_cookie *c)
41184 {
41185 - u32 cookie = atomic_inc_return(&nlm_cookie);
41186 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41187
41188 memcpy(c->data, &cookie, 4);
41189 c->len=4;
41190 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41191 struct nlm_rqst reqst, *req;
41192 int status;
41193
41194 + pax_track_stack();
41195 +
41196 req = &reqst;
41197 memset(req, 0, sizeof(*req));
41198 locks_init_lock(&req->a_args.lock.fl);
41199 diff -urNp linux-2.6.32.43/fs/lockd/svc.c linux-2.6.32.43/fs/lockd/svc.c
41200 --- linux-2.6.32.43/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
41201 +++ linux-2.6.32.43/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
41202 @@ -43,7 +43,7 @@
41203
41204 static struct svc_program nlmsvc_program;
41205
41206 -struct nlmsvc_binding * nlmsvc_ops;
41207 +const struct nlmsvc_binding * nlmsvc_ops;
41208 EXPORT_SYMBOL_GPL(nlmsvc_ops);
41209
41210 static DEFINE_MUTEX(nlmsvc_mutex);
41211 diff -urNp linux-2.6.32.43/fs/locks.c linux-2.6.32.43/fs/locks.c
41212 --- linux-2.6.32.43/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
41213 +++ linux-2.6.32.43/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
41214 @@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
41215
41216 static struct kmem_cache *filelock_cache __read_mostly;
41217
41218 +static void locks_init_lock_always(struct file_lock *fl)
41219 +{
41220 + fl->fl_next = NULL;
41221 + fl->fl_fasync = NULL;
41222 + fl->fl_owner = NULL;
41223 + fl->fl_pid = 0;
41224 + fl->fl_nspid = NULL;
41225 + fl->fl_file = NULL;
41226 + fl->fl_flags = 0;
41227 + fl->fl_type = 0;
41228 + fl->fl_start = fl->fl_end = 0;
41229 +}
41230 +
41231 /* Allocate an empty lock structure. */
41232 static struct file_lock *locks_alloc_lock(void)
41233 {
41234 - return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
41235 + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
41236 +
41237 + if (fl)
41238 + locks_init_lock_always(fl);
41239 +
41240 + return fl;
41241 }
41242
41243 void locks_release_private(struct file_lock *fl)
41244 @@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
41245 INIT_LIST_HEAD(&fl->fl_link);
41246 INIT_LIST_HEAD(&fl->fl_block);
41247 init_waitqueue_head(&fl->fl_wait);
41248 - fl->fl_next = NULL;
41249 - fl->fl_fasync = NULL;
41250 - fl->fl_owner = NULL;
41251 - fl->fl_pid = 0;
41252 - fl->fl_nspid = NULL;
41253 - fl->fl_file = NULL;
41254 - fl->fl_flags = 0;
41255 - fl->fl_type = 0;
41256 - fl->fl_start = fl->fl_end = 0;
41257 fl->fl_ops = NULL;
41258 fl->fl_lmops = NULL;
41259 + locks_init_lock_always(fl);
41260 }
41261
41262 EXPORT_SYMBOL(locks_init_lock);
41263 @@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
41264 return;
41265
41266 if (filp->f_op && filp->f_op->flock) {
41267 - struct file_lock fl = {
41268 + struct file_lock flock = {
41269 .fl_pid = current->tgid,
41270 .fl_file = filp,
41271 .fl_flags = FL_FLOCK,
41272 .fl_type = F_UNLCK,
41273 .fl_end = OFFSET_MAX,
41274 };
41275 - filp->f_op->flock(filp, F_SETLKW, &fl);
41276 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
41277 - fl.fl_ops->fl_release_private(&fl);
41278 + filp->f_op->flock(filp, F_SETLKW, &flock);
41279 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
41280 + flock.fl_ops->fl_release_private(&flock);
41281 }
41282
41283 lock_kernel();
41284 diff -urNp linux-2.6.32.43/fs/namei.c linux-2.6.32.43/fs/namei.c
41285 --- linux-2.6.32.43/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
41286 +++ linux-2.6.32.43/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
41287 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
41288 return ret;
41289
41290 /*
41291 - * Read/write DACs are always overridable.
41292 - * Executable DACs are overridable if at least one exec bit is set.
41293 - */
41294 - if (!(mask & MAY_EXEC) || execute_ok(inode))
41295 - if (capable(CAP_DAC_OVERRIDE))
41296 - return 0;
41297 -
41298 - /*
41299 * Searching includes executable on directories, else just read.
41300 */
41301 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41302 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
41303 if (capable(CAP_DAC_READ_SEARCH))
41304 return 0;
41305
41306 + /*
41307 + * Read/write DACs are always overridable.
41308 + * Executable DACs are overridable if at least one exec bit is set.
41309 + */
41310 + if (!(mask & MAY_EXEC) || execute_ok(inode))
41311 + if (capable(CAP_DAC_OVERRIDE))
41312 + return 0;
41313 +
41314 return -EACCES;
41315 }
41316
41317 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
41318 if (!ret)
41319 goto ok;
41320
41321 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
41322 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
41323 + capable(CAP_DAC_OVERRIDE))
41324 goto ok;
41325
41326 return ret;
41327 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
41328 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
41329 error = PTR_ERR(cookie);
41330 if (!IS_ERR(cookie)) {
41331 - char *s = nd_get_link(nd);
41332 + const char *s = nd_get_link(nd);
41333 error = 0;
41334 if (s)
41335 error = __vfs_follow_link(nd, s);
41336 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
41337 err = security_inode_follow_link(path->dentry, nd);
41338 if (err)
41339 goto loop;
41340 +
41341 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
41342 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
41343 + err = -EACCES;
41344 + goto loop;
41345 + }
41346 +
41347 current->link_count++;
41348 current->total_link_count++;
41349 nd->depth++;
41350 @@ -1016,11 +1024,18 @@ return_reval:
41351 break;
41352 }
41353 return_base:
41354 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
41355 + path_put(&nd->path);
41356 + return -ENOENT;
41357 + }
41358 return 0;
41359 out_dput:
41360 path_put_conditional(&next, nd);
41361 break;
41362 }
41363 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41364 + err = -ENOENT;
41365 +
41366 path_put(&nd->path);
41367 return_err:
41368 return err;
41369 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
41370 int retval = path_init(dfd, name, flags, nd);
41371 if (!retval)
41372 retval = path_walk(name, nd);
41373 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
41374 - nd->path.dentry->d_inode))
41375 - audit_inode(name, nd->path.dentry);
41376 +
41377 + if (likely(!retval)) {
41378 + if (nd->path.dentry && nd->path.dentry->d_inode) {
41379 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41380 + retval = -ENOENT;
41381 + if (!audit_dummy_context())
41382 + audit_inode(name, nd->path.dentry);
41383 + }
41384 + }
41385 if (nd->root.mnt) {
41386 path_put(&nd->root);
41387 nd->root.mnt = NULL;
41388 }
41389 +
41390 return retval;
41391 }
41392
41393 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
41394 if (error)
41395 goto err_out;
41396
41397 +
41398 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41399 + error = -EPERM;
41400 + goto err_out;
41401 + }
41402 + if (gr_handle_rawio(inode)) {
41403 + error = -EPERM;
41404 + goto err_out;
41405 + }
41406 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
41407 + error = -EACCES;
41408 + goto err_out;
41409 + }
41410 +
41411 if (flag & O_TRUNC) {
41412 error = get_write_access(inode);
41413 if (error)
41414 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
41415 int error;
41416 struct dentry *dir = nd->path.dentry;
41417
41418 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
41419 + error = -EACCES;
41420 + goto out_unlock;
41421 + }
41422 +
41423 if (!IS_POSIXACL(dir->d_inode))
41424 mode &= ~current_umask();
41425 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
41426 if (error)
41427 goto out_unlock;
41428 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
41429 + if (!error)
41430 + gr_handle_create(path->dentry, nd->path.mnt);
41431 out_unlock:
41432 mutex_unlock(&dir->d_inode->i_mutex);
41433 dput(nd->path.dentry);
41434 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
41435 &nd, flag);
41436 if (error)
41437 return ERR_PTR(error);
41438 +
41439 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
41440 + error = -EPERM;
41441 + goto exit;
41442 + }
41443 +
41444 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
41445 + error = -EPERM;
41446 + goto exit;
41447 + }
41448 +
41449 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
41450 + error = -EACCES;
41451 + goto exit;
41452 + }
41453 +
41454 goto ok;
41455 }
41456
41457 @@ -1795,6 +1854,14 @@ do_last:
41458 /*
41459 * It already exists.
41460 */
41461 +
41462 + /* only check if O_CREAT is specified, all other checks need
41463 + to go into may_open */
41464 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
41465 + error = -EACCES;
41466 + goto exit_mutex_unlock;
41467 + }
41468 +
41469 mutex_unlock(&dir->d_inode->i_mutex);
41470 audit_inode(pathname, path.dentry);
41471
41472 @@ -1887,6 +1954,13 @@ do_link:
41473 error = security_inode_follow_link(path.dentry, &nd);
41474 if (error)
41475 goto exit_dput;
41476 +
41477 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
41478 + path.dentry, nd.path.mnt)) {
41479 + error = -EACCES;
41480 + goto exit_dput;
41481 + }
41482 +
41483 error = __do_follow_link(&path, &nd);
41484 if (error) {
41485 /* Does someone understand code flow here? Or it is only
41486 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41487 error = may_mknod(mode);
41488 if (error)
41489 goto out_dput;
41490 +
41491 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41492 + error = -EPERM;
41493 + goto out_dput;
41494 + }
41495 +
41496 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41497 + error = -EACCES;
41498 + goto out_dput;
41499 + }
41500 +
41501 error = mnt_want_write(nd.path.mnt);
41502 if (error)
41503 goto out_dput;
41504 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41505 }
41506 out_drop_write:
41507 mnt_drop_write(nd.path.mnt);
41508 +
41509 + if (!error)
41510 + gr_handle_create(dentry, nd.path.mnt);
41511 out_dput:
41512 dput(dentry);
41513 out_unlock:
41514 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41515 if (IS_ERR(dentry))
41516 goto out_unlock;
41517
41518 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41519 + error = -EACCES;
41520 + goto out_dput;
41521 + }
41522 +
41523 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41524 mode &= ~current_umask();
41525 error = mnt_want_write(nd.path.mnt);
41526 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41527 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41528 out_drop_write:
41529 mnt_drop_write(nd.path.mnt);
41530 +
41531 + if (!error)
41532 + gr_handle_create(dentry, nd.path.mnt);
41533 +
41534 out_dput:
41535 dput(dentry);
41536 out_unlock:
41537 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
41538 char * name;
41539 struct dentry *dentry;
41540 struct nameidata nd;
41541 + ino_t saved_ino = 0;
41542 + dev_t saved_dev = 0;
41543
41544 error = user_path_parent(dfd, pathname, &nd, &name);
41545 if (error)
41546 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
41547 error = PTR_ERR(dentry);
41548 if (IS_ERR(dentry))
41549 goto exit2;
41550 +
41551 + if (dentry->d_inode != NULL) {
41552 + if (dentry->d_inode->i_nlink <= 1) {
41553 + saved_ino = dentry->d_inode->i_ino;
41554 + saved_dev = gr_get_dev_from_dentry(dentry);
41555 + }
41556 +
41557 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41558 + error = -EACCES;
41559 + goto exit3;
41560 + }
41561 + }
41562 +
41563 error = mnt_want_write(nd.path.mnt);
41564 if (error)
41565 goto exit3;
41566 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
41567 if (error)
41568 goto exit4;
41569 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41570 + if (!error && (saved_dev || saved_ino))
41571 + gr_handle_delete(saved_ino, saved_dev);
41572 exit4:
41573 mnt_drop_write(nd.path.mnt);
41574 exit3:
41575 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
41576 struct dentry *dentry;
41577 struct nameidata nd;
41578 struct inode *inode = NULL;
41579 + ino_t saved_ino = 0;
41580 + dev_t saved_dev = 0;
41581
41582 error = user_path_parent(dfd, pathname, &nd, &name);
41583 if (error)
41584 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
41585 if (nd.last.name[nd.last.len])
41586 goto slashes;
41587 inode = dentry->d_inode;
41588 - if (inode)
41589 + if (inode) {
41590 + if (inode->i_nlink <= 1) {
41591 + saved_ino = inode->i_ino;
41592 + saved_dev = gr_get_dev_from_dentry(dentry);
41593 + }
41594 +
41595 atomic_inc(&inode->i_count);
41596 +
41597 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41598 + error = -EACCES;
41599 + goto exit2;
41600 + }
41601 + }
41602 error = mnt_want_write(nd.path.mnt);
41603 if (error)
41604 goto exit2;
41605 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
41606 if (error)
41607 goto exit3;
41608 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41609 + if (!error && (saved_ino || saved_dev))
41610 + gr_handle_delete(saved_ino, saved_dev);
41611 exit3:
41612 mnt_drop_write(nd.path.mnt);
41613 exit2:
41614 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41615 if (IS_ERR(dentry))
41616 goto out_unlock;
41617
41618 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41619 + error = -EACCES;
41620 + goto out_dput;
41621 + }
41622 +
41623 error = mnt_want_write(nd.path.mnt);
41624 if (error)
41625 goto out_dput;
41626 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41627 if (error)
41628 goto out_drop_write;
41629 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41630 + if (!error)
41631 + gr_handle_create(dentry, nd.path.mnt);
41632 out_drop_write:
41633 mnt_drop_write(nd.path.mnt);
41634 out_dput:
41635 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41636 error = PTR_ERR(new_dentry);
41637 if (IS_ERR(new_dentry))
41638 goto out_unlock;
41639 +
41640 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41641 + old_path.dentry->d_inode,
41642 + old_path.dentry->d_inode->i_mode, to)) {
41643 + error = -EACCES;
41644 + goto out_dput;
41645 + }
41646 +
41647 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41648 + old_path.dentry, old_path.mnt, to)) {
41649 + error = -EACCES;
41650 + goto out_dput;
41651 + }
41652 +
41653 error = mnt_want_write(nd.path.mnt);
41654 if (error)
41655 goto out_dput;
41656 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41657 if (error)
41658 goto out_drop_write;
41659 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41660 + if (!error)
41661 + gr_handle_create(new_dentry, nd.path.mnt);
41662 out_drop_write:
41663 mnt_drop_write(nd.path.mnt);
41664 out_dput:
41665 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41666 char *to;
41667 int error;
41668
41669 + pax_track_stack();
41670 +
41671 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41672 if (error)
41673 goto exit;
41674 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41675 if (new_dentry == trap)
41676 goto exit5;
41677
41678 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41679 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
41680 + to);
41681 + if (error)
41682 + goto exit5;
41683 +
41684 error = mnt_want_write(oldnd.path.mnt);
41685 if (error)
41686 goto exit5;
41687 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41688 goto exit6;
41689 error = vfs_rename(old_dir->d_inode, old_dentry,
41690 new_dir->d_inode, new_dentry);
41691 + if (!error)
41692 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41693 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41694 exit6:
41695 mnt_drop_write(oldnd.path.mnt);
41696 exit5:
41697 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
41698
41699 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41700 {
41701 + char tmpbuf[64];
41702 + const char *newlink;
41703 int len;
41704
41705 len = PTR_ERR(link);
41706 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
41707 len = strlen(link);
41708 if (len > (unsigned) buflen)
41709 len = buflen;
41710 - if (copy_to_user(buffer, link, len))
41711 +
41712 + if (len < sizeof(tmpbuf)) {
41713 + memcpy(tmpbuf, link, len);
41714 + newlink = tmpbuf;
41715 + } else
41716 + newlink = link;
41717 +
41718 + if (copy_to_user(buffer, newlink, len))
41719 len = -EFAULT;
41720 out:
41721 return len;
41722 diff -urNp linux-2.6.32.43/fs/namespace.c linux-2.6.32.43/fs/namespace.c
41723 --- linux-2.6.32.43/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
41724 +++ linux-2.6.32.43/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
41725 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
41726 if (!(sb->s_flags & MS_RDONLY))
41727 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41728 up_write(&sb->s_umount);
41729 +
41730 + gr_log_remount(mnt->mnt_devname, retval);
41731 +
41732 return retval;
41733 }
41734
41735 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
41736 security_sb_umount_busy(mnt);
41737 up_write(&namespace_sem);
41738 release_mounts(&umount_list);
41739 +
41740 + gr_log_unmount(mnt->mnt_devname, retval);
41741 +
41742 return retval;
41743 }
41744
41745 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
41746 if (retval)
41747 goto dput_out;
41748
41749 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41750 + retval = -EPERM;
41751 + goto dput_out;
41752 + }
41753 +
41754 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41755 + retval = -EPERM;
41756 + goto dput_out;
41757 + }
41758 +
41759 if (flags & MS_REMOUNT)
41760 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41761 data_page);
41762 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
41763 dev_name, data_page);
41764 dput_out:
41765 path_put(&path);
41766 +
41767 + gr_log_mount(dev_name, dir_name, retval);
41768 +
41769 return retval;
41770 }
41771
41772 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
41773 goto out1;
41774 }
41775
41776 + if (gr_handle_chroot_pivot()) {
41777 + error = -EPERM;
41778 + path_put(&old);
41779 + goto out1;
41780 + }
41781 +
41782 read_lock(&current->fs->lock);
41783 root = current->fs->root;
41784 path_get(&current->fs->root);
41785 diff -urNp linux-2.6.32.43/fs/ncpfs/dir.c linux-2.6.32.43/fs/ncpfs/dir.c
41786 --- linux-2.6.32.43/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41787 +++ linux-2.6.32.43/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
41788 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
41789 int res, val = 0, len;
41790 __u8 __name[NCP_MAXPATHLEN + 1];
41791
41792 + pax_track_stack();
41793 +
41794 parent = dget_parent(dentry);
41795 dir = parent->d_inode;
41796
41797 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41798 int error, res, len;
41799 __u8 __name[NCP_MAXPATHLEN + 1];
41800
41801 + pax_track_stack();
41802 +
41803 lock_kernel();
41804 error = -EIO;
41805 if (!ncp_conn_valid(server))
41806 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41807 int error, result, len;
41808 int opmode;
41809 __u8 __name[NCP_MAXPATHLEN + 1];
41810 -
41811 +
41812 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41813 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41814
41815 + pax_track_stack();
41816 +
41817 error = -EIO;
41818 lock_kernel();
41819 if (!ncp_conn_valid(server))
41820 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41821 int error, len;
41822 __u8 __name[NCP_MAXPATHLEN + 1];
41823
41824 + pax_track_stack();
41825 +
41826 DPRINTK("ncp_mkdir: making %s/%s\n",
41827 dentry->d_parent->d_name.name, dentry->d_name.name);
41828
41829 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41830 if (!ncp_conn_valid(server))
41831 goto out;
41832
41833 + pax_track_stack();
41834 +
41835 ncp_age_dentry(server, dentry);
41836 len = sizeof(__name);
41837 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41838 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41839 int old_len, new_len;
41840 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41841
41842 + pax_track_stack();
41843 +
41844 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41845 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41846 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41847 diff -urNp linux-2.6.32.43/fs/ncpfs/inode.c linux-2.6.32.43/fs/ncpfs/inode.c
41848 --- linux-2.6.32.43/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41849 +++ linux-2.6.32.43/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41850 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41851 #endif
41852 struct ncp_entry_info finfo;
41853
41854 + pax_track_stack();
41855 +
41856 data.wdog_pid = NULL;
41857 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41858 if (!server)
41859 diff -urNp linux-2.6.32.43/fs/nfs/inode.c linux-2.6.32.43/fs/nfs/inode.c
41860 --- linux-2.6.32.43/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41861 +++ linux-2.6.32.43/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
41862 @@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
41863 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
41864 nfsi->attrtimeo_timestamp = jiffies;
41865
41866 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
41867 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
41868 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
41869 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
41870 else
41871 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41872 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41873 }
41874
41875 -static atomic_long_t nfs_attr_generation_counter;
41876 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41877
41878 static unsigned long nfs_read_attr_generation_counter(void)
41879 {
41880 - return atomic_long_read(&nfs_attr_generation_counter);
41881 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41882 }
41883
41884 unsigned long nfs_inc_attr_generation_counter(void)
41885 {
41886 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41887 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41888 }
41889
41890 void nfs_fattr_init(struct nfs_fattr *fattr)
41891 diff -urNp linux-2.6.32.43/fs/nfsd/lockd.c linux-2.6.32.43/fs/nfsd/lockd.c
41892 --- linux-2.6.32.43/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41893 +++ linux-2.6.32.43/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41894 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41895 fput(filp);
41896 }
41897
41898 -static struct nlmsvc_binding nfsd_nlm_ops = {
41899 +static const struct nlmsvc_binding nfsd_nlm_ops = {
41900 .fopen = nlm_fopen, /* open file for locking */
41901 .fclose = nlm_fclose, /* close file */
41902 };
41903 diff -urNp linux-2.6.32.43/fs/nfsd/nfs4state.c linux-2.6.32.43/fs/nfsd/nfs4state.c
41904 --- linux-2.6.32.43/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41905 +++ linux-2.6.32.43/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41906 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41907 unsigned int cmd;
41908 int err;
41909
41910 + pax_track_stack();
41911 +
41912 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41913 (long long) lock->lk_offset,
41914 (long long) lock->lk_length);
41915 diff -urNp linux-2.6.32.43/fs/nfsd/nfs4xdr.c linux-2.6.32.43/fs/nfsd/nfs4xdr.c
41916 --- linux-2.6.32.43/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41917 +++ linux-2.6.32.43/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41918 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41919 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41920 u32 minorversion = resp->cstate.minorversion;
41921
41922 + pax_track_stack();
41923 +
41924 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41925 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41926 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41927 diff -urNp linux-2.6.32.43/fs/nfsd/vfs.c linux-2.6.32.43/fs/nfsd/vfs.c
41928 --- linux-2.6.32.43/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41929 +++ linux-2.6.32.43/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41930 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41931 } else {
41932 oldfs = get_fs();
41933 set_fs(KERNEL_DS);
41934 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41935 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41936 set_fs(oldfs);
41937 }
41938
41939 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41940
41941 /* Write the data. */
41942 oldfs = get_fs(); set_fs(KERNEL_DS);
41943 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41944 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41945 set_fs(oldfs);
41946 if (host_err < 0)
41947 goto out_nfserr;
41948 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41949 */
41950
41951 oldfs = get_fs(); set_fs(KERNEL_DS);
41952 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41953 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41954 set_fs(oldfs);
41955
41956 if (host_err < 0)
41957 diff -urNp linux-2.6.32.43/fs/nilfs2/ioctl.c linux-2.6.32.43/fs/nilfs2/ioctl.c
41958 --- linux-2.6.32.43/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41959 +++ linux-2.6.32.43/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41960 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41961 unsigned int cmd, void __user *argp)
41962 {
41963 struct nilfs_argv argv[5];
41964 - const static size_t argsz[5] = {
41965 + static const size_t argsz[5] = {
41966 sizeof(struct nilfs_vdesc),
41967 sizeof(struct nilfs_period),
41968 sizeof(__u64),
41969 diff -urNp linux-2.6.32.43/fs/notify/dnotify/dnotify.c linux-2.6.32.43/fs/notify/dnotify/dnotify.c
41970 --- linux-2.6.32.43/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41971 +++ linux-2.6.32.43/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41972 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41973 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41974 }
41975
41976 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41977 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41978 .handle_event = dnotify_handle_event,
41979 .should_send_event = dnotify_should_send_event,
41980 .free_group_priv = NULL,
41981 diff -urNp linux-2.6.32.43/fs/notify/notification.c linux-2.6.32.43/fs/notify/notification.c
41982 --- linux-2.6.32.43/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41983 +++ linux-2.6.32.43/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41984 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41985 * get set to 0 so it will never get 'freed'
41986 */
41987 static struct fsnotify_event q_overflow_event;
41988 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41989 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41990
41991 /**
41992 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41993 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41994 */
41995 u32 fsnotify_get_cookie(void)
41996 {
41997 - return atomic_inc_return(&fsnotify_sync_cookie);
41998 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41999 }
42000 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
42001
42002 diff -urNp linux-2.6.32.43/fs/ntfs/dir.c linux-2.6.32.43/fs/ntfs/dir.c
42003 --- linux-2.6.32.43/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42004 +++ linux-2.6.32.43/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
42005 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
42006 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
42007 ~(s64)(ndir->itype.index.block_size - 1)));
42008 /* Bounds checks. */
42009 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42010 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42011 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
42012 "inode 0x%lx or driver bug.", vdir->i_ino);
42013 goto err_out;
42014 diff -urNp linux-2.6.32.43/fs/ntfs/file.c linux-2.6.32.43/fs/ntfs/file.c
42015 --- linux-2.6.32.43/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
42016 +++ linux-2.6.32.43/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
42017 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
42018 #endif /* NTFS_RW */
42019 };
42020
42021 -const struct file_operations ntfs_empty_file_ops = {};
42022 +const struct file_operations ntfs_empty_file_ops __read_only;
42023
42024 -const struct inode_operations ntfs_empty_inode_ops = {};
42025 +const struct inode_operations ntfs_empty_inode_ops __read_only;
42026 diff -urNp linux-2.6.32.43/fs/ocfs2/cluster/masklog.c linux-2.6.32.43/fs/ocfs2/cluster/masklog.c
42027 --- linux-2.6.32.43/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
42028 +++ linux-2.6.32.43/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
42029 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
42030 return mlog_mask_store(mlog_attr->mask, buf, count);
42031 }
42032
42033 -static struct sysfs_ops mlog_attr_ops = {
42034 +static const struct sysfs_ops mlog_attr_ops = {
42035 .show = mlog_show,
42036 .store = mlog_store,
42037 };
42038 diff -urNp linux-2.6.32.43/fs/ocfs2/localalloc.c linux-2.6.32.43/fs/ocfs2/localalloc.c
42039 --- linux-2.6.32.43/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
42040 +++ linux-2.6.32.43/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
42041 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
42042 goto bail;
42043 }
42044
42045 - atomic_inc(&osb->alloc_stats.moves);
42046 + atomic_inc_unchecked(&osb->alloc_stats.moves);
42047
42048 status = 0;
42049 bail:
42050 diff -urNp linux-2.6.32.43/fs/ocfs2/namei.c linux-2.6.32.43/fs/ocfs2/namei.c
42051 --- linux-2.6.32.43/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
42052 +++ linux-2.6.32.43/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
42053 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
42054 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
42055 struct ocfs2_dir_lookup_result target_insert = { NULL, };
42056
42057 + pax_track_stack();
42058 +
42059 /* At some point it might be nice to break this function up a
42060 * bit. */
42061
42062 diff -urNp linux-2.6.32.43/fs/ocfs2/ocfs2.h linux-2.6.32.43/fs/ocfs2/ocfs2.h
42063 --- linux-2.6.32.43/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
42064 +++ linux-2.6.32.43/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
42065 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
42066
42067 struct ocfs2_alloc_stats
42068 {
42069 - atomic_t moves;
42070 - atomic_t local_data;
42071 - atomic_t bitmap_data;
42072 - atomic_t bg_allocs;
42073 - atomic_t bg_extends;
42074 + atomic_unchecked_t moves;
42075 + atomic_unchecked_t local_data;
42076 + atomic_unchecked_t bitmap_data;
42077 + atomic_unchecked_t bg_allocs;
42078 + atomic_unchecked_t bg_extends;
42079 };
42080
42081 enum ocfs2_local_alloc_state
42082 diff -urNp linux-2.6.32.43/fs/ocfs2/suballoc.c linux-2.6.32.43/fs/ocfs2/suballoc.c
42083 --- linux-2.6.32.43/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
42084 +++ linux-2.6.32.43/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
42085 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
42086 mlog_errno(status);
42087 goto bail;
42088 }
42089 - atomic_inc(&osb->alloc_stats.bg_extends);
42090 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
42091
42092 /* You should never ask for this much metadata */
42093 BUG_ON(bits_wanted >
42094 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
42095 mlog_errno(status);
42096 goto bail;
42097 }
42098 - atomic_inc(&osb->alloc_stats.bg_allocs);
42099 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
42100
42101 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
42102 ac->ac_bits_given += (*num_bits);
42103 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
42104 mlog_errno(status);
42105 goto bail;
42106 }
42107 - atomic_inc(&osb->alloc_stats.bg_allocs);
42108 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
42109
42110 BUG_ON(num_bits != 1);
42111
42112 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
42113 cluster_start,
42114 num_clusters);
42115 if (!status)
42116 - atomic_inc(&osb->alloc_stats.local_data);
42117 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
42118 } else {
42119 if (min_clusters > (osb->bitmap_cpg - 1)) {
42120 /* The only paths asking for contiguousness
42121 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
42122 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
42123 bg_blkno,
42124 bg_bit_off);
42125 - atomic_inc(&osb->alloc_stats.bitmap_data);
42126 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
42127 }
42128 }
42129 if (status < 0) {
42130 diff -urNp linux-2.6.32.43/fs/ocfs2/super.c linux-2.6.32.43/fs/ocfs2/super.c
42131 --- linux-2.6.32.43/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
42132 +++ linux-2.6.32.43/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
42133 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
42134 "%10s => GlobalAllocs: %d LocalAllocs: %d "
42135 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
42136 "Stats",
42137 - atomic_read(&osb->alloc_stats.bitmap_data),
42138 - atomic_read(&osb->alloc_stats.local_data),
42139 - atomic_read(&osb->alloc_stats.bg_allocs),
42140 - atomic_read(&osb->alloc_stats.moves),
42141 - atomic_read(&osb->alloc_stats.bg_extends));
42142 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
42143 + atomic_read_unchecked(&osb->alloc_stats.local_data),
42144 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
42145 + atomic_read_unchecked(&osb->alloc_stats.moves),
42146 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
42147
42148 out += snprintf(buf + out, len - out,
42149 "%10s => State: %u Descriptor: %llu Size: %u bits "
42150 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
42151 spin_lock_init(&osb->osb_xattr_lock);
42152 ocfs2_init_inode_steal_slot(osb);
42153
42154 - atomic_set(&osb->alloc_stats.moves, 0);
42155 - atomic_set(&osb->alloc_stats.local_data, 0);
42156 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
42157 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
42158 - atomic_set(&osb->alloc_stats.bg_extends, 0);
42159 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
42160 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
42161 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
42162 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
42163 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
42164
42165 /* Copy the blockcheck stats from the superblock probe */
42166 osb->osb_ecc_stats = *stats;
42167 diff -urNp linux-2.6.32.43/fs/open.c linux-2.6.32.43/fs/open.c
42168 --- linux-2.6.32.43/fs/open.c 2011-03-27 14:31:47.000000000 -0400
42169 +++ linux-2.6.32.43/fs/open.c 2011-04-17 15:56:46.000000000 -0400
42170 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
42171 error = locks_verify_truncate(inode, NULL, length);
42172 if (!error)
42173 error = security_path_truncate(&path, length, 0);
42174 +
42175 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
42176 + error = -EACCES;
42177 +
42178 if (!error) {
42179 vfs_dq_init(inode);
42180 error = do_truncate(path.dentry, length, 0, NULL);
42181 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
42182 if (__mnt_is_readonly(path.mnt))
42183 res = -EROFS;
42184
42185 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
42186 + res = -EACCES;
42187 +
42188 out_path_release:
42189 path_put(&path);
42190 out:
42191 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
42192 if (error)
42193 goto dput_and_out;
42194
42195 + gr_log_chdir(path.dentry, path.mnt);
42196 +
42197 set_fs_pwd(current->fs, &path);
42198
42199 dput_and_out:
42200 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
42201 goto out_putf;
42202
42203 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
42204 +
42205 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
42206 + error = -EPERM;
42207 +
42208 + if (!error)
42209 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
42210 +
42211 if (!error)
42212 set_fs_pwd(current->fs, &file->f_path);
42213 out_putf:
42214 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
42215 if (!capable(CAP_SYS_CHROOT))
42216 goto dput_and_out;
42217
42218 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
42219 + goto dput_and_out;
42220 +
42221 + if (gr_handle_chroot_caps(&path)) {
42222 + error = -ENOMEM;
42223 + goto dput_and_out;
42224 + }
42225 +
42226 set_fs_root(current->fs, &path);
42227 +
42228 + gr_handle_chroot_chdir(&path);
42229 +
42230 error = 0;
42231 dput_and_out:
42232 path_put(&path);
42233 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42234 err = mnt_want_write_file(file);
42235 if (err)
42236 goto out_putf;
42237 +
42238 mutex_lock(&inode->i_mutex);
42239 +
42240 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
42241 + err = -EACCES;
42242 + goto out_unlock;
42243 + }
42244 +
42245 if (mode == (mode_t) -1)
42246 mode = inode->i_mode;
42247 +
42248 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
42249 + err = -EPERM;
42250 + goto out_unlock;
42251 + }
42252 +
42253 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42254 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42255 err = notify_change(dentry, &newattrs);
42256 +
42257 +out_unlock:
42258 mutex_unlock(&inode->i_mutex);
42259 mnt_drop_write(file->f_path.mnt);
42260 out_putf:
42261 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42262 error = mnt_want_write(path.mnt);
42263 if (error)
42264 goto dput_and_out;
42265 +
42266 mutex_lock(&inode->i_mutex);
42267 +
42268 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42269 + error = -EACCES;
42270 + goto out_unlock;
42271 + }
42272 +
42273 if (mode == (mode_t) -1)
42274 mode = inode->i_mode;
42275 +
42276 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42277 + error = -EACCES;
42278 + goto out_unlock;
42279 + }
42280 +
42281 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42282 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42283 error = notify_change(path.dentry, &newattrs);
42284 +
42285 +out_unlock:
42286 mutex_unlock(&inode->i_mutex);
42287 mnt_drop_write(path.mnt);
42288 dput_and_out:
42289 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
42290 return sys_fchmodat(AT_FDCWD, filename, mode);
42291 }
42292
42293 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
42294 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
42295 {
42296 struct inode *inode = dentry->d_inode;
42297 int error;
42298 struct iattr newattrs;
42299
42300 + if (!gr_acl_handle_chown(dentry, mnt))
42301 + return -EACCES;
42302 +
42303 newattrs.ia_valid = ATTR_CTIME;
42304 if (user != (uid_t) -1) {
42305 newattrs.ia_valid |= ATTR_UID;
42306 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
42307 error = mnt_want_write(path.mnt);
42308 if (error)
42309 goto out_release;
42310 - error = chown_common(path.dentry, user, group);
42311 + error = chown_common(path.dentry, user, group, path.mnt);
42312 mnt_drop_write(path.mnt);
42313 out_release:
42314 path_put(&path);
42315 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
42316 error = mnt_want_write(path.mnt);
42317 if (error)
42318 goto out_release;
42319 - error = chown_common(path.dentry, user, group);
42320 + error = chown_common(path.dentry, user, group, path.mnt);
42321 mnt_drop_write(path.mnt);
42322 out_release:
42323 path_put(&path);
42324 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
42325 error = mnt_want_write(path.mnt);
42326 if (error)
42327 goto out_release;
42328 - error = chown_common(path.dentry, user, group);
42329 + error = chown_common(path.dentry, user, group, path.mnt);
42330 mnt_drop_write(path.mnt);
42331 out_release:
42332 path_put(&path);
42333 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
42334 goto out_fput;
42335 dentry = file->f_path.dentry;
42336 audit_inode(NULL, dentry);
42337 - error = chown_common(dentry, user, group);
42338 + error = chown_common(dentry, user, group, file->f_path.mnt);
42339 mnt_drop_write(file->f_path.mnt);
42340 out_fput:
42341 fput(file);
42342 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
42343 if (!IS_ERR(tmp)) {
42344 fd = get_unused_fd_flags(flags);
42345 if (fd >= 0) {
42346 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
42347 + struct file *f;
42348 + /* don't allow to be set by userland */
42349 + flags &= ~FMODE_GREXEC;
42350 + f = do_filp_open(dfd, tmp, flags, mode, 0);
42351 if (IS_ERR(f)) {
42352 put_unused_fd(fd);
42353 fd = PTR_ERR(f);
42354 diff -urNp linux-2.6.32.43/fs/partitions/ldm.c linux-2.6.32.43/fs/partitions/ldm.c
42355 --- linux-2.6.32.43/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
42356 +++ linux-2.6.32.43/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
42357 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42358 ldm_error ("A VBLK claims to have %d parts.", num);
42359 return false;
42360 }
42361 +
42362 if (rec >= num) {
42363 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42364 return false;
42365 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42366 goto found;
42367 }
42368
42369 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42370 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42371 if (!f) {
42372 ldm_crit ("Out of memory.");
42373 return false;
42374 diff -urNp linux-2.6.32.43/fs/partitions/mac.c linux-2.6.32.43/fs/partitions/mac.c
42375 --- linux-2.6.32.43/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
42376 +++ linux-2.6.32.43/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
42377 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
42378 return 0; /* not a MacOS disk */
42379 }
42380 blocks_in_map = be32_to_cpu(part->map_count);
42381 + printk(" [mac]");
42382 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
42383 put_dev_sector(sect);
42384 return 0;
42385 }
42386 - printk(" [mac]");
42387 for (slot = 1; slot <= blocks_in_map; ++slot) {
42388 int pos = slot * secsize;
42389 put_dev_sector(sect);
42390 diff -urNp linux-2.6.32.43/fs/pipe.c linux-2.6.32.43/fs/pipe.c
42391 --- linux-2.6.32.43/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
42392 +++ linux-2.6.32.43/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
42393 @@ -401,9 +401,9 @@ redo:
42394 }
42395 if (bufs) /* More to do? */
42396 continue;
42397 - if (!pipe->writers)
42398 + if (!atomic_read(&pipe->writers))
42399 break;
42400 - if (!pipe->waiting_writers) {
42401 + if (!atomic_read(&pipe->waiting_writers)) {
42402 /* syscall merging: Usually we must not sleep
42403 * if O_NONBLOCK is set, or if we got some data.
42404 * But if a writer sleeps in kernel space, then
42405 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
42406 mutex_lock(&inode->i_mutex);
42407 pipe = inode->i_pipe;
42408
42409 - if (!pipe->readers) {
42410 + if (!atomic_read(&pipe->readers)) {
42411 send_sig(SIGPIPE, current, 0);
42412 ret = -EPIPE;
42413 goto out;
42414 @@ -511,7 +511,7 @@ redo1:
42415 for (;;) {
42416 int bufs;
42417
42418 - if (!pipe->readers) {
42419 + if (!atomic_read(&pipe->readers)) {
42420 send_sig(SIGPIPE, current, 0);
42421 if (!ret)
42422 ret = -EPIPE;
42423 @@ -597,9 +597,9 @@ redo2:
42424 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42425 do_wakeup = 0;
42426 }
42427 - pipe->waiting_writers++;
42428 + atomic_inc(&pipe->waiting_writers);
42429 pipe_wait(pipe);
42430 - pipe->waiting_writers--;
42431 + atomic_dec(&pipe->waiting_writers);
42432 }
42433 out:
42434 mutex_unlock(&inode->i_mutex);
42435 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
42436 mask = 0;
42437 if (filp->f_mode & FMODE_READ) {
42438 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42439 - if (!pipe->writers && filp->f_version != pipe->w_counter)
42440 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42441 mask |= POLLHUP;
42442 }
42443
42444 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
42445 * Most Unices do not set POLLERR for FIFOs but on Linux they
42446 * behave exactly like pipes for poll().
42447 */
42448 - if (!pipe->readers)
42449 + if (!atomic_read(&pipe->readers))
42450 mask |= POLLERR;
42451 }
42452
42453 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
42454
42455 mutex_lock(&inode->i_mutex);
42456 pipe = inode->i_pipe;
42457 - pipe->readers -= decr;
42458 - pipe->writers -= decw;
42459 + atomic_sub(decr, &pipe->readers);
42460 + atomic_sub(decw, &pipe->writers);
42461
42462 - if (!pipe->readers && !pipe->writers) {
42463 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42464 free_pipe_info(inode);
42465 } else {
42466 wake_up_interruptible_sync(&pipe->wait);
42467 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
42468
42469 if (inode->i_pipe) {
42470 ret = 0;
42471 - inode->i_pipe->readers++;
42472 + atomic_inc(&inode->i_pipe->readers);
42473 }
42474
42475 mutex_unlock(&inode->i_mutex);
42476 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
42477
42478 if (inode->i_pipe) {
42479 ret = 0;
42480 - inode->i_pipe->writers++;
42481 + atomic_inc(&inode->i_pipe->writers);
42482 }
42483
42484 mutex_unlock(&inode->i_mutex);
42485 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
42486 if (inode->i_pipe) {
42487 ret = 0;
42488 if (filp->f_mode & FMODE_READ)
42489 - inode->i_pipe->readers++;
42490 + atomic_inc(&inode->i_pipe->readers);
42491 if (filp->f_mode & FMODE_WRITE)
42492 - inode->i_pipe->writers++;
42493 + atomic_inc(&inode->i_pipe->writers);
42494 }
42495
42496 mutex_unlock(&inode->i_mutex);
42497 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
42498 inode->i_pipe = NULL;
42499 }
42500
42501 -static struct vfsmount *pipe_mnt __read_mostly;
42502 +struct vfsmount *pipe_mnt __read_mostly;
42503 static int pipefs_delete_dentry(struct dentry *dentry)
42504 {
42505 /*
42506 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
42507 goto fail_iput;
42508 inode->i_pipe = pipe;
42509
42510 - pipe->readers = pipe->writers = 1;
42511 + atomic_set(&pipe->readers, 1);
42512 + atomic_set(&pipe->writers, 1);
42513 inode->i_fop = &rdwr_pipefifo_fops;
42514
42515 /*
42516 diff -urNp linux-2.6.32.43/fs/proc/array.c linux-2.6.32.43/fs/proc/array.c
42517 --- linux-2.6.32.43/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
42518 +++ linux-2.6.32.43/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
42519 @@ -60,6 +60,7 @@
42520 #include <linux/tty.h>
42521 #include <linux/string.h>
42522 #include <linux/mman.h>
42523 +#include <linux/grsecurity.h>
42524 #include <linux/proc_fs.h>
42525 #include <linux/ioport.h>
42526 #include <linux/uaccess.h>
42527 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
42528 p->nivcsw);
42529 }
42530
42531 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42532 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
42533 +{
42534 + if (p->mm)
42535 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42536 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42537 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42538 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42539 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42540 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42541 + else
42542 + seq_printf(m, "PaX:\t-----\n");
42543 +}
42544 +#endif
42545 +
42546 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42547 struct pid *pid, struct task_struct *task)
42548 {
42549 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
42550 task_cap(m, task);
42551 cpuset_task_status_allowed(m, task);
42552 task_context_switch_counts(m, task);
42553 +
42554 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42555 + task_pax(m, task);
42556 +#endif
42557 +
42558 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42559 + task_grsec_rbac(m, task);
42560 +#endif
42561 +
42562 return 0;
42563 }
42564
42565 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42566 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42567 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42568 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42569 +#endif
42570 +
42571 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42572 struct pid *pid, struct task_struct *task, int whole)
42573 {
42574 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
42575 cputime_t cutime, cstime, utime, stime;
42576 cputime_t cgtime, gtime;
42577 unsigned long rsslim = 0;
42578 - char tcomm[sizeof(task->comm)];
42579 + char tcomm[sizeof(task->comm)] = { 0 };
42580 unsigned long flags;
42581
42582 + pax_track_stack();
42583 +
42584 state = *get_task_state(task);
42585 vsize = eip = esp = 0;
42586 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42587 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
42588 gtime = task_gtime(task);
42589 }
42590
42591 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42592 + if (PAX_RAND_FLAGS(mm)) {
42593 + eip = 0;
42594 + esp = 0;
42595 + wchan = 0;
42596 + }
42597 +#endif
42598 +#ifdef CONFIG_GRKERNSEC_HIDESYM
42599 + wchan = 0;
42600 + eip =0;
42601 + esp =0;
42602 +#endif
42603 +
42604 /* scale priority and nice values from timeslices to -20..20 */
42605 /* to make it look like a "normal" Unix priority/nice value */
42606 priority = task_prio(task);
42607 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
42608 vsize,
42609 mm ? get_mm_rss(mm) : 0,
42610 rsslim,
42611 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42612 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42613 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42614 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42615 +#else
42616 mm ? (permitted ? mm->start_code : 1) : 0,
42617 mm ? (permitted ? mm->end_code : 1) : 0,
42618 (permitted && mm) ? mm->start_stack : 0,
42619 +#endif
42620 esp,
42621 eip,
42622 /* The signal information here is obsolete.
42623 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
42624
42625 return 0;
42626 }
42627 +
42628 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42629 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42630 +{
42631 + u32 curr_ip = 0;
42632 + unsigned long flags;
42633 +
42634 + if (lock_task_sighand(task, &flags)) {
42635 + curr_ip = task->signal->curr_ip;
42636 + unlock_task_sighand(task, &flags);
42637 + }
42638 +
42639 + return sprintf(buffer, "%pI4\n", &curr_ip);
42640 +}
42641 +#endif
42642 diff -urNp linux-2.6.32.43/fs/proc/base.c linux-2.6.32.43/fs/proc/base.c
42643 --- linux-2.6.32.43/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
42644 +++ linux-2.6.32.43/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
42645 @@ -102,6 +102,22 @@ struct pid_entry {
42646 union proc_op op;
42647 };
42648
42649 +struct getdents_callback {
42650 + struct linux_dirent __user * current_dir;
42651 + struct linux_dirent __user * previous;
42652 + struct file * file;
42653 + int count;
42654 + int error;
42655 +};
42656 +
42657 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42658 + loff_t offset, u64 ino, unsigned int d_type)
42659 +{
42660 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
42661 + buf->error = -EINVAL;
42662 + return 0;
42663 +}
42664 +
42665 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42666 .name = (NAME), \
42667 .len = sizeof(NAME) - 1, \
42668 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
42669 if (task == current)
42670 return 0;
42671
42672 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42673 + return -EPERM;
42674 +
42675 /*
42676 * If current is actively ptrace'ing, and would also be
42677 * permitted to freshly attach with ptrace now, permit it.
42678 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
42679 if (!mm->arg_end)
42680 goto out_mm; /* Shh! No looking before we're done */
42681
42682 + if (gr_acl_handle_procpidmem(task))
42683 + goto out_mm;
42684 +
42685 len = mm->arg_end - mm->arg_start;
42686
42687 if (len > PAGE_SIZE)
42688 @@ -287,12 +309,28 @@ out:
42689 return res;
42690 }
42691
42692 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42693 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42694 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42695 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42696 +#endif
42697 +
42698 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42699 {
42700 int res = 0;
42701 struct mm_struct *mm = get_task_mm(task);
42702 if (mm) {
42703 unsigned int nwords = 0;
42704 +
42705 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42706 + /* allow if we're currently ptracing this task */
42707 + if (PAX_RAND_FLAGS(mm) &&
42708 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42709 + mmput(mm);
42710 + return res;
42711 + }
42712 +#endif
42713 +
42714 do {
42715 nwords += 2;
42716 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42717 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
42718 }
42719
42720
42721 -#ifdef CONFIG_KALLSYMS
42722 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42723 /*
42724 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42725 * Returns the resolved symbol. If that fails, simply return the address.
42726 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
42727 }
42728 #endif /* CONFIG_KALLSYMS */
42729
42730 -#ifdef CONFIG_STACKTRACE
42731 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42732
42733 #define MAX_STACK_TRACE_DEPTH 64
42734
42735 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
42736 return count;
42737 }
42738
42739 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42740 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42741 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42742 {
42743 long nr;
42744 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
42745 /************************************************************************/
42746
42747 /* permission checks */
42748 -static int proc_fd_access_allowed(struct inode *inode)
42749 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42750 {
42751 struct task_struct *task;
42752 int allowed = 0;
42753 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
42754 */
42755 task = get_proc_task(inode);
42756 if (task) {
42757 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42758 + if (log)
42759 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42760 + else
42761 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42762 put_task_struct(task);
42763 }
42764 return allowed;
42765 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
42766 if (!task)
42767 goto out_no_task;
42768
42769 + if (gr_acl_handle_procpidmem(task))
42770 + goto out;
42771 +
42772 if (!ptrace_may_access(task, PTRACE_MODE_READ))
42773 goto out;
42774
42775 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
42776 path_put(&nd->path);
42777
42778 /* Are we allowed to snoop on the tasks file descriptors? */
42779 - if (!proc_fd_access_allowed(inode))
42780 + if (!proc_fd_access_allowed(inode,0))
42781 goto out;
42782
42783 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42784 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
42785 struct path path;
42786
42787 /* Are we allowed to snoop on the tasks file descriptors? */
42788 - if (!proc_fd_access_allowed(inode))
42789 - goto out;
42790 + /* logging this is needed for learning on chromium to work properly,
42791 + but we don't want to flood the logs from 'ps' which does a readlink
42792 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42793 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
42794 + */
42795 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42796 + if (!proc_fd_access_allowed(inode,0))
42797 + goto out;
42798 + } else {
42799 + if (!proc_fd_access_allowed(inode,1))
42800 + goto out;
42801 + }
42802
42803 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42804 if (error)
42805 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
42806 rcu_read_lock();
42807 cred = __task_cred(task);
42808 inode->i_uid = cred->euid;
42809 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42810 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42811 +#else
42812 inode->i_gid = cred->egid;
42813 +#endif
42814 rcu_read_unlock();
42815 }
42816 security_task_to_inode(task, inode);
42817 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42818 struct inode *inode = dentry->d_inode;
42819 struct task_struct *task;
42820 const struct cred *cred;
42821 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42822 + const struct cred *tmpcred = current_cred();
42823 +#endif
42824
42825 generic_fillattr(inode, stat);
42826
42827 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42828 stat->uid = 0;
42829 stat->gid = 0;
42830 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42831 +
42832 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42833 + rcu_read_unlock();
42834 + return -ENOENT;
42835 + }
42836 +
42837 if (task) {
42838 + cred = __task_cred(task);
42839 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42840 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42841 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42842 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42843 +#endif
42844 + ) {
42845 +#endif
42846 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42847 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42848 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42849 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42850 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42851 +#endif
42852 task_dumpable(task)) {
42853 - cred = __task_cred(task);
42854 stat->uid = cred->euid;
42855 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42856 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42857 +#else
42858 stat->gid = cred->egid;
42859 +#endif
42860 }
42861 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42862 + } else {
42863 + rcu_read_unlock();
42864 + return -ENOENT;
42865 + }
42866 +#endif
42867 }
42868 rcu_read_unlock();
42869 return 0;
42870 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42871
42872 if (task) {
42873 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42874 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42875 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42876 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42877 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42878 +#endif
42879 task_dumpable(task)) {
42880 rcu_read_lock();
42881 cred = __task_cred(task);
42882 inode->i_uid = cred->euid;
42883 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42884 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42885 +#else
42886 inode->i_gid = cred->egid;
42887 +#endif
42888 rcu_read_unlock();
42889 } else {
42890 inode->i_uid = 0;
42891 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42892 int fd = proc_fd(inode);
42893
42894 if (task) {
42895 - files = get_files_struct(task);
42896 + if (!gr_acl_handle_procpidmem(task))
42897 + files = get_files_struct(task);
42898 put_task_struct(task);
42899 }
42900 if (files) {
42901 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
42902 static int proc_fd_permission(struct inode *inode, int mask)
42903 {
42904 int rv;
42905 + struct task_struct *task;
42906
42907 rv = generic_permission(inode, mask, NULL);
42908 - if (rv == 0)
42909 - return 0;
42910 +
42911 if (task_pid(current) == proc_pid(inode))
42912 rv = 0;
42913 +
42914 + task = get_proc_task(inode);
42915 + if (task == NULL)
42916 + return rv;
42917 +
42918 + if (gr_acl_handle_procpidmem(task))
42919 + rv = -EACCES;
42920 +
42921 + put_task_struct(task);
42922 +
42923 return rv;
42924 }
42925
42926 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42927 if (!task)
42928 goto out_no_task;
42929
42930 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42931 + goto out;
42932 +
42933 /*
42934 * Yes, it does not scale. And it should not. Don't add
42935 * new entries into /proc/<tgid>/ without very good reasons.
42936 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42937 if (!task)
42938 goto out_no_task;
42939
42940 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42941 + goto out;
42942 +
42943 ret = 0;
42944 i = filp->f_pos;
42945 switch (i) {
42946 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42947 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42948 void *cookie)
42949 {
42950 - char *s = nd_get_link(nd);
42951 + const char *s = nd_get_link(nd);
42952 if (!IS_ERR(s))
42953 __putname(s);
42954 }
42955 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42956 #ifdef CONFIG_SCHED_DEBUG
42957 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42958 #endif
42959 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42960 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42961 INF("syscall", S_IRUSR, proc_pid_syscall),
42962 #endif
42963 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42964 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42965 #ifdef CONFIG_SECURITY
42966 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42967 #endif
42968 -#ifdef CONFIG_KALLSYMS
42969 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42970 INF("wchan", S_IRUGO, proc_pid_wchan),
42971 #endif
42972 -#ifdef CONFIG_STACKTRACE
42973 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42974 ONE("stack", S_IRUSR, proc_pid_stack),
42975 #endif
42976 #ifdef CONFIG_SCHEDSTATS
42977 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42978 #ifdef CONFIG_TASK_IO_ACCOUNTING
42979 INF("io", S_IRUGO, proc_tgid_io_accounting),
42980 #endif
42981 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42982 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42983 +#endif
42984 };
42985
42986 static int proc_tgid_base_readdir(struct file * filp,
42987 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42988 if (!inode)
42989 goto out;
42990
42991 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42992 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42993 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42994 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42995 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42996 +#else
42997 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42998 +#endif
42999 inode->i_op = &proc_tgid_base_inode_operations;
43000 inode->i_fop = &proc_tgid_base_operations;
43001 inode->i_flags|=S_IMMUTABLE;
43002 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
43003 if (!task)
43004 goto out;
43005
43006 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43007 + goto out_put_task;
43008 +
43009 result = proc_pid_instantiate(dir, dentry, task, NULL);
43010 +out_put_task:
43011 put_task_struct(task);
43012 out:
43013 return result;
43014 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
43015 {
43016 unsigned int nr;
43017 struct task_struct *reaper;
43018 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43019 + const struct cred *tmpcred = current_cred();
43020 + const struct cred *itercred;
43021 +#endif
43022 + filldir_t __filldir = filldir;
43023 struct tgid_iter iter;
43024 struct pid_namespace *ns;
43025
43026 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
43027 for (iter = next_tgid(ns, iter);
43028 iter.task;
43029 iter.tgid += 1, iter = next_tgid(ns, iter)) {
43030 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43031 + rcu_read_lock();
43032 + itercred = __task_cred(iter.task);
43033 +#endif
43034 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
43035 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43036 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
43037 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43038 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43039 +#endif
43040 + )
43041 +#endif
43042 + )
43043 + __filldir = &gr_fake_filldir;
43044 + else
43045 + __filldir = filldir;
43046 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43047 + rcu_read_unlock();
43048 +#endif
43049 filp->f_pos = iter.tgid + TGID_OFFSET;
43050 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
43051 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
43052 put_task_struct(iter.task);
43053 goto out;
43054 }
43055 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
43056 #ifdef CONFIG_SCHED_DEBUG
43057 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
43058 #endif
43059 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43060 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43061 INF("syscall", S_IRUSR, proc_pid_syscall),
43062 #endif
43063 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43064 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
43065 #ifdef CONFIG_SECURITY
43066 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43067 #endif
43068 -#ifdef CONFIG_KALLSYMS
43069 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43070 INF("wchan", S_IRUGO, proc_pid_wchan),
43071 #endif
43072 -#ifdef CONFIG_STACKTRACE
43073 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43074 ONE("stack", S_IRUSR, proc_pid_stack),
43075 #endif
43076 #ifdef CONFIG_SCHEDSTATS
43077 diff -urNp linux-2.6.32.43/fs/proc/cmdline.c linux-2.6.32.43/fs/proc/cmdline.c
43078 --- linux-2.6.32.43/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
43079 +++ linux-2.6.32.43/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
43080 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
43081
43082 static int __init proc_cmdline_init(void)
43083 {
43084 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43085 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
43086 +#else
43087 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
43088 +#endif
43089 return 0;
43090 }
43091 module_init(proc_cmdline_init);
43092 diff -urNp linux-2.6.32.43/fs/proc/devices.c linux-2.6.32.43/fs/proc/devices.c
43093 --- linux-2.6.32.43/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
43094 +++ linux-2.6.32.43/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
43095 @@ -64,7 +64,11 @@ static const struct file_operations proc
43096
43097 static int __init proc_devices_init(void)
43098 {
43099 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43100 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
43101 +#else
43102 proc_create("devices", 0, NULL, &proc_devinfo_operations);
43103 +#endif
43104 return 0;
43105 }
43106 module_init(proc_devices_init);
43107 diff -urNp linux-2.6.32.43/fs/proc/inode.c linux-2.6.32.43/fs/proc/inode.c
43108 --- linux-2.6.32.43/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
43109 +++ linux-2.6.32.43/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
43110 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
43111 if (de->mode) {
43112 inode->i_mode = de->mode;
43113 inode->i_uid = de->uid;
43114 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43115 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43116 +#else
43117 inode->i_gid = de->gid;
43118 +#endif
43119 }
43120 if (de->size)
43121 inode->i_size = de->size;
43122 diff -urNp linux-2.6.32.43/fs/proc/internal.h linux-2.6.32.43/fs/proc/internal.h
43123 --- linux-2.6.32.43/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
43124 +++ linux-2.6.32.43/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
43125 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
43126 struct pid *pid, struct task_struct *task);
43127 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
43128 struct pid *pid, struct task_struct *task);
43129 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43130 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
43131 +#endif
43132 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
43133
43134 extern const struct file_operations proc_maps_operations;
43135 diff -urNp linux-2.6.32.43/fs/proc/Kconfig linux-2.6.32.43/fs/proc/Kconfig
43136 --- linux-2.6.32.43/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
43137 +++ linux-2.6.32.43/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
43138 @@ -30,12 +30,12 @@ config PROC_FS
43139
43140 config PROC_KCORE
43141 bool "/proc/kcore support" if !ARM
43142 - depends on PROC_FS && MMU
43143 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
43144
43145 config PROC_VMCORE
43146 bool "/proc/vmcore support (EXPERIMENTAL)"
43147 - depends on PROC_FS && CRASH_DUMP
43148 - default y
43149 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
43150 + default n
43151 help
43152 Exports the dump image of crashed kernel in ELF format.
43153
43154 @@ -59,8 +59,8 @@ config PROC_SYSCTL
43155 limited in memory.
43156
43157 config PROC_PAGE_MONITOR
43158 - default y
43159 - depends on PROC_FS && MMU
43160 + default n
43161 + depends on PROC_FS && MMU && !GRKERNSEC
43162 bool "Enable /proc page monitoring" if EMBEDDED
43163 help
43164 Various /proc files exist to monitor process memory utilization:
43165 diff -urNp linux-2.6.32.43/fs/proc/kcore.c linux-2.6.32.43/fs/proc/kcore.c
43166 --- linux-2.6.32.43/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
43167 +++ linux-2.6.32.43/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
43168 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
43169 off_t offset = 0;
43170 struct kcore_list *m;
43171
43172 + pax_track_stack();
43173 +
43174 /* setup ELF header */
43175 elf = (struct elfhdr *) bufp;
43176 bufp += sizeof(struct elfhdr);
43177 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
43178 * the addresses in the elf_phdr on our list.
43179 */
43180 start = kc_offset_to_vaddr(*fpos - elf_buflen);
43181 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
43182 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
43183 + if (tsz > buflen)
43184 tsz = buflen;
43185 -
43186 +
43187 while (buflen) {
43188 struct kcore_list *m;
43189
43190 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
43191 kfree(elf_buf);
43192 } else {
43193 if (kern_addr_valid(start)) {
43194 - unsigned long n;
43195 + char *elf_buf;
43196 + mm_segment_t oldfs;
43197
43198 - n = copy_to_user(buffer, (char *)start, tsz);
43199 - /*
43200 - * We cannot distingush between fault on source
43201 - * and fault on destination. When this happens
43202 - * we clear too and hope it will trigger the
43203 - * EFAULT again.
43204 - */
43205 - if (n) {
43206 - if (clear_user(buffer + tsz - n,
43207 - n))
43208 + elf_buf = kmalloc(tsz, GFP_KERNEL);
43209 + if (!elf_buf)
43210 + return -ENOMEM;
43211 + oldfs = get_fs();
43212 + set_fs(KERNEL_DS);
43213 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
43214 + set_fs(oldfs);
43215 + if (copy_to_user(buffer, elf_buf, tsz)) {
43216 + kfree(elf_buf);
43217 return -EFAULT;
43218 + }
43219 }
43220 + set_fs(oldfs);
43221 + kfree(elf_buf);
43222 } else {
43223 if (clear_user(buffer, tsz))
43224 return -EFAULT;
43225 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
43226
43227 static int open_kcore(struct inode *inode, struct file *filp)
43228 {
43229 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
43230 + return -EPERM;
43231 +#endif
43232 if (!capable(CAP_SYS_RAWIO))
43233 return -EPERM;
43234 if (kcore_need_update)
43235 diff -urNp linux-2.6.32.43/fs/proc/meminfo.c linux-2.6.32.43/fs/proc/meminfo.c
43236 --- linux-2.6.32.43/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
43237 +++ linux-2.6.32.43/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
43238 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
43239 unsigned long pages[NR_LRU_LISTS];
43240 int lru;
43241
43242 + pax_track_stack();
43243 +
43244 /*
43245 * display in kilobytes.
43246 */
43247 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
43248 vmi.used >> 10,
43249 vmi.largest_chunk >> 10
43250 #ifdef CONFIG_MEMORY_FAILURE
43251 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
43252 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
43253 #endif
43254 );
43255
43256 diff -urNp linux-2.6.32.43/fs/proc/nommu.c linux-2.6.32.43/fs/proc/nommu.c
43257 --- linux-2.6.32.43/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
43258 +++ linux-2.6.32.43/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
43259 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
43260 if (len < 1)
43261 len = 1;
43262 seq_printf(m, "%*c", len, ' ');
43263 - seq_path(m, &file->f_path, "");
43264 + seq_path(m, &file->f_path, "\n\\");
43265 }
43266
43267 seq_putc(m, '\n');
43268 diff -urNp linux-2.6.32.43/fs/proc/proc_net.c linux-2.6.32.43/fs/proc/proc_net.c
43269 --- linux-2.6.32.43/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
43270 +++ linux-2.6.32.43/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
43271 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
43272 struct task_struct *task;
43273 struct nsproxy *ns;
43274 struct net *net = NULL;
43275 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43276 + const struct cred *cred = current_cred();
43277 +#endif
43278 +
43279 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43280 + if (cred->fsuid)
43281 + return net;
43282 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43283 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
43284 + return net;
43285 +#endif
43286
43287 rcu_read_lock();
43288 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43289 diff -urNp linux-2.6.32.43/fs/proc/proc_sysctl.c linux-2.6.32.43/fs/proc/proc_sysctl.c
43290 --- linux-2.6.32.43/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
43291 +++ linux-2.6.32.43/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
43292 @@ -7,6 +7,8 @@
43293 #include <linux/security.h>
43294 #include "internal.h"
43295
43296 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43297 +
43298 static const struct dentry_operations proc_sys_dentry_operations;
43299 static const struct file_operations proc_sys_file_operations;
43300 static const struct inode_operations proc_sys_inode_operations;
43301 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
43302 if (!p)
43303 goto out;
43304
43305 + if (gr_handle_sysctl(p, MAY_EXEC))
43306 + goto out;
43307 +
43308 err = ERR_PTR(-ENOMEM);
43309 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43310 if (h)
43311 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
43312 if (*pos < file->f_pos)
43313 continue;
43314
43315 + if (gr_handle_sysctl(table, 0))
43316 + continue;
43317 +
43318 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43319 if (res)
43320 return res;
43321 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
43322 if (IS_ERR(head))
43323 return PTR_ERR(head);
43324
43325 + if (table && gr_handle_sysctl(table, MAY_EXEC))
43326 + return -ENOENT;
43327 +
43328 generic_fillattr(inode, stat);
43329 if (table)
43330 stat->mode = (stat->mode & S_IFMT) | table->mode;
43331 diff -urNp linux-2.6.32.43/fs/proc/root.c linux-2.6.32.43/fs/proc/root.c
43332 --- linux-2.6.32.43/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
43333 +++ linux-2.6.32.43/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
43334 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
43335 #ifdef CONFIG_PROC_DEVICETREE
43336 proc_device_tree_init();
43337 #endif
43338 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
43339 +#ifdef CONFIG_GRKERNSEC_PROC_USER
43340 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43341 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43342 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43343 +#endif
43344 +#else
43345 proc_mkdir("bus", NULL);
43346 +#endif
43347 proc_sys_init();
43348 }
43349
43350 diff -urNp linux-2.6.32.43/fs/proc/task_mmu.c linux-2.6.32.43/fs/proc/task_mmu.c
43351 --- linux-2.6.32.43/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
43352 +++ linux-2.6.32.43/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
43353 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
43354 "VmStk:\t%8lu kB\n"
43355 "VmExe:\t%8lu kB\n"
43356 "VmLib:\t%8lu kB\n"
43357 - "VmPTE:\t%8lu kB\n",
43358 - hiwater_vm << (PAGE_SHIFT-10),
43359 + "VmPTE:\t%8lu kB\n"
43360 +
43361 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43362 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43363 +#endif
43364 +
43365 + ,hiwater_vm << (PAGE_SHIFT-10),
43366 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43367 mm->locked_vm << (PAGE_SHIFT-10),
43368 hiwater_rss << (PAGE_SHIFT-10),
43369 total_rss << (PAGE_SHIFT-10),
43370 data << (PAGE_SHIFT-10),
43371 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43372 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
43373 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
43374 +
43375 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43376 + , mm->context.user_cs_base, mm->context.user_cs_limit
43377 +#endif
43378 +
43379 + );
43380 }
43381
43382 unsigned long task_vsize(struct mm_struct *mm)
43383 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
43384 struct proc_maps_private *priv = m->private;
43385 struct vm_area_struct *vma = v;
43386
43387 - vma_stop(priv, vma);
43388 + if (!IS_ERR(vma))
43389 + vma_stop(priv, vma);
43390 if (priv->task)
43391 put_task_struct(priv->task);
43392 }
43393 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
43394 return ret;
43395 }
43396
43397 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43398 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43399 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
43400 + _mm->pax_flags & MF_PAX_SEGMEXEC))
43401 +#endif
43402 +
43403 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43404 {
43405 struct mm_struct *mm = vma->vm_mm;
43406 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
43407 int flags = vma->vm_flags;
43408 unsigned long ino = 0;
43409 unsigned long long pgoff = 0;
43410 - unsigned long start;
43411 dev_t dev = 0;
43412 int len;
43413
43414 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
43415 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43416 }
43417
43418 - /* We don't show the stack guard page in /proc/maps */
43419 - start = vma->vm_start;
43420 - if (vma->vm_flags & VM_GROWSDOWN)
43421 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
43422 - start += PAGE_SIZE;
43423 -
43424 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43425 - start,
43426 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43427 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
43428 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
43429 +#else
43430 + vma->vm_start,
43431 vma->vm_end,
43432 +#endif
43433 flags & VM_READ ? 'r' : '-',
43434 flags & VM_WRITE ? 'w' : '-',
43435 flags & VM_EXEC ? 'x' : '-',
43436 flags & VM_MAYSHARE ? 's' : 'p',
43437 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43438 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43439 +#else
43440 pgoff,
43441 +#endif
43442 MAJOR(dev), MINOR(dev), ino, &len);
43443
43444 /*
43445 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
43446 */
43447 if (file) {
43448 pad_len_spaces(m, len);
43449 - seq_path(m, &file->f_path, "\n");
43450 + seq_path(m, &file->f_path, "\n\\");
43451 } else {
43452 const char *name = arch_vma_name(vma);
43453 if (!name) {
43454 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
43455 if (vma->vm_start <= mm->brk &&
43456 vma->vm_end >= mm->start_brk) {
43457 name = "[heap]";
43458 - } else if (vma->vm_start <= mm->start_stack &&
43459 - vma->vm_end >= mm->start_stack) {
43460 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43461 + (vma->vm_start <= mm->start_stack &&
43462 + vma->vm_end >= mm->start_stack)) {
43463 name = "[stack]";
43464 }
43465 } else {
43466 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
43467 };
43468
43469 memset(&mss, 0, sizeof mss);
43470 - mss.vma = vma;
43471 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43472 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43473 +
43474 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43475 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43476 +#endif
43477 + mss.vma = vma;
43478 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43479 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43480 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43481 + }
43482 +#endif
43483
43484 show_map_vma(m, vma);
43485
43486 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
43487 "Swap: %8lu kB\n"
43488 "KernelPageSize: %8lu kB\n"
43489 "MMUPageSize: %8lu kB\n",
43490 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43491 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43492 +#else
43493 (vma->vm_end - vma->vm_start) >> 10,
43494 +#endif
43495 mss.resident >> 10,
43496 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43497 mss.shared_clean >> 10,
43498 diff -urNp linux-2.6.32.43/fs/proc/task_nommu.c linux-2.6.32.43/fs/proc/task_nommu.c
43499 --- linux-2.6.32.43/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
43500 +++ linux-2.6.32.43/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
43501 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
43502 else
43503 bytes += kobjsize(mm);
43504
43505 - if (current->fs && current->fs->users > 1)
43506 + if (current->fs && atomic_read(&current->fs->users) > 1)
43507 sbytes += kobjsize(current->fs);
43508 else
43509 bytes += kobjsize(current->fs);
43510 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
43511 if (len < 1)
43512 len = 1;
43513 seq_printf(m, "%*c", len, ' ');
43514 - seq_path(m, &file->f_path, "");
43515 + seq_path(m, &file->f_path, "\n\\");
43516 }
43517
43518 seq_putc(m, '\n');
43519 diff -urNp linux-2.6.32.43/fs/readdir.c linux-2.6.32.43/fs/readdir.c
43520 --- linux-2.6.32.43/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
43521 +++ linux-2.6.32.43/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
43522 @@ -16,6 +16,7 @@
43523 #include <linux/security.h>
43524 #include <linux/syscalls.h>
43525 #include <linux/unistd.h>
43526 +#include <linux/namei.h>
43527
43528 #include <asm/uaccess.h>
43529
43530 @@ -67,6 +68,7 @@ struct old_linux_dirent {
43531
43532 struct readdir_callback {
43533 struct old_linux_dirent __user * dirent;
43534 + struct file * file;
43535 int result;
43536 };
43537
43538 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43539 buf->result = -EOVERFLOW;
43540 return -EOVERFLOW;
43541 }
43542 +
43543 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43544 + return 0;
43545 +
43546 buf->result++;
43547 dirent = buf->dirent;
43548 if (!access_ok(VERIFY_WRITE, dirent,
43549 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43550
43551 buf.result = 0;
43552 buf.dirent = dirent;
43553 + buf.file = file;
43554
43555 error = vfs_readdir(file, fillonedir, &buf);
43556 if (buf.result)
43557 @@ -142,6 +149,7 @@ struct linux_dirent {
43558 struct getdents_callback {
43559 struct linux_dirent __user * current_dir;
43560 struct linux_dirent __user * previous;
43561 + struct file * file;
43562 int count;
43563 int error;
43564 };
43565 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
43566 buf->error = -EOVERFLOW;
43567 return -EOVERFLOW;
43568 }
43569 +
43570 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43571 + return 0;
43572 +
43573 dirent = buf->previous;
43574 if (dirent) {
43575 if (__put_user(offset, &dirent->d_off))
43576 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43577 buf.previous = NULL;
43578 buf.count = count;
43579 buf.error = 0;
43580 + buf.file = file;
43581
43582 error = vfs_readdir(file, filldir, &buf);
43583 if (error >= 0)
43584 @@ -228,6 +241,7 @@ out:
43585 struct getdents_callback64 {
43586 struct linux_dirent64 __user * current_dir;
43587 struct linux_dirent64 __user * previous;
43588 + struct file *file;
43589 int count;
43590 int error;
43591 };
43592 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
43593 buf->error = -EINVAL; /* only used if we fail.. */
43594 if (reclen > buf->count)
43595 return -EINVAL;
43596 +
43597 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43598 + return 0;
43599 +
43600 dirent = buf->previous;
43601 if (dirent) {
43602 if (__put_user(offset, &dirent->d_off))
43603 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43604
43605 buf.current_dir = dirent;
43606 buf.previous = NULL;
43607 + buf.file = file;
43608 buf.count = count;
43609 buf.error = 0;
43610
43611 diff -urNp linux-2.6.32.43/fs/reiserfs/dir.c linux-2.6.32.43/fs/reiserfs/dir.c
43612 --- linux-2.6.32.43/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43613 +++ linux-2.6.32.43/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43614 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43615 struct reiserfs_dir_entry de;
43616 int ret = 0;
43617
43618 + pax_track_stack();
43619 +
43620 reiserfs_write_lock(inode->i_sb);
43621
43622 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43623 diff -urNp linux-2.6.32.43/fs/reiserfs/do_balan.c linux-2.6.32.43/fs/reiserfs/do_balan.c
43624 --- linux-2.6.32.43/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
43625 +++ linux-2.6.32.43/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
43626 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
43627 return;
43628 }
43629
43630 - atomic_inc(&(fs_generation(tb->tb_sb)));
43631 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43632 do_balance_starts(tb);
43633
43634 /* balance leaf returns 0 except if combining L R and S into
43635 diff -urNp linux-2.6.32.43/fs/reiserfs/item_ops.c linux-2.6.32.43/fs/reiserfs/item_ops.c
43636 --- linux-2.6.32.43/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
43637 +++ linux-2.6.32.43/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
43638 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
43639 vi->vi_index, vi->vi_type, vi->vi_ih);
43640 }
43641
43642 -static struct item_operations stat_data_ops = {
43643 +static const struct item_operations stat_data_ops = {
43644 .bytes_number = sd_bytes_number,
43645 .decrement_key = sd_decrement_key,
43646 .is_left_mergeable = sd_is_left_mergeable,
43647 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
43648 vi->vi_index, vi->vi_type, vi->vi_ih);
43649 }
43650
43651 -static struct item_operations direct_ops = {
43652 +static const struct item_operations direct_ops = {
43653 .bytes_number = direct_bytes_number,
43654 .decrement_key = direct_decrement_key,
43655 .is_left_mergeable = direct_is_left_mergeable,
43656 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
43657 vi->vi_index, vi->vi_type, vi->vi_ih);
43658 }
43659
43660 -static struct item_operations indirect_ops = {
43661 +static const struct item_operations indirect_ops = {
43662 .bytes_number = indirect_bytes_number,
43663 .decrement_key = indirect_decrement_key,
43664 .is_left_mergeable = indirect_is_left_mergeable,
43665 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
43666 printk("\n");
43667 }
43668
43669 -static struct item_operations direntry_ops = {
43670 +static const struct item_operations direntry_ops = {
43671 .bytes_number = direntry_bytes_number,
43672 .decrement_key = direntry_decrement_key,
43673 .is_left_mergeable = direntry_is_left_mergeable,
43674 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
43675 "Invalid item type observed, run fsck ASAP");
43676 }
43677
43678 -static struct item_operations errcatch_ops = {
43679 +static const struct item_operations errcatch_ops = {
43680 errcatch_bytes_number,
43681 errcatch_decrement_key,
43682 errcatch_is_left_mergeable,
43683 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
43684 #error Item types must use disk-format assigned values.
43685 #endif
43686
43687 -struct item_operations *item_ops[TYPE_ANY + 1] = {
43688 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
43689 &stat_data_ops,
43690 &indirect_ops,
43691 &direct_ops,
43692 diff -urNp linux-2.6.32.43/fs/reiserfs/journal.c linux-2.6.32.43/fs/reiserfs/journal.c
43693 --- linux-2.6.32.43/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
43694 +++ linux-2.6.32.43/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
43695 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
43696 struct buffer_head *bh;
43697 int i, j;
43698
43699 + pax_track_stack();
43700 +
43701 bh = __getblk(dev, block, bufsize);
43702 if (buffer_uptodate(bh))
43703 return (bh);
43704 diff -urNp linux-2.6.32.43/fs/reiserfs/namei.c linux-2.6.32.43/fs/reiserfs/namei.c
43705 --- linux-2.6.32.43/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
43706 +++ linux-2.6.32.43/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
43707 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
43708 unsigned long savelink = 1;
43709 struct timespec ctime;
43710
43711 + pax_track_stack();
43712 +
43713 /* three balancings: (1) old name removal, (2) new name insertion
43714 and (3) maybe "save" link insertion
43715 stat data updates: (1) old directory,
43716 diff -urNp linux-2.6.32.43/fs/reiserfs/procfs.c linux-2.6.32.43/fs/reiserfs/procfs.c
43717 --- linux-2.6.32.43/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
43718 +++ linux-2.6.32.43/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
43719 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
43720 "SMALL_TAILS " : "NO_TAILS ",
43721 replay_only(sb) ? "REPLAY_ONLY " : "",
43722 convert_reiserfs(sb) ? "CONV " : "",
43723 - atomic_read(&r->s_generation_counter),
43724 + atomic_read_unchecked(&r->s_generation_counter),
43725 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43726 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43727 SF(s_good_search_by_key_reada), SF(s_bmaps),
43728 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
43729 struct journal_params *jp = &rs->s_v1.s_journal;
43730 char b[BDEVNAME_SIZE];
43731
43732 + pax_track_stack();
43733 +
43734 seq_printf(m, /* on-disk fields */
43735 "jp_journal_1st_block: \t%i\n"
43736 "jp_journal_dev: \t%s[%x]\n"
43737 diff -urNp linux-2.6.32.43/fs/reiserfs/stree.c linux-2.6.32.43/fs/reiserfs/stree.c
43738 --- linux-2.6.32.43/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
43739 +++ linux-2.6.32.43/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
43740 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
43741 int iter = 0;
43742 #endif
43743
43744 + pax_track_stack();
43745 +
43746 BUG_ON(!th->t_trans_id);
43747
43748 init_tb_struct(th, &s_del_balance, sb, path,
43749 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
43750 int retval;
43751 int quota_cut_bytes = 0;
43752
43753 + pax_track_stack();
43754 +
43755 BUG_ON(!th->t_trans_id);
43756
43757 le_key2cpu_key(&cpu_key, key);
43758 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
43759 int quota_cut_bytes;
43760 loff_t tail_pos = 0;
43761
43762 + pax_track_stack();
43763 +
43764 BUG_ON(!th->t_trans_id);
43765
43766 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43767 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
43768 int retval;
43769 int fs_gen;
43770
43771 + pax_track_stack();
43772 +
43773 BUG_ON(!th->t_trans_id);
43774
43775 fs_gen = get_generation(inode->i_sb);
43776 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
43777 int fs_gen = 0;
43778 int quota_bytes = 0;
43779
43780 + pax_track_stack();
43781 +
43782 BUG_ON(!th->t_trans_id);
43783
43784 if (inode) { /* Do we count quotas for item? */
43785 diff -urNp linux-2.6.32.43/fs/reiserfs/super.c linux-2.6.32.43/fs/reiserfs/super.c
43786 --- linux-2.6.32.43/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
43787 +++ linux-2.6.32.43/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
43788 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
43789 {.option_name = NULL}
43790 };
43791
43792 + pax_track_stack();
43793 +
43794 *blocks = 0;
43795 if (!options || !*options)
43796 /* use default configuration: create tails, journaling on, no
43797 diff -urNp linux-2.6.32.43/fs/select.c linux-2.6.32.43/fs/select.c
43798 --- linux-2.6.32.43/fs/select.c 2011-03-27 14:31:47.000000000 -0400
43799 +++ linux-2.6.32.43/fs/select.c 2011-05-16 21:46:57.000000000 -0400
43800 @@ -20,6 +20,7 @@
43801 #include <linux/module.h>
43802 #include <linux/slab.h>
43803 #include <linux/poll.h>
43804 +#include <linux/security.h>
43805 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43806 #include <linux/file.h>
43807 #include <linux/fdtable.h>
43808 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43809 int retval, i, timed_out = 0;
43810 unsigned long slack = 0;
43811
43812 + pax_track_stack();
43813 +
43814 rcu_read_lock();
43815 retval = max_select_fd(n, fds);
43816 rcu_read_unlock();
43817 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43818 /* Allocate small arguments on the stack to save memory and be faster */
43819 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43820
43821 + pax_track_stack();
43822 +
43823 ret = -EINVAL;
43824 if (n < 0)
43825 goto out_nofds;
43826 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43827 struct poll_list *walk = head;
43828 unsigned long todo = nfds;
43829
43830 + pax_track_stack();
43831 +
43832 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43833 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43834 return -EINVAL;
43835
43836 diff -urNp linux-2.6.32.43/fs/seq_file.c linux-2.6.32.43/fs/seq_file.c
43837 --- linux-2.6.32.43/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43838 +++ linux-2.6.32.43/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43839 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43840 return 0;
43841 }
43842 if (!m->buf) {
43843 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43844 + m->size = PAGE_SIZE;
43845 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43846 if (!m->buf)
43847 return -ENOMEM;
43848 }
43849 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43850 Eoverflow:
43851 m->op->stop(m, p);
43852 kfree(m->buf);
43853 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43854 + m->size <<= 1;
43855 + m->buf = kmalloc(m->size, GFP_KERNEL);
43856 return !m->buf ? -ENOMEM : -EAGAIN;
43857 }
43858
43859 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43860 m->version = file->f_version;
43861 /* grab buffer if we didn't have one */
43862 if (!m->buf) {
43863 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43864 + m->size = PAGE_SIZE;
43865 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43866 if (!m->buf)
43867 goto Enomem;
43868 }
43869 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43870 goto Fill;
43871 m->op->stop(m, p);
43872 kfree(m->buf);
43873 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43874 + m->size <<= 1;
43875 + m->buf = kmalloc(m->size, GFP_KERNEL);
43876 if (!m->buf)
43877 goto Enomem;
43878 m->count = 0;
43879 diff -urNp linux-2.6.32.43/fs/smbfs/symlink.c linux-2.6.32.43/fs/smbfs/symlink.c
43880 --- linux-2.6.32.43/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43881 +++ linux-2.6.32.43/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43882 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43883
43884 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43885 {
43886 - char *s = nd_get_link(nd);
43887 + const char *s = nd_get_link(nd);
43888 if (!IS_ERR(s))
43889 __putname(s);
43890 }
43891 diff -urNp linux-2.6.32.43/fs/splice.c linux-2.6.32.43/fs/splice.c
43892 --- linux-2.6.32.43/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43893 +++ linux-2.6.32.43/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43894 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43895 pipe_lock(pipe);
43896
43897 for (;;) {
43898 - if (!pipe->readers) {
43899 + if (!atomic_read(&pipe->readers)) {
43900 send_sig(SIGPIPE, current, 0);
43901 if (!ret)
43902 ret = -EPIPE;
43903 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43904 do_wakeup = 0;
43905 }
43906
43907 - pipe->waiting_writers++;
43908 + atomic_inc(&pipe->waiting_writers);
43909 pipe_wait(pipe);
43910 - pipe->waiting_writers--;
43911 + atomic_dec(&pipe->waiting_writers);
43912 }
43913
43914 pipe_unlock(pipe);
43915 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43916 .spd_release = spd_release_page,
43917 };
43918
43919 + pax_track_stack();
43920 +
43921 index = *ppos >> PAGE_CACHE_SHIFT;
43922 loff = *ppos & ~PAGE_CACHE_MASK;
43923 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43924 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43925 old_fs = get_fs();
43926 set_fs(get_ds());
43927 /* The cast to a user pointer is valid due to the set_fs() */
43928 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43929 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43930 set_fs(old_fs);
43931
43932 return res;
43933 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43934 old_fs = get_fs();
43935 set_fs(get_ds());
43936 /* The cast to a user pointer is valid due to the set_fs() */
43937 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43938 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43939 set_fs(old_fs);
43940
43941 return res;
43942 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43943 .spd_release = spd_release_page,
43944 };
43945
43946 + pax_track_stack();
43947 +
43948 index = *ppos >> PAGE_CACHE_SHIFT;
43949 offset = *ppos & ~PAGE_CACHE_MASK;
43950 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43951 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43952 goto err;
43953
43954 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43955 - vec[i].iov_base = (void __user *) page_address(page);
43956 + vec[i].iov_base = (__force void __user *) page_address(page);
43957 vec[i].iov_len = this_len;
43958 pages[i] = page;
43959 spd.nr_pages++;
43960 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43961 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43962 {
43963 while (!pipe->nrbufs) {
43964 - if (!pipe->writers)
43965 + if (!atomic_read(&pipe->writers))
43966 return 0;
43967
43968 - if (!pipe->waiting_writers && sd->num_spliced)
43969 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43970 return 0;
43971
43972 if (sd->flags & SPLICE_F_NONBLOCK)
43973 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43974 * out of the pipe right after the splice_to_pipe(). So set
43975 * PIPE_READERS appropriately.
43976 */
43977 - pipe->readers = 1;
43978 + atomic_set(&pipe->readers, 1);
43979
43980 current->splice_pipe = pipe;
43981 }
43982 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43983 .spd_release = spd_release_page,
43984 };
43985
43986 + pax_track_stack();
43987 +
43988 pipe = pipe_info(file->f_path.dentry->d_inode);
43989 if (!pipe)
43990 return -EBADF;
43991 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43992 ret = -ERESTARTSYS;
43993 break;
43994 }
43995 - if (!pipe->writers)
43996 + if (!atomic_read(&pipe->writers))
43997 break;
43998 - if (!pipe->waiting_writers) {
43999 + if (!atomic_read(&pipe->waiting_writers)) {
44000 if (flags & SPLICE_F_NONBLOCK) {
44001 ret = -EAGAIN;
44002 break;
44003 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
44004 pipe_lock(pipe);
44005
44006 while (pipe->nrbufs >= PIPE_BUFFERS) {
44007 - if (!pipe->readers) {
44008 + if (!atomic_read(&pipe->readers)) {
44009 send_sig(SIGPIPE, current, 0);
44010 ret = -EPIPE;
44011 break;
44012 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
44013 ret = -ERESTARTSYS;
44014 break;
44015 }
44016 - pipe->waiting_writers++;
44017 + atomic_inc(&pipe->waiting_writers);
44018 pipe_wait(pipe);
44019 - pipe->waiting_writers--;
44020 + atomic_dec(&pipe->waiting_writers);
44021 }
44022
44023 pipe_unlock(pipe);
44024 @@ -1785,14 +1791,14 @@ retry:
44025 pipe_double_lock(ipipe, opipe);
44026
44027 do {
44028 - if (!opipe->readers) {
44029 + if (!atomic_read(&opipe->readers)) {
44030 send_sig(SIGPIPE, current, 0);
44031 if (!ret)
44032 ret = -EPIPE;
44033 break;
44034 }
44035
44036 - if (!ipipe->nrbufs && !ipipe->writers)
44037 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
44038 break;
44039
44040 /*
44041 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
44042 pipe_double_lock(ipipe, opipe);
44043
44044 do {
44045 - if (!opipe->readers) {
44046 + if (!atomic_read(&opipe->readers)) {
44047 send_sig(SIGPIPE, current, 0);
44048 if (!ret)
44049 ret = -EPIPE;
44050 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
44051 * return EAGAIN if we have the potential of some data in the
44052 * future, otherwise just return 0
44053 */
44054 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
44055 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
44056 ret = -EAGAIN;
44057
44058 pipe_unlock(ipipe);
44059 diff -urNp linux-2.6.32.43/fs/sysfs/file.c linux-2.6.32.43/fs/sysfs/file.c
44060 --- linux-2.6.32.43/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
44061 +++ linux-2.6.32.43/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
44062 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
44063
44064 struct sysfs_open_dirent {
44065 atomic_t refcnt;
44066 - atomic_t event;
44067 + atomic_unchecked_t event;
44068 wait_queue_head_t poll;
44069 struct list_head buffers; /* goes through sysfs_buffer.list */
44070 };
44071 @@ -53,7 +53,7 @@ struct sysfs_buffer {
44072 size_t count;
44073 loff_t pos;
44074 char * page;
44075 - struct sysfs_ops * ops;
44076 + const struct sysfs_ops * ops;
44077 struct mutex mutex;
44078 int needs_read_fill;
44079 int event;
44080 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
44081 {
44082 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
44083 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44084 - struct sysfs_ops * ops = buffer->ops;
44085 + const struct sysfs_ops * ops = buffer->ops;
44086 int ret = 0;
44087 ssize_t count;
44088
44089 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
44090 if (!sysfs_get_active_two(attr_sd))
44091 return -ENODEV;
44092
44093 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
44094 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
44095 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
44096
44097 sysfs_put_active_two(attr_sd);
44098 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
44099 {
44100 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
44101 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44102 - struct sysfs_ops * ops = buffer->ops;
44103 + const struct sysfs_ops * ops = buffer->ops;
44104 int rc;
44105
44106 /* need attr_sd for attr and ops, its parent for kobj */
44107 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
44108 return -ENOMEM;
44109
44110 atomic_set(&new_od->refcnt, 0);
44111 - atomic_set(&new_od->event, 1);
44112 + atomic_set_unchecked(&new_od->event, 1);
44113 init_waitqueue_head(&new_od->poll);
44114 INIT_LIST_HEAD(&new_od->buffers);
44115 goto retry;
44116 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
44117 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
44118 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44119 struct sysfs_buffer *buffer;
44120 - struct sysfs_ops *ops;
44121 + const struct sysfs_ops *ops;
44122 int error = -EACCES;
44123 char *p;
44124
44125 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
44126
44127 sysfs_put_active_two(attr_sd);
44128
44129 - if (buffer->event != atomic_read(&od->event))
44130 + if (buffer->event != atomic_read_unchecked(&od->event))
44131 goto trigger;
44132
44133 return DEFAULT_POLLMASK;
44134 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
44135
44136 od = sd->s_attr.open;
44137 if (od) {
44138 - atomic_inc(&od->event);
44139 + atomic_inc_unchecked(&od->event);
44140 wake_up_interruptible(&od->poll);
44141 }
44142
44143 diff -urNp linux-2.6.32.43/fs/sysfs/mount.c linux-2.6.32.43/fs/sysfs/mount.c
44144 --- linux-2.6.32.43/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
44145 +++ linux-2.6.32.43/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
44146 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
44147 .s_name = "",
44148 .s_count = ATOMIC_INIT(1),
44149 .s_flags = SYSFS_DIR,
44150 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44151 + .s_mode = S_IFDIR | S_IRWXU,
44152 +#else
44153 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44154 +#endif
44155 .s_ino = 1,
44156 };
44157
44158 diff -urNp linux-2.6.32.43/fs/sysfs/symlink.c linux-2.6.32.43/fs/sysfs/symlink.c
44159 --- linux-2.6.32.43/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
44160 +++ linux-2.6.32.43/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
44161 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
44162
44163 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44164 {
44165 - char *page = nd_get_link(nd);
44166 + const char *page = nd_get_link(nd);
44167 if (!IS_ERR(page))
44168 free_page((unsigned long)page);
44169 }
44170 diff -urNp linux-2.6.32.43/fs/udf/balloc.c linux-2.6.32.43/fs/udf/balloc.c
44171 --- linux-2.6.32.43/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
44172 +++ linux-2.6.32.43/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
44173 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
44174
44175 mutex_lock(&sbi->s_alloc_mutex);
44176 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
44177 - if (bloc->logicalBlockNum < 0 ||
44178 - (bloc->logicalBlockNum + count) >
44179 - partmap->s_partition_len) {
44180 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
44181 udf_debug("%d < %d || %d + %d > %d\n",
44182 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
44183 count, partmap->s_partition_len);
44184 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
44185
44186 mutex_lock(&sbi->s_alloc_mutex);
44187 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
44188 - if (bloc->logicalBlockNum < 0 ||
44189 - (bloc->logicalBlockNum + count) >
44190 - partmap->s_partition_len) {
44191 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
44192 udf_debug("%d < %d || %d + %d > %d\n",
44193 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
44194 partmap->s_partition_len);
44195 diff -urNp linux-2.6.32.43/fs/udf/inode.c linux-2.6.32.43/fs/udf/inode.c
44196 --- linux-2.6.32.43/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
44197 +++ linux-2.6.32.43/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
44198 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
44199 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
44200 int lastblock = 0;
44201
44202 + pax_track_stack();
44203 +
44204 prev_epos.offset = udf_file_entry_alloc_offset(inode);
44205 prev_epos.block = iinfo->i_location;
44206 prev_epos.bh = NULL;
44207 diff -urNp linux-2.6.32.43/fs/udf/misc.c linux-2.6.32.43/fs/udf/misc.c
44208 --- linux-2.6.32.43/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
44209 +++ linux-2.6.32.43/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
44210 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
44211
44212 u8 udf_tag_checksum(const struct tag *t)
44213 {
44214 - u8 *data = (u8 *)t;
44215 + const u8 *data = (const u8 *)t;
44216 u8 checksum = 0;
44217 int i;
44218 for (i = 0; i < sizeof(struct tag); ++i)
44219 diff -urNp linux-2.6.32.43/fs/utimes.c linux-2.6.32.43/fs/utimes.c
44220 --- linux-2.6.32.43/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
44221 +++ linux-2.6.32.43/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
44222 @@ -1,6 +1,7 @@
44223 #include <linux/compiler.h>
44224 #include <linux/file.h>
44225 #include <linux/fs.h>
44226 +#include <linux/security.h>
44227 #include <linux/linkage.h>
44228 #include <linux/mount.h>
44229 #include <linux/namei.h>
44230 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
44231 goto mnt_drop_write_and_out;
44232 }
44233 }
44234 +
44235 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
44236 + error = -EACCES;
44237 + goto mnt_drop_write_and_out;
44238 + }
44239 +
44240 mutex_lock(&inode->i_mutex);
44241 error = notify_change(path->dentry, &newattrs);
44242 mutex_unlock(&inode->i_mutex);
44243 diff -urNp linux-2.6.32.43/fs/xattr_acl.c linux-2.6.32.43/fs/xattr_acl.c
44244 --- linux-2.6.32.43/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
44245 +++ linux-2.6.32.43/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
44246 @@ -17,8 +17,8 @@
44247 struct posix_acl *
44248 posix_acl_from_xattr(const void *value, size_t size)
44249 {
44250 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
44251 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
44252 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
44253 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
44254 int count;
44255 struct posix_acl *acl;
44256 struct posix_acl_entry *acl_e;
44257 diff -urNp linux-2.6.32.43/fs/xattr.c linux-2.6.32.43/fs/xattr.c
44258 --- linux-2.6.32.43/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
44259 +++ linux-2.6.32.43/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
44260 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
44261 * Extended attribute SET operations
44262 */
44263 static long
44264 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
44265 +setxattr(struct path *path, const char __user *name, const void __user *value,
44266 size_t size, int flags)
44267 {
44268 int error;
44269 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
44270 return PTR_ERR(kvalue);
44271 }
44272
44273 - error = vfs_setxattr(d, kname, kvalue, size, flags);
44274 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
44275 + error = -EACCES;
44276 + goto out;
44277 + }
44278 +
44279 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
44280 +out:
44281 kfree(kvalue);
44282 return error;
44283 }
44284 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
44285 return error;
44286 error = mnt_want_write(path.mnt);
44287 if (!error) {
44288 - error = setxattr(path.dentry, name, value, size, flags);
44289 + error = setxattr(&path, name, value, size, flags);
44290 mnt_drop_write(path.mnt);
44291 }
44292 path_put(&path);
44293 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
44294 return error;
44295 error = mnt_want_write(path.mnt);
44296 if (!error) {
44297 - error = setxattr(path.dentry, name, value, size, flags);
44298 + error = setxattr(&path, name, value, size, flags);
44299 mnt_drop_write(path.mnt);
44300 }
44301 path_put(&path);
44302 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
44303 const void __user *,value, size_t, size, int, flags)
44304 {
44305 struct file *f;
44306 - struct dentry *dentry;
44307 int error = -EBADF;
44308
44309 f = fget(fd);
44310 if (!f)
44311 return error;
44312 - dentry = f->f_path.dentry;
44313 - audit_inode(NULL, dentry);
44314 + audit_inode(NULL, f->f_path.dentry);
44315 error = mnt_want_write_file(f);
44316 if (!error) {
44317 - error = setxattr(dentry, name, value, size, flags);
44318 + error = setxattr(&f->f_path, name, value, size, flags);
44319 mnt_drop_write(f->f_path.mnt);
44320 }
44321 fput(f);
44322 diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c
44323 --- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
44324 +++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
44325 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
44326 xfs_fsop_geom_t fsgeo;
44327 int error;
44328
44329 + memset(&fsgeo, 0, sizeof(fsgeo));
44330 error = xfs_fs_geometry(mp, &fsgeo, 3);
44331 if (error)
44332 return -error;
44333 diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c
44334 --- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
44335 +++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
44336 @@ -134,7 +134,7 @@ xfs_find_handle(
44337 }
44338
44339 error = -EFAULT;
44340 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
44341 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
44342 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
44343 goto out_put;
44344
44345 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
44346 if (IS_ERR(dentry))
44347 return PTR_ERR(dentry);
44348
44349 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
44350 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
44351 if (!kbuf)
44352 goto out_dput;
44353
44354 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
44355 xfs_mount_t *mp,
44356 void __user *arg)
44357 {
44358 - xfs_fsop_geom_t fsgeo;
44359 + xfs_fsop_geom_t fsgeo;
44360 int error;
44361
44362 error = xfs_fs_geometry(mp, &fsgeo, 3);
44363 diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c
44364 --- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
44365 +++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
44366 @@ -468,7 +468,7 @@ xfs_vn_put_link(
44367 struct nameidata *nd,
44368 void *p)
44369 {
44370 - char *s = nd_get_link(nd);
44371 + const char *s = nd_get_link(nd);
44372
44373 if (!IS_ERR(s))
44374 kfree(s);
44375 diff -urNp linux-2.6.32.43/fs/xfs/xfs_bmap.c linux-2.6.32.43/fs/xfs/xfs_bmap.c
44376 --- linux-2.6.32.43/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
44377 +++ linux-2.6.32.43/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
44378 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
44379 int nmap,
44380 int ret_nmap);
44381 #else
44382 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
44383 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
44384 #endif /* DEBUG */
44385
44386 #if defined(XFS_RW_TRACE)
44387 diff -urNp linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c
44388 --- linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
44389 +++ linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
44390 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
44391 }
44392
44393 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
44394 - if (filldir(dirent, sfep->name, sfep->namelen,
44395 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
44396 + char name[sfep->namelen];
44397 + memcpy(name, sfep->name, sfep->namelen);
44398 + if (filldir(dirent, name, sfep->namelen,
44399 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
44400 + *offset = off & 0x7fffffff;
44401 + return 0;
44402 + }
44403 + } else if (filldir(dirent, sfep->name, sfep->namelen,
44404 off & 0x7fffffff, ino, DT_UNKNOWN)) {
44405 *offset = off & 0x7fffffff;
44406 return 0;
44407 diff -urNp linux-2.6.32.43/grsecurity/gracl_alloc.c linux-2.6.32.43/grsecurity/gracl_alloc.c
44408 --- linux-2.6.32.43/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44409 +++ linux-2.6.32.43/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
44410 @@ -0,0 +1,105 @@
44411 +#include <linux/kernel.h>
44412 +#include <linux/mm.h>
44413 +#include <linux/slab.h>
44414 +#include <linux/vmalloc.h>
44415 +#include <linux/gracl.h>
44416 +#include <linux/grsecurity.h>
44417 +
44418 +static unsigned long alloc_stack_next = 1;
44419 +static unsigned long alloc_stack_size = 1;
44420 +static void **alloc_stack;
44421 +
44422 +static __inline__ int
44423 +alloc_pop(void)
44424 +{
44425 + if (alloc_stack_next == 1)
44426 + return 0;
44427 +
44428 + kfree(alloc_stack[alloc_stack_next - 2]);
44429 +
44430 + alloc_stack_next--;
44431 +
44432 + return 1;
44433 +}
44434 +
44435 +static __inline__ int
44436 +alloc_push(void *buf)
44437 +{
44438 + if (alloc_stack_next >= alloc_stack_size)
44439 + return 1;
44440 +
44441 + alloc_stack[alloc_stack_next - 1] = buf;
44442 +
44443 + alloc_stack_next++;
44444 +
44445 + return 0;
44446 +}
44447 +
44448 +void *
44449 +acl_alloc(unsigned long len)
44450 +{
44451 + void *ret = NULL;
44452 +
44453 + if (!len || len > PAGE_SIZE)
44454 + goto out;
44455 +
44456 + ret = kmalloc(len, GFP_KERNEL);
44457 +
44458 + if (ret) {
44459 + if (alloc_push(ret)) {
44460 + kfree(ret);
44461 + ret = NULL;
44462 + }
44463 + }
44464 +
44465 +out:
44466 + return ret;
44467 +}
44468 +
44469 +void *
44470 +acl_alloc_num(unsigned long num, unsigned long len)
44471 +{
44472 + if (!len || (num > (PAGE_SIZE / len)))
44473 + return NULL;
44474 +
44475 + return acl_alloc(num * len);
44476 +}
44477 +
44478 +void
44479 +acl_free_all(void)
44480 +{
44481 + if (gr_acl_is_enabled() || !alloc_stack)
44482 + return;
44483 +
44484 + while (alloc_pop()) ;
44485 +
44486 + if (alloc_stack) {
44487 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44488 + kfree(alloc_stack);
44489 + else
44490 + vfree(alloc_stack);
44491 + }
44492 +
44493 + alloc_stack = NULL;
44494 + alloc_stack_size = 1;
44495 + alloc_stack_next = 1;
44496 +
44497 + return;
44498 +}
44499 +
44500 +int
44501 +acl_alloc_stack_init(unsigned long size)
44502 +{
44503 + if ((size * sizeof (void *)) <= PAGE_SIZE)
44504 + alloc_stack =
44505 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44506 + else
44507 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
44508 +
44509 + alloc_stack_size = size;
44510 +
44511 + if (!alloc_stack)
44512 + return 0;
44513 + else
44514 + return 1;
44515 +}
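
The new gracl_alloc.c above gives the RBAC loader a pushdown allocator: acl_alloc_stack_init() sizes a stack of pointers up front (kmalloc for small stacks, vmalloc otherwise), acl_alloc() records every successful allocation on that stack, and acl_free_all() pops and frees the lot once the policy is torn down. A minimal userspace sketch of the same pattern, with malloc()/free() standing in for the kernel helpers and all names purely illustrative:

    /* Illustrative pushdown-allocator sketch modelled on gracl_alloc.c;
     * compiles and runs outside the kernel, names are stand-ins. */
    #include <stdlib.h>

    static void **alloc_stack;
    static unsigned long stack_next = 1, stack_size = 1;

    static int stack_init(unsigned long size)
    {
            alloc_stack = malloc(size * sizeof(void *));
            stack_size = size;
            return alloc_stack != NULL;
    }

    static void *tracked_alloc(unsigned long len)
    {
            void *p;

            if (stack_next >= stack_size)
                    return NULL;            /* stack full: refuse, like alloc_push() */

            p = malloc(len);
            if (p)
                    alloc_stack[stack_next++ - 1] = p;      /* record for bulk free */
            return p;
    }

    static void free_all(void)
    {
            while (stack_next > 1)
                    free(alloc_stack[--stack_next - 1]);    /* mirrors alloc_pop() */

            free(alloc_stack);
            alloc_stack = NULL;
            stack_size = stack_next = 1;
    }

    int main(void)
    {
            if (!stack_init(8))
                    return 1;
            tracked_alloc(32);
            tracked_alloc(64);
            free_all();                     /* both blocks released in one pass */
            return 0;
    }
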
44516 diff -urNp linux-2.6.32.43/grsecurity/gracl.c linux-2.6.32.43/grsecurity/gracl.c
44517 --- linux-2.6.32.43/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44518 +++ linux-2.6.32.43/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
44519 @@ -0,0 +1,4082 @@
44520 +#include <linux/kernel.h>
44521 +#include <linux/module.h>
44522 +#include <linux/sched.h>
44523 +#include <linux/mm.h>
44524 +#include <linux/file.h>
44525 +#include <linux/fs.h>
44526 +#include <linux/namei.h>
44527 +#include <linux/mount.h>
44528 +#include <linux/tty.h>
44529 +#include <linux/proc_fs.h>
44530 +#include <linux/smp_lock.h>
44531 +#include <linux/slab.h>
44532 +#include <linux/vmalloc.h>
44533 +#include <linux/types.h>
44534 +#include <linux/sysctl.h>
44535 +#include <linux/netdevice.h>
44536 +#include <linux/ptrace.h>
44537 +#include <linux/gracl.h>
44538 +#include <linux/gralloc.h>
44539 +#include <linux/grsecurity.h>
44540 +#include <linux/grinternal.h>
44541 +#include <linux/pid_namespace.h>
44542 +#include <linux/fdtable.h>
44543 +#include <linux/percpu.h>
44544 +
44545 +#include <asm/uaccess.h>
44546 +#include <asm/errno.h>
44547 +#include <asm/mman.h>
44548 +
44549 +static struct acl_role_db acl_role_set;
44550 +static struct name_db name_set;
44551 +static struct inodev_db inodev_set;
44552 +
44553 +/* for keeping track of userspace pointers used for subjects, so we
44554 + can share references in the kernel as well
44555 +*/
44556 +
44557 +static struct dentry *real_root;
44558 +static struct vfsmount *real_root_mnt;
44559 +
44560 +static struct acl_subj_map_db subj_map_set;
44561 +
44562 +static struct acl_role_label *default_role;
44563 +
44564 +static struct acl_role_label *role_list;
44565 +
44566 +static u16 acl_sp_role_value;
44567 +
44568 +extern char *gr_shared_page[4];
44569 +static DEFINE_MUTEX(gr_dev_mutex);
44570 +DEFINE_RWLOCK(gr_inode_lock);
44571 +
44572 +struct gr_arg *gr_usermode;
44573 +
44574 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
44575 +
44576 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44577 +extern void gr_clear_learn_entries(void);
44578 +
44579 +#ifdef CONFIG_GRKERNSEC_RESLOG
44580 +extern void gr_log_resource(const struct task_struct *task,
44581 + const int res, const unsigned long wanted, const int gt);
44582 +#endif
44583 +
44584 +unsigned char *gr_system_salt;
44585 +unsigned char *gr_system_sum;
44586 +
44587 +static struct sprole_pw **acl_special_roles = NULL;
44588 +static __u16 num_sprole_pws = 0;
44589 +
44590 +static struct acl_role_label *kernel_role = NULL;
44591 +
44592 +static unsigned int gr_auth_attempts = 0;
44593 +static unsigned long gr_auth_expires = 0UL;
44594 +
44595 +#ifdef CONFIG_NET
44596 +extern struct vfsmount *sock_mnt;
44597 +#endif
44598 +extern struct vfsmount *pipe_mnt;
44599 +extern struct vfsmount *shm_mnt;
44600 +#ifdef CONFIG_HUGETLBFS
44601 +extern struct vfsmount *hugetlbfs_vfsmount;
44602 +#endif
44603 +
44604 +static struct acl_object_label *fakefs_obj_rw;
44605 +static struct acl_object_label *fakefs_obj_rwx;
44606 +
44607 +extern int gr_init_uidset(void);
44608 +extern void gr_free_uidset(void);
44609 +extern void gr_remove_uid(uid_t uid);
44610 +extern int gr_find_uid(uid_t uid);
44611 +
44612 +__inline__ int
44613 +gr_acl_is_enabled(void)
44614 +{
44615 + return (gr_status & GR_READY);
44616 +}
44617 +
44618 +#ifdef CONFIG_BTRFS_FS
44619 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44620 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44621 +#endif
44622 +
44623 +static inline dev_t __get_dev(const struct dentry *dentry)
44624 +{
44625 +#ifdef CONFIG_BTRFS_FS
44626 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44627 + return get_btrfs_dev_from_inode(dentry->d_inode);
44628 + else
44629 +#endif
44630 + return dentry->d_inode->i_sb->s_dev;
44631 +}
44632 +
44633 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44634 +{
44635 + return __get_dev(dentry);
44636 +}
44637 +
44638 +static char gr_task_roletype_to_char(struct task_struct *task)
44639 +{
44640 + switch (task->role->roletype &
44641 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44642 + GR_ROLE_SPECIAL)) {
44643 + case GR_ROLE_DEFAULT:
44644 + return 'D';
44645 + case GR_ROLE_USER:
44646 + return 'U';
44647 + case GR_ROLE_GROUP:
44648 + return 'G';
44649 + case GR_ROLE_SPECIAL:
44650 + return 'S';
44651 + }
44652 +
44653 + return 'X';
44654 +}
44655 +
44656 +char gr_roletype_to_char(void)
44657 +{
44658 + return gr_task_roletype_to_char(current);
44659 +}
44660 +
44661 +__inline__ int
44662 +gr_acl_tpe_check(void)
44663 +{
44664 + if (unlikely(!(gr_status & GR_READY)))
44665 + return 0;
44666 + if (current->role->roletype & GR_ROLE_TPE)
44667 + return 1;
44668 + else
44669 + return 0;
44670 +}
44671 +
44672 +int
44673 +gr_handle_rawio(const struct inode *inode)
44674 +{
44675 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44676 + if (inode && S_ISBLK(inode->i_mode) &&
44677 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44678 + !capable(CAP_SYS_RAWIO))
44679 + return 1;
44680 +#endif
44681 + return 0;
44682 +}
44683 +
44684 +static int
44685 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44686 +{
44687 + if (likely(lena != lenb))
44688 + return 0;
44689 +
44690 + return !memcmp(a, b, lena);
44691 +}
44692 +
44693 +/* this must be called with vfsmount_lock and dcache_lock held */
44694 +
44695 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44696 + struct dentry *root, struct vfsmount *rootmnt,
44697 + char *buffer, int buflen)
44698 +{
44699 + char * end = buffer+buflen;
44700 + char * retval;
44701 + int namelen;
44702 +
44703 + *--end = '\0';
44704 + buflen--;
44705 +
44706 + if (buflen < 1)
44707 + goto Elong;
44708 + /* Get '/' right */
44709 + retval = end-1;
44710 + *retval = '/';
44711 +
44712 + for (;;) {
44713 + struct dentry * parent;
44714 +
44715 + if (dentry == root && vfsmnt == rootmnt)
44716 + break;
44717 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44718 + /* Global root? */
44719 + if (vfsmnt->mnt_parent == vfsmnt)
44720 + goto global_root;
44721 + dentry = vfsmnt->mnt_mountpoint;
44722 + vfsmnt = vfsmnt->mnt_parent;
44723 + continue;
44724 + }
44725 + parent = dentry->d_parent;
44726 + prefetch(parent);
44727 + namelen = dentry->d_name.len;
44728 + buflen -= namelen + 1;
44729 + if (buflen < 0)
44730 + goto Elong;
44731 + end -= namelen;
44732 + memcpy(end, dentry->d_name.name, namelen);
44733 + *--end = '/';
44734 + retval = end;
44735 + dentry = parent;
44736 + }
44737 +
44738 +out:
44739 + return retval;
44740 +
44741 +global_root:
44742 + namelen = dentry->d_name.len;
44743 + buflen -= namelen;
44744 + if (buflen < 0)
44745 + goto Elong;
44746 + retval -= namelen-1; /* hit the slash */
44747 + memcpy(retval, dentry->d_name.name, namelen);
44748 + goto out;
44749 +Elong:
44750 + retval = ERR_PTR(-ENAMETOOLONG);
44751 + goto out;
44752 +}
44753 +
44754 +static char *
44755 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44756 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
44757 +{
44758 + char *retval;
44759 +
44760 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
44761 + if (unlikely(IS_ERR(retval)))
44762 + retval = strcpy(buf, "<path too long>");
44763 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44764 + retval[1] = '\0';
44765 +
44766 + return retval;
44767 +}
44768 +
44769 +static char *
44770 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44771 + char *buf, int buflen)
44772 +{
44773 + char *res;
44774 +
44775 + /* we can use real_root, real_root_mnt, because this is only called
44776 + by the RBAC system */
44777 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
44778 +
44779 + return res;
44780 +}
44781 +
44782 +static char *
44783 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44784 + char *buf, int buflen)
44785 +{
44786 + char *res;
44787 + struct dentry *root;
44788 + struct vfsmount *rootmnt;
44789 + struct task_struct *reaper = &init_task;
44790 +
44791 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
44792 + read_lock(&reaper->fs->lock);
44793 + root = dget(reaper->fs->root.dentry);
44794 + rootmnt = mntget(reaper->fs->root.mnt);
44795 + read_unlock(&reaper->fs->lock);
44796 +
44797 + spin_lock(&dcache_lock);
44798 + spin_lock(&vfsmount_lock);
44799 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
44800 + spin_unlock(&vfsmount_lock);
44801 + spin_unlock(&dcache_lock);
44802 +
44803 + dput(root);
44804 + mntput(rootmnt);
44805 + return res;
44806 +}
44807 +
44808 +static char *
44809 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44810 +{
44811 + char *ret;
44812 + spin_lock(&dcache_lock);
44813 + spin_lock(&vfsmount_lock);
44814 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44815 + PAGE_SIZE);
44816 + spin_unlock(&vfsmount_lock);
44817 + spin_unlock(&dcache_lock);
44818 + return ret;
44819 +}
44820 +
44821 +char *
44822 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44823 +{
44824 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44825 + PAGE_SIZE);
44826 +}
44827 +
44828 +char *
44829 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44830 +{
44831 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44832 + PAGE_SIZE);
44833 +}
44834 +
44835 +char *
44836 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44837 +{
44838 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44839 + PAGE_SIZE);
44840 +}
44841 +
44842 +char *
44843 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44844 +{
44845 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44846 + PAGE_SIZE);
44847 +}
44848 +
44849 +char *
44850 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44851 +{
44852 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44853 + PAGE_SIZE);
44854 +}
44855 +
44856 +__inline__ __u32
44857 +to_gr_audit(const __u32 reqmode)
44858 +{
44859 + /* masks off auditable permission flags, then shifts them to create
44860 + auditing flags, and adds the special case of append auditing if
44861 + we're requesting write */
44862 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44863 +}
44864 +
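
The comment above describes the trick to_gr_audit() relies on: the audit flags use the same bit layout as the permission flags, only shifted ten positions up, so a requested access mode becomes its audit counterpart with one mask and one shift, plus forced append auditing on writes. A standalone sketch with made-up flag values (the real GR_* constants live in gracl.h and are not reproduced here):

    /* Hypothetical flag layout: permission bits low, audit bits ten
     * positions higher, mirroring the shift used by to_gr_audit(). */
    #include <stdio.h>

    #define GR_READ         0x00000001u
    #define GR_WRITE        0x00000002u
    #define GR_AUDIT_READ   (GR_READ  << 10)
    #define GR_AUDIT_WRITE  (GR_WRITE << 10)
    #define GR_AUDIT_APPEND 0x00001000u            /* stand-in value */
    #define GR_AUDITS       (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND)

    static unsigned int to_audit(unsigned int reqmode)
    {
            /* drop any audit bits already present, shift the permission bits
             * into the audit range, and force append auditing on writes */
            return ((reqmode & ~GR_AUDITS) << 10) |
                   ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0);
    }

    int main(void)
    {
            printf("0x%x\n", to_audit(GR_READ));             /* 0x400  = GR_AUDIT_READ */
            printf("0x%x\n", to_audit(GR_READ | GR_WRITE));  /* 0x1c00 = read|write|append audit */
            return 0;
    }
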
44865 +struct acl_subject_label *
44866 +lookup_subject_map(const struct acl_subject_label *userp)
44867 +{
44868 + unsigned int index = shash(userp, subj_map_set.s_size);
44869 + struct subject_map *match;
44870 +
44871 + match = subj_map_set.s_hash[index];
44872 +
44873 + while (match && match->user != userp)
44874 + match = match->next;
44875 +
44876 + if (match != NULL)
44877 + return match->kernel;
44878 + else
44879 + return NULL;
44880 +}
44881 +
44882 +static void
44883 +insert_subj_map_entry(struct subject_map *subjmap)
44884 +{
44885 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44886 + struct subject_map **curr;
44887 +
44888 + subjmap->prev = NULL;
44889 +
44890 + curr = &subj_map_set.s_hash[index];
44891 + if (*curr != NULL)
44892 + (*curr)->prev = subjmap;
44893 +
44894 + subjmap->next = *curr;
44895 + *curr = subjmap;
44896 +
44897 + return;
44898 +}
44899 +
44900 +static struct acl_role_label *
44901 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44902 + const gid_t gid)
44903 +{
44904 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44905 + struct acl_role_label *match;
44906 + struct role_allowed_ip *ipp;
44907 + unsigned int x;
44908 + u32 curr_ip = task->signal->curr_ip;
44909 +
44910 + task->signal->saved_ip = curr_ip;
44911 +
44912 + match = acl_role_set.r_hash[index];
44913 +
44914 + while (match) {
44915 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44916 + for (x = 0; x < match->domain_child_num; x++) {
44917 + if (match->domain_children[x] == uid)
44918 + goto found;
44919 + }
44920 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44921 + break;
44922 + match = match->next;
44923 + }
44924 +found:
44925 + if (match == NULL) {
44926 + try_group:
44927 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44928 + match = acl_role_set.r_hash[index];
44929 +
44930 + while (match) {
44931 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44932 + for (x = 0; x < match->domain_child_num; x++) {
44933 + if (match->domain_children[x] == gid)
44934 + goto found2;
44935 + }
44936 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44937 + break;
44938 + match = match->next;
44939 + }
44940 +found2:
44941 + if (match == NULL)
44942 + match = default_role;
44943 + if (match->allowed_ips == NULL)
44944 + return match;
44945 + else {
44946 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44947 + if (likely
44948 + ((ntohl(curr_ip) & ipp->netmask) ==
44949 + (ntohl(ipp->addr) & ipp->netmask)))
44950 + return match;
44951 + }
44952 + match = default_role;
44953 + }
44954 + } else if (match->allowed_ips == NULL) {
44955 + return match;
44956 + } else {
44957 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44958 + if (likely
44959 + ((ntohl(curr_ip) & ipp->netmask) ==
44960 + (ntohl(ipp->addr) & ipp->netmask)))
44961 + return match;
44962 + }
44963 + goto try_group;
44964 + }
44965 +
44966 + return match;
44967 +}
44968 +
44969 +struct acl_subject_label *
44970 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44971 + const struct acl_role_label *role)
44972 +{
44973 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44974 + struct acl_subject_label *match;
44975 +
44976 + match = role->subj_hash[index];
44977 +
44978 + while (match && (match->inode != ino || match->device != dev ||
44979 + (match->mode & GR_DELETED))) {
44980 + match = match->next;
44981 + }
44982 +
44983 + if (match && !(match->mode & GR_DELETED))
44984 + return match;
44985 + else
44986 + return NULL;
44987 +}
44988 +
44989 +struct acl_subject_label *
44990 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44991 + const struct acl_role_label *role)
44992 +{
44993 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44994 + struct acl_subject_label *match;
44995 +
44996 + match = role->subj_hash[index];
44997 +
44998 + while (match && (match->inode != ino || match->device != dev ||
44999 + !(match->mode & GR_DELETED))) {
45000 + match = match->next;
45001 + }
45002 +
45003 + if (match && (match->mode & GR_DELETED))
45004 + return match;
45005 + else
45006 + return NULL;
45007 +}
45008 +
45009 +static struct acl_object_label *
45010 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
45011 + const struct acl_subject_label *subj)
45012 +{
45013 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45014 + struct acl_object_label *match;
45015 +
45016 + match = subj->obj_hash[index];
45017 +
45018 + while (match && (match->inode != ino || match->device != dev ||
45019 + (match->mode & GR_DELETED))) {
45020 + match = match->next;
45021 + }
45022 +
45023 + if (match && !(match->mode & GR_DELETED))
45024 + return match;
45025 + else
45026 + return NULL;
45027 +}
45028 +
45029 +static struct acl_object_label *
45030 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
45031 + const struct acl_subject_label *subj)
45032 +{
45033 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45034 + struct acl_object_label *match;
45035 +
45036 + match = subj->obj_hash[index];
45037 +
45038 + while (match && (match->inode != ino || match->device != dev ||
45039 + !(match->mode & GR_DELETED))) {
45040 + match = match->next;
45041 + }
45042 +
45043 + if (match && (match->mode & GR_DELETED))
45044 + return match;
45045 +
45046 + match = subj->obj_hash[index];
45047 +
45048 + while (match && (match->inode != ino || match->device != dev ||
45049 + (match->mode & GR_DELETED))) {
45050 + match = match->next;
45051 + }
45052 +
45053 + if (match && !(match->mode & GR_DELETED))
45054 + return match;
45055 + else
45056 + return NULL;
45057 +}
45058 +
45059 +static struct name_entry *
45060 +lookup_name_entry(const char *name)
45061 +{
45062 + unsigned int len = strlen(name);
45063 + unsigned int key = full_name_hash(name, len);
45064 + unsigned int index = key % name_set.n_size;
45065 + struct name_entry *match;
45066 +
45067 + match = name_set.n_hash[index];
45068 +
45069 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
45070 + match = match->next;
45071 +
45072 + return match;
45073 +}
45074 +
45075 +static struct name_entry *
45076 +lookup_name_entry_create(const char *name)
45077 +{
45078 + unsigned int len = strlen(name);
45079 + unsigned int key = full_name_hash(name, len);
45080 + unsigned int index = key % name_set.n_size;
45081 + struct name_entry *match;
45082 +
45083 + match = name_set.n_hash[index];
45084 +
45085 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45086 + !match->deleted))
45087 + match = match->next;
45088 +
45089 + if (match && match->deleted)
45090 + return match;
45091 +
45092 + match = name_set.n_hash[index];
45093 +
45094 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45095 + match->deleted))
45096 + match = match->next;
45097 +
45098 + if (match && !match->deleted)
45099 + return match;
45100 + else
45101 + return NULL;
45102 +}
45103 +
45104 +static struct inodev_entry *
45105 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
45106 +{
45107 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
45108 + struct inodev_entry *match;
45109 +
45110 + match = inodev_set.i_hash[index];
45111 +
45112 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
45113 + match = match->next;
45114 +
45115 + return match;
45116 +}
45117 +
45118 +static void
45119 +insert_inodev_entry(struct inodev_entry *entry)
45120 +{
45121 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
45122 + inodev_set.i_size);
45123 + struct inodev_entry **curr;
45124 +
45125 + entry->prev = NULL;
45126 +
45127 + curr = &inodev_set.i_hash[index];
45128 + if (*curr != NULL)
45129 + (*curr)->prev = entry;
45130 +
45131 + entry->next = *curr;
45132 + *curr = entry;
45133 +
45134 + return;
45135 +}
45136 +
45137 +static void
45138 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
45139 +{
45140 + unsigned int index =
45141 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
45142 + struct acl_role_label **curr;
45143 + struct acl_role_label *tmp;
45144 +
45145 + curr = &acl_role_set.r_hash[index];
45146 +
45147 + /* if role was already inserted due to domains and already has
45148 + a role in the same bucket as it attached, then we need to
45149 + combine these two buckets
45150 + */
45151 + if (role->next) {
45152 + tmp = role->next;
45153 + while (tmp->next)
45154 + tmp = tmp->next;
45155 + tmp->next = *curr;
45156 + } else
45157 + role->next = *curr;
45158 + *curr = role;
45159 +
45160 + return;
45161 +}
45162 +
45163 +static void
45164 +insert_acl_role_label(struct acl_role_label *role)
45165 +{
45166 + int i;
45167 +
45168 + if (role_list == NULL) {
45169 + role_list = role;
45170 + role->prev = NULL;
45171 + } else {
45172 + role->prev = role_list;
45173 + role_list = role;
45174 + }
45175 +
45176 + /* used for hash chains */
45177 + role->next = NULL;
45178 +
45179 + if (role->roletype & GR_ROLE_DOMAIN) {
45180 + for (i = 0; i < role->domain_child_num; i++)
45181 + __insert_acl_role_label(role, role->domain_children[i]);
45182 + } else
45183 + __insert_acl_role_label(role, role->uidgid);
45184 +}
45185 +
45186 +static int
45187 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
45188 +{
45189 + struct name_entry **curr, *nentry;
45190 + struct inodev_entry *ientry;
45191 + unsigned int len = strlen(name);
45192 + unsigned int key = full_name_hash(name, len);
45193 + unsigned int index = key % name_set.n_size;
45194 +
45195 + curr = &name_set.n_hash[index];
45196 +
45197 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
45198 + curr = &((*curr)->next);
45199 +
45200 + if (*curr != NULL)
45201 + return 1;
45202 +
45203 + nentry = acl_alloc(sizeof (struct name_entry));
45204 + if (nentry == NULL)
45205 + return 0;
45206 + ientry = acl_alloc(sizeof (struct inodev_entry));
45207 + if (ientry == NULL)
45208 + return 0;
45209 + ientry->nentry = nentry;
45210 +
45211 + nentry->key = key;
45212 + nentry->name = name;
45213 + nentry->inode = inode;
45214 + nentry->device = device;
45215 + nentry->len = len;
45216 + nentry->deleted = deleted;
45217 +
45218 + nentry->prev = NULL;
45219 + curr = &name_set.n_hash[index];
45220 + if (*curr != NULL)
45221 + (*curr)->prev = nentry;
45222 + nentry->next = *curr;
45223 + *curr = nentry;
45224 +
45225 + /* insert us into the table searchable by inode/dev */
45226 + insert_inodev_entry(ientry);
45227 +
45228 + return 1;
45229 +}
45230 +
45231 +static void
45232 +insert_acl_obj_label(struct acl_object_label *obj,
45233 + struct acl_subject_label *subj)
45234 +{
45235 + unsigned int index =
45236 + fhash(obj->inode, obj->device, subj->obj_hash_size);
45237 + struct acl_object_label **curr;
45238 +
45239 +
45240 + obj->prev = NULL;
45241 +
45242 + curr = &subj->obj_hash[index];
45243 + if (*curr != NULL)
45244 + (*curr)->prev = obj;
45245 +
45246 + obj->next = *curr;
45247 + *curr = obj;
45248 +
45249 + return;
45250 +}
45251 +
45252 +static void
45253 +insert_acl_subj_label(struct acl_subject_label *obj,
45254 + struct acl_role_label *role)
45255 +{
45256 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
45257 + struct acl_subject_label **curr;
45258 +
45259 + obj->prev = NULL;
45260 +
45261 + curr = &role->subj_hash[index];
45262 + if (*curr != NULL)
45263 + (*curr)->prev = obj;
45264 +
45265 + obj->next = *curr;
45266 + *curr = obj;
45267 +
45268 + return;
45269 +}
45270 +
45271 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
45272 +
45273 +static void *
45274 +create_table(__u32 * len, int elementsize)
45275 +{
45276 + unsigned int table_sizes[] = {
45277 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
45278 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
45279 + 4194301, 8388593, 16777213, 33554393, 67108859
45280 + };
45281 + void *newtable = NULL;
45282 + unsigned int pwr = 0;
45283 +
45284 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
45285 + table_sizes[pwr] <= *len)
45286 + pwr++;
45287 +
45288 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
45289 + return newtable;
45290 +
45291 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
45292 + newtable =
45293 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
45294 + else
45295 + newtable = vmalloc(table_sizes[pwr] * elementsize);
45296 +
45297 + *len = table_sizes[pwr];
45298 +
45299 + return newtable;
45300 +}
45301 +
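
create_table() sizes each chained hash so the expected chain length (the lambda in the comment above) stays near one: it walks a fixed list of primes and takes the first one strictly larger than the number of elements the policy will insert, using vmalloc() when the table no longer fits in a page. The size selection on its own looks like this (a compilable userspace sketch; names are stand-ins):

    #include <stdio.h>

    /* Same prime progression as create_table(): pick the first prime
     * strictly larger than the requested element count, so the expected
     * chain length of the chained hash stays close to one. */
    static const unsigned int table_sizes[] = {
            7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
            32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
            4194301, 8388593, 16777213, 33554393, 67108859
    };

    static unsigned int pick_table_size(unsigned int len)
    {
            unsigned int pwr = 0;

            while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
                   table_sizes[pwr] <= len)
                    pwr++;

            if (table_sizes[pwr] <= len)
                    return 0;       /* request too large for the fixed prime table */

            return table_sizes[pwr];
    }

    int main(void)
    {
            printf("%u\n", pick_table_size(1000));   /* prints 1021 */
            return 0;
    }
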
45302 +static int
45303 +init_variables(const struct gr_arg *arg)
45304 +{
45305 + struct task_struct *reaper = &init_task;
45306 + unsigned int stacksize;
45307 +
45308 + subj_map_set.s_size = arg->role_db.num_subjects;
45309 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
45310 + name_set.n_size = arg->role_db.num_objects;
45311 + inodev_set.i_size = arg->role_db.num_objects;
45312 +
45313 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
45314 + !name_set.n_size || !inodev_set.i_size)
45315 + return 1;
45316 +
45317 + if (!gr_init_uidset())
45318 + return 1;
45319 +
45320 + /* set up the stack that holds allocation info */
45321 +
45322 + stacksize = arg->role_db.num_pointers + 5;
45323 +
45324 + if (!acl_alloc_stack_init(stacksize))
45325 + return 1;
45326 +
45327 + /* grab reference for the real root dentry and vfsmount */
45328 + read_lock(&reaper->fs->lock);
45329 + real_root = dget(reaper->fs->root.dentry);
45330 + real_root_mnt = mntget(reaper->fs->root.mnt);
45331 + read_unlock(&reaper->fs->lock);
45332 +
45333 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45334 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
45335 +#endif
45336 +
45337 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
45338 + if (fakefs_obj_rw == NULL)
45339 + return 1;
45340 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
45341 +
45342 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
45343 + if (fakefs_obj_rwx == NULL)
45344 + return 1;
45345 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
45346 +
45347 + subj_map_set.s_hash =
45348 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
45349 + acl_role_set.r_hash =
45350 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
45351 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
45352 + inodev_set.i_hash =
45353 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
45354 +
45355 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
45356 + !name_set.n_hash || !inodev_set.i_hash)
45357 + return 1;
45358 +
45359 + memset(subj_map_set.s_hash, 0,
45360 + sizeof(struct subject_map *) * subj_map_set.s_size);
45361 + memset(acl_role_set.r_hash, 0,
45362 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
45363 + memset(name_set.n_hash, 0,
45364 + sizeof (struct name_entry *) * name_set.n_size);
45365 + memset(inodev_set.i_hash, 0,
45366 + sizeof (struct inodev_entry *) * inodev_set.i_size);
45367 +
45368 + return 0;
45369 +}
45370 +
45371 +/* free information not needed after startup
45372 + currently contains user->kernel pointer mappings for subjects
45373 +*/
45374 +
45375 +static void
45376 +free_init_variables(void)
45377 +{
45378 + __u32 i;
45379 +
45380 + if (subj_map_set.s_hash) {
45381 + for (i = 0; i < subj_map_set.s_size; i++) {
45382 + if (subj_map_set.s_hash[i]) {
45383 + kfree(subj_map_set.s_hash[i]);
45384 + subj_map_set.s_hash[i] = NULL;
45385 + }
45386 + }
45387 +
45388 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
45389 + PAGE_SIZE)
45390 + kfree(subj_map_set.s_hash);
45391 + else
45392 + vfree(subj_map_set.s_hash);
45393 + }
45394 +
45395 + return;
45396 +}
45397 +
45398 +static void
45399 +free_variables(void)
45400 +{
45401 + struct acl_subject_label *s;
45402 + struct acl_role_label *r;
45403 + struct task_struct *task, *task2;
45404 + unsigned int x;
45405 +
45406 + gr_clear_learn_entries();
45407 +
45408 + read_lock(&tasklist_lock);
45409 + do_each_thread(task2, task) {
45410 + task->acl_sp_role = 0;
45411 + task->acl_role_id = 0;
45412 + task->acl = NULL;
45413 + task->role = NULL;
45414 + } while_each_thread(task2, task);
45415 + read_unlock(&tasklist_lock);
45416 +
45417 + /* release the reference to the real root dentry and vfsmount */
45418 + if (real_root)
45419 + dput(real_root);
45420 + real_root = NULL;
45421 + if (real_root_mnt)
45422 + mntput(real_root_mnt);
45423 + real_root_mnt = NULL;
45424 +
45425 + /* free all object hash tables */
45426 +
45427 + FOR_EACH_ROLE_START(r)
45428 + if (r->subj_hash == NULL)
45429 + goto next_role;
45430 + FOR_EACH_SUBJECT_START(r, s, x)
45431 + if (s->obj_hash == NULL)
45432 + break;
45433 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45434 + kfree(s->obj_hash);
45435 + else
45436 + vfree(s->obj_hash);
45437 + FOR_EACH_SUBJECT_END(s, x)
45438 + FOR_EACH_NESTED_SUBJECT_START(r, s)
45439 + if (s->obj_hash == NULL)
45440 + break;
45441 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45442 + kfree(s->obj_hash);
45443 + else
45444 + vfree(s->obj_hash);
45445 + FOR_EACH_NESTED_SUBJECT_END(s)
45446 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45447 + kfree(r->subj_hash);
45448 + else
45449 + vfree(r->subj_hash);
45450 + r->subj_hash = NULL;
45451 +next_role:
45452 + FOR_EACH_ROLE_END(r)
45453 +
45454 + acl_free_all();
45455 +
45456 + if (acl_role_set.r_hash) {
45457 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45458 + PAGE_SIZE)
45459 + kfree(acl_role_set.r_hash);
45460 + else
45461 + vfree(acl_role_set.r_hash);
45462 + }
45463 + if (name_set.n_hash) {
45464 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
45465 + PAGE_SIZE)
45466 + kfree(name_set.n_hash);
45467 + else
45468 + vfree(name_set.n_hash);
45469 + }
45470 +
45471 + if (inodev_set.i_hash) {
45472 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45473 + PAGE_SIZE)
45474 + kfree(inodev_set.i_hash);
45475 + else
45476 + vfree(inodev_set.i_hash);
45477 + }
45478 +
45479 + gr_free_uidset();
45480 +
45481 + memset(&name_set, 0, sizeof (struct name_db));
45482 + memset(&inodev_set, 0, sizeof (struct inodev_db));
45483 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45484 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45485 +
45486 + default_role = NULL;
45487 + role_list = NULL;
45488 +
45489 + return;
45490 +}
45491 +
45492 +static __u32
45493 +count_user_objs(struct acl_object_label *userp)
45494 +{
45495 + struct acl_object_label o_tmp;
45496 + __u32 num = 0;
45497 +
45498 + while (userp) {
45499 + if (copy_from_user(&o_tmp, userp,
45500 + sizeof (struct acl_object_label)))
45501 + break;
45502 +
45503 + userp = o_tmp.prev;
45504 + num++;
45505 + }
45506 +
45507 + return num;
45508 +}
45509 +
45510 +static struct acl_subject_label *
45511 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45512 +
45513 +static int
45514 +copy_user_glob(struct acl_object_label *obj)
45515 +{
45516 + struct acl_object_label *g_tmp, **guser;
45517 + unsigned int len;
45518 + char *tmp;
45519 +
45520 + if (obj->globbed == NULL)
45521 + return 0;
45522 +
45523 + guser = &obj->globbed;
45524 + while (*guser) {
45525 + g_tmp = (struct acl_object_label *)
45526 + acl_alloc(sizeof (struct acl_object_label));
45527 + if (g_tmp == NULL)
45528 + return -ENOMEM;
45529 +
45530 + if (copy_from_user(g_tmp, *guser,
45531 + sizeof (struct acl_object_label)))
45532 + return -EFAULT;
45533 +
45534 + len = strnlen_user(g_tmp->filename, PATH_MAX);
45535 +
45536 + if (!len || len >= PATH_MAX)
45537 + return -EINVAL;
45538 +
45539 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45540 + return -ENOMEM;
45541 +
45542 + if (copy_from_user(tmp, g_tmp->filename, len))
45543 + return -EFAULT;
45544 + tmp[len-1] = '\0';
45545 + g_tmp->filename = tmp;
45546 +
45547 + *guser = g_tmp;
45548 + guser = &(g_tmp->next);
45549 + }
45550 +
45551 + return 0;
45552 +}
45553 +
45554 +static int
45555 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45556 + struct acl_role_label *role)
45557 +{
45558 + struct acl_object_label *o_tmp;
45559 + unsigned int len;
45560 + int ret;
45561 + char *tmp;
45562 +
45563 + while (userp) {
45564 + if ((o_tmp = (struct acl_object_label *)
45565 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
45566 + return -ENOMEM;
45567 +
45568 + if (copy_from_user(o_tmp, userp,
45569 + sizeof (struct acl_object_label)))
45570 + return -EFAULT;
45571 +
45572 + userp = o_tmp->prev;
45573 +
45574 + len = strnlen_user(o_tmp->filename, PATH_MAX);
45575 +
45576 + if (!len || len >= PATH_MAX)
45577 + return -EINVAL;
45578 +
45579 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45580 + return -ENOMEM;
45581 +
45582 + if (copy_from_user(tmp, o_tmp->filename, len))
45583 + return -EFAULT;
45584 + tmp[len-1] = '\0';
45585 + o_tmp->filename = tmp;
45586 +
45587 + insert_acl_obj_label(o_tmp, subj);
45588 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45589 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45590 + return -ENOMEM;
45591 +
45592 + ret = copy_user_glob(o_tmp);
45593 + if (ret)
45594 + return ret;
45595 +
45596 + if (o_tmp->nested) {
45597 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45598 + if (IS_ERR(o_tmp->nested))
45599 + return PTR_ERR(o_tmp->nested);
45600 +
45601 + /* insert into nested subject list */
45602 + o_tmp->nested->next = role->hash->first;
45603 + role->hash->first = o_tmp->nested;
45604 + }
45605 + }
45606 +
45607 + return 0;
45608 +}
45609 +
45610 +static __u32
45611 +count_user_subjs(struct acl_subject_label *userp)
45612 +{
45613 + struct acl_subject_label s_tmp;
45614 + __u32 num = 0;
45615 +
45616 + while (userp) {
45617 + if (copy_from_user(&s_tmp, userp,
45618 + sizeof (struct acl_subject_label)))
45619 + break;
45620 +
45621 + userp = s_tmp.prev;
45622 + /* do not count nested subjects against this count, since
45623 + they are not included in the hash table, but are
45624 + attached to objects. We have already counted
45625 + the subjects in userspace for the allocation
45626 + stack
45627 + */
45628 + if (!(s_tmp.mode & GR_NESTED))
45629 + num++;
45630 + }
45631 +
45632 + return num;
45633 +}
45634 +
45635 +static int
45636 +copy_user_allowedips(struct acl_role_label *rolep)
45637 +{
45638 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45639 +
45640 + ruserip = rolep->allowed_ips;
45641 +
45642 + while (ruserip) {
45643 + rlast = rtmp;
45644 +
45645 + if ((rtmp = (struct role_allowed_ip *)
45646 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45647 + return -ENOMEM;
45648 +
45649 + if (copy_from_user(rtmp, ruserip,
45650 + sizeof (struct role_allowed_ip)))
45651 + return -EFAULT;
45652 +
45653 + ruserip = rtmp->prev;
45654 +
45655 + if (!rlast) {
45656 + rtmp->prev = NULL;
45657 + rolep->allowed_ips = rtmp;
45658 + } else {
45659 + rlast->next = rtmp;
45660 + rtmp->prev = rlast;
45661 + }
45662 +
45663 + if (!ruserip)
45664 + rtmp->next = NULL;
45665 + }
45666 +
45667 + return 0;
45668 +}
45669 +
45670 +static int
45671 +copy_user_transitions(struct acl_role_label *rolep)
45672 +{
45673 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
45674 +
45675 + unsigned int len;
45676 + char *tmp;
45677 +
45678 + rusertp = rolep->transitions;
45679 +
45680 + while (rusertp) {
45681 + rlast = rtmp;
45682 +
45683 + if ((rtmp = (struct role_transition *)
45684 + acl_alloc(sizeof (struct role_transition))) == NULL)
45685 + return -ENOMEM;
45686 +
45687 + if (copy_from_user(rtmp, rusertp,
45688 + sizeof (struct role_transition)))
45689 + return -EFAULT;
45690 +
45691 + rusertp = rtmp->prev;
45692 +
45693 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45694 +
45695 + if (!len || len >= GR_SPROLE_LEN)
45696 + return -EINVAL;
45697 +
45698 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45699 + return -ENOMEM;
45700 +
45701 + if (copy_from_user(tmp, rtmp->rolename, len))
45702 + return -EFAULT;
45703 + tmp[len-1] = '\0';
45704 + rtmp->rolename = tmp;
45705 +
45706 + if (!rlast) {
45707 + rtmp->prev = NULL;
45708 + rolep->transitions = rtmp;
45709 + } else {
45710 + rlast->next = rtmp;
45711 + rtmp->prev = rlast;
45712 + }
45713 +
45714 + if (!rusertp)
45715 + rtmp->next = NULL;
45716 + }
45717 +
45718 + return 0;
45719 +}
45720 +
45721 +static struct acl_subject_label *
45722 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45723 +{
45724 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45725 + unsigned int len;
45726 + char *tmp;
45727 + __u32 num_objs;
45728 + struct acl_ip_label **i_tmp, *i_utmp2;
45729 + struct gr_hash_struct ghash;
45730 + struct subject_map *subjmap;
45731 + unsigned int i_num;
45732 + int err;
45733 +
45734 + s_tmp = lookup_subject_map(userp);
45735 +
45736 + /* we've already copied this subject into the kernel, just return
45737 + the reference to it, and don't copy it over again
45738 + */
45739 + if (s_tmp)
45740 + return(s_tmp);
45741 +
45742 + if ((s_tmp = (struct acl_subject_label *)
45743 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45744 + return ERR_PTR(-ENOMEM);
45745 +
45746 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45747 + if (subjmap == NULL)
45748 + return ERR_PTR(-ENOMEM);
45749 +
45750 + subjmap->user = userp;
45751 + subjmap->kernel = s_tmp;
45752 + insert_subj_map_entry(subjmap);
45753 +
45754 + if (copy_from_user(s_tmp, userp,
45755 + sizeof (struct acl_subject_label)))
45756 + return ERR_PTR(-EFAULT);
45757 +
45758 + len = strnlen_user(s_tmp->filename, PATH_MAX);
45759 +
45760 + if (!len || len >= PATH_MAX)
45761 + return ERR_PTR(-EINVAL);
45762 +
45763 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45764 + return ERR_PTR(-ENOMEM);
45765 +
45766 + if (copy_from_user(tmp, s_tmp->filename, len))
45767 + return ERR_PTR(-EFAULT);
45768 + tmp[len-1] = '\0';
45769 + s_tmp->filename = tmp;
45770 +
45771 + if (!strcmp(s_tmp->filename, "/"))
45772 + role->root_label = s_tmp;
45773 +
45774 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45775 + return ERR_PTR(-EFAULT);
45776 +
45777 + /* copy user and group transition tables */
45778 +
45779 + if (s_tmp->user_trans_num) {
45780 + uid_t *uidlist;
45781 +
45782 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45783 + if (uidlist == NULL)
45784 + return ERR_PTR(-ENOMEM);
45785 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45786 + return ERR_PTR(-EFAULT);
45787 +
45788 + s_tmp->user_transitions = uidlist;
45789 + }
45790 +
45791 + if (s_tmp->group_trans_num) {
45792 + gid_t *gidlist;
45793 +
45794 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45795 + if (gidlist == NULL)
45796 + return ERR_PTR(-ENOMEM);
45797 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45798 + return ERR_PTR(-EFAULT);
45799 +
45800 + s_tmp->group_transitions = gidlist;
45801 + }
45802 +
45803 + /* set up object hash table */
45804 + num_objs = count_user_objs(ghash.first);
45805 +
45806 + s_tmp->obj_hash_size = num_objs;
45807 + s_tmp->obj_hash =
45808 + (struct acl_object_label **)
45809 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45810 +
45811 + if (!s_tmp->obj_hash)
45812 + return ERR_PTR(-ENOMEM);
45813 +
45814 + memset(s_tmp->obj_hash, 0,
45815 + s_tmp->obj_hash_size *
45816 + sizeof (struct acl_object_label *));
45817 +
45818 + /* add in objects */
45819 + err = copy_user_objs(ghash.first, s_tmp, role);
45820 +
45821 + if (err)
45822 + return ERR_PTR(err);
45823 +
45824 + /* set pointer for parent subject */
45825 + if (s_tmp->parent_subject) {
45826 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45827 +
45828 + if (IS_ERR(s_tmp2))
45829 + return s_tmp2;
45830 +
45831 + s_tmp->parent_subject = s_tmp2;
45832 + }
45833 +
45834 + /* add in ip acls */
45835 +
45836 + if (!s_tmp->ip_num) {
45837 + s_tmp->ips = NULL;
45838 + goto insert;
45839 + }
45840 +
45841 + i_tmp =
45842 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45843 + sizeof (struct acl_ip_label *));
45844 +
45845 + if (!i_tmp)
45846 + return ERR_PTR(-ENOMEM);
45847 +
45848 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45849 + *(i_tmp + i_num) =
45850 + (struct acl_ip_label *)
45851 + acl_alloc(sizeof (struct acl_ip_label));
45852 + if (!*(i_tmp + i_num))
45853 + return ERR_PTR(-ENOMEM);
45854 +
45855 + if (copy_from_user
45856 + (&i_utmp2, s_tmp->ips + i_num,
45857 + sizeof (struct acl_ip_label *)))
45858 + return ERR_PTR(-EFAULT);
45859 +
45860 + if (copy_from_user
45861 + (*(i_tmp + i_num), i_utmp2,
45862 + sizeof (struct acl_ip_label)))
45863 + return ERR_PTR(-EFAULT);
45864 +
45865 + if ((*(i_tmp + i_num))->iface == NULL)
45866 + continue;
45867 +
45868 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45869 + if (!len || len >= IFNAMSIZ)
45870 + return ERR_PTR(-EINVAL);
45871 + tmp = acl_alloc(len);
45872 + if (tmp == NULL)
45873 + return ERR_PTR(-ENOMEM);
45874 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45875 + return ERR_PTR(-EFAULT);
45876 + (*(i_tmp + i_num))->iface = tmp;
45877 + }
45878 +
45879 + s_tmp->ips = i_tmp;
45880 +
45881 +insert:
45882 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45883 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45884 + return ERR_PTR(-ENOMEM);
45885 +
45886 + return s_tmp;
45887 +}
45888 +
45889 +static int
45890 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45891 +{
45892 + struct acl_subject_label s_pre;
45893 + struct acl_subject_label * ret;
45894 + int err;
45895 +
45896 + while (userp) {
45897 + if (copy_from_user(&s_pre, userp,
45898 + sizeof (struct acl_subject_label)))
45899 + return -EFAULT;
45900 +
45901 + /* do not add nested subjects here, add
45902 + while parsing objects
45903 + */
45904 +
45905 + if (s_pre.mode & GR_NESTED) {
45906 + userp = s_pre.prev;
45907 + continue;
45908 + }
45909 +
45910 + ret = do_copy_user_subj(userp, role);
45911 +
45912 + err = PTR_ERR(ret);
45913 + if (IS_ERR(ret))
45914 + return err;
45915 +
45916 + insert_acl_subj_label(ret, role);
45917 +
45918 + userp = s_pre.prev;
45919 + }
45920 +
45921 + return 0;
45922 +}
45923 +
45924 +static int
45925 +copy_user_acl(struct gr_arg *arg)
45926 +{
45927 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45928 + struct sprole_pw *sptmp;
45929 + struct gr_hash_struct *ghash;
45930 + uid_t *domainlist;
45931 + unsigned int r_num;
45932 + unsigned int len;
45933 + char *tmp;
45934 + int err = 0;
45935 + __u16 i;
45936 + __u32 num_subjs;
45937 +
45938 + /* we need a default and kernel role */
45939 + if (arg->role_db.num_roles < 2)
45940 + return -EINVAL;
45941 +
45942 + /* copy special role authentication info from userspace */
45943 +
45944 + num_sprole_pws = arg->num_sprole_pws;
45945 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45946 +
45947 + if (!acl_special_roles) {
45948 + err = -ENOMEM;
45949 + goto cleanup;
45950 + }
45951 +
45952 + for (i = 0; i < num_sprole_pws; i++) {
45953 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45954 + if (!sptmp) {
45955 + err = -ENOMEM;
45956 + goto cleanup;
45957 + }
45958 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45959 + sizeof (struct sprole_pw))) {
45960 + err = -EFAULT;
45961 + goto cleanup;
45962 + }
45963 +
45964 + len =
45965 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45966 +
45967 + if (!len || len >= GR_SPROLE_LEN) {
45968 + err = -EINVAL;
45969 + goto cleanup;
45970 + }
45971 +
45972 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45973 + err = -ENOMEM;
45974 + goto cleanup;
45975 + }
45976 +
45977 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45978 + err = -EFAULT;
45979 + goto cleanup;
45980 + }
45981 + tmp[len-1] = '\0';
45982 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45983 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45984 +#endif
45985 + sptmp->rolename = tmp;
45986 + acl_special_roles[i] = sptmp;
45987 + }
45988 +
45989 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45990 +
45991 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45992 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45993 +
45994 + if (!r_tmp) {
45995 + err = -ENOMEM;
45996 + goto cleanup;
45997 + }
45998 +
45999 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
46000 + sizeof (struct acl_role_label *))) {
46001 + err = -EFAULT;
46002 + goto cleanup;
46003 + }
46004 +
46005 + if (copy_from_user(r_tmp, r_utmp2,
46006 + sizeof (struct acl_role_label))) {
46007 + err = -EFAULT;
46008 + goto cleanup;
46009 + }
46010 +
46011 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
46012 +
46013 + if (!len || len >= PATH_MAX) {
46014 + err = -EINVAL;
46015 + goto cleanup;
46016 + }
46017 +
46018 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
46019 + err = -ENOMEM;
46020 + goto cleanup;
46021 + }
46022 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
46023 + err = -EFAULT;
46024 + goto cleanup;
46025 + }
46026 + tmp[len-1] = '\0';
46027 + r_tmp->rolename = tmp;
46028 +
46029 + if (!strcmp(r_tmp->rolename, "default")
46030 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
46031 + default_role = r_tmp;
46032 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
46033 + kernel_role = r_tmp;
46034 + }
46035 +
46036 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
46037 + err = -ENOMEM;
46038 + goto cleanup;
46039 + }
46040 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
46041 + err = -EFAULT;
46042 + goto cleanup;
46043 + }
46044 +
46045 + r_tmp->hash = ghash;
46046 +
46047 + num_subjs = count_user_subjs(r_tmp->hash->first);
46048 +
46049 + r_tmp->subj_hash_size = num_subjs;
46050 + r_tmp->subj_hash =
46051 + (struct acl_subject_label **)
46052 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
46053 +
46054 + if (!r_tmp->subj_hash) {
46055 + err = -ENOMEM;
46056 + goto cleanup;
46057 + }
46058 +
46059 + err = copy_user_allowedips(r_tmp);
46060 + if (err)
46061 + goto cleanup;
46062 +
46063 + /* copy domain info */
46064 + if (r_tmp->domain_children != NULL) {
46065 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
46066 + if (domainlist == NULL) {
46067 + err = -ENOMEM;
46068 + goto cleanup;
46069 + }
46070 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
46071 + err = -EFAULT;
46072 + goto cleanup;
46073 + }
46074 + r_tmp->domain_children = domainlist;
46075 + }
46076 +
46077 + err = copy_user_transitions(r_tmp);
46078 + if (err)
46079 + goto cleanup;
46080 +
46081 + memset(r_tmp->subj_hash, 0,
46082 + r_tmp->subj_hash_size *
46083 + sizeof (struct acl_subject_label *));
46084 +
46085 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
46086 +
46087 + if (err)
46088 + goto cleanup;
46089 +
46090 + /* set nested subject list to null */
46091 + r_tmp->hash->first = NULL;
46092 +
46093 + insert_acl_role_label(r_tmp);
46094 + }
46095 +
46096 + goto return_err;
46097 + cleanup:
46098 + free_variables();
46099 + return_err:
46100 + return err;
46101 +
46102 +}
46103 +
46104 +static int
46105 +gracl_init(struct gr_arg *args)
46106 +{
46107 + int error = 0;
46108 +
46109 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
46110 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
46111 +
46112 + if (init_variables(args)) {
46113 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
46114 + error = -ENOMEM;
46115 + free_variables();
46116 + goto out;
46117 + }
46118 +
46119 + error = copy_user_acl(args);
46120 + free_init_variables();
46121 + if (error) {
46122 + free_variables();
46123 + goto out;
46124 + }
46125 +
46126 + if ((error = gr_set_acls(0))) {
46127 + free_variables();
46128 + goto out;
46129 + }
46130 +
46131 + pax_open_kernel();
46132 + gr_status |= GR_READY;
46133 + pax_close_kernel();
46134 +
46135 + out:
46136 + return error;
46137 +}
46138 +
46139 +/* derived from glibc fnmatch() 0: match, 1: no match*/
46140 +
46141 +static int
46142 +glob_match(const char *p, const char *n)
46143 +{
46144 + char c;
46145 +
46146 + while ((c = *p++) != '\0') {
46147 + switch (c) {
46148 + case '?':
46149 + if (*n == '\0')
46150 + return 1;
46151 + else if (*n == '/')
46152 + return 1;
46153 + break;
46154 + case '\\':
46155 + if (*n != c)
46156 + return 1;
46157 + break;
46158 + case '*':
46159 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
46160 + if (*n == '/')
46161 + return 1;
46162 + else if (c == '?') {
46163 + if (*n == '\0')
46164 + return 1;
46165 + else
46166 + ++n;
46167 + }
46168 + }
46169 + if (c == '\0') {
46170 + return 0;
46171 + } else {
46172 + const char *endp;
46173 +
46174 + if ((endp = strchr(n, '/')) == NULL)
46175 + endp = n + strlen(n);
46176 +
46177 + if (c == '[') {
46178 + for (--p; n < endp; ++n)
46179 + if (!glob_match(p, n))
46180 + return 0;
46181 + } else if (c == '/') {
46182 + while (*n != '\0' && *n != '/')
46183 + ++n;
46184 + if (*n == '/' && !glob_match(p, n + 1))
46185 + return 0;
46186 + } else {
46187 + for (--p; n < endp; ++n)
46188 + if (*n == c && !glob_match(p, n))
46189 + return 0;
46190 + }
46191 +
46192 + return 1;
46193 + }
46194 + case '[':
46195 + {
46196 + int not;
46197 + char cold;
46198 +
46199 + if (*n == '\0' || *n == '/')
46200 + return 1;
46201 +
46202 + not = (*p == '!' || *p == '^');
46203 + if (not)
46204 + ++p;
46205 +
46206 + c = *p++;
46207 + for (;;) {
46208 + unsigned char fn = (unsigned char)*n;
46209 +
46210 + if (c == '\0')
46211 + return 1;
46212 + else {
46213 + if (c == fn)
46214 + goto matched;
46215 + cold = c;
46216 + c = *p++;
46217 +
46218 + if (c == '-' && *p != ']') {
46219 + unsigned char cend = *p++;
46220 +
46221 + if (cend == '\0')
46222 + return 1;
46223 +
46224 + if (cold <= fn && fn <= cend)
46225 + goto matched;
46226 +
46227 + c = *p++;
46228 + }
46229 + }
46230 +
46231 + if (c == ']')
46232 + break;
46233 + }
46234 + if (!not)
46235 + return 1;
46236 + break;
46237 + matched:
46238 + while (c != ']') {
46239 + if (c == '\0')
46240 + return 1;
46241 +
46242 + c = *p++;
46243 + }
46244 + if (not)
46245 + return 1;
46246 + }
46247 + break;
46248 + default:
46249 + if (c != *n)
46250 + return 1;
46251 + }
46252 +
46253 + ++n;
46254 + }
46255 +
46256 + if (*n == '\0')
46257 + return 0;
46258 +
46259 + if (*n == '/')
46260 + return 0;
46261 +
46262 + return 1;
46263 +}
46264 +
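
glob_match() above keeps the fnmatch() convention noted in its header comment: 0 means the pattern matched, 1 means it did not, and the wildcards are path-aware, so '?' never matches a '/' and '*' will not cross one to reach further pattern text. The libc counterpart shows the same behaviour from userspace; the sketch below is for illustration only and exercises fnmatch(3) with FNM_PATHNAME, not the kernel function itself:

    /* Userspace illustration of slash-bounded glob matching, using
     * fnmatch(3) which follows the same 0-on-match convention. */
    #include <fnmatch.h>
    #include <stdio.h>

    int main(void)
    {
            /* 0 = match, non-zero = no match */
            printf("%d\n", fnmatch("/tmp/*.c", "/tmp/foo.c", FNM_PATHNAME));      /* 0 */
            printf("%d\n", fnmatch("/tmp/*.c", "/tmp/sub/foo.c", FNM_PATHNAME));  /* non-zero: '*' stops at '/' */
            printf("%d\n", fnmatch("/dev/tty[0-9]", "/dev/tty5", FNM_PATHNAME));  /* 0: bracket range */
            return 0;
    }
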
46265 +static struct acl_object_label *
46266 +chk_glob_label(struct acl_object_label *globbed,
46267 + struct dentry *dentry, struct vfsmount *mnt, char **path)
46268 +{
46269 + struct acl_object_label *tmp;
46270 +
46271 + if (*path == NULL)
46272 + *path = gr_to_filename_nolock(dentry, mnt);
46273 +
46274 + tmp = globbed;
46275 +
46276 + while (tmp) {
46277 + if (!glob_match(tmp->filename, *path))
46278 + return tmp;
46279 + tmp = tmp->next;
46280 + }
46281 +
46282 + return NULL;
46283 +}
46284 +
46285 +static struct acl_object_label *
46286 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46287 + const ino_t curr_ino, const dev_t curr_dev,
46288 + const struct acl_subject_label *subj, char **path, const int checkglob)
46289 +{
46290 + struct acl_subject_label *tmpsubj;
46291 + struct acl_object_label *retval;
46292 + struct acl_object_label *retval2;
46293 +
46294 + tmpsubj = (struct acl_subject_label *) subj;
46295 + read_lock(&gr_inode_lock);
46296 + do {
46297 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
46298 + if (retval) {
46299 + if (checkglob && retval->globbed) {
46300 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
46301 + (struct vfsmount *)orig_mnt, path);
46302 + if (retval2)
46303 + retval = retval2;
46304 + }
46305 + break;
46306 + }
46307 + } while ((tmpsubj = tmpsubj->parent_subject));
46308 + read_unlock(&gr_inode_lock);
46309 +
46310 + return retval;
46311 +}
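/*
 * __full_lookup() above consults the subject's own object hash first and then
 * walks the parent_subject chain, so an object defined on a nested subject
 * shadows the same object defined on any of its ancestors.
 */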
46312 +
46313 +static __inline__ struct acl_object_label *
46314 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46315 + const struct dentry *curr_dentry,
46316 + const struct acl_subject_label *subj, char **path, const int checkglob)
46317 +{
46318 + int newglob = checkglob;
46319 +
46320 +	/* if we aren't yet checking a subdirectory of the original path, don't do glob checking,
46321 +	   as we don't want a / * rule to match instead of the / object.
46322 +	   don't do this for create lookups that call this function, though, since they're looking up
46323 +	   on the parent and thus need globbing checks on all paths
46324 +	*/
46325 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
46326 + newglob = GR_NO_GLOB;
46327 +
46328 + return __full_lookup(orig_dentry, orig_mnt,
46329 + curr_dentry->d_inode->i_ino,
46330 + __get_dev(curr_dentry), subj, path, newglob);
46331 +}
46332 +
46333 +static struct acl_object_label *
46334 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46335 + const struct acl_subject_label *subj, char *path, const int checkglob)
46336 +{
46337 + struct dentry *dentry = (struct dentry *) l_dentry;
46338 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46339 + struct acl_object_label *retval;
46340 +
46341 + spin_lock(&dcache_lock);
46342 + spin_lock(&vfsmount_lock);
46343 +
46344 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
46345 +#ifdef CONFIG_NET
46346 + mnt == sock_mnt ||
46347 +#endif
46348 +#ifdef CONFIG_HUGETLBFS
46349 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
46350 +#endif
46351 + /* ignore Eric Biederman */
46352 + IS_PRIVATE(l_dentry->d_inode))) {
46353 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
46354 + goto out;
46355 + }
46356 +
46357 + for (;;) {
46358 + if (dentry == real_root && mnt == real_root_mnt)
46359 + break;
46360 +
46361 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46362 + if (mnt->mnt_parent == mnt)
46363 + break;
46364 +
46365 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46366 + if (retval != NULL)
46367 + goto out;
46368 +
46369 + dentry = mnt->mnt_mountpoint;
46370 + mnt = mnt->mnt_parent;
46371 + continue;
46372 + }
46373 +
46374 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46375 + if (retval != NULL)
46376 + goto out;
46377 +
46378 + dentry = dentry->d_parent;
46379 + }
46380 +
46381 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46382 +
46383 + if (retval == NULL)
46384 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
46385 +out:
46386 + spin_unlock(&vfsmount_lock);
46387 + spin_unlock(&dcache_lock);
46388 +
46389 + BUG_ON(retval == NULL);
46390 +
46391 + return retval;
46392 +}
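/*
 * __chk_obj_label() resolves an object label by walking from the given dentry
 * up towards real_root, crossing mount points via mnt_mountpoint/mnt_parent
 * and attempting a lookup at every ancestor.  A final lookup against real_root
 * acts as the catch-all, which is why the BUG_ON() above expects a non-NULL
 * result whenever the RBAC system is loaded.
 */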
46393 +
46394 +static __inline__ struct acl_object_label *
46395 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46396 + const struct acl_subject_label *subj)
46397 +{
46398 + char *path = NULL;
46399 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46400 +}
46401 +
46402 +static __inline__ struct acl_object_label *
46403 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46404 + const struct acl_subject_label *subj)
46405 +{
46406 + char *path = NULL;
46407 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46408 +}
46409 +
46410 +static __inline__ struct acl_object_label *
46411 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46412 + const struct acl_subject_label *subj, char *path)
46413 +{
46414 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46415 +}
46416 +
46417 +static struct acl_subject_label *
46418 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46419 + const struct acl_role_label *role)
46420 +{
46421 + struct dentry *dentry = (struct dentry *) l_dentry;
46422 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46423 + struct acl_subject_label *retval;
46424 +
46425 + spin_lock(&dcache_lock);
46426 + spin_lock(&vfsmount_lock);
46427 +
46428 + for (;;) {
46429 + if (dentry == real_root && mnt == real_root_mnt)
46430 + break;
46431 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46432 + if (mnt->mnt_parent == mnt)
46433 + break;
46434 +
46435 + read_lock(&gr_inode_lock);
46436 + retval =
46437 + lookup_acl_subj_label(dentry->d_inode->i_ino,
46438 + __get_dev(dentry), role);
46439 + read_unlock(&gr_inode_lock);
46440 + if (retval != NULL)
46441 + goto out;
46442 +
46443 + dentry = mnt->mnt_mountpoint;
46444 + mnt = mnt->mnt_parent;
46445 + continue;
46446 + }
46447 +
46448 + read_lock(&gr_inode_lock);
46449 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46450 + __get_dev(dentry), role);
46451 + read_unlock(&gr_inode_lock);
46452 + if (retval != NULL)
46453 + goto out;
46454 +
46455 + dentry = dentry->d_parent;
46456 + }
46457 +
46458 + read_lock(&gr_inode_lock);
46459 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46460 + __get_dev(dentry), role);
46461 + read_unlock(&gr_inode_lock);
46462 +
46463 + if (unlikely(retval == NULL)) {
46464 + read_lock(&gr_inode_lock);
46465 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
46466 + __get_dev(real_root), role);
46467 + read_unlock(&gr_inode_lock);
46468 + }
46469 +out:
46470 + spin_unlock(&vfsmount_lock);
46471 + spin_unlock(&dcache_lock);
46472 +
46473 + BUG_ON(retval == NULL);
46474 +
46475 + return retval;
46476 +}
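/*
 * chk_subj_label() performs the same ancestor walk as __chk_obj_label() above,
 * but against the role's subject hash, again falling back to real_root.
 */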
46477 +
46478 +static void
46479 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46480 +{
46481 + struct task_struct *task = current;
46482 + const struct cred *cred = current_cred();
46483 +
46484 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46485 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46486 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46487 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46488 +
46489 + return;
46490 +}
46491 +
46492 +static void
46493 +gr_log_learn_sysctl(const char *path, const __u32 mode)
46494 +{
46495 + struct task_struct *task = current;
46496 + const struct cred *cred = current_cred();
46497 +
46498 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46499 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46500 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46501 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46502 +
46503 + return;
46504 +}
46505 +
46506 +static void
46507 +gr_log_learn_id_change(const char type, const unsigned int real,
46508 + const unsigned int effective, const unsigned int fs)
46509 +{
46510 + struct task_struct *task = current;
46511 + const struct cred *cred = current_cred();
46512 +
46513 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46514 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46515 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46516 + type, real, effective, fs, &task->signal->saved_ip);
46517 +
46518 + return;
46519 +}
46520 +
46521 +__u32
46522 +gr_check_link(const struct dentry * new_dentry,
46523 + const struct dentry * parent_dentry,
46524 + const struct vfsmount * parent_mnt,
46525 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46526 +{
46527 + struct acl_object_label *obj;
46528 + __u32 oldmode, newmode;
46529 + __u32 needmode;
46530 +
46531 + if (unlikely(!(gr_status & GR_READY)))
46532 + return (GR_CREATE | GR_LINK);
46533 +
46534 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46535 + oldmode = obj->mode;
46536 +
46537 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46538 + oldmode |= (GR_CREATE | GR_LINK);
46539 +
46540 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46541 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46542 + needmode |= GR_SETID | GR_AUDIT_SETID;
46543 +
46544 + newmode =
46545 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46546 + oldmode | needmode);
46547 +
46548 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46549 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46550 + GR_INHERIT | GR_AUDIT_INHERIT);
46551 +
46552 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46553 + goto bad;
46554 +
46555 + if ((oldmode & needmode) != needmode)
46556 + goto bad;
46557 +
46558 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46559 + if ((newmode & needmode) != needmode)
46560 + goto bad;
46561 +
46562 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46563 + return newmode;
46564 +bad:
46565 + needmode = oldmode;
46566 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46567 + needmode |= GR_SETID;
46568 +
46569 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46570 + gr_log_learn(old_dentry, old_mnt, needmode);
46571 + return (GR_CREATE | GR_LINK);
46572 + } else if (newmode & GR_SUPPRESS)
46573 + return GR_SUPPRESS;
46574 + else
46575 + return 0;
46576 +}
46577 +
46578 +__u32
46579 +gr_search_file(const struct dentry * dentry, const __u32 mode,
46580 + const struct vfsmount * mnt)
46581 +{
46582 + __u32 retval = mode;
46583 + struct acl_subject_label *curracl;
46584 + struct acl_object_label *currobj;
46585 +
46586 + if (unlikely(!(gr_status & GR_READY)))
46587 + return (mode & ~GR_AUDITS);
46588 +
46589 + curracl = current->acl;
46590 +
46591 + currobj = chk_obj_label(dentry, mnt, curracl);
46592 + retval = currobj->mode & mode;
46593 +
46594 + /* if we're opening a specified transfer file for writing
46595 + (e.g. /dev/initctl), then transfer our role to init
46596 + */
46597 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46598 + current->role->roletype & GR_ROLE_PERSIST)) {
46599 + struct task_struct *task = init_pid_ns.child_reaper;
46600 +
46601 + if (task->role != current->role) {
46602 + task->acl_sp_role = 0;
46603 + task->acl_role_id = current->acl_role_id;
46604 + task->role = current->role;
46605 + rcu_read_lock();
46606 + read_lock(&grsec_exec_file_lock);
46607 + gr_apply_subject_to_task(task);
46608 + read_unlock(&grsec_exec_file_lock);
46609 + rcu_read_unlock();
46610 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46611 + }
46612 + }
46613 +
46614 + if (unlikely
46615 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46616 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46617 + __u32 new_mode = mode;
46618 +
46619 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46620 +
46621 + retval = new_mode;
46622 +
46623 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46624 + new_mode |= GR_INHERIT;
46625 +
46626 + if (!(mode & GR_NOLEARN))
46627 + gr_log_learn(dentry, mnt, new_mode);
46628 + }
46629 +
46630 + return retval;
46631 +}
46632 +
46633 +__u32
46634 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46635 + const struct vfsmount * mnt, const __u32 mode)
46636 +{
46637 + struct name_entry *match;
46638 + struct acl_object_label *matchpo;
46639 + struct acl_subject_label *curracl;
46640 + char *path;
46641 + __u32 retval;
46642 +
46643 + if (unlikely(!(gr_status & GR_READY)))
46644 + return (mode & ~GR_AUDITS);
46645 +
46646 + preempt_disable();
46647 + path = gr_to_filename_rbac(new_dentry, mnt);
46648 + match = lookup_name_entry_create(path);
46649 +
46650 + if (!match)
46651 + goto check_parent;
46652 +
46653 + curracl = current->acl;
46654 +
46655 + read_lock(&gr_inode_lock);
46656 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46657 + read_unlock(&gr_inode_lock);
46658 +
46659 + if (matchpo) {
46660 + if ((matchpo->mode & mode) !=
46661 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
46662 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46663 + __u32 new_mode = mode;
46664 +
46665 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46666 +
46667 + gr_log_learn(new_dentry, mnt, new_mode);
46668 +
46669 + preempt_enable();
46670 + return new_mode;
46671 + }
46672 + preempt_enable();
46673 + return (matchpo->mode & mode);
46674 + }
46675 +
46676 + check_parent:
46677 + curracl = current->acl;
46678 +
46679 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46680 + retval = matchpo->mode & mode;
46681 +
46682 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46683 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46684 + __u32 new_mode = mode;
46685 +
46686 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46687 +
46688 + gr_log_learn(new_dentry, mnt, new_mode);
46689 + preempt_enable();
46690 + return new_mode;
46691 + }
46692 +
46693 + preempt_enable();
46694 + return retval;
46695 +}
46696 +
46697 +int
46698 +gr_check_hidden_task(const struct task_struct *task)
46699 +{
46700 + if (unlikely(!(gr_status & GR_READY)))
46701 + return 0;
46702 +
46703 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46704 + return 1;
46705 +
46706 + return 0;
46707 +}
46708 +
46709 +int
46710 +gr_check_protected_task(const struct task_struct *task)
46711 +{
46712 + if (unlikely(!(gr_status & GR_READY) || !task))
46713 + return 0;
46714 +
46715 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46716 + task->acl != current->acl)
46717 + return 1;
46718 +
46719 + return 0;
46720 +}
46721 +
46722 +int
46723 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46724 +{
46725 + struct task_struct *p;
46726 + int ret = 0;
46727 +
46728 + if (unlikely(!(gr_status & GR_READY) || !pid))
46729 + return ret;
46730 +
46731 + read_lock(&tasklist_lock);
46732 + do_each_pid_task(pid, type, p) {
46733 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46734 + p->acl != current->acl) {
46735 + ret = 1;
46736 + goto out;
46737 + }
46738 + } while_each_pid_task(pid, type, p);
46739 +out:
46740 + read_unlock(&tasklist_lock);
46741 +
46742 + return ret;
46743 +}
46744 +
46745 +void
46746 +gr_copy_label(struct task_struct *tsk)
46747 +{
46748 + tsk->signal->used_accept = 0;
46749 + tsk->acl_sp_role = 0;
46750 + tsk->acl_role_id = current->acl_role_id;
46751 + tsk->acl = current->acl;
46752 + tsk->role = current->role;
46753 + tsk->signal->curr_ip = current->signal->curr_ip;
46754 + tsk->signal->saved_ip = current->signal->saved_ip;
46755 + if (current->exec_file)
46756 + get_file(current->exec_file);
46757 + tsk->exec_file = current->exec_file;
46758 + tsk->is_writable = current->is_writable;
46759 + if (unlikely(current->signal->used_accept)) {
46760 + current->signal->curr_ip = 0;
46761 + current->signal->saved_ip = 0;
46762 + }
46763 +
46764 + return;
46765 +}
46766 +
46767 +static void
46768 +gr_set_proc_res(struct task_struct *task)
46769 +{
46770 + struct acl_subject_label *proc;
46771 + unsigned short i;
46772 +
46773 + proc = task->acl;
46774 +
46775 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46776 + return;
46777 +
46778 + for (i = 0; i < RLIM_NLIMITS; i++) {
46779 + if (!(proc->resmask & (1 << i)))
46780 + continue;
46781 +
46782 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46783 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46784 + }
46785 +
46786 + return;
46787 +}
46788 +
46789 +extern int __gr_process_user_ban(struct user_struct *user);
46790 +
46791 +int
46792 +gr_check_user_change(int real, int effective, int fs)
46793 +{
46794 + unsigned int i;
46795 + __u16 num;
46796 + uid_t *uidlist;
46797 + int curuid;
46798 + int realok = 0;
46799 + int effectiveok = 0;
46800 + int fsok = 0;
46801 +
46802 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46803 + struct user_struct *user;
46804 +
46805 + if (real == -1)
46806 + goto skipit;
46807 +
46808 + user = find_user(real);
46809 + if (user == NULL)
46810 + goto skipit;
46811 +
46812 + if (__gr_process_user_ban(user)) {
46813 + /* for find_user */
46814 + free_uid(user);
46815 + return 1;
46816 + }
46817 +
46818 + /* for find_user */
46819 + free_uid(user);
46820 +
46821 +skipit:
46822 +#endif
46823 +
46824 + if (unlikely(!(gr_status & GR_READY)))
46825 + return 0;
46826 +
46827 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46828 + gr_log_learn_id_change('u', real, effective, fs);
46829 +
46830 + num = current->acl->user_trans_num;
46831 + uidlist = current->acl->user_transitions;
46832 +
46833 + if (uidlist == NULL)
46834 + return 0;
46835 +
46836 + if (real == -1)
46837 + realok = 1;
46838 + if (effective == -1)
46839 + effectiveok = 1;
46840 + if (fs == -1)
46841 + fsok = 1;
46842 +
46843 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
46844 + for (i = 0; i < num; i++) {
46845 + curuid = (int)uidlist[i];
46846 + if (real == curuid)
46847 + realok = 1;
46848 + if (effective == curuid)
46849 + effectiveok = 1;
46850 + if (fs == curuid)
46851 + fsok = 1;
46852 + }
46853 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
46854 + for (i = 0; i < num; i++) {
46855 + curuid = (int)uidlist[i];
46856 + if (real == curuid)
46857 + break;
46858 + if (effective == curuid)
46859 + break;
46860 + if (fs == curuid)
46861 + break;
46862 + }
46863 + /* not in deny list */
46864 + if (i == num) {
46865 + realok = 1;
46866 + effectiveok = 1;
46867 + fsok = 1;
46868 + }
46869 + }
46870 +
46871 + if (realok && effectiveok && fsok)
46872 + return 0;
46873 + else {
46874 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46875 + return 1;
46876 + }
46877 +}
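/*
 * For the uid/gid transition checks above and below: an id of -1 (the
 * conventional "leave unchanged" value) is always accepted.  With GR_ID_ALLOW
 * every id being changed must appear in the transition list; with GR_ID_DENY
 * the change is rejected as soon as any requested id appears in the list.
 */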
46878 +
46879 +int
46880 +gr_check_group_change(int real, int effective, int fs)
46881 +{
46882 + unsigned int i;
46883 + __u16 num;
46884 + gid_t *gidlist;
46885 + int curgid;
46886 + int realok = 0;
46887 + int effectiveok = 0;
46888 + int fsok = 0;
46889 +
46890 + if (unlikely(!(gr_status & GR_READY)))
46891 + return 0;
46892 +
46893 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46894 + gr_log_learn_id_change('g', real, effective, fs);
46895 +
46896 + num = current->acl->group_trans_num;
46897 + gidlist = current->acl->group_transitions;
46898 +
46899 + if (gidlist == NULL)
46900 + return 0;
46901 +
46902 + if (real == -1)
46903 + realok = 1;
46904 + if (effective == -1)
46905 + effectiveok = 1;
46906 + if (fs == -1)
46907 + fsok = 1;
46908 +
46909 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46910 + for (i = 0; i < num; i++) {
46911 + curgid = (int)gidlist[i];
46912 + if (real == curgid)
46913 + realok = 1;
46914 + if (effective == curgid)
46915 + effectiveok = 1;
46916 + if (fs == curgid)
46917 + fsok = 1;
46918 + }
46919 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46920 + for (i = 0; i < num; i++) {
46921 + curgid = (int)gidlist[i];
46922 + if (real == curgid)
46923 + break;
46924 + if (effective == curgid)
46925 + break;
46926 + if (fs == curgid)
46927 + break;
46928 + }
46929 + /* not in deny list */
46930 + if (i == num) {
46931 + realok = 1;
46932 + effectiveok = 1;
46933 + fsok = 1;
46934 + }
46935 + }
46936 +
46937 + if (realok && effectiveok && fsok)
46938 + return 0;
46939 + else {
46940 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46941 + return 1;
46942 + }
46943 +}
46944 +
46945 +void
46946 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46947 +{
46948 + struct acl_role_label *role = task->role;
46949 + struct acl_subject_label *subj = NULL;
46950 + struct acl_object_label *obj;
46951 + struct file *filp;
46952 +
46953 + if (unlikely(!(gr_status & GR_READY)))
46954 + return;
46955 +
46956 + filp = task->exec_file;
46957 +
46958 + /* kernel process, we'll give them the kernel role */
46959 + if (unlikely(!filp)) {
46960 + task->role = kernel_role;
46961 + task->acl = kernel_role->root_label;
46962 + return;
46963 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46964 + role = lookup_acl_role_label(task, uid, gid);
46965 +
46966 +	/* perform subject lookup in the possibly new role;
46967 +	   we can use this result below in the case where role == task->role
46968 +	   */
46969 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46970 +
46971 +	/* if we changed uid/gid but ended up in the same role
46972 +	   and are using inheritance, don't lose the inherited subject:
46973 +	   if the current subject differs from what a normal lookup
46974 +	   would return, we arrived at it via inheritance, so don't
46975 +	   lose that subject
46976 +	*/
46977 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46978 + (subj == task->acl)))
46979 + task->acl = subj;
46980 +
46981 + task->role = role;
46982 +
46983 + task->is_writable = 0;
46984 +
46985 + /* ignore additional mmap checks for processes that are writable
46986 + by the default ACL */
46987 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46988 + if (unlikely(obj->mode & GR_WRITE))
46989 + task->is_writable = 1;
46990 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46991 + if (unlikely(obj->mode & GR_WRITE))
46992 + task->is_writable = 1;
46993 +
46994 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46995 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46996 +#endif
46997 +
46998 + gr_set_proc_res(task);
46999 +
47000 + return;
47001 +}
47002 +
47003 +int
47004 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47005 + const int unsafe_share)
47006 +{
47007 + struct task_struct *task = current;
47008 + struct acl_subject_label *newacl;
47009 + struct acl_object_label *obj;
47010 + __u32 retmode;
47011 +
47012 + if (unlikely(!(gr_status & GR_READY)))
47013 + return 0;
47014 +
47015 + newacl = chk_subj_label(dentry, mnt, task->role);
47016 +
47017 + task_lock(task);
47018 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
47019 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
47020 + !(task->role->roletype & GR_ROLE_GOD) &&
47021 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
47022 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
47023 + task_unlock(task);
47024 + if (unsafe_share)
47025 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
47026 + else
47027 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
47028 + return -EACCES;
47029 + }
47030 + task_unlock(task);
47031 +
47032 + obj = chk_obj_label(dentry, mnt, task->acl);
47033 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
47034 +
47035 + if (!(task->acl->mode & GR_INHERITLEARN) &&
47036 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
47037 + if (obj->nested)
47038 + task->acl = obj->nested;
47039 + else
47040 + task->acl = newacl;
47041 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
47042 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
47043 +
47044 + task->is_writable = 0;
47045 +
47046 + /* ignore additional mmap checks for processes that are writable
47047 + by the default ACL */
47048 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
47049 + if (unlikely(obj->mode & GR_WRITE))
47050 + task->is_writable = 1;
47051 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
47052 + if (unlikely(obj->mode & GR_WRITE))
47053 + task->is_writable = 1;
47054 +
47055 + gr_set_proc_res(task);
47056 +
47057 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47058 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47059 +#endif
47060 + return 0;
47061 +}
47062 +
47063 +/* always called with valid inodev ptr */
47064 +static void
47065 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
47066 +{
47067 + struct acl_object_label *matchpo;
47068 + struct acl_subject_label *matchps;
47069 + struct acl_subject_label *subj;
47070 + struct acl_role_label *role;
47071 + unsigned int x;
47072 +
47073 + FOR_EACH_ROLE_START(role)
47074 + FOR_EACH_SUBJECT_START(role, subj, x)
47075 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
47076 + matchpo->mode |= GR_DELETED;
47077 + FOR_EACH_SUBJECT_END(subj,x)
47078 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
47079 + if (subj->inode == ino && subj->device == dev)
47080 + subj->mode |= GR_DELETED;
47081 + FOR_EACH_NESTED_SUBJECT_END(subj)
47082 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
47083 + matchps->mode |= GR_DELETED;
47084 + FOR_EACH_ROLE_END(role)
47085 +
47086 + inodev->nentry->deleted = 1;
47087 +
47088 + return;
47089 +}
47090 +
47091 +void
47092 +gr_handle_delete(const ino_t ino, const dev_t dev)
47093 +{
47094 + struct inodev_entry *inodev;
47095 +
47096 + if (unlikely(!(gr_status & GR_READY)))
47097 + return;
47098 +
47099 + write_lock(&gr_inode_lock);
47100 + inodev = lookup_inodev_entry(ino, dev);
47101 + if (inodev != NULL)
47102 + do_handle_delete(inodev, ino, dev);
47103 + write_unlock(&gr_inode_lock);
47104 +
47105 + return;
47106 +}
47107 +
47108 +static void
47109 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
47110 + const ino_t newinode, const dev_t newdevice,
47111 + struct acl_subject_label *subj)
47112 +{
47113 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
47114 + struct acl_object_label *match;
47115 +
47116 + match = subj->obj_hash[index];
47117 +
47118 + while (match && (match->inode != oldinode ||
47119 + match->device != olddevice ||
47120 + !(match->mode & GR_DELETED)))
47121 + match = match->next;
47122 +
47123 + if (match && (match->inode == oldinode)
47124 + && (match->device == olddevice)
47125 + && (match->mode & GR_DELETED)) {
47126 + if (match->prev == NULL) {
47127 + subj->obj_hash[index] = match->next;
47128 + if (match->next != NULL)
47129 + match->next->prev = NULL;
47130 + } else {
47131 + match->prev->next = match->next;
47132 + if (match->next != NULL)
47133 + match->next->prev = match->prev;
47134 + }
47135 + match->prev = NULL;
47136 + match->next = NULL;
47137 + match->inode = newinode;
47138 + match->device = newdevice;
47139 + match->mode &= ~GR_DELETED;
47140 +
47141 + insert_acl_obj_label(match, subj);
47142 + }
47143 +
47144 + return;
47145 +}
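/*
 * update_acl_obj_label() re-links a hash entry that was previously marked
 * GR_DELETED: it is unhooked from its chain, given the new inode/device pair,
 * has GR_DELETED cleared, and is reinserted so it hashes to the correct
 * bucket.  The two functions below apply the same pattern to subject labels
 * and to inodev entries.
 */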
47146 +
47147 +static void
47148 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
47149 + const ino_t newinode, const dev_t newdevice,
47150 + struct acl_role_label *role)
47151 +{
47152 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
47153 + struct acl_subject_label *match;
47154 +
47155 + match = role->subj_hash[index];
47156 +
47157 + while (match && (match->inode != oldinode ||
47158 + match->device != olddevice ||
47159 + !(match->mode & GR_DELETED)))
47160 + match = match->next;
47161 +
47162 + if (match && (match->inode == oldinode)
47163 + && (match->device == olddevice)
47164 + && (match->mode & GR_DELETED)) {
47165 + if (match->prev == NULL) {
47166 + role->subj_hash[index] = match->next;
47167 + if (match->next != NULL)
47168 + match->next->prev = NULL;
47169 + } else {
47170 + match->prev->next = match->next;
47171 + if (match->next != NULL)
47172 + match->next->prev = match->prev;
47173 + }
47174 + match->prev = NULL;
47175 + match->next = NULL;
47176 + match->inode = newinode;
47177 + match->device = newdevice;
47178 + match->mode &= ~GR_DELETED;
47179 +
47180 + insert_acl_subj_label(match, role);
47181 + }
47182 +
47183 + return;
47184 +}
47185 +
47186 +static void
47187 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
47188 + const ino_t newinode, const dev_t newdevice)
47189 +{
47190 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
47191 + struct inodev_entry *match;
47192 +
47193 + match = inodev_set.i_hash[index];
47194 +
47195 + while (match && (match->nentry->inode != oldinode ||
47196 + match->nentry->device != olddevice || !match->nentry->deleted))
47197 + match = match->next;
47198 +
47199 + if (match && (match->nentry->inode == oldinode)
47200 + && (match->nentry->device == olddevice) &&
47201 + match->nentry->deleted) {
47202 + if (match->prev == NULL) {
47203 + inodev_set.i_hash[index] = match->next;
47204 + if (match->next != NULL)
47205 + match->next->prev = NULL;
47206 + } else {
47207 + match->prev->next = match->next;
47208 + if (match->next != NULL)
47209 + match->next->prev = match->prev;
47210 + }
47211 + match->prev = NULL;
47212 + match->next = NULL;
47213 + match->nentry->inode = newinode;
47214 + match->nentry->device = newdevice;
47215 + match->nentry->deleted = 0;
47216 +
47217 + insert_inodev_entry(match);
47218 + }
47219 +
47220 + return;
47221 +}
47222 +
47223 +static void
47224 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
47225 + const struct vfsmount *mnt)
47226 +{
47227 + struct acl_subject_label *subj;
47228 + struct acl_role_label *role;
47229 + unsigned int x;
47230 + ino_t inode = dentry->d_inode->i_ino;
47231 + dev_t dev = __get_dev(dentry);
47232 +
47233 + FOR_EACH_ROLE_START(role)
47234 + update_acl_subj_label(matchn->inode, matchn->device,
47235 + inode, dev, role);
47236 +
47237 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
47238 + if ((subj->inode == inode) && (subj->device == dev)) {
47239 + subj->inode = inode;
47240 + subj->device = dev;
47241 + }
47242 + FOR_EACH_NESTED_SUBJECT_END(subj)
47243 + FOR_EACH_SUBJECT_START(role, subj, x)
47244 + update_acl_obj_label(matchn->inode, matchn->device,
47245 + inode, dev, subj);
47246 + FOR_EACH_SUBJECT_END(subj,x)
47247 + FOR_EACH_ROLE_END(role)
47248 +
47249 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
47250 +
47251 + return;
47252 +}
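/*
 * do_handle_create() is invoked when a path named in the loaded policy comes
 * into existence: for every role and subject, labels that were marked
 * GR_DELETED under the policy's recorded inode/device are re-attached to the
 * inode/device of the freshly created file.
 */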
47253 +
47254 +void
47255 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47256 +{
47257 + struct name_entry *matchn;
47258 +
47259 + if (unlikely(!(gr_status & GR_READY)))
47260 + return;
47261 +
47262 + preempt_disable();
47263 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
47264 +
47265 + if (unlikely((unsigned long)matchn)) {
47266 + write_lock(&gr_inode_lock);
47267 + do_handle_create(matchn, dentry, mnt);
47268 + write_unlock(&gr_inode_lock);
47269 + }
47270 + preempt_enable();
47271 +
47272 + return;
47273 +}
47274 +
47275 +void
47276 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47277 + struct dentry *old_dentry,
47278 + struct dentry *new_dentry,
47279 + struct vfsmount *mnt, const __u8 replace)
47280 +{
47281 + struct name_entry *matchn;
47282 + struct inodev_entry *inodev;
47283 + ino_t oldinode = old_dentry->d_inode->i_ino;
47284 + dev_t olddev = __get_dev(old_dentry);
47285 +
47286 +	/* vfs_rename swaps the name and parent link for old_dentry and
47287 +	   new_dentry.
47288 +	   at this point, old_dentry has the new name, parent link, and inode
47289 +	   for the renamed file.
47290 +	   if a file is being replaced by a rename, new_dentry has the inode
47291 +	   and name for the replaced file.
47292 +	*/
47293 +
47294 + if (unlikely(!(gr_status & GR_READY)))
47295 + return;
47296 +
47297 + preempt_disable();
47298 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
47299 +
47300 + /* we wouldn't have to check d_inode if it weren't for
47301 + NFS silly-renaming
47302 + */
47303 +
47304 + write_lock(&gr_inode_lock);
47305 + if (unlikely(replace && new_dentry->d_inode)) {
47306 + ino_t newinode = new_dentry->d_inode->i_ino;
47307 + dev_t newdev = __get_dev(new_dentry);
47308 + inodev = lookup_inodev_entry(newinode, newdev);
47309 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
47310 + do_handle_delete(inodev, newinode, newdev);
47311 + }
47312 +
47313 + inodev = lookup_inodev_entry(oldinode, olddev);
47314 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
47315 + do_handle_delete(inodev, oldinode, olddev);
47316 +
47317 + if (unlikely((unsigned long)matchn))
47318 + do_handle_create(matchn, old_dentry, mnt);
47319 +
47320 + write_unlock(&gr_inode_lock);
47321 + preempt_enable();
47322 +
47323 + return;
47324 +}
47325 +
47326 +static int
47327 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
47328 + unsigned char **sum)
47329 +{
47330 + struct acl_role_label *r;
47331 + struct role_allowed_ip *ipp;
47332 + struct role_transition *trans;
47333 + unsigned int i;
47334 + int found = 0;
47335 + u32 curr_ip = current->signal->curr_ip;
47336 +
47337 + current->signal->saved_ip = curr_ip;
47338 +
47339 + /* check transition table */
47340 +
47341 + for (trans = current->role->transitions; trans; trans = trans->next) {
47342 + if (!strcmp(rolename, trans->rolename)) {
47343 + found = 1;
47344 + break;
47345 + }
47346 + }
47347 +
47348 + if (!found)
47349 + return 0;
47350 +
47351 + /* handle special roles that do not require authentication
47352 + and check ip */
47353 +
47354 + FOR_EACH_ROLE_START(r)
47355 + if (!strcmp(rolename, r->rolename) &&
47356 + (r->roletype & GR_ROLE_SPECIAL)) {
47357 + found = 0;
47358 + if (r->allowed_ips != NULL) {
47359 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
47360 + if ((ntohl(curr_ip) & ipp->netmask) ==
47361 + (ntohl(ipp->addr) & ipp->netmask))
47362 + found = 1;
47363 + }
47364 + } else
47365 + found = 2;
47366 + if (!found)
47367 + return 0;
47368 +
47369 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
47370 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
47371 + *salt = NULL;
47372 + *sum = NULL;
47373 + return 1;
47374 + }
47375 + }
47376 + FOR_EACH_ROLE_END(r)
47377 +
47378 + for (i = 0; i < num_sprole_pws; i++) {
47379 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
47380 + *salt = acl_special_roles[i]->salt;
47381 + *sum = acl_special_roles[i]->sum;
47382 + return 1;
47383 + }
47384 + }
47385 +
47386 + return 0;
47387 +}
47388 +
47389 +static void
47390 +assign_special_role(char *rolename)
47391 +{
47392 + struct acl_object_label *obj;
47393 + struct acl_role_label *r;
47394 + struct acl_role_label *assigned = NULL;
47395 + struct task_struct *tsk;
47396 + struct file *filp;
47397 +
47398 + FOR_EACH_ROLE_START(r)
47399 + if (!strcmp(rolename, r->rolename) &&
47400 + (r->roletype & GR_ROLE_SPECIAL)) {
47401 + assigned = r;
47402 + break;
47403 + }
47404 + FOR_EACH_ROLE_END(r)
47405 +
47406 + if (!assigned)
47407 + return;
47408 +
47409 + read_lock(&tasklist_lock);
47410 + read_lock(&grsec_exec_file_lock);
47411 +
47412 + tsk = current->real_parent;
47413 + if (tsk == NULL)
47414 + goto out_unlock;
47415 +
47416 + filp = tsk->exec_file;
47417 + if (filp == NULL)
47418 + goto out_unlock;
47419 +
47420 + tsk->is_writable = 0;
47421 +
47422 + tsk->acl_sp_role = 1;
47423 + tsk->acl_role_id = ++acl_sp_role_value;
47424 + tsk->role = assigned;
47425 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47426 +
47427 + /* ignore additional mmap checks for processes that are writable
47428 + by the default ACL */
47429 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47430 + if (unlikely(obj->mode & GR_WRITE))
47431 + tsk->is_writable = 1;
47432 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47433 + if (unlikely(obj->mode & GR_WRITE))
47434 + tsk->is_writable = 1;
47435 +
47436 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47437 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47438 +#endif
47439 +
47440 +out_unlock:
47441 + read_unlock(&grsec_exec_file_lock);
47442 + read_unlock(&tasklist_lock);
47443 + return;
47444 +}
47445 +
47446 +int gr_check_secure_terminal(struct task_struct *task)
47447 +{
47448 + struct task_struct *p, *p2, *p3;
47449 + struct files_struct *files;
47450 + struct fdtable *fdt;
47451 + struct file *our_file = NULL, *file;
47452 + int i;
47453 +
47454 + if (task->signal->tty == NULL)
47455 + return 1;
47456 +
47457 + files = get_files_struct(task);
47458 + if (files != NULL) {
47459 + rcu_read_lock();
47460 + fdt = files_fdtable(files);
47461 + for (i=0; i < fdt->max_fds; i++) {
47462 + file = fcheck_files(files, i);
47463 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47464 + get_file(file);
47465 + our_file = file;
47466 + }
47467 + }
47468 + rcu_read_unlock();
47469 + put_files_struct(files);
47470 + }
47471 +
47472 + if (our_file == NULL)
47473 + return 1;
47474 +
47475 + read_lock(&tasklist_lock);
47476 + do_each_thread(p2, p) {
47477 + files = get_files_struct(p);
47478 + if (files == NULL ||
47479 + (p->signal && p->signal->tty == task->signal->tty)) {
47480 + if (files != NULL)
47481 + put_files_struct(files);
47482 + continue;
47483 + }
47484 + rcu_read_lock();
47485 + fdt = files_fdtable(files);
47486 + for (i=0; i < fdt->max_fds; i++) {
47487 + file = fcheck_files(files, i);
47488 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47489 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47490 + p3 = task;
47491 + while (p3->pid > 0) {
47492 + if (p3 == p)
47493 + break;
47494 + p3 = p3->real_parent;
47495 + }
47496 + if (p3 == p)
47497 + break;
47498 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47499 + gr_handle_alertkill(p);
47500 + rcu_read_unlock();
47501 + put_files_struct(files);
47502 + read_unlock(&tasklist_lock);
47503 + fput(our_file);
47504 + return 0;
47505 + }
47506 + }
47507 + rcu_read_unlock();
47508 + put_files_struct(files);
47509 + } while_each_thread(p2, p);
47510 + read_unlock(&tasklist_lock);
47511 +
47512 + fput(our_file);
47513 + return 1;
47514 +}
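/*
 * gr_check_secure_terminal() scans for a process that does not share the
 * task's controlling tty, is not an ancestor of the task, and yet holds an
 * open file on the same character device as that tty; such a process is
 * reported via gr_log_ttysniff()/gr_handle_alertkill() and 0 is returned to
 * mark the terminal as not secure.
 */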
47515 +
47516 +ssize_t
47517 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47518 +{
47519 + struct gr_arg_wrapper uwrap;
47520 + unsigned char *sprole_salt = NULL;
47521 + unsigned char *sprole_sum = NULL;
47522 + int error = sizeof (struct gr_arg_wrapper);
47523 + int error2 = 0;
47524 +
47525 + mutex_lock(&gr_dev_mutex);
47526 +
47527 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47528 + error = -EPERM;
47529 + goto out;
47530 + }
47531 +
47532 + if (count != sizeof (struct gr_arg_wrapper)) {
47533 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47534 + error = -EINVAL;
47535 + goto out;
47536 + }
47537 +
47538 +
47539 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47540 + gr_auth_expires = 0;
47541 + gr_auth_attempts = 0;
47542 + }
47543 +
47544 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47545 + error = -EFAULT;
47546 + goto out;
47547 + }
47548 +
47549 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47550 + error = -EINVAL;
47551 + goto out;
47552 + }
47553 +
47554 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47555 + error = -EFAULT;
47556 + goto out;
47557 + }
47558 +
47559 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47560 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47561 + time_after(gr_auth_expires, get_seconds())) {
47562 + error = -EBUSY;
47563 + goto out;
47564 + }
47565 +
47566 +	/* if a non-root user is trying to do anything other than use a special role,
47567 +	   do not attempt authentication and do not count the attempt towards
47568 +	   authentication lockout
47569 +	*/
47570 +
47571 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47572 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47573 + current_uid()) {
47574 + error = -EPERM;
47575 + goto out;
47576 + }
47577 +
47578 + /* ensure pw and special role name are null terminated */
47579 +
47580 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47581 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47582 +
47583 +	/* Okay.
47584 +	 * We have enough of the argument structure (we have yet
47585 +	 * to copy_from_user the tables themselves). Copy the tables
47586 +	 * only if we need them, i.e. for loading operations. */
47587 +
47588 + switch (gr_usermode->mode) {
47589 + case GR_STATUS:
47590 + if (gr_status & GR_READY) {
47591 + error = 1;
47592 + if (!gr_check_secure_terminal(current))
47593 + error = 3;
47594 + } else
47595 + error = 2;
47596 + goto out;
47597 + case GR_SHUTDOWN:
47598 + if ((gr_status & GR_READY)
47599 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47600 + pax_open_kernel();
47601 + gr_status &= ~GR_READY;
47602 + pax_close_kernel();
47603 +
47604 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47605 + free_variables();
47606 + memset(gr_usermode, 0, sizeof (struct gr_arg));
47607 + memset(gr_system_salt, 0, GR_SALT_LEN);
47608 + memset(gr_system_sum, 0, GR_SHA_LEN);
47609 + } else if (gr_status & GR_READY) {
47610 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47611 + error = -EPERM;
47612 + } else {
47613 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47614 + error = -EAGAIN;
47615 + }
47616 + break;
47617 + case GR_ENABLE:
47618 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47619 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47620 + else {
47621 + if (gr_status & GR_READY)
47622 + error = -EAGAIN;
47623 + else
47624 + error = error2;
47625 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47626 + }
47627 + break;
47628 + case GR_RELOAD:
47629 + if (!(gr_status & GR_READY)) {
47630 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47631 + error = -EAGAIN;
47632 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47633 + lock_kernel();
47634 +
47635 + pax_open_kernel();
47636 + gr_status &= ~GR_READY;
47637 + pax_close_kernel();
47638 +
47639 + free_variables();
47640 + if (!(error2 = gracl_init(gr_usermode))) {
47641 + unlock_kernel();
47642 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47643 + } else {
47644 + unlock_kernel();
47645 + error = error2;
47646 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47647 + }
47648 + } else {
47649 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47650 + error = -EPERM;
47651 + }
47652 + break;
47653 + case GR_SEGVMOD:
47654 + if (unlikely(!(gr_status & GR_READY))) {
47655 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47656 + error = -EAGAIN;
47657 + break;
47658 + }
47659 +
47660 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47661 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47662 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47663 + struct acl_subject_label *segvacl;
47664 + segvacl =
47665 + lookup_acl_subj_label(gr_usermode->segv_inode,
47666 + gr_usermode->segv_device,
47667 + current->role);
47668 + if (segvacl) {
47669 + segvacl->crashes = 0;
47670 + segvacl->expires = 0;
47671 + }
47672 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47673 + gr_remove_uid(gr_usermode->segv_uid);
47674 + }
47675 + } else {
47676 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47677 + error = -EPERM;
47678 + }
47679 + break;
47680 + case GR_SPROLE:
47681 + case GR_SPROLEPAM:
47682 + if (unlikely(!(gr_status & GR_READY))) {
47683 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47684 + error = -EAGAIN;
47685 + break;
47686 + }
47687 +
47688 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47689 + current->role->expires = 0;
47690 + current->role->auth_attempts = 0;
47691 + }
47692 +
47693 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47694 + time_after(current->role->expires, get_seconds())) {
47695 + error = -EBUSY;
47696 + goto out;
47697 + }
47698 +
47699 + if (lookup_special_role_auth
47700 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47701 + && ((!sprole_salt && !sprole_sum)
47702 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47703 + char *p = "";
47704 + assign_special_role(gr_usermode->sp_role);
47705 + read_lock(&tasklist_lock);
47706 + if (current->real_parent)
47707 + p = current->real_parent->role->rolename;
47708 + read_unlock(&tasklist_lock);
47709 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47710 + p, acl_sp_role_value);
47711 + } else {
47712 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47713 + error = -EPERM;
47714 + if(!(current->role->auth_attempts++))
47715 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47716 +
47717 + goto out;
47718 + }
47719 + break;
47720 + case GR_UNSPROLE:
47721 + if (unlikely(!(gr_status & GR_READY))) {
47722 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47723 + error = -EAGAIN;
47724 + break;
47725 + }
47726 +
47727 + if (current->role->roletype & GR_ROLE_SPECIAL) {
47728 + char *p = "";
47729 + int i = 0;
47730 +
47731 + read_lock(&tasklist_lock);
47732 + if (current->real_parent) {
47733 + p = current->real_parent->role->rolename;
47734 + i = current->real_parent->acl_role_id;
47735 + }
47736 + read_unlock(&tasklist_lock);
47737 +
47738 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47739 + gr_set_acls(1);
47740 + } else {
47741 + error = -EPERM;
47742 + goto out;
47743 + }
47744 + break;
47745 + default:
47746 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47747 + error = -EINVAL;
47748 + break;
47749 + }
47750 +
47751 + if (error != -EPERM)
47752 + goto out;
47753 +
47754 + if(!(gr_auth_attempts++))
47755 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47756 +
47757 + out:
47758 + mutex_unlock(&gr_dev_mutex);
47759 + return error;
47760 +}
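/*
 * Authentication throttling in write_grsec_handler(): attempts that fail with
 * -EPERM increment gr_auth_attempts (or the per-role counter for the special
 * role cases), and once CONFIG_GRKERNSEC_ACL_MAXTRIES is reached further
 * requests are refused with -EBUSY until the expiry time recorded at the
 * first failure (get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT) has passed.
 */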
47761 +
47762 +/* must be called with
47763 + rcu_read_lock();
47764 + read_lock(&tasklist_lock);
47765 + read_lock(&grsec_exec_file_lock);
47766 +*/
47767 +int gr_apply_subject_to_task(struct task_struct *task)
47768 +{
47769 + struct acl_object_label *obj;
47770 + char *tmpname;
47771 + struct acl_subject_label *tmpsubj;
47772 + struct file *filp;
47773 + struct name_entry *nmatch;
47774 +
47775 + filp = task->exec_file;
47776 + if (filp == NULL)
47777 + return 0;
47778 +
47779 +	/* the following applies the correct subject
47780 +	   to binaries that were already running when the RBAC
47781 +	   system was enabled and that have been replaced or
47782 +	   deleted since they were executed
47783 +	   -----
47784 +	   when the RBAC system starts, the inode/dev taken
47785 +	   from exec_file will be one the RBAC system
47786 +	   is unaware of; it only knows the inode/dev
47787 +	   of the file currently present on disk, or the
47788 +	   absence of it.
47789 +	*/
47790 + preempt_disable();
47791 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47792 +
47793 + nmatch = lookup_name_entry(tmpname);
47794 + preempt_enable();
47795 + tmpsubj = NULL;
47796 + if (nmatch) {
47797 + if (nmatch->deleted)
47798 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47799 + else
47800 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47801 + if (tmpsubj != NULL)
47802 + task->acl = tmpsubj;
47803 + }
47804 + if (tmpsubj == NULL)
47805 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47806 + task->role);
47807 + if (task->acl) {
47808 + task->is_writable = 0;
47809 + /* ignore additional mmap checks for processes that are writable
47810 + by the default ACL */
47811 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47812 + if (unlikely(obj->mode & GR_WRITE))
47813 + task->is_writable = 1;
47814 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47815 + if (unlikely(obj->mode & GR_WRITE))
47816 + task->is_writable = 1;
47817 +
47818 + gr_set_proc_res(task);
47819 +
47820 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47821 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47822 +#endif
47823 + } else {
47824 + return 1;
47825 + }
47826 +
47827 + return 0;
47828 +}
47829 +
47830 +int
47831 +gr_set_acls(const int type)
47832 +{
47833 + struct task_struct *task, *task2;
47834 + struct acl_role_label *role = current->role;
47835 + __u16 acl_role_id = current->acl_role_id;
47836 + const struct cred *cred;
47837 + int ret;
47838 +
47839 + rcu_read_lock();
47840 + read_lock(&tasklist_lock);
47841 + read_lock(&grsec_exec_file_lock);
47842 + do_each_thread(task2, task) {
47843 + /* check to see if we're called from the exit handler,
47844 + if so, only replace ACLs that have inherited the admin
47845 + ACL */
47846 +
47847 + if (type && (task->role != role ||
47848 + task->acl_role_id != acl_role_id))
47849 + continue;
47850 +
47851 + task->acl_role_id = 0;
47852 + task->acl_sp_role = 0;
47853 +
47854 + if (task->exec_file) {
47855 + cred = __task_cred(task);
47856 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47857 +
47858 + ret = gr_apply_subject_to_task(task);
47859 + if (ret) {
47860 + read_unlock(&grsec_exec_file_lock);
47861 + read_unlock(&tasklist_lock);
47862 + rcu_read_unlock();
47863 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47864 + return ret;
47865 + }
47866 + } else {
47867 + // it's a kernel process
47868 + task->role = kernel_role;
47869 + task->acl = kernel_role->root_label;
47870 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47871 + task->acl->mode &= ~GR_PROCFIND;
47872 +#endif
47873 + }
47874 + } while_each_thread(task2, task);
47875 + read_unlock(&grsec_exec_file_lock);
47876 + read_unlock(&tasklist_lock);
47877 + rcu_read_unlock();
47878 +
47879 + return 0;
47880 +}
47881 +
47882 +void
47883 +gr_learn_resource(const struct task_struct *task,
47884 + const int res, const unsigned long wanted, const int gt)
47885 +{
47886 + struct acl_subject_label *acl;
47887 + const struct cred *cred;
47888 +
47889 + if (unlikely((gr_status & GR_READY) &&
47890 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47891 + goto skip_reslog;
47892 +
47893 +#ifdef CONFIG_GRKERNSEC_RESLOG
47894 + gr_log_resource(task, res, wanted, gt);
47895 +#endif
47896 + skip_reslog:
47897 +
47898 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47899 + return;
47900 +
47901 + acl = task->acl;
47902 +
47903 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47904 + !(acl->resmask & (1 << (unsigned short) res))))
47905 + return;
47906 +
47907 + if (wanted >= acl->res[res].rlim_cur) {
47908 + unsigned long res_add;
47909 +
47910 + res_add = wanted;
47911 + switch (res) {
47912 + case RLIMIT_CPU:
47913 + res_add += GR_RLIM_CPU_BUMP;
47914 + break;
47915 + case RLIMIT_FSIZE:
47916 + res_add += GR_RLIM_FSIZE_BUMP;
47917 + break;
47918 + case RLIMIT_DATA:
47919 + res_add += GR_RLIM_DATA_BUMP;
47920 + break;
47921 + case RLIMIT_STACK:
47922 + res_add += GR_RLIM_STACK_BUMP;
47923 + break;
47924 + case RLIMIT_CORE:
47925 + res_add += GR_RLIM_CORE_BUMP;
47926 + break;
47927 + case RLIMIT_RSS:
47928 + res_add += GR_RLIM_RSS_BUMP;
47929 + break;
47930 + case RLIMIT_NPROC:
47931 + res_add += GR_RLIM_NPROC_BUMP;
47932 + break;
47933 + case RLIMIT_NOFILE:
47934 + res_add += GR_RLIM_NOFILE_BUMP;
47935 + break;
47936 + case RLIMIT_MEMLOCK:
47937 + res_add += GR_RLIM_MEMLOCK_BUMP;
47938 + break;
47939 + case RLIMIT_AS:
47940 + res_add += GR_RLIM_AS_BUMP;
47941 + break;
47942 + case RLIMIT_LOCKS:
47943 + res_add += GR_RLIM_LOCKS_BUMP;
47944 + break;
47945 + case RLIMIT_SIGPENDING:
47946 + res_add += GR_RLIM_SIGPENDING_BUMP;
47947 + break;
47948 + case RLIMIT_MSGQUEUE:
47949 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47950 + break;
47951 + case RLIMIT_NICE:
47952 + res_add += GR_RLIM_NICE_BUMP;
47953 + break;
47954 + case RLIMIT_RTPRIO:
47955 + res_add += GR_RLIM_RTPRIO_BUMP;
47956 + break;
47957 + case RLIMIT_RTTIME:
47958 + res_add += GR_RLIM_RTTIME_BUMP;
47959 + break;
47960 + }
47961 +
47962 + acl->res[res].rlim_cur = res_add;
47963 +
47964 + if (wanted > acl->res[res].rlim_max)
47965 + acl->res[res].rlim_max = res_add;
47966 +
47967 + /* only log the subject filename, since resource logging is supported for
47968 + single-subject learning only */
47969 + rcu_read_lock();
47970 + cred = __task_cred(task);
47971 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47972 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47973 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47974 + "", (unsigned long) res, &task->signal->saved_ip);
47975 + rcu_read_unlock();
47976 + }
47977 +
47978 + return;
47979 +}
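/*
 * In learning mode, gr_learn_resource() raises the subject's soft limit to the
 * wanted value plus a per-resource GR_RLIM_*_BUMP so the learned policy keeps
 * some headroom, raises the hard limit when necessary, and records the new
 * values through security_learn() for the learning logs.
 */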
47980 +
47981 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47982 +void
47983 +pax_set_initial_flags(struct linux_binprm *bprm)
47984 +{
47985 + struct task_struct *task = current;
47986 + struct acl_subject_label *proc;
47987 + unsigned long flags;
47988 +
47989 + if (unlikely(!(gr_status & GR_READY)))
47990 + return;
47991 +
47992 + flags = pax_get_flags(task);
47993 +
47994 + proc = task->acl;
47995 +
47996 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47997 + flags &= ~MF_PAX_PAGEEXEC;
47998 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47999 + flags &= ~MF_PAX_SEGMEXEC;
48000 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
48001 + flags &= ~MF_PAX_RANDMMAP;
48002 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
48003 + flags &= ~MF_PAX_EMUTRAMP;
48004 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
48005 + flags &= ~MF_PAX_MPROTECT;
48006 +
48007 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
48008 + flags |= MF_PAX_PAGEEXEC;
48009 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
48010 + flags |= MF_PAX_SEGMEXEC;
48011 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
48012 + flags |= MF_PAX_RANDMMAP;
48013 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
48014 + flags |= MF_PAX_EMUTRAMP;
48015 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
48016 + flags |= MF_PAX_MPROTECT;
48017 +
48018 + pax_set_flags(task, flags);
48019 +
48020 + return;
48021 +}
48022 +#endif
48023 +
48024 +#ifdef CONFIG_SYSCTL
48025 +/* Eric Biederman likes breaking userland ABI and every inode-based security
48026 + system to save 35kb of memory */
48027 +
48028 +/* we modify the passed in filename, but adjust it back before returning */
48029 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
48030 +{
48031 + struct name_entry *nmatch;
48032 + char *p, *lastp = NULL;
48033 + struct acl_object_label *obj = NULL, *tmp;
48034 + struct acl_subject_label *tmpsubj;
48035 + char c = '\0';
48036 +
48037 + read_lock(&gr_inode_lock);
48038 +
48039 + p = name + len - 1;
48040 + do {
48041 + nmatch = lookup_name_entry(name);
48042 + if (lastp != NULL)
48043 + *lastp = c;
48044 +
48045 + if (nmatch == NULL)
48046 + goto next_component;
48047 + tmpsubj = current->acl;
48048 + do {
48049 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
48050 + if (obj != NULL) {
48051 + tmp = obj->globbed;
48052 + while (tmp) {
48053 + if (!glob_match(tmp->filename, name)) {
48054 + obj = tmp;
48055 + goto found_obj;
48056 + }
48057 + tmp = tmp->next;
48058 + }
48059 + goto found_obj;
48060 + }
48061 + } while ((tmpsubj = tmpsubj->parent_subject));
48062 +next_component:
48063 + /* end case */
48064 + if (p == name)
48065 + break;
48066 +
48067 + while (*p != '/')
48068 + p--;
48069 + if (p == name)
48070 + lastp = p + 1;
48071 + else {
48072 + lastp = p;
48073 + p--;
48074 + }
48075 + c = *lastp;
48076 + *lastp = '\0';
48077 + } while (1);
48078 +found_obj:
48079 + read_unlock(&gr_inode_lock);
48080 + /* obj returned will always be non-null */
48081 + return obj;
48082 +}
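/*
 * gr_lookup_by_name() starts from the full pathname and repeatedly truncates
 * it at the last '/', looking up each prefix in the current subject (and its
 * parent subjects) until an object is found; globbed children of a matching
 * object are checked against the full name first.  The result is the most
 * specific object label covering the given path.
 */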
48083 +
48084 +/* returns 0 when allowing, non-zero on error
48085 + op of 0 is used for readdir, so we don't log the names of hidden files
48086 +*/
48087 +__u32
48088 +gr_handle_sysctl(const struct ctl_table *table, const int op)
48089 +{
48090 + ctl_table *tmp;
48091 + const char *proc_sys = "/proc/sys";
48092 + char *path;
48093 + struct acl_object_label *obj;
48094 + unsigned short len = 0, pos = 0, depth = 0, i;
48095 + __u32 err = 0;
48096 + __u32 mode = 0;
48097 +
48098 + if (unlikely(!(gr_status & GR_READY)))
48099 + return 0;
48100 +
48101 + /* for now, ignore operations on non-sysctl entries if it's not a
48102 +	   readdir */
48103 + if (table->child != NULL && op != 0)
48104 + return 0;
48105 +
48106 + mode |= GR_FIND;
48107 + /* it's only a read if it's an entry, read on dirs is for readdir */
48108 + if (op & MAY_READ)
48109 + mode |= GR_READ;
48110 + if (op & MAY_WRITE)
48111 + mode |= GR_WRITE;
48112 +
48113 + preempt_disable();
48114 +
48115 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48116 +
48117 + /* it's only a read/write if it's an actual entry, not a dir
48118 + (which are opened for readdir)
48119 + */
48120 +
48121 + /* convert the requested sysctl entry into a pathname */
48122 +
48123 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48124 + len += strlen(tmp->procname);
48125 + len++;
48126 + depth++;
48127 + }
48128 +
48129 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
48130 + /* deny */
48131 + goto out;
48132 + }
48133 +
48134 + memset(path, 0, PAGE_SIZE);
48135 +
48136 + memcpy(path, proc_sys, strlen(proc_sys));
48137 +
48138 + pos += strlen(proc_sys);
48139 +
48140 + for (; depth > 0; depth--) {
48141 + path[pos] = '/';
48142 + pos++;
48143 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48144 + if (depth == i) {
48145 + memcpy(path + pos, tmp->procname,
48146 + strlen(tmp->procname));
48147 + pos += strlen(tmp->procname);
48148 + }
48149 + i++;
48150 + }
48151 + }
48152 +
48153 + obj = gr_lookup_by_name(path, pos);
48154 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
48155 +
48156 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
48157 + ((err & mode) != mode))) {
48158 + __u32 new_mode = mode;
48159 +
48160 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48161 +
48162 + err = 0;
48163 + gr_log_learn_sysctl(path, new_mode);
48164 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
48165 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
48166 + err = -ENOENT;
48167 + } else if (!(err & GR_FIND)) {
48168 + err = -ENOENT;
48169 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
48170 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
48171 + path, (mode & GR_READ) ? " reading" : "",
48172 + (mode & GR_WRITE) ? " writing" : "");
48173 + err = -EACCES;
48174 + } else if ((err & mode) != mode) {
48175 + err = -EACCES;
48176 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
48177 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
48178 + path, (mode & GR_READ) ? " reading" : "",
48179 + (mode & GR_WRITE) ? " writing" : "");
48180 + err = 0;
48181 + } else
48182 + err = 0;
48183 +
48184 + out:
48185 + preempt_enable();
48186 +
48187 + return err;
48188 +}
48189 +#endif
48190 +
48191 +int
48192 +gr_handle_proc_ptrace(struct task_struct *task)
48193 +{
48194 + struct file *filp;
48195 + struct task_struct *tmp = task;
48196 + struct task_struct *curtemp = current;
48197 + __u32 retmode;
48198 +
48199 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48200 + if (unlikely(!(gr_status & GR_READY)))
48201 + return 0;
48202 +#endif
48203 +
48204 + read_lock(&tasklist_lock);
48205 + read_lock(&grsec_exec_file_lock);
48206 + filp = task->exec_file;
48207 +
48208 + while (tmp->pid > 0) {
48209 + if (tmp == curtemp)
48210 + break;
48211 + tmp = tmp->real_parent;
48212 + }
48213 +
48214 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48215 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
48216 + read_unlock(&grsec_exec_file_lock);
48217 + read_unlock(&tasklist_lock);
48218 + return 1;
48219 + }
48220 +
48221 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48222 + if (!(gr_status & GR_READY)) {
48223 + read_unlock(&grsec_exec_file_lock);
48224 + read_unlock(&tasklist_lock);
48225 + return 0;
48226 + }
48227 +#endif
48228 +
48229 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
48230 + read_unlock(&grsec_exec_file_lock);
48231 + read_unlock(&tasklist_lock);
48232 +
48233 + if (retmode & GR_NOPTRACE)
48234 + return 1;
48235 +
48236 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
48237 + && (current->acl != task->acl || (current->acl != current->role->root_label
48238 + && current->pid != task->pid)))
48239 + return 1;
48240 +
48241 + return 0;
48242 +}
48243 +
48244 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
48245 +{
48246 + if (unlikely(!(gr_status & GR_READY)))
48247 + return;
48248 +
48249 + if (!(current->role->roletype & GR_ROLE_GOD))
48250 + return;
48251 +
48252 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
48253 + p->role->rolename, gr_task_roletype_to_char(p),
48254 + p->acl->filename);
48255 +}
48256 +
48257 +int
48258 +gr_handle_ptrace(struct task_struct *task, const long request)
48259 +{
48260 + struct task_struct *tmp = task;
48261 + struct task_struct *curtemp = current;
48262 + __u32 retmode;
48263 +
48264 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48265 + if (unlikely(!(gr_status & GR_READY)))
48266 + return 0;
48267 +#endif
48268 +
48269 + read_lock(&tasklist_lock);
48270 + while (tmp->pid > 0) {
48271 + if (tmp == curtemp)
48272 + break;
48273 + tmp = tmp->real_parent;
48274 + }
48275 +
48276 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48277 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
48278 + read_unlock(&tasklist_lock);
48279 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48280 + return 1;
48281 + }
48282 + read_unlock(&tasklist_lock);
48283 +
48284 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48285 + if (!(gr_status & GR_READY))
48286 + return 0;
48287 +#endif
48288 +
48289 + read_lock(&grsec_exec_file_lock);
48290 + if (unlikely(!task->exec_file)) {
48291 + read_unlock(&grsec_exec_file_lock);
48292 + return 0;
48293 + }
48294 +
48295 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
48296 + read_unlock(&grsec_exec_file_lock);
48297 +
48298 + if (retmode & GR_NOPTRACE) {
48299 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48300 + return 1;
48301 + }
48302 +
48303 + if (retmode & GR_PTRACERD) {
48304 + switch (request) {
48305 + case PTRACE_POKETEXT:
48306 + case PTRACE_POKEDATA:
48307 + case PTRACE_POKEUSR:
48308 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
48309 + case PTRACE_SETREGS:
48310 + case PTRACE_SETFPREGS:
48311 +#endif
48312 +#ifdef CONFIG_X86
48313 + case PTRACE_SETFPXREGS:
48314 +#endif
48315 +#ifdef CONFIG_ALTIVEC
48316 + case PTRACE_SETVRREGS:
48317 +#endif
48318 + return 1;
48319 + default:
48320 + return 0;
48321 + }
48322 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
48323 + !(current->role->roletype & GR_ROLE_GOD) &&
48324 + (current->acl != task->acl)) {
48325 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48326 + return 1;
48327 + }
48328 +
48329 + return 0;
48330 +}
48331 +
48332 +static int is_writable_mmap(const struct file *filp)
48333 +{
48334 + struct task_struct *task = current;
48335 + struct acl_object_label *obj, *obj2;
48336 +
48337 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
48338 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
48339 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48340 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
48341 + task->role->root_label);
48342 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
48343 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
48344 + return 1;
48345 + }
48346 + }
48347 + return 0;
48348 +}
48349 +
48350 +int
48351 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
48352 +{
48353 + __u32 mode;
48354 +
48355 + if (unlikely(!file || !(prot & PROT_EXEC)))
48356 + return 1;
48357 +
48358 + if (is_writable_mmap(file))
48359 + return 0;
48360 +
48361 + mode =
48362 + gr_search_file(file->f_path.dentry,
48363 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48364 + file->f_path.mnt);
48365 +
48366 + if (!gr_tpe_allow(file))
48367 + return 0;
48368 +
48369 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48370 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48371 + return 0;
48372 + } else if (unlikely(!(mode & GR_EXEC))) {
48373 + return 0;
48374 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48375 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48376 + return 1;
48377 + }
48378 +
48379 + return 1;
48380 +}
48381 +
48382 +int
48383 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
48384 +{
48385 + __u32 mode;
48386 +
48387 + if (unlikely(!file || !(prot & PROT_EXEC)))
48388 + return 1;
48389 +
48390 + if (is_writable_mmap(file))
48391 + return 0;
48392 +
48393 + mode =
48394 + gr_search_file(file->f_path.dentry,
48395 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48396 + file->f_path.mnt);
48397 +
48398 + if (!gr_tpe_allow(file))
48399 + return 0;
48400 +
48401 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48402 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48403 + return 0;
48404 + } else if (unlikely(!(mode & GR_EXEC))) {
48405 + return 0;
48406 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48407 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48408 + return 1;
48409 + }
48410 +
48411 + return 1;
48412 +}
48413 +
48414 +void
48415 +gr_acl_handle_psacct(struct task_struct *task, const long code)
48416 +{
48417 + unsigned long runtime;
48418 + unsigned long cputime;
48419 + unsigned int wday, cday;
48420 + __u8 whr, chr;
48421 + __u8 wmin, cmin;
48422 + __u8 wsec, csec;
48423 + struct timespec timeval;
48424 +
48425 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48426 + !(task->acl->mode & GR_PROCACCT)))
48427 + return;
48428 +
48429 + do_posix_clock_monotonic_gettime(&timeval);
48430 + runtime = timeval.tv_sec - task->start_time.tv_sec;
48431 + wday = runtime / (3600 * 24);
48432 + runtime -= wday * (3600 * 24);
48433 + whr = runtime / 3600;
48434 + runtime -= whr * 3600;
48435 + wmin = runtime / 60;
48436 + runtime -= wmin * 60;
48437 + wsec = runtime;
48438 +
48439 + cputime = (task->utime + task->stime) / HZ;
48440 + cday = cputime / (3600 * 24);
48441 + cputime -= cday * (3600 * 24);
48442 + chr = cputime / 3600;
48443 + cputime -= chr * 3600;
48444 + cmin = cputime / 60;
48445 + cputime -= cmin * 60;
48446 + csec = cputime;
48447 +
48448 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48449 +
48450 + return;
48451 +}
48452 +
48453 +void gr_set_kernel_label(struct task_struct *task)
48454 +{
48455 + if (gr_status & GR_READY) {
48456 + task->role = kernel_role;
48457 + task->acl = kernel_role->root_label;
48458 + }
48459 + return;
48460 +}
48461 +
48462 +#ifdef CONFIG_TASKSTATS
48463 +int gr_is_taskstats_denied(int pid)
48464 +{
48465 + struct task_struct *task;
48466 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48467 + const struct cred *cred;
48468 +#endif
48469 + int ret = 0;
48470 +
48471 + /* restrict taskstats viewing to un-chrooted root users
48472 + who have the 'view' subject flag if the RBAC system is enabled
48473 + */
48474 +
48475 + rcu_read_lock();
48476 + read_lock(&tasklist_lock);
48477 + task = find_task_by_vpid(pid);
48478 + if (task) {
48479 +#ifdef CONFIG_GRKERNSEC_CHROOT
48480 + if (proc_is_chrooted(task))
48481 + ret = -EACCES;
48482 +#endif
48483 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48484 + cred = __task_cred(task);
48485 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48486 + if (cred->uid != 0)
48487 + ret = -EACCES;
48488 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48489 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48490 + ret = -EACCES;
48491 +#endif
48492 +#endif
48493 + if (gr_status & GR_READY) {
48494 + if (!(task->acl->mode & GR_VIEW))
48495 + ret = -EACCES;
48496 + }
48497 + } else
48498 + ret = -ENOENT;
48499 +
48500 + read_unlock(&tasklist_lock);
48501 + rcu_read_unlock();
48502 +
48503 + return ret;
48504 +}
48505 +#endif
48506 +
48507 +/* AUXV entries are filled via a descendant of search_binary_handler
48508 + after we've already applied the subject for the target
48509 +*/
48510 +int gr_acl_enable_at_secure(void)
48511 +{
48512 + if (unlikely(!(gr_status & GR_READY)))
48513 + return 0;
48514 +
48515 + if (current->acl->mode & GR_ATSECURE)
48516 + return 1;
48517 +
48518 + return 0;
48519 +}
48520 +
48521 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48522 +{
48523 + struct task_struct *task = current;
48524 + struct dentry *dentry = file->f_path.dentry;
48525 + struct vfsmount *mnt = file->f_path.mnt;
48526 + struct acl_object_label *obj, *tmp;
48527 + struct acl_subject_label *subj;
48528 + unsigned int bufsize;
48529 + int is_not_root;
48530 + char *path;
48531 + dev_t dev = __get_dev(dentry);
48532 +
48533 + if (unlikely(!(gr_status & GR_READY)))
48534 + return 1;
48535 +
48536 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48537 + return 1;
48538 +
48539 + /* ignore Eric Biederman */
48540 + if (IS_PRIVATE(dentry->d_inode))
48541 + return 1;
48542 +
48543 + subj = task->acl;
48544 + do {
48545 + obj = lookup_acl_obj_label(ino, dev, subj);
48546 + if (obj != NULL)
48547 + return (obj->mode & GR_FIND) ? 1 : 0;
48548 + } while ((subj = subj->parent_subject));
48549 +
48550 + /* this is purely an optimization since we're looking for an object
48551 + for the directory we're doing a readdir on
48552 + if it's possible for any globbed object to match the entry we're
48553 + filling into the directory, then the object we find here will be
48554 + an anchor point with attached globbed objects
48555 + */
48556 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48557 + if (obj->globbed == NULL)
48558 + return (obj->mode & GR_FIND) ? 1 : 0;
48559 +
48560 + is_not_root = ((obj->filename[0] == '/') &&
48561 + (obj->filename[1] == '\0')) ? 0 : 1;
48562 + bufsize = PAGE_SIZE - namelen - is_not_root;
48563 +
48564 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
48565 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48566 + return 1;
48567 +
48568 + preempt_disable();
48569 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48570 + bufsize);
48571 +
48572 + bufsize = strlen(path);
48573 +
48574 + /* if base is "/", don't append an additional slash */
48575 + if (is_not_root)
48576 + *(path + bufsize) = '/';
48577 + memcpy(path + bufsize + is_not_root, name, namelen);
48578 + *(path + bufsize + namelen + is_not_root) = '\0';
48579 +
48580 + tmp = obj->globbed;
48581 + while (tmp) {
48582 + if (!glob_match(tmp->filename, path)) {
48583 + preempt_enable();
48584 + return (tmp->mode & GR_FIND) ? 1 : 0;
48585 + }
48586 + tmp = tmp->next;
48587 + }
48588 + preempt_enable();
48589 + return (obj->mode & GR_FIND) ? 1 : 0;
48590 +}
48591 +
48592 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48593 +EXPORT_SYMBOL(gr_acl_is_enabled);
48594 +#endif
48595 +EXPORT_SYMBOL(gr_learn_resource);
48596 +EXPORT_SYMBOL(gr_set_kernel_label);
48597 +#ifdef CONFIG_SECURITY
48598 +EXPORT_SYMBOL(gr_check_user_change);
48599 +EXPORT_SYMBOL(gr_check_group_change);
48600 +#endif
48601 +
48602 diff -urNp linux-2.6.32.43/grsecurity/gracl_cap.c linux-2.6.32.43/grsecurity/gracl_cap.c
48603 --- linux-2.6.32.43/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48604 +++ linux-2.6.32.43/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
48605 @@ -0,0 +1,138 @@
48606 +#include <linux/kernel.h>
48607 +#include <linux/module.h>
48608 +#include <linux/sched.h>
48609 +#include <linux/gracl.h>
48610 +#include <linux/grsecurity.h>
48611 +#include <linux/grinternal.h>
48612 +
48613 +static const char *captab_log[] = {
48614 + "CAP_CHOWN",
48615 + "CAP_DAC_OVERRIDE",
48616 + "CAP_DAC_READ_SEARCH",
48617 + "CAP_FOWNER",
48618 + "CAP_FSETID",
48619 + "CAP_KILL",
48620 + "CAP_SETGID",
48621 + "CAP_SETUID",
48622 + "CAP_SETPCAP",
48623 + "CAP_LINUX_IMMUTABLE",
48624 + "CAP_NET_BIND_SERVICE",
48625 + "CAP_NET_BROADCAST",
48626 + "CAP_NET_ADMIN",
48627 + "CAP_NET_RAW",
48628 + "CAP_IPC_LOCK",
48629 + "CAP_IPC_OWNER",
48630 + "CAP_SYS_MODULE",
48631 + "CAP_SYS_RAWIO",
48632 + "CAP_SYS_CHROOT",
48633 + "CAP_SYS_PTRACE",
48634 + "CAP_SYS_PACCT",
48635 + "CAP_SYS_ADMIN",
48636 + "CAP_SYS_BOOT",
48637 + "CAP_SYS_NICE",
48638 + "CAP_SYS_RESOURCE",
48639 + "CAP_SYS_TIME",
48640 + "CAP_SYS_TTY_CONFIG",
48641 + "CAP_MKNOD",
48642 + "CAP_LEASE",
48643 + "CAP_AUDIT_WRITE",
48644 + "CAP_AUDIT_CONTROL",
48645 + "CAP_SETFCAP",
48646 + "CAP_MAC_OVERRIDE",
48647 + "CAP_MAC_ADMIN"
48648 +};
48649 +
48650 +EXPORT_SYMBOL(gr_is_capable);
48651 +EXPORT_SYMBOL(gr_is_capable_nolog);
48652 +
48653 +int
48654 +gr_is_capable(const int cap)
48655 +{
48656 + struct task_struct *task = current;
48657 + const struct cred *cred = current_cred();
48658 + struct acl_subject_label *curracl;
48659 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48660 + kernel_cap_t cap_audit = __cap_empty_set;
48661 +
48662 + if (!gr_acl_is_enabled())
48663 + return 1;
48664 +
48665 + curracl = task->acl;
48666 +
48667 + cap_drop = curracl->cap_lower;
48668 + cap_mask = curracl->cap_mask;
48669 + cap_audit = curracl->cap_invert_audit;
48670 +
48671 + while ((curracl = curracl->parent_subject)) {
48672 + /* if the cap isn't specified in the current computed mask but is specified in the
48673 + current level subject, and is lowered in the current level subject, then add
48674 + it to the set of dropped capabilities
48675 + otherwise, add the current level subject's mask to the current computed mask
48676 + */
48677 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48678 + cap_raise(cap_mask, cap);
48679 + if (cap_raised(curracl->cap_lower, cap))
48680 + cap_raise(cap_drop, cap);
48681 + if (cap_raised(curracl->cap_invert_audit, cap))
48682 + cap_raise(cap_audit, cap);
48683 + }
48684 + }
48685 +
48686 + if (!cap_raised(cap_drop, cap)) {
48687 + if (cap_raised(cap_audit, cap))
48688 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48689 + return 1;
48690 + }
48691 +
48692 + curracl = task->acl;
48693 +
48694 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48695 + && cap_raised(cred->cap_effective, cap)) {
48696 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48697 + task->role->roletype, cred->uid,
48698 + cred->gid, task->exec_file ?
48699 + gr_to_filename(task->exec_file->f_path.dentry,
48700 + task->exec_file->f_path.mnt) : curracl->filename,
48701 + curracl->filename, 0UL,
48702 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48703 + return 1;
48704 + }
48705 +
48706 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48707 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48708 + return 0;
48709 +}
48710 +
48711 +int
48712 +gr_is_capable_nolog(const int cap)
48713 +{
48714 + struct acl_subject_label *curracl;
48715 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48716 +
48717 + if (!gr_acl_is_enabled())
48718 + return 1;
48719 +
48720 + curracl = current->acl;
48721 +
48722 + cap_drop = curracl->cap_lower;
48723 + cap_mask = curracl->cap_mask;
48724 +
48725 + while ((curracl = curracl->parent_subject)) {
48726 + /* if the cap isn't specified in the current computed mask but is specified in the
48727 + current level subject, and is lowered in the current level subject, then add
48728 + it to the set of dropped capabilities
48729 + otherwise, add the current level subject's mask to the current computed mask
48730 + */
48731 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48732 + cap_raise(cap_mask, cap);
48733 + if (cap_raised(curracl->cap_lower, cap))
48734 + cap_raise(cap_drop, cap);
48735 + }
48736 + }
48737 +
48738 + if (!cap_raised(cap_drop, cap))
48739 + return 1;
48740 +
48741 + return 0;
48742 +}
48743 +
48744 diff -urNp linux-2.6.32.43/grsecurity/gracl_fs.c linux-2.6.32.43/grsecurity/gracl_fs.c
48745 --- linux-2.6.32.43/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48746 +++ linux-2.6.32.43/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
48747 @@ -0,0 +1,431 @@
48748 +#include <linux/kernel.h>
48749 +#include <linux/sched.h>
48750 +#include <linux/types.h>
48751 +#include <linux/fs.h>
48752 +#include <linux/file.h>
48753 +#include <linux/stat.h>
48754 +#include <linux/grsecurity.h>
48755 +#include <linux/grinternal.h>
48756 +#include <linux/gracl.h>
48757 +
48758 +__u32
48759 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48760 + const struct vfsmount * mnt)
48761 +{
48762 + __u32 mode;
48763 +
48764 + if (unlikely(!dentry->d_inode))
48765 + return GR_FIND;
48766 +
48767 + mode =
48768 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48769 +
48770 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48771 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48772 + return mode;
48773 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48774 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48775 + return 0;
48776 + } else if (unlikely(!(mode & GR_FIND)))
48777 + return 0;
48778 +
48779 + return GR_FIND;
48780 +}
48781 +
48782 +__u32
48783 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48784 + const int fmode)
48785 +{
48786 + __u32 reqmode = GR_FIND;
48787 + __u32 mode;
48788 +
48789 + if (unlikely(!dentry->d_inode))
48790 + return reqmode;
48791 +
48792 + if (unlikely(fmode & O_APPEND))
48793 + reqmode |= GR_APPEND;
48794 + else if (unlikely(fmode & FMODE_WRITE))
48795 + reqmode |= GR_WRITE;
48796 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48797 + reqmode |= GR_READ;
48798 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
48799 + reqmode &= ~GR_READ;
48800 + mode =
48801 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48802 + mnt);
48803 +
48804 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48805 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48806 + reqmode & GR_READ ? " reading" : "",
48807 + reqmode & GR_WRITE ? " writing" : reqmode &
48808 + GR_APPEND ? " appending" : "");
48809 + return reqmode;
48810 + } else
48811 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48812 + {
48813 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48814 + reqmode & GR_READ ? " reading" : "",
48815 + reqmode & GR_WRITE ? " writing" : reqmode &
48816 + GR_APPEND ? " appending" : "");
48817 + return 0;
48818 + } else if (unlikely((mode & reqmode) != reqmode))
48819 + return 0;
48820 +
48821 + return reqmode;
48822 +}
48823 +
48824 +__u32
48825 +gr_acl_handle_creat(const struct dentry * dentry,
48826 + const struct dentry * p_dentry,
48827 + const struct vfsmount * p_mnt, const int fmode,
48828 + const int imode)
48829 +{
48830 + __u32 reqmode = GR_WRITE | GR_CREATE;
48831 + __u32 mode;
48832 +
48833 + if (unlikely(fmode & O_APPEND))
48834 + reqmode |= GR_APPEND;
48835 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48836 + reqmode |= GR_READ;
48837 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48838 + reqmode |= GR_SETID;
48839 +
48840 + mode =
48841 + gr_check_create(dentry, p_dentry, p_mnt,
48842 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48843 +
48844 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48845 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48846 + reqmode & GR_READ ? " reading" : "",
48847 + reqmode & GR_WRITE ? " writing" : reqmode &
48848 + GR_APPEND ? " appending" : "");
48849 + return reqmode;
48850 + } else
48851 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48852 + {
48853 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48854 + reqmode & GR_READ ? " reading" : "",
48855 + reqmode & GR_WRITE ? " writing" : reqmode &
48856 + GR_APPEND ? " appending" : "");
48857 + return 0;
48858 + } else if (unlikely((mode & reqmode) != reqmode))
48859 + return 0;
48860 +
48861 + return reqmode;
48862 +}
48863 +
48864 +__u32
48865 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48866 + const int fmode)
48867 +{
48868 + __u32 mode, reqmode = GR_FIND;
48869 +
48870 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48871 + reqmode |= GR_EXEC;
48872 + if (fmode & S_IWOTH)
48873 + reqmode |= GR_WRITE;
48874 + if (fmode & S_IROTH)
48875 + reqmode |= GR_READ;
48876 +
48877 + mode =
48878 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48879 + mnt);
48880 +
48881 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48882 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48883 + reqmode & GR_READ ? " reading" : "",
48884 + reqmode & GR_WRITE ? " writing" : "",
48885 + reqmode & GR_EXEC ? " executing" : "");
48886 + return reqmode;
48887 + } else
48888 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48889 + {
48890 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48891 + reqmode & GR_READ ? " reading" : "",
48892 + reqmode & GR_WRITE ? " writing" : "",
48893 + reqmode & GR_EXEC ? " executing" : "");
48894 + return 0;
48895 + } else if (unlikely((mode & reqmode) != reqmode))
48896 + return 0;
48897 +
48898 + return reqmode;
48899 +}
48900 +
48901 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48902 +{
48903 + __u32 mode;
48904 +
48905 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48906 +
48907 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48908 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48909 + return mode;
48910 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48911 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48912 + return 0;
48913 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48914 + return 0;
48915 +
48916 + return (reqmode);
48917 +}
48918 +
48919 +__u32
48920 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48921 +{
48922 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48923 +}
48924 +
48925 +__u32
48926 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48927 +{
48928 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48929 +}
48930 +
48931 +__u32
48932 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48933 +{
48934 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48935 +}
48936 +
48937 +__u32
48938 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48939 +{
48940 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48941 +}
48942 +
48943 +__u32
48944 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48945 + mode_t mode)
48946 +{
48947 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48948 + return 1;
48949 +
48950 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48951 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48952 + GR_FCHMOD_ACL_MSG);
48953 + } else {
48954 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48955 + }
48956 +}
48957 +
48958 +__u32
48959 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48960 + mode_t mode)
48961 +{
48962 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48963 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48964 + GR_CHMOD_ACL_MSG);
48965 + } else {
48966 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48967 + }
48968 +}
48969 +
48970 +__u32
48971 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48972 +{
48973 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48974 +}
48975 +
48976 +__u32
48977 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48978 +{
48979 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48980 +}
48981 +
48982 +__u32
48983 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48984 +{
48985 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48986 +}
48987 +
48988 +__u32
48989 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48990 +{
48991 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48992 + GR_UNIXCONNECT_ACL_MSG);
48993 +}
48994 +
48995 +/* hardlinks require at minimum create permission,
48996 + any additional privilege required is based on the
48997 + privilege of the file being linked to
48998 +*/
48999 +__u32
49000 +gr_acl_handle_link(const struct dentry * new_dentry,
49001 + const struct dentry * parent_dentry,
49002 + const struct vfsmount * parent_mnt,
49003 + const struct dentry * old_dentry,
49004 + const struct vfsmount * old_mnt, const char *to)
49005 +{
49006 + __u32 mode;
49007 + __u32 needmode = GR_CREATE | GR_LINK;
49008 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
49009 +
49010 + mode =
49011 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
49012 + old_mnt);
49013 +
49014 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
49015 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49016 + return mode;
49017 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49018 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49019 + return 0;
49020 + } else if (unlikely((mode & needmode) != needmode))
49021 + return 0;
49022 +
49023 + return 1;
49024 +}
49025 +
49026 +__u32
49027 +gr_acl_handle_symlink(const struct dentry * new_dentry,
49028 + const struct dentry * parent_dentry,
49029 + const struct vfsmount * parent_mnt, const char *from)
49030 +{
49031 + __u32 needmode = GR_WRITE | GR_CREATE;
49032 + __u32 mode;
49033 +
49034 + mode =
49035 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
49036 + GR_CREATE | GR_AUDIT_CREATE |
49037 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
49038 +
49039 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
49040 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49041 + return mode;
49042 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49043 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49044 + return 0;
49045 + } else if (unlikely((mode & needmode) != needmode))
49046 + return 0;
49047 +
49048 + return (GR_WRITE | GR_CREATE);
49049 +}
49050 +
49051 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
49052 +{
49053 + __u32 mode;
49054 +
49055 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49056 +
49057 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49058 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
49059 + return mode;
49060 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49061 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
49062 + return 0;
49063 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
49064 + return 0;
49065 +
49066 + return (reqmode);
49067 +}
49068 +
49069 +__u32
49070 +gr_acl_handle_mknod(const struct dentry * new_dentry,
49071 + const struct dentry * parent_dentry,
49072 + const struct vfsmount * parent_mnt,
49073 + const int mode)
49074 +{
49075 + __u32 reqmode = GR_WRITE | GR_CREATE;
49076 + if (unlikely(mode & (S_ISUID | S_ISGID)))
49077 + reqmode |= GR_SETID;
49078 +
49079 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49080 + reqmode, GR_MKNOD_ACL_MSG);
49081 +}
49082 +
49083 +__u32
49084 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
49085 + const struct dentry *parent_dentry,
49086 + const struct vfsmount *parent_mnt)
49087 +{
49088 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49089 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
49090 +}
49091 +
49092 +#define RENAME_CHECK_SUCCESS(old, new) \
49093 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
49094 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
49095 +
49096 +int
49097 +gr_acl_handle_rename(struct dentry *new_dentry,
49098 + struct dentry *parent_dentry,
49099 + const struct vfsmount *parent_mnt,
49100 + struct dentry *old_dentry,
49101 + struct inode *old_parent_inode,
49102 + struct vfsmount *old_mnt, const char *newname)
49103 +{
49104 + __u32 comp1, comp2;
49105 + int error = 0;
49106 +
49107 + if (unlikely(!gr_acl_is_enabled()))
49108 + return 0;
49109 +
49110 + if (!new_dentry->d_inode) {
49111 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
49112 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
49113 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
49114 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
49115 + GR_DELETE | GR_AUDIT_DELETE |
49116 + GR_AUDIT_READ | GR_AUDIT_WRITE |
49117 + GR_SUPPRESS, old_mnt);
49118 + } else {
49119 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
49120 + GR_CREATE | GR_DELETE |
49121 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
49122 + GR_AUDIT_READ | GR_AUDIT_WRITE |
49123 + GR_SUPPRESS, parent_mnt);
49124 + comp2 =
49125 + gr_search_file(old_dentry,
49126 + GR_READ | GR_WRITE | GR_AUDIT_READ |
49127 + GR_DELETE | GR_AUDIT_DELETE |
49128 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
49129 + }
49130 +
49131 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
49132 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
49133 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49134 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
49135 + && !(comp2 & GR_SUPPRESS)) {
49136 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49137 + error = -EACCES;
49138 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
49139 + error = -EACCES;
49140 +
49141 + return error;
49142 +}
49143 +
49144 +void
49145 +gr_acl_handle_exit(void)
49146 +{
49147 + u16 id;
49148 + char *rolename;
49149 + struct file *exec_file;
49150 +
49151 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
49152 + !(current->role->roletype & GR_ROLE_PERSIST))) {
49153 + id = current->acl_role_id;
49154 + rolename = current->role->rolename;
49155 + gr_set_acls(1);
49156 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
49157 + }
49158 +
49159 + write_lock(&grsec_exec_file_lock);
49160 + exec_file = current->exec_file;
49161 + current->exec_file = NULL;
49162 + write_unlock(&grsec_exec_file_lock);
49163 +
49164 + if (exec_file)
49165 + fput(exec_file);
49166 +}
49167 +
49168 +int
49169 +gr_acl_handle_procpidmem(const struct task_struct *task)
49170 +{
49171 + if (unlikely(!gr_acl_is_enabled()))
49172 + return 0;
49173 +
49174 + if (task != current && task->acl->mode & GR_PROTPROCFD)
49175 + return -EACCES;
49176 +
49177 + return 0;
49178 +}
49179 diff -urNp linux-2.6.32.43/grsecurity/gracl_ip.c linux-2.6.32.43/grsecurity/gracl_ip.c
49180 --- linux-2.6.32.43/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
49181 +++ linux-2.6.32.43/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
49182 @@ -0,0 +1,382 @@
49183 +#include <linux/kernel.h>
49184 +#include <asm/uaccess.h>
49185 +#include <asm/errno.h>
49186 +#include <net/sock.h>
49187 +#include <linux/file.h>
49188 +#include <linux/fs.h>
49189 +#include <linux/net.h>
49190 +#include <linux/in.h>
49191 +#include <linux/skbuff.h>
49192 +#include <linux/ip.h>
49193 +#include <linux/udp.h>
49194 +#include <linux/smp_lock.h>
49195 +#include <linux/types.h>
49196 +#include <linux/sched.h>
49197 +#include <linux/netdevice.h>
49198 +#include <linux/inetdevice.h>
49199 +#include <linux/gracl.h>
49200 +#include <linux/grsecurity.h>
49201 +#include <linux/grinternal.h>
49202 +
49203 +#define GR_BIND 0x01
49204 +#define GR_CONNECT 0x02
49205 +#define GR_INVERT 0x04
49206 +#define GR_BINDOVERRIDE 0x08
49207 +#define GR_CONNECTOVERRIDE 0x10
49208 +#define GR_SOCK_FAMILY 0x20
49209 +
49210 +static const char * gr_protocols[IPPROTO_MAX] = {
49211 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
49212 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
49213 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
49214 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
49215 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
49216 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
49217 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
49218 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
49219 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
49220 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
49221 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
49222 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
49223 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
49224 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
49225 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
49226 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
49227 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
49228 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
49229 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
49230 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
49231 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
49232 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
49233 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
49234 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
49235 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
49236 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
49237 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
49238 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
49239 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
49240 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
49241 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
49242 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
49243 + };
49244 +
49245 +static const char * gr_socktypes[SOCK_MAX] = {
49246 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
49247 + "unknown:7", "unknown:8", "unknown:9", "packet"
49248 + };
49249 +
49250 +static const char * gr_sockfamilies[AF_MAX+1] = {
49251 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
49252 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
49253 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
49254 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
49255 + };
49256 +
49257 +const char *
49258 +gr_proto_to_name(unsigned char proto)
49259 +{
49260 + return gr_protocols[proto];
49261 +}
49262 +
49263 +const char *
49264 +gr_socktype_to_name(unsigned char type)
49265 +{
49266 + return gr_socktypes[type];
49267 +}
49268 +
49269 +const char *
49270 +gr_sockfamily_to_name(unsigned char family)
49271 +{
49272 + return gr_sockfamilies[family];
49273 +}
49274 +
49275 +int
49276 +gr_search_socket(const int domain, const int type, const int protocol)
49277 +{
49278 + struct acl_subject_label *curr;
49279 + const struct cred *cred = current_cred();
49280 +
49281 + if (unlikely(!gr_acl_is_enabled()))
49282 + goto exit;
49283 +
49284 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
49285 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
49286 + goto exit; // let the kernel handle it
49287 +
49288 + curr = current->acl;
49289 +
49290 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
49291 + /* the family is allowed, if this is PF_INET allow it only if
49292 + the extra sock type/protocol checks pass */
49293 + if (domain == PF_INET)
49294 + goto inet_check;
49295 + goto exit;
49296 + } else {
49297 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49298 + __u32 fakeip = 0;
49299 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49300 + current->role->roletype, cred->uid,
49301 + cred->gid, current->exec_file ?
49302 + gr_to_filename(current->exec_file->f_path.dentry,
49303 + current->exec_file->f_path.mnt) :
49304 + curr->filename, curr->filename,
49305 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
49306 + &current->signal->saved_ip);
49307 + goto exit;
49308 + }
49309 + goto exit_fail;
49310 + }
49311 +
49312 +inet_check:
49313 + /* the rest of this checking is for IPv4 only */
49314 + if (!curr->ips)
49315 + goto exit;
49316 +
49317 + if ((curr->ip_type & (1 << type)) &&
49318 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
49319 + goto exit;
49320 +
49321 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49322 + /* we don't place acls on raw sockets , and sometimes
49323 + dgram/ip sockets are opened for ioctl and not
49324 + bind/connect, so we'll fake a bind learn log */
49325 + if (type == SOCK_RAW || type == SOCK_PACKET) {
49326 + __u32 fakeip = 0;
49327 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49328 + current->role->roletype, cred->uid,
49329 + cred->gid, current->exec_file ?
49330 + gr_to_filename(current->exec_file->f_path.dentry,
49331 + current->exec_file->f_path.mnt) :
49332 + curr->filename, curr->filename,
49333 + &fakeip, 0, type,
49334 + protocol, GR_CONNECT, &current->signal->saved_ip);
49335 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
49336 + __u32 fakeip = 0;
49337 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49338 + current->role->roletype, cred->uid,
49339 + cred->gid, current->exec_file ?
49340 + gr_to_filename(current->exec_file->f_path.dentry,
49341 + current->exec_file->f_path.mnt) :
49342 + curr->filename, curr->filename,
49343 + &fakeip, 0, type,
49344 + protocol, GR_BIND, &current->signal->saved_ip);
49345 + }
49346 + /* we'll log when they use connect or bind */
49347 + goto exit;
49348 + }
49349 +
49350 +exit_fail:
49351 + if (domain == PF_INET)
49352 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
49353 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
49354 + else
49355 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
49356 + gr_socktype_to_name(type), protocol);
49357 +
49358 + return 0;
49359 +exit:
49360 + return 1;
49361 +}
49362 +
49363 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
49364 +{
49365 + if ((ip->mode & mode) &&
49366 + (ip_port >= ip->low) &&
49367 + (ip_port <= ip->high) &&
49368 + ((ntohl(ip_addr) & our_netmask) ==
49369 + (ntohl(our_addr) & our_netmask))
49370 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
49371 + && (ip->type & (1 << type))) {
49372 + if (ip->mode & GR_INVERT)
49373 + return 2; // specifically denied
49374 + else
49375 + return 1; // allowed
49376 + }
49377 +
49378 + return 0; // not specifically allowed, may continue parsing
49379 +}
49380 +
49381 +static int
49382 +gr_search_connectbind(const int full_mode, struct sock *sk,
49383 + struct sockaddr_in *addr, const int type)
49384 +{
49385 + char iface[IFNAMSIZ] = {0};
49386 + struct acl_subject_label *curr;
49387 + struct acl_ip_label *ip;
49388 + struct inet_sock *isk;
49389 + struct net_device *dev;
49390 + struct in_device *idev;
49391 + unsigned long i;
49392 + int ret;
49393 + int mode = full_mode & (GR_BIND | GR_CONNECT);
49394 + __u32 ip_addr = 0;
49395 + __u32 our_addr;
49396 + __u32 our_netmask;
49397 + char *p;
49398 + __u16 ip_port = 0;
49399 + const struct cred *cred = current_cred();
49400 +
49401 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49402 + return 0;
49403 +
49404 + curr = current->acl;
49405 + isk = inet_sk(sk);
49406 +
49407 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
49408 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49409 + addr->sin_addr.s_addr = curr->inaddr_any_override;
49410 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49411 + struct sockaddr_in saddr;
49412 + int err;
49413 +
49414 + saddr.sin_family = AF_INET;
49415 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
49416 + saddr.sin_port = isk->sport;
49417 +
49418 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49419 + if (err)
49420 + return err;
49421 +
49422 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49423 + if (err)
49424 + return err;
49425 + }
49426 +
49427 + if (!curr->ips)
49428 + return 0;
49429 +
49430 + ip_addr = addr->sin_addr.s_addr;
49431 + ip_port = ntohs(addr->sin_port);
49432 +
49433 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49434 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49435 + current->role->roletype, cred->uid,
49436 + cred->gid, current->exec_file ?
49437 + gr_to_filename(current->exec_file->f_path.dentry,
49438 + current->exec_file->f_path.mnt) :
49439 + curr->filename, curr->filename,
49440 + &ip_addr, ip_port, type,
49441 + sk->sk_protocol, mode, &current->signal->saved_ip);
49442 + return 0;
49443 + }
49444 +
49445 + for (i = 0; i < curr->ip_num; i++) {
49446 + ip = *(curr->ips + i);
49447 + if (ip->iface != NULL) {
49448 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
49449 + p = strchr(iface, ':');
49450 + if (p != NULL)
49451 + *p = '\0';
49452 + dev = dev_get_by_name(sock_net(sk), iface);
49453 + if (dev == NULL)
49454 + continue;
49455 + idev = in_dev_get(dev);
49456 + if (idev == NULL) {
49457 + dev_put(dev);
49458 + continue;
49459 + }
49460 + rcu_read_lock();
49461 + for_ifa(idev) {
49462 + if (!strcmp(ip->iface, ifa->ifa_label)) {
49463 + our_addr = ifa->ifa_address;
49464 + our_netmask = 0xffffffff;
49465 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49466 + if (ret == 1) {
49467 + rcu_read_unlock();
49468 + in_dev_put(idev);
49469 + dev_put(dev);
49470 + return 0;
49471 + } else if (ret == 2) {
49472 + rcu_read_unlock();
49473 + in_dev_put(idev);
49474 + dev_put(dev);
49475 + goto denied;
49476 + }
49477 + }
49478 + } endfor_ifa(idev);
49479 + rcu_read_unlock();
49480 + in_dev_put(idev);
49481 + dev_put(dev);
49482 + } else {
49483 + our_addr = ip->addr;
49484 + our_netmask = ip->netmask;
49485 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49486 + if (ret == 1)
49487 + return 0;
49488 + else if (ret == 2)
49489 + goto denied;
49490 + }
49491 + }
49492 +
49493 +denied:
49494 + if (mode == GR_BIND)
49495 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49496 + else if (mode == GR_CONNECT)
49497 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49498 +
49499 + return -EACCES;
49500 +}
49501 +
49502 +int
49503 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49504 +{
49505 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49506 +}
49507 +
49508 +int
49509 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49510 +{
49511 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49512 +}
49513 +
49514 +int gr_search_listen(struct socket *sock)
49515 +{
49516 + struct sock *sk = sock->sk;
49517 + struct sockaddr_in addr;
49518 +
49519 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49520 + addr.sin_port = inet_sk(sk)->sport;
49521 +
49522 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49523 +}
49524 +
49525 +int gr_search_accept(struct socket *sock)
49526 +{
49527 + struct sock *sk = sock->sk;
49528 + struct sockaddr_in addr;
49529 +
49530 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49531 + addr.sin_port = inet_sk(sk)->sport;
49532 +
49533 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49534 +}
49535 +
49536 +int
49537 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49538 +{
49539 + if (addr)
49540 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49541 + else {
49542 + struct sockaddr_in sin;
49543 + const struct inet_sock *inet = inet_sk(sk);
49544 +
49545 + sin.sin_addr.s_addr = inet->daddr;
49546 + sin.sin_port = inet->dport;
49547 +
49548 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49549 + }
49550 +}
49551 +
49552 +int
49553 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49554 +{
49555 + struct sockaddr_in sin;
49556 +
49557 + if (unlikely(skb->len < sizeof (struct udphdr)))
49558 + return 0; // skip this packet
49559 +
49560 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49561 + sin.sin_port = udp_hdr(skb)->source;
49562 +
49563 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49564 +}
49565 diff -urNp linux-2.6.32.43/grsecurity/gracl_learn.c linux-2.6.32.43/grsecurity/gracl_learn.c
49566 --- linux-2.6.32.43/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49567 +++ linux-2.6.32.43/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
49568 @@ -0,0 +1,208 @@
49569 +#include <linux/kernel.h>
49570 +#include <linux/mm.h>
49571 +#include <linux/sched.h>
49572 +#include <linux/poll.h>
49573 +#include <linux/smp_lock.h>
49574 +#include <linux/string.h>
49575 +#include <linux/file.h>
49576 +#include <linux/types.h>
49577 +#include <linux/vmalloc.h>
49578 +#include <linux/grinternal.h>
49579 +
49580 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49581 + size_t count, loff_t *ppos);
49582 +extern int gr_acl_is_enabled(void);
49583 +
49584 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49585 +static int gr_learn_attached;
49586 +
49587 +/* use a 512k buffer */
49588 +#define LEARN_BUFFER_SIZE (512 * 1024)
49589 +
49590 +static DEFINE_SPINLOCK(gr_learn_lock);
49591 +static DEFINE_MUTEX(gr_learn_user_mutex);
49592 +
49593 +/* we need to maintain two buffers, so that the kernel context of grlearn
49594 + uses a semaphore around the userspace copying, and the other kernel contexts
49595 + use a spinlock when copying into the buffer, since they cannot sleep
49596 +*/
49597 +static char *learn_buffer;
49598 +static char *learn_buffer_user;
49599 +static int learn_buffer_len;
49600 +static int learn_buffer_user_len;
49601 +
49602 +static ssize_t
49603 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49604 +{
49605 + DECLARE_WAITQUEUE(wait, current);
49606 + ssize_t retval = 0;
49607 +
49608 + add_wait_queue(&learn_wait, &wait);
49609 + set_current_state(TASK_INTERRUPTIBLE);
49610 + do {
49611 + mutex_lock(&gr_learn_user_mutex);
49612 + spin_lock(&gr_learn_lock);
49613 + if (learn_buffer_len)
49614 + break;
49615 + spin_unlock(&gr_learn_lock);
49616 + mutex_unlock(&gr_learn_user_mutex);
49617 + if (file->f_flags & O_NONBLOCK) {
49618 + retval = -EAGAIN;
49619 + goto out;
49620 + }
49621 + if (signal_pending(current)) {
49622 + retval = -ERESTARTSYS;
49623 + goto out;
49624 + }
49625 +
49626 + schedule();
49627 + } while (1);
49628 +
49629 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49630 + learn_buffer_user_len = learn_buffer_len;
49631 + retval = learn_buffer_len;
49632 + learn_buffer_len = 0;
49633 +
49634 + spin_unlock(&gr_learn_lock);
49635 +
49636 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49637 + retval = -EFAULT;
49638 +
49639 + mutex_unlock(&gr_learn_user_mutex);
49640 +out:
49641 + set_current_state(TASK_RUNNING);
49642 + remove_wait_queue(&learn_wait, &wait);
49643 + return retval;
49644 +}
49645 +
49646 +static unsigned int
49647 +poll_learn(struct file * file, poll_table * wait)
49648 +{
49649 + poll_wait(file, &learn_wait, wait);
49650 +
49651 + if (learn_buffer_len)
49652 + return (POLLIN | POLLRDNORM);
49653 +
49654 + return 0;
49655 +}
49656 +
49657 +void
49658 +gr_clear_learn_entries(void)
49659 +{
49660 + char *tmp;
49661 +
49662 + mutex_lock(&gr_learn_user_mutex);
49663 + spin_lock(&gr_learn_lock);
49664 + tmp = learn_buffer;
49665 + learn_buffer = NULL;
49666 + spin_unlock(&gr_learn_lock);
49667 + if (tmp)
49668 + vfree(tmp);
49669 + if (learn_buffer_user != NULL) {
49670 + vfree(learn_buffer_user);
49671 + learn_buffer_user = NULL;
49672 + }
49673 + learn_buffer_len = 0;
49674 + mutex_unlock(&gr_learn_user_mutex);
49675 +
49676 + return;
49677 +}
49678 +
49679 +void
49680 +gr_add_learn_entry(const char *fmt, ...)
49681 +{
49682 + va_list args;
49683 + unsigned int len;
49684 +
49685 + if (!gr_learn_attached)
49686 + return;
49687 +
49688 + spin_lock(&gr_learn_lock);
49689 +
49690 + /* leave a gap at the end so we know when it's "full" but don't have to
49691 + compute the exact length of the string we're trying to append
49692 + */
49693 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49694 + spin_unlock(&gr_learn_lock);
49695 + wake_up_interruptible(&learn_wait);
49696 + return;
49697 + }
49698 + if (learn_buffer == NULL) {
49699 + spin_unlock(&gr_learn_lock);
49700 + return;
49701 + }
49702 +
49703 + va_start(args, fmt);
49704 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49705 + va_end(args);
49706 +
49707 + learn_buffer_len += len + 1;
49708 +
49709 + spin_unlock(&gr_learn_lock);
49710 + wake_up_interruptible(&learn_wait);
49711 +
49712 + return;
49713 +}
49714 +
49715 +static int
49716 +open_learn(struct inode *inode, struct file *file)
49717 +{
49718 + if (file->f_mode & FMODE_READ && gr_learn_attached)
49719 + return -EBUSY;
49720 + if (file->f_mode & FMODE_READ) {
49721 + int retval = 0;
49722 + mutex_lock(&gr_learn_user_mutex);
49723 + if (learn_buffer == NULL)
49724 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49725 + if (learn_buffer_user == NULL)
49726 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49727 + if (learn_buffer == NULL) {
49728 + retval = -ENOMEM;
49729 + goto out_error;
49730 + }
49731 + if (learn_buffer_user == NULL) {
49732 + retval = -ENOMEM;
49733 + goto out_error;
49734 + }
49735 + learn_buffer_len = 0;
49736 + learn_buffer_user_len = 0;
49737 + gr_learn_attached = 1;
49738 +out_error:
49739 + mutex_unlock(&gr_learn_user_mutex);
49740 + return retval;
49741 + }
49742 + return 0;
49743 +}
49744 +
49745 +static int
49746 +close_learn(struct inode *inode, struct file *file)
49747 +{
49748 + if (file->f_mode & FMODE_READ) {
49749 + char *tmp = NULL;
49750 + mutex_lock(&gr_learn_user_mutex);
49751 + spin_lock(&gr_learn_lock);
49752 + tmp = learn_buffer;
49753 + learn_buffer = NULL;
49754 + spin_unlock(&gr_learn_lock);
49755 + if (tmp)
49756 + vfree(tmp);
49757 + if (learn_buffer_user != NULL) {
49758 + vfree(learn_buffer_user);
49759 + learn_buffer_user = NULL;
49760 + }
49761 + learn_buffer_len = 0;
49762 + learn_buffer_user_len = 0;
49763 + gr_learn_attached = 0;
49764 + mutex_unlock(&gr_learn_user_mutex);
49765 + }
49766 +
49767 + return 0;
49768 +}
49769 +
49770 +const struct file_operations grsec_fops = {
49771 + .read = read_learn,
49772 + .write = write_grsec_handler,
49773 + .open = open_learn,
49774 + .release = close_learn,
49775 + .poll = poll_learn,
49776 +};
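
For orientation, not part of the patch itself: the learn interface above is a producer/consumer pair. gr_add_learn_entry() appends formatted records to learn_buffer under gr_learn_lock and wakes any sleeper on learn_wait; read_learn() blocks until data exists, snapshots it into learn_buffer_user under gr_learn_user_mutex, and copies the snapshot to userspace. A minimal userspace sketch of the same handoff, using a pthread mutex and condition variable in place of the kernel spinlock and waitqueue (buffer size and names below are illustrative, not from the patch):

/* Standalone sketch of the learn-buffer handoff: a producer appends
 * records to a shared buffer; the consumer waits until data is present,
 * snapshots it, and drains the buffer.  Illustrative only. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BUF_SIZE 4096

static char buf[BUF_SIZE];
static size_t buf_len;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t data_ready = PTHREAD_COND_INITIALIZER;

/* analogue of gr_add_learn_entry(): append one record, wake the reader */
static void add_entry(const char *msg)
{
	pthread_mutex_lock(&lock);
	if (buf_len + strlen(msg) + 1 < BUF_SIZE) {
		int n = snprintf(buf + buf_len, BUF_SIZE - buf_len, "%s", msg);
		buf_len += n + 1;	/* keep the NUL as a record separator */
	}
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&data_ready);
}

/* analogue of read_learn(): block until data exists, snapshot and drain it */
static void *reader(void *arg)
{
	char snapshot[BUF_SIZE];
	size_t n;

	(void)arg;
	pthread_mutex_lock(&lock);
	while (buf_len == 0)
		pthread_cond_wait(&data_ready, &lock);
	n = buf_len;
	memcpy(snapshot, buf, n);
	buf_len = 0;
	pthread_mutex_unlock(&lock);

	printf("drained %zu bytes, first record: %s\n", n, snapshot);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	sleep(1);
	add_entry("subject /usr/bin/foo rx");
	pthread_join(t, NULL);
	return 0;
}
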
49777 diff -urNp linux-2.6.32.43/grsecurity/gracl_res.c linux-2.6.32.43/grsecurity/gracl_res.c
49778 --- linux-2.6.32.43/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49779 +++ linux-2.6.32.43/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
49780 @@ -0,0 +1,67 @@
49781 +#include <linux/kernel.h>
49782 +#include <linux/sched.h>
49783 +#include <linux/gracl.h>
49784 +#include <linux/grinternal.h>
49785 +
49786 +static const char *restab_log[] = {
49787 + [RLIMIT_CPU] = "RLIMIT_CPU",
49788 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49789 + [RLIMIT_DATA] = "RLIMIT_DATA",
49790 + [RLIMIT_STACK] = "RLIMIT_STACK",
49791 + [RLIMIT_CORE] = "RLIMIT_CORE",
49792 + [RLIMIT_RSS] = "RLIMIT_RSS",
49793 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
49794 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49795 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49796 + [RLIMIT_AS] = "RLIMIT_AS",
49797 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49798 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49799 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49800 + [RLIMIT_NICE] = "RLIMIT_NICE",
49801 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49802 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49803 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49804 +};
49805 +
49806 +void
49807 +gr_log_resource(const struct task_struct *task,
49808 + const int res, const unsigned long wanted, const int gt)
49809 +{
49810 + const struct cred *cred;
49811 + unsigned long rlim;
49812 +
49813 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49814 + return;
49815 +
49816 + // not yet supported resource
49817 + if (unlikely(!restab_log[res]))
49818 + return;
49819 +
49820 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49821 + rlim = task->signal->rlim[res].rlim_max;
49822 + else
49823 + rlim = task->signal->rlim[res].rlim_cur;
49824 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49825 + return;
49826 +
49827 + rcu_read_lock();
49828 + cred = __task_cred(task);
49829 +
49830 + if (res == RLIMIT_NPROC &&
49831 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49832 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49833 + goto out_rcu_unlock;
49834 + else if (res == RLIMIT_MEMLOCK &&
49835 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49836 + goto out_rcu_unlock;
49837 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49838 + goto out_rcu_unlock;
49839 + rcu_read_unlock();
49840 +
49841 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49842 +
49843 + return;
49844 +out_rcu_unlock:
49845 + rcu_read_unlock();
49846 + return;
49847 +}
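
Not part of the patch: the guard in gr_log_resource() reads more naturally inverted. An event is logged only when the limit is finite and the request exceeds it, with the gt flag choosing a strict versus non-strict comparison. A small standalone sketch of that predicate (the constant and function name are illustrative):

/* Sketch of the gr_log_resource() decision: returns nonzero when a
 * resource request should be logged as exceeding its limit.
 * "gt" mirrors the patch's flag: strict comparison when set, >= otherwise. */
#include <stdio.h>

#define LIMIT_INFINITY (~0UL)	/* stand-in for RLIM_INFINITY */

static int should_log(unsigned long wanted, unsigned long limit, int gt)
{
	if (limit == LIMIT_INFINITY)
		return 0;			/* unlimited: never log */
	return gt ? wanted > limit : wanted >= limit;
}

int main(void)
{
	/* e.g. an RLIMIT_NOFILE of 1024: asking for fd 1024 (>=) is over */
	printf("%d\n", should_log(1024, 1024, 0));	/* 1: logged        */
	printf("%d\n", should_log(1023, 1024, 0));	/* 0: within limit  */
	printf("%d\n", should_log(1024, 1024, 1));	/* 0: strict test   */
	return 0;
}
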
49848 diff -urNp linux-2.6.32.43/grsecurity/gracl_segv.c linux-2.6.32.43/grsecurity/gracl_segv.c
49849 --- linux-2.6.32.43/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49850 +++ linux-2.6.32.43/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49851 @@ -0,0 +1,284 @@
49852 +#include <linux/kernel.h>
49853 +#include <linux/mm.h>
49854 +#include <asm/uaccess.h>
49855 +#include <asm/errno.h>
49856 +#include <asm/mman.h>
49857 +#include <net/sock.h>
49858 +#include <linux/file.h>
49859 +#include <linux/fs.h>
49860 +#include <linux/net.h>
49861 +#include <linux/in.h>
49862 +#include <linux/smp_lock.h>
49863 +#include <linux/slab.h>
49864 +#include <linux/types.h>
49865 +#include <linux/sched.h>
49866 +#include <linux/timer.h>
49867 +#include <linux/gracl.h>
49868 +#include <linux/grsecurity.h>
49869 +#include <linux/grinternal.h>
49870 +
49871 +static struct crash_uid *uid_set;
49872 +static unsigned short uid_used;
49873 +static DEFINE_SPINLOCK(gr_uid_lock);
49874 +extern rwlock_t gr_inode_lock;
49875 +extern struct acl_subject_label *
49876 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49877 + struct acl_role_label *role);
49878 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49879 +
49880 +int
49881 +gr_init_uidset(void)
49882 +{
49883 + uid_set =
49884 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49885 + uid_used = 0;
49886 +
49887 + return uid_set ? 1 : 0;
49888 +}
49889 +
49890 +void
49891 +gr_free_uidset(void)
49892 +{
49893 + if (uid_set)
49894 + kfree(uid_set);
49895 +
49896 + return;
49897 +}
49898 +
49899 +int
49900 +gr_find_uid(const uid_t uid)
49901 +{
49902 + struct crash_uid *tmp = uid_set;
49903 + uid_t buid;
49904 + int low = 0, high = uid_used - 1, mid;
49905 +
49906 + while (high >= low) {
49907 + mid = (low + high) >> 1;
49908 + buid = tmp[mid].uid;
49909 + if (buid == uid)
49910 + return mid;
49911 + if (buid > uid)
49912 + high = mid - 1;
49913 + if (buid < uid)
49914 + low = mid + 1;
49915 + }
49916 +
49917 + return -1;
49918 +}
49919 +
49920 +static __inline__ void
49921 +gr_insertsort(void)
49922 +{
49923 + unsigned short i, j;
49924 + struct crash_uid index;
49925 +
49926 + for (i = 1; i < uid_used; i++) {
49927 + index = uid_set[i];
49928 + j = i;
49929 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49930 + uid_set[j] = uid_set[j - 1];
49931 + j--;
49932 + }
49933 + uid_set[j] = index;
49934 + }
49935 +
49936 + return;
49937 +}
49938 +
49939 +static __inline__ void
49940 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49941 +{
49942 + int loc;
49943 +
49944 + if (uid_used == GR_UIDTABLE_MAX)
49945 + return;
49946 +
49947 + loc = gr_find_uid(uid);
49948 +
49949 + if (loc >= 0) {
49950 + uid_set[loc].expires = expires;
49951 + return;
49952 + }
49953 +
49954 + uid_set[uid_used].uid = uid;
49955 + uid_set[uid_used].expires = expires;
49956 + uid_used++;
49957 +
49958 + gr_insertsort();
49959 +
49960 + return;
49961 +}
49962 +
49963 +void
49964 +gr_remove_uid(const unsigned short loc)
49965 +{
49966 + unsigned short i;
49967 +
49968 + for (i = loc + 1; i < uid_used; i++)
49969 + uid_set[i - 1] = uid_set[i];
49970 +
49971 + uid_used--;
49972 +
49973 + return;
49974 +}
49975 +
49976 +int
49977 +gr_check_crash_uid(const uid_t uid)
49978 +{
49979 + int loc;
49980 + int ret = 0;
49981 +
49982 + if (unlikely(!gr_acl_is_enabled()))
49983 + return 0;
49984 +
49985 + spin_lock(&gr_uid_lock);
49986 + loc = gr_find_uid(uid);
49987 +
49988 + if (loc < 0)
49989 + goto out_unlock;
49990 +
49991 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49992 + gr_remove_uid(loc);
49993 + else
49994 + ret = 1;
49995 +
49996 +out_unlock:
49997 + spin_unlock(&gr_uid_lock);
49998 + return ret;
49999 +}
50000 +
50001 +static __inline__ int
50002 +proc_is_setxid(const struct cred *cred)
50003 +{
50004 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
50005 + cred->uid != cred->fsuid)
50006 + return 1;
50007 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
50008 + cred->gid != cred->fsgid)
50009 + return 1;
50010 +
50011 + return 0;
50012 +}
50013 +
50014 +void
50015 +gr_handle_crash(struct task_struct *task, const int sig)
50016 +{
50017 + struct acl_subject_label *curr;
50018 + struct acl_subject_label *curr2;
50019 + struct task_struct *tsk, *tsk2;
50020 + const struct cred *cred;
50021 + const struct cred *cred2;
50022 +
50023 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
50024 + return;
50025 +
50026 + if (unlikely(!gr_acl_is_enabled()))
50027 + return;
50028 +
50029 + curr = task->acl;
50030 +
50031 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
50032 + return;
50033 +
50034 + if (time_before_eq(curr->expires, get_seconds())) {
50035 + curr->expires = 0;
50036 + curr->crashes = 0;
50037 + }
50038 +
50039 + curr->crashes++;
50040 +
50041 + if (!curr->expires)
50042 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
50043 +
50044 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50045 + time_after(curr->expires, get_seconds())) {
50046 + rcu_read_lock();
50047 + cred = __task_cred(task);
50048 + if (cred->uid && proc_is_setxid(cred)) {
50049 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50050 + spin_lock(&gr_uid_lock);
50051 + gr_insert_uid(cred->uid, curr->expires);
50052 + spin_unlock(&gr_uid_lock);
50053 + curr->expires = 0;
50054 + curr->crashes = 0;
50055 + read_lock(&tasklist_lock);
50056 + do_each_thread(tsk2, tsk) {
50057 + cred2 = __task_cred(tsk);
50058 + if (tsk != task && cred2->uid == cred->uid)
50059 + gr_fake_force_sig(SIGKILL, tsk);
50060 + } while_each_thread(tsk2, tsk);
50061 + read_unlock(&tasklist_lock);
50062 + } else {
50063 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50064 + read_lock(&tasklist_lock);
50065 + do_each_thread(tsk2, tsk) {
50066 + if (likely(tsk != task)) {
50067 + curr2 = tsk->acl;
50068 +
50069 + if (curr2->device == curr->device &&
50070 + curr2->inode == curr->inode)
50071 + gr_fake_force_sig(SIGKILL, tsk);
50072 + }
50073 + } while_each_thread(tsk2, tsk);
50074 + read_unlock(&tasklist_lock);
50075 + }
50076 + rcu_read_unlock();
50077 + }
50078 +
50079 + return;
50080 +}
50081 +
50082 +int
50083 +gr_check_crash_exec(const struct file *filp)
50084 +{
50085 + struct acl_subject_label *curr;
50086 +
50087 + if (unlikely(!gr_acl_is_enabled()))
50088 + return 0;
50089 +
50090 + read_lock(&gr_inode_lock);
50091 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
50092 + filp->f_path.dentry->d_inode->i_sb->s_dev,
50093 + current->role);
50094 + read_unlock(&gr_inode_lock);
50095 +
50096 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
50097 + (!curr->crashes && !curr->expires))
50098 + return 0;
50099 +
50100 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50101 + time_after(curr->expires, get_seconds()))
50102 + return 1;
50103 + else if (time_before_eq(curr->expires, get_seconds())) {
50104 + curr->crashes = 0;
50105 + curr->expires = 0;
50106 + }
50107 +
50108 + return 0;
50109 +}
50110 +
50111 +void
50112 +gr_handle_alertkill(struct task_struct *task)
50113 +{
50114 + struct acl_subject_label *curracl;
50115 + __u32 curr_ip;
50116 + struct task_struct *p, *p2;
50117 +
50118 + if (unlikely(!gr_acl_is_enabled()))
50119 + return;
50120 +
50121 + curracl = task->acl;
50122 + curr_ip = task->signal->curr_ip;
50123 +
50124 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
50125 + read_lock(&tasklist_lock);
50126 + do_each_thread(p2, p) {
50127 + if (p->signal->curr_ip == curr_ip)
50128 + gr_fake_force_sig(SIGKILL, p);
50129 + } while_each_thread(p2, p);
50130 + read_unlock(&tasklist_lock);
50131 + } else if (curracl->mode & GR_KILLPROC)
50132 + gr_fake_force_sig(SIGKILL, task);
50133 +
50134 + return;
50135 +}
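
Not part of the patch: gr_find_uid() and gr_insert_uid() above keep the crash-uid table sorted so lookups can binary-search it; a new entry is appended and the table is restored to sorted order with an insertion sort, which is cheap because at most one element is out of place. A standalone sketch of the same pair of routines over a plain array (table size and struct layout are illustrative):

/* Sketch of the sorted crash-uid table: binary search for lookups,
 * append plus insertion sort for inserts, mirroring gr_find_uid() and
 * gr_insert_uid().  Illustrative only. */
#include <stdio.h>

#define TABLE_MAX 8

struct entry { unsigned int uid; unsigned long expires; };

static struct entry table[TABLE_MAX];
static int used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (table[mid].uid == uid)
			return mid;
		if (table[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc, j;
	struct entry item;

	if (used == TABLE_MAX)
		return;
	loc = find_uid(uid);
	if (loc >= 0) {			/* already present: refresh expiry */
		table[loc].expires = expires;
		return;
	}
	table[used].uid = uid;		/* append, then insertion-sort     */
	table[used].expires = expires;
	used++;
	for (j = used - 1; j > 0 && table[j - 1].uid > table[j].uid; j--) {
		item = table[j];
		table[j] = table[j - 1];
		table[j - 1] = item;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(500, 60);
	insert_uid(1500, 60);
	printf("index of uid 500: %d\n", find_uid(500));	/* 0 */
	printf("index of uid 1500: %d\n", find_uid(1500));	/* 2 */
	return 0;
}
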
50136 diff -urNp linux-2.6.32.43/grsecurity/gracl_shm.c linux-2.6.32.43/grsecurity/gracl_shm.c
50137 --- linux-2.6.32.43/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
50138 +++ linux-2.6.32.43/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
50139 @@ -0,0 +1,40 @@
50140 +#include <linux/kernel.h>
50141 +#include <linux/mm.h>
50142 +#include <linux/sched.h>
50143 +#include <linux/file.h>
50144 +#include <linux/ipc.h>
50145 +#include <linux/gracl.h>
50146 +#include <linux/grsecurity.h>
50147 +#include <linux/grinternal.h>
50148 +
50149 +int
50150 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50151 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50152 +{
50153 + struct task_struct *task;
50154 +
50155 + if (!gr_acl_is_enabled())
50156 + return 1;
50157 +
50158 + rcu_read_lock();
50159 + read_lock(&tasklist_lock);
50160 +
50161 + task = find_task_by_vpid(shm_cprid);
50162 +
50163 + if (unlikely(!task))
50164 + task = find_task_by_vpid(shm_lapid);
50165 +
50166 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
50167 + (task->pid == shm_lapid)) &&
50168 + (task->acl->mode & GR_PROTSHM) &&
50169 + (task->acl != current->acl))) {
50170 + read_unlock(&tasklist_lock);
50171 + rcu_read_unlock();
50172 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
50173 + return 0;
50174 + }
50175 + read_unlock(&tasklist_lock);
50176 + rcu_read_unlock();
50177 +
50178 + return 1;
50179 +}
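
Not part of the patch: gr_handle_shmat() refuses the attach when the segment's creating task (or last attaching task) is still present, carries GR_PROTSHM, runs under a different subject label, and predates the segment. A condensed sketch of just that predicate, with plain flags standing in for the task lookup (all names are illustrative):

/* Sketch of the gr_handle_shmat() decision, with the task lookup
 * replaced by plain parameters.  Returns 1 to allow, 0 to deny,
 * matching the patch's convention.  Illustrative only. */
#include <stdio.h>

static int shmat_allowed(int owner_found,
			 int owner_started_before_segment, /* start_time <= shm_createtime */
			 int owner_is_last_attach_pid,
			 int owner_has_protshm,
			 int same_subject_label)
{
	if (!owner_found)
		return 1;
	if ((owner_started_before_segment || owner_is_last_attach_pid) &&
	    owner_has_protshm && !same_subject_label)
		return 0;	/* denied and logged via GR_SHMAT_ACL_MSG */
	return 1;
}

int main(void)
{
	/* owner still alive, protected, different label: deny */
	printf("%d\n", shmat_allowed(1, 1, 0, 1, 0));	/* 0 */
	/* same subject label: allow */
	printf("%d\n", shmat_allowed(1, 1, 0, 1, 1));	/* 1 */
	return 0;
}
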
50180 diff -urNp linux-2.6.32.43/grsecurity/grsec_chdir.c linux-2.6.32.43/grsecurity/grsec_chdir.c
50181 --- linux-2.6.32.43/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
50182 +++ linux-2.6.32.43/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
50183 @@ -0,0 +1,19 @@
50184 +#include <linux/kernel.h>
50185 +#include <linux/sched.h>
50186 +#include <linux/fs.h>
50187 +#include <linux/file.h>
50188 +#include <linux/grsecurity.h>
50189 +#include <linux/grinternal.h>
50190 +
50191 +void
50192 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
50193 +{
50194 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50195 + if ((grsec_enable_chdir && grsec_enable_group &&
50196 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
50197 + !grsec_enable_group)) {
50198 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
50199 + }
50200 +#endif
50201 + return;
50202 +}
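
Not part of the patch: gr_log_chdir(), like the exec-log paths later in the patch, spells the audit gate as "(enabled && group auditing && caller in audit gid) || (enabled && !group auditing)". That reduces to "enabled, and either group auditing is off or the caller is in the gid", which the sketch below makes explicit (the helper name is illustrative):

/* Sketch of the recurring audit gate: log when the feature is on and
 * either group-restricted auditing is off or the caller is in the
 * audit group.  Equivalent to the patch's two-clause condition. */
#include <stdio.h>

static int should_audit(int enabled, int group_restricted, int in_audit_group)
{
	return enabled && (!group_restricted || in_audit_group);
}

int main(void)
{
	printf("%d\n", should_audit(1, 1, 0));	/* 0: restricted, not in group */
	printf("%d\n", should_audit(1, 1, 1));	/* 1 */
	printf("%d\n", should_audit(1, 0, 0));	/* 1: unrestricted             */
	printf("%d\n", should_audit(0, 0, 1));	/* 0: feature disabled         */
	return 0;
}
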
50203 diff -urNp linux-2.6.32.43/grsecurity/grsec_chroot.c linux-2.6.32.43/grsecurity/grsec_chroot.c
50204 --- linux-2.6.32.43/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
50205 +++ linux-2.6.32.43/grsecurity/grsec_chroot.c 2011-07-14 19:24:30.000000000 -0400
50206 @@ -0,0 +1,378 @@
50207 +#include <linux/kernel.h>
50208 +#include <linux/module.h>
50209 +#include <linux/sched.h>
50210 +#include <linux/file.h>
50211 +#include <linux/fs.h>
50212 +#include <linux/mount.h>
50213 +#include <linux/types.h>
50214 +#include <linux/pid_namespace.h>
50215 +#include <linux/grsecurity.h>
50216 +#include <linux/grinternal.h>
50217 +
50218 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
50219 +{
50220 +#ifdef CONFIG_GRKERNSEC
50221 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
50222 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
50223 + task->gr_is_chrooted = 1;
50224 + else
50225 + task->gr_is_chrooted = 0;
50226 +
50227 + task->gr_chroot_dentry = path->dentry;
50228 +#endif
50229 + return;
50230 +}
50231 +
50232 +void gr_clear_chroot_entries(struct task_struct *task)
50233 +{
50234 +#ifdef CONFIG_GRKERNSEC
50235 + task->gr_is_chrooted = 0;
50236 + task->gr_chroot_dentry = NULL;
50237 +#endif
50238 + return;
50239 +}
50240 +
50241 +int
50242 +gr_handle_chroot_unix(const pid_t pid)
50243 +{
50244 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50245 + struct task_struct *p;
50246 +
50247 + if (unlikely(!grsec_enable_chroot_unix))
50248 + return 1;
50249 +
50250 + if (likely(!proc_is_chrooted(current)))
50251 + return 1;
50252 +
50253 + rcu_read_lock();
50254 + read_lock(&tasklist_lock);
50255 +
50256 + p = find_task_by_vpid_unrestricted(pid);
50257 + if (unlikely(p && !have_same_root(current, p))) {
50258 + read_unlock(&tasklist_lock);
50259 + rcu_read_unlock();
50260 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
50261 + return 0;
50262 + }
50263 + read_unlock(&tasklist_lock);
50264 + rcu_read_unlock();
50265 +#endif
50266 + return 1;
50267 +}
50268 +
50269 +int
50270 +gr_handle_chroot_nice(void)
50271 +{
50272 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50273 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
50274 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
50275 + return -EPERM;
50276 + }
50277 +#endif
50278 + return 0;
50279 +}
50280 +
50281 +int
50282 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
50283 +{
50284 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50285 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
50286 + && proc_is_chrooted(current)) {
50287 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
50288 + return -EACCES;
50289 + }
50290 +#endif
50291 + return 0;
50292 +}
50293 +
50294 +int
50295 +gr_handle_chroot_rawio(const struct inode *inode)
50296 +{
50297 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50298 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50299 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
50300 + return 1;
50301 +#endif
50302 + return 0;
50303 +}
50304 +
50305 +int
50306 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
50307 +{
50308 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50309 + struct task_struct *p;
50310 + int ret = 0;
50311 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
50312 + return ret;
50313 +
50314 + read_lock(&tasklist_lock);
50315 + do_each_pid_task(pid, type, p) {
50316 + if (!have_same_root(current, p)) {
50317 + ret = 1;
50318 + goto out;
50319 + }
50320 + } while_each_pid_task(pid, type, p);
50321 +out:
50322 + read_unlock(&tasklist_lock);
50323 + return ret;
50324 +#endif
50325 + return 0;
50326 +}
50327 +
50328 +int
50329 +gr_pid_is_chrooted(struct task_struct *p)
50330 +{
50331 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50332 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
50333 + return 0;
50334 +
50335 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
50336 + !have_same_root(current, p)) {
50337 + return 1;
50338 + }
50339 +#endif
50340 + return 0;
50341 +}
50342 +
50343 +EXPORT_SYMBOL(gr_pid_is_chrooted);
50344 +
50345 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
50346 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
50347 +{
50348 + struct dentry *dentry = (struct dentry *)u_dentry;
50349 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
50350 + struct dentry *realroot;
50351 + struct vfsmount *realrootmnt;
50352 + struct dentry *currentroot;
50353 + struct vfsmount *currentmnt;
50354 + struct task_struct *reaper = &init_task;
50355 + int ret = 1;
50356 +
50357 + read_lock(&reaper->fs->lock);
50358 + realrootmnt = mntget(reaper->fs->root.mnt);
50359 + realroot = dget(reaper->fs->root.dentry);
50360 + read_unlock(&reaper->fs->lock);
50361 +
50362 + read_lock(&current->fs->lock);
50363 + currentmnt = mntget(current->fs->root.mnt);
50364 + currentroot = dget(current->fs->root.dentry);
50365 + read_unlock(&current->fs->lock);
50366 +
50367 + spin_lock(&dcache_lock);
50368 + for (;;) {
50369 + if (unlikely((dentry == realroot && mnt == realrootmnt)
50370 + || (dentry == currentroot && mnt == currentmnt)))
50371 + break;
50372 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
50373 + if (mnt->mnt_parent == mnt)
50374 + break;
50375 + dentry = mnt->mnt_mountpoint;
50376 + mnt = mnt->mnt_parent;
50377 + continue;
50378 + }
50379 + dentry = dentry->d_parent;
50380 + }
50381 + spin_unlock(&dcache_lock);
50382 +
50383 + dput(currentroot);
50384 + mntput(currentmnt);
50385 +
50386 + /* access is outside of chroot */
50387 + if (dentry == realroot && mnt == realrootmnt)
50388 + ret = 0;
50389 +
50390 + dput(realroot);
50391 + mntput(realrootmnt);
50392 + return ret;
50393 +}
50394 +#endif
50395 +
50396 +int
50397 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
50398 +{
50399 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50400 + if (!grsec_enable_chroot_fchdir)
50401 + return 1;
50402 +
50403 + if (!proc_is_chrooted(current))
50404 + return 1;
50405 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50406 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50407 + return 0;
50408 + }
50409 +#endif
50410 + return 1;
50411 +}
50412 +
50413 +int
50414 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50415 + const time_t shm_createtime)
50416 +{
50417 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50418 + struct task_struct *p;
50419 + time_t starttime;
50420 +
50421 + if (unlikely(!grsec_enable_chroot_shmat))
50422 + return 1;
50423 +
50424 + if (likely(!proc_is_chrooted(current)))
50425 + return 1;
50426 +
50427 + rcu_read_lock();
50428 + read_lock(&tasklist_lock);
50429 +
50430 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
50431 + starttime = p->start_time.tv_sec;
50432 + if (unlikely(!have_same_root(current, p) &&
50433 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
50434 + read_unlock(&tasklist_lock);
50435 + rcu_read_unlock();
50436 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50437 + return 0;
50438 + }
50439 + } else if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50440 + if (unlikely(!have_same_root(current, p))) {
50441 + read_unlock(&tasklist_lock);
50442 + rcu_read_unlock();
50443 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50444 + return 0;
50445 + }
50446 + }
50447 +
50448 + read_unlock(&tasklist_lock);
50449 + rcu_read_unlock();
50450 +#endif
50451 + return 1;
50452 +}
50453 +
50454 +void
50455 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50456 +{
50457 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50458 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50459 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50460 +#endif
50461 + return;
50462 +}
50463 +
50464 +int
50465 +gr_handle_chroot_mknod(const struct dentry *dentry,
50466 + const struct vfsmount *mnt, const int mode)
50467 +{
50468 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50469 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50470 + proc_is_chrooted(current)) {
50471 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50472 + return -EPERM;
50473 + }
50474 +#endif
50475 + return 0;
50476 +}
50477 +
50478 +int
50479 +gr_handle_chroot_mount(const struct dentry *dentry,
50480 + const struct vfsmount *mnt, const char *dev_name)
50481 +{
50482 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50483 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50484 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
50485 + return -EPERM;
50486 + }
50487 +#endif
50488 + return 0;
50489 +}
50490 +
50491 +int
50492 +gr_handle_chroot_pivot(void)
50493 +{
50494 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50495 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50496 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50497 + return -EPERM;
50498 + }
50499 +#endif
50500 + return 0;
50501 +}
50502 +
50503 +int
50504 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50505 +{
50506 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50507 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50508 + !gr_is_outside_chroot(dentry, mnt)) {
50509 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50510 + return -EPERM;
50511 + }
50512 +#endif
50513 + return 0;
50514 +}
50515 +
50516 +int
50517 +gr_handle_chroot_caps(struct path *path)
50518 +{
50519 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50520 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
50521 + (init_task.fs->root.dentry != path->dentry) &&
50522 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
50523 +
50524 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50525 + const struct cred *old = current_cred();
50526 + struct cred *new = prepare_creds();
50527 + if (new == NULL)
50528 + return 1;
50529 +
50530 + new->cap_permitted = cap_drop(old->cap_permitted,
50531 + chroot_caps);
50532 + new->cap_inheritable = cap_drop(old->cap_inheritable,
50533 + chroot_caps);
50534 + new->cap_effective = cap_drop(old->cap_effective,
50535 + chroot_caps);
50536 +
50537 + commit_creds(new);
50538 +
50539 + return 0;
50540 + }
50541 +#endif
50542 + return 0;
50543 +}
50544 +
50545 +int
50546 +gr_handle_chroot_sysctl(const int op)
50547 +{
50548 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50549 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
50550 + && (op & MAY_WRITE))
50551 + return -EACCES;
50552 +#endif
50553 + return 0;
50554 +}
50555 +
50556 +void
50557 +gr_handle_chroot_chdir(struct path *path)
50558 +{
50559 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50560 + if (grsec_enable_chroot_chdir)
50561 + set_fs_pwd(current->fs, path);
50562 +#endif
50563 + return;
50564 +}
50565 +
50566 +int
50567 +gr_handle_chroot_chmod(const struct dentry *dentry,
50568 + const struct vfsmount *mnt, const int mode)
50569 +{
50570 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50571 + /* allow chmod +s on directories, but not on files */
50572 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50573 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50574 + proc_is_chrooted(current)) {
50575 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50576 + return -EPERM;
50577 + }
50578 +#endif
50579 + return 0;
50580 +}
50581 +
50582 +#ifdef CONFIG_SECURITY
50583 +EXPORT_SYMBOL(gr_handle_chroot_caps);
50584 +#endif
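
Not part of the patch: gr_is_outside_chroot() decides containment by walking the dentry and vfsmount chain upward, crossing mountpoints, until it meets either the process's root or init's real root; whichever root is reached first determines whether the path lies inside the chroot. The same idea on a plain parent-pointer tree, with mount crossing omitted (all names are illustrative):

/* Sketch of the upward-walk containment test used by
 * gr_is_outside_chroot(): follow parent pointers from a node; if the
 * chroot root is met before the global root, the node is inside the
 * chroot.  Mount crossing is omitted for brevity.  Illustrative only. */
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;	/* global root points to itself */
};

static int inside_subtree(struct node *n, struct node *subtree_root,
			  struct node *global_root)
{
	for (;;) {
		if (n == subtree_root)
			return 1;	/* hit the chroot root first: inside */
		if (n == global_root || n->parent == n)
			return 0;	/* hit the real root first: outside  */
		n = n->parent;
	}
}

int main(void)
{
	struct node root = { "/", &root };
	struct node jail = { "/srv/jail", &root };
	struct node inside = { "/srv/jail/etc", &jail };
	struct node outside = { "/home", &root };

	printf("%d\n", inside_subtree(&inside, &jail, &root));		/* 1 */
	printf("%d\n", inside_subtree(&outside, &jail, &root));	/* 0 */
	return 0;
}
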
50585 diff -urNp linux-2.6.32.43/grsecurity/grsec_disabled.c linux-2.6.32.43/grsecurity/grsec_disabled.c
50586 --- linux-2.6.32.43/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50587 +++ linux-2.6.32.43/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
50588 @@ -0,0 +1,447 @@
50589 +#include <linux/kernel.h>
50590 +#include <linux/module.h>
50591 +#include <linux/sched.h>
50592 +#include <linux/file.h>
50593 +#include <linux/fs.h>
50594 +#include <linux/kdev_t.h>
50595 +#include <linux/net.h>
50596 +#include <linux/in.h>
50597 +#include <linux/ip.h>
50598 +#include <linux/skbuff.h>
50599 +#include <linux/sysctl.h>
50600 +
50601 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50602 +void
50603 +pax_set_initial_flags(struct linux_binprm *bprm)
50604 +{
50605 + return;
50606 +}
50607 +#endif
50608 +
50609 +#ifdef CONFIG_SYSCTL
50610 +__u32
50611 +gr_handle_sysctl(const struct ctl_table * table, const int op)
50612 +{
50613 + return 0;
50614 +}
50615 +#endif
50616 +
50617 +#ifdef CONFIG_TASKSTATS
50618 +int gr_is_taskstats_denied(int pid)
50619 +{
50620 + return 0;
50621 +}
50622 +#endif
50623 +
50624 +int
50625 +gr_acl_is_enabled(void)
50626 +{
50627 + return 0;
50628 +}
50629 +
50630 +int
50631 +gr_handle_rawio(const struct inode *inode)
50632 +{
50633 + return 0;
50634 +}
50635 +
50636 +void
50637 +gr_acl_handle_psacct(struct task_struct *task, const long code)
50638 +{
50639 + return;
50640 +}
50641 +
50642 +int
50643 +gr_handle_ptrace(struct task_struct *task, const long request)
50644 +{
50645 + return 0;
50646 +}
50647 +
50648 +int
50649 +gr_handle_proc_ptrace(struct task_struct *task)
50650 +{
50651 + return 0;
50652 +}
50653 +
50654 +void
50655 +gr_learn_resource(const struct task_struct *task,
50656 + const int res, const unsigned long wanted, const int gt)
50657 +{
50658 + return;
50659 +}
50660 +
50661 +int
50662 +gr_set_acls(const int type)
50663 +{
50664 + return 0;
50665 +}
50666 +
50667 +int
50668 +gr_check_hidden_task(const struct task_struct *tsk)
50669 +{
50670 + return 0;
50671 +}
50672 +
50673 +int
50674 +gr_check_protected_task(const struct task_struct *task)
50675 +{
50676 + return 0;
50677 +}
50678 +
50679 +int
50680 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50681 +{
50682 + return 0;
50683 +}
50684 +
50685 +void
50686 +gr_copy_label(struct task_struct *tsk)
50687 +{
50688 + return;
50689 +}
50690 +
50691 +void
50692 +gr_set_pax_flags(struct task_struct *task)
50693 +{
50694 + return;
50695 +}
50696 +
50697 +int
50698 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50699 + const int unsafe_share)
50700 +{
50701 + return 0;
50702 +}
50703 +
50704 +void
50705 +gr_handle_delete(const ino_t ino, const dev_t dev)
50706 +{
50707 + return;
50708 +}
50709 +
50710 +void
50711 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50712 +{
50713 + return;
50714 +}
50715 +
50716 +void
50717 +gr_handle_crash(struct task_struct *task, const int sig)
50718 +{
50719 + return;
50720 +}
50721 +
50722 +int
50723 +gr_check_crash_exec(const struct file *filp)
50724 +{
50725 + return 0;
50726 +}
50727 +
50728 +int
50729 +gr_check_crash_uid(const uid_t uid)
50730 +{
50731 + return 0;
50732 +}
50733 +
50734 +void
50735 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50736 + struct dentry *old_dentry,
50737 + struct dentry *new_dentry,
50738 + struct vfsmount *mnt, const __u8 replace)
50739 +{
50740 + return;
50741 +}
50742 +
50743 +int
50744 +gr_search_socket(const int family, const int type, const int protocol)
50745 +{
50746 + return 1;
50747 +}
50748 +
50749 +int
50750 +gr_search_connectbind(const int mode, const struct socket *sock,
50751 + const struct sockaddr_in *addr)
50752 +{
50753 + return 0;
50754 +}
50755 +
50756 +int
50757 +gr_is_capable(const int cap)
50758 +{
50759 + return 1;
50760 +}
50761 +
50762 +int
50763 +gr_is_capable_nolog(const int cap)
50764 +{
50765 + return 1;
50766 +}
50767 +
50768 +void
50769 +gr_handle_alertkill(struct task_struct *task)
50770 +{
50771 + return;
50772 +}
50773 +
50774 +__u32
50775 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50776 +{
50777 + return 1;
50778 +}
50779 +
50780 +__u32
50781 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50782 + const struct vfsmount * mnt)
50783 +{
50784 + return 1;
50785 +}
50786 +
50787 +__u32
50788 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50789 + const int fmode)
50790 +{
50791 + return 1;
50792 +}
50793 +
50794 +__u32
50795 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50796 +{
50797 + return 1;
50798 +}
50799 +
50800 +__u32
50801 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50802 +{
50803 + return 1;
50804 +}
50805 +
50806 +int
50807 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50808 + unsigned int *vm_flags)
50809 +{
50810 + return 1;
50811 +}
50812 +
50813 +__u32
50814 +gr_acl_handle_truncate(const struct dentry * dentry,
50815 + const struct vfsmount * mnt)
50816 +{
50817 + return 1;
50818 +}
50819 +
50820 +__u32
50821 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50822 +{
50823 + return 1;
50824 +}
50825 +
50826 +__u32
50827 +gr_acl_handle_access(const struct dentry * dentry,
50828 + const struct vfsmount * mnt, const int fmode)
50829 +{
50830 + return 1;
50831 +}
50832 +
50833 +__u32
50834 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50835 + mode_t mode)
50836 +{
50837 + return 1;
50838 +}
50839 +
50840 +__u32
50841 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50842 + mode_t mode)
50843 +{
50844 + return 1;
50845 +}
50846 +
50847 +__u32
50848 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50849 +{
50850 + return 1;
50851 +}
50852 +
50853 +__u32
50854 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50855 +{
50856 + return 1;
50857 +}
50858 +
50859 +void
50860 +grsecurity_init(void)
50861 +{
50862 + return;
50863 +}
50864 +
50865 +__u32
50866 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50867 + const struct dentry * parent_dentry,
50868 + const struct vfsmount * parent_mnt,
50869 + const int mode)
50870 +{
50871 + return 1;
50872 +}
50873 +
50874 +__u32
50875 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50876 + const struct dentry * parent_dentry,
50877 + const struct vfsmount * parent_mnt)
50878 +{
50879 + return 1;
50880 +}
50881 +
50882 +__u32
50883 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50884 + const struct dentry * parent_dentry,
50885 + const struct vfsmount * parent_mnt, const char *from)
50886 +{
50887 + return 1;
50888 +}
50889 +
50890 +__u32
50891 +gr_acl_handle_link(const struct dentry * new_dentry,
50892 + const struct dentry * parent_dentry,
50893 + const struct vfsmount * parent_mnt,
50894 + const struct dentry * old_dentry,
50895 + const struct vfsmount * old_mnt, const char *to)
50896 +{
50897 + return 1;
50898 +}
50899 +
50900 +int
50901 +gr_acl_handle_rename(const struct dentry *new_dentry,
50902 + const struct dentry *parent_dentry,
50903 + const struct vfsmount *parent_mnt,
50904 + const struct dentry *old_dentry,
50905 + const struct inode *old_parent_inode,
50906 + const struct vfsmount *old_mnt, const char *newname)
50907 +{
50908 + return 0;
50909 +}
50910 +
50911 +int
50912 +gr_acl_handle_filldir(const struct file *file, const char *name,
50913 + const int namelen, const ino_t ino)
50914 +{
50915 + return 1;
50916 +}
50917 +
50918 +int
50919 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50920 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50921 +{
50922 + return 1;
50923 +}
50924 +
50925 +int
50926 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50927 +{
50928 + return 0;
50929 +}
50930 +
50931 +int
50932 +gr_search_accept(const struct socket *sock)
50933 +{
50934 + return 0;
50935 +}
50936 +
50937 +int
50938 +gr_search_listen(const struct socket *sock)
50939 +{
50940 + return 0;
50941 +}
50942 +
50943 +int
50944 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50945 +{
50946 + return 0;
50947 +}
50948 +
50949 +__u32
50950 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50951 +{
50952 + return 1;
50953 +}
50954 +
50955 +__u32
50956 +gr_acl_handle_creat(const struct dentry * dentry,
50957 + const struct dentry * p_dentry,
50958 + const struct vfsmount * p_mnt, const int fmode,
50959 + const int imode)
50960 +{
50961 + return 1;
50962 +}
50963 +
50964 +void
50965 +gr_acl_handle_exit(void)
50966 +{
50967 + return;
50968 +}
50969 +
50970 +int
50971 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50972 +{
50973 + return 1;
50974 +}
50975 +
50976 +void
50977 +gr_set_role_label(const uid_t uid, const gid_t gid)
50978 +{
50979 + return;
50980 +}
50981 +
50982 +int
50983 +gr_acl_handle_procpidmem(const struct task_struct *task)
50984 +{
50985 + return 0;
50986 +}
50987 +
50988 +int
50989 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50990 +{
50991 + return 0;
50992 +}
50993 +
50994 +int
50995 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50996 +{
50997 + return 0;
50998 +}
50999 +
51000 +void
51001 +gr_set_kernel_label(struct task_struct *task)
51002 +{
51003 + return;
51004 +}
51005 +
51006 +int
51007 +gr_check_user_change(int real, int effective, int fs)
51008 +{
51009 + return 0;
51010 +}
51011 +
51012 +int
51013 +gr_check_group_change(int real, int effective, int fs)
51014 +{
51015 + return 0;
51016 +}
51017 +
51018 +int gr_acl_enable_at_secure(void)
51019 +{
51020 + return 0;
51021 +}
51022 +
51023 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51024 +{
51025 + return dentry->d_inode->i_sb->s_dev;
51026 +}
51027 +
51028 +EXPORT_SYMBOL(gr_is_capable);
51029 +EXPORT_SYMBOL(gr_is_capable_nolog);
51030 +EXPORT_SYMBOL(gr_learn_resource);
51031 +EXPORT_SYMBOL(gr_set_kernel_label);
51032 +#ifdef CONFIG_SECURITY
51033 +EXPORT_SYMBOL(gr_check_user_change);
51034 +EXPORT_SYMBOL(gr_check_group_change);
51035 +#endif
51036 diff -urNp linux-2.6.32.43/grsecurity/grsec_exec.c linux-2.6.32.43/grsecurity/grsec_exec.c
51037 --- linux-2.6.32.43/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
51038 +++ linux-2.6.32.43/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
51039 @@ -0,0 +1,148 @@
51040 +#include <linux/kernel.h>
51041 +#include <linux/sched.h>
51042 +#include <linux/file.h>
51043 +#include <linux/binfmts.h>
51044 +#include <linux/smp_lock.h>
51045 +#include <linux/fs.h>
51046 +#include <linux/types.h>
51047 +#include <linux/grdefs.h>
51048 +#include <linux/grinternal.h>
51049 +#include <linux/capability.h>
51050 +#include <linux/compat.h>
51051 +
51052 +#include <asm/uaccess.h>
51053 +
51054 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51055 +static char gr_exec_arg_buf[132];
51056 +static DEFINE_MUTEX(gr_exec_arg_mutex);
51057 +#endif
51058 +
51059 +int
51060 +gr_handle_nproc(void)
51061 +{
51062 +#ifdef CONFIG_GRKERNSEC_EXECVE
51063 + const struct cred *cred = current_cred();
51064 + if (grsec_enable_execve && cred->user &&
51065 + (atomic_read(&cred->user->processes) >
51066 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
51067 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
51068 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
51069 + return -EAGAIN;
51070 + }
51071 +#endif
51072 + return 0;
51073 +}
51074 +
51075 +void
51076 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
51077 +{
51078 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51079 + char *grarg = gr_exec_arg_buf;
51080 + unsigned int i, x, execlen = 0;
51081 + char c;
51082 +
51083 + if (!((grsec_enable_execlog && grsec_enable_group &&
51084 + in_group_p(grsec_audit_gid))
51085 + || (grsec_enable_execlog && !grsec_enable_group)))
51086 + return;
51087 +
51088 + mutex_lock(&gr_exec_arg_mutex);
51089 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
51090 +
51091 + if (unlikely(argv == NULL))
51092 + goto log;
51093 +
51094 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
51095 + const char __user *p;
51096 + unsigned int len;
51097 +
51098 + if (copy_from_user(&p, argv + i, sizeof(p)))
51099 + goto log;
51100 + if (!p)
51101 + goto log;
51102 + len = strnlen_user(p, 128 - execlen);
51103 + if (len > 128 - execlen)
51104 + len = 128 - execlen;
51105 + else if (len > 0)
51106 + len--;
51107 + if (copy_from_user(grarg + execlen, p, len))
51108 + goto log;
51109 +
51110 + /* rewrite unprintable characters */
51111 + for (x = 0; x < len; x++) {
51112 + c = *(grarg + execlen + x);
51113 + if (c < 32 || c > 126)
51114 + *(grarg + execlen + x) = ' ';
51115 + }
51116 +
51117 + execlen += len;
51118 + *(grarg + execlen) = ' ';
51119 + *(grarg + execlen + 1) = '\0';
51120 + execlen++;
51121 + }
51122 +
51123 + log:
51124 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51125 + bprm->file->f_path.mnt, grarg);
51126 + mutex_unlock(&gr_exec_arg_mutex);
51127 +#endif
51128 + return;
51129 +}
51130 +
51131 +#ifdef CONFIG_COMPAT
51132 +void
51133 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
51134 +{
51135 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51136 + char *grarg = gr_exec_arg_buf;
51137 + unsigned int i, x, execlen = 0;
51138 + char c;
51139 +
51140 + if (!((grsec_enable_execlog && grsec_enable_group &&
51141 + in_group_p(grsec_audit_gid))
51142 + || (grsec_enable_execlog && !grsec_enable_group)))
51143 + return;
51144 +
51145 + mutex_lock(&gr_exec_arg_mutex);
51146 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
51147 +
51148 + if (unlikely(argv == NULL))
51149 + goto log;
51150 +
51151 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
51152 + compat_uptr_t p;
51153 + unsigned int len;
51154 +
51155 + if (get_user(p, argv + i))
51156 + goto log;
51157 + len = strnlen_user(compat_ptr(p), 128 - execlen);
51158 + if (len > 128 - execlen)
51159 + len = 128 - execlen;
51160 + else if (len > 0)
51161 + len--;
51162 + else
51163 + goto log;
51164 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
51165 + goto log;
51166 +
51167 + /* rewrite unprintable characters */
51168 + for (x = 0; x < len; x++) {
51169 + c = *(grarg + execlen + x);
51170 + if (c < 32 || c > 126)
51171 + *(grarg + execlen + x) = ' ';
51172 + }
51173 +
51174 + execlen += len;
51175 + *(grarg + execlen) = ' ';
51176 + *(grarg + execlen + 1) = '\0';
51177 + execlen++;
51178 + }
51179 +
51180 + log:
51181 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51182 + bprm->file->f_path.mnt, grarg);
51183 + mutex_unlock(&gr_exec_arg_mutex);
51184 +#endif
51185 + return;
51186 +}
51187 +#endif
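
Not part of the patch: both exec-logging helpers above flatten argv into the fixed 132-byte gr_exec_arg_buf, cap the copied payload at 128 bytes, separate arguments with spaces, and rewrite unprintable bytes to spaces before logging. A userspace sketch of the same flattening, with the copy from user memory replaced by ordinary string handling (the function name is illustrative):

/* Sketch of the exec-log argument flattening: copy argv strings into a
 * small fixed buffer, space-separated, with unprintable characters
 * rewritten to spaces, as gr_handle_exec_args() does from user memory. */
#include <stdio.h>
#include <string.h>

#define ARG_BUF 132		/* same size as gr_exec_arg_buf  */
#define ARG_MAX_COPY 128	/* payload cap used by the patch */

static void flatten_args(char *out, int argc, char **argv)
{
	unsigned int execlen = 0, i, x;

	memset(out, 0, ARG_BUF);
	for (i = 0; i < (unsigned int)argc && execlen < ARG_MAX_COPY; i++) {
		unsigned int len = strlen(argv[i]);

		if (len > ARG_MAX_COPY - execlen)
			len = ARG_MAX_COPY - execlen;
		memcpy(out + execlen, argv[i], len);

		for (x = 0; x < len; x++)	/* rewrite unprintable chars */
			if (out[execlen + x] < 32 || out[execlen + x] > 126)
				out[execlen + x] = ' ';

		execlen += len;
		out[execlen] = ' ';		/* argument separator */
		out[execlen + 1] = '\0';
		execlen++;
	}
}

int main(void)
{
	char buf[ARG_BUF];
	char *argv[] = { "/bin/echo", "hello\tworld" };

	flatten_args(buf, 2, argv);
	printf("%s\n", buf);	/* "/bin/echo hello world " */
	return 0;
}
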
51188 diff -urNp linux-2.6.32.43/grsecurity/grsec_fifo.c linux-2.6.32.43/grsecurity/grsec_fifo.c
51189 --- linux-2.6.32.43/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
51190 +++ linux-2.6.32.43/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
51191 @@ -0,0 +1,24 @@
51192 +#include <linux/kernel.h>
51193 +#include <linux/sched.h>
51194 +#include <linux/fs.h>
51195 +#include <linux/file.h>
51196 +#include <linux/grinternal.h>
51197 +
51198 +int
51199 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
51200 + const struct dentry *dir, const int flag, const int acc_mode)
51201 +{
51202 +#ifdef CONFIG_GRKERNSEC_FIFO
51203 + const struct cred *cred = current_cred();
51204 +
51205 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
51206 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
51207 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
51208 + (cred->fsuid != dentry->d_inode->i_uid)) {
51209 + if (!inode_permission(dentry->d_inode, acc_mode))
51210 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
51211 + return -EACCES;
51212 + }
51213 +#endif
51214 + return 0;
51215 +}
51216 diff -urNp linux-2.6.32.43/grsecurity/grsec_fork.c linux-2.6.32.43/grsecurity/grsec_fork.c
51217 --- linux-2.6.32.43/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
51218 +++ linux-2.6.32.43/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
51219 @@ -0,0 +1,23 @@
51220 +#include <linux/kernel.h>
51221 +#include <linux/sched.h>
51222 +#include <linux/grsecurity.h>
51223 +#include <linux/grinternal.h>
51224 +#include <linux/errno.h>
51225 +
51226 +void
51227 +gr_log_forkfail(const int retval)
51228 +{
51229 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51230 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
51231 + switch (retval) {
51232 + case -EAGAIN:
51233 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
51234 + break;
51235 + case -ENOMEM:
51236 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
51237 + break;
51238 + }
51239 + }
51240 +#endif
51241 + return;
51242 +}
51243 diff -urNp linux-2.6.32.43/grsecurity/grsec_init.c linux-2.6.32.43/grsecurity/grsec_init.c
51244 --- linux-2.6.32.43/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
51245 +++ linux-2.6.32.43/grsecurity/grsec_init.c 2011-06-29 19:35:26.000000000 -0400
51246 @@ -0,0 +1,274 @@
51247 +#include <linux/kernel.h>
51248 +#include <linux/sched.h>
51249 +#include <linux/mm.h>
51250 +#include <linux/smp_lock.h>
51251 +#include <linux/gracl.h>
51252 +#include <linux/slab.h>
51253 +#include <linux/vmalloc.h>
51254 +#include <linux/percpu.h>
51255 +#include <linux/module.h>
51256 +
51257 +int grsec_enable_brute;
51258 +int grsec_enable_link;
51259 +int grsec_enable_dmesg;
51260 +int grsec_enable_harden_ptrace;
51261 +int grsec_enable_fifo;
51262 +int grsec_enable_execve;
51263 +int grsec_enable_execlog;
51264 +int grsec_enable_signal;
51265 +int grsec_enable_forkfail;
51266 +int grsec_enable_audit_ptrace;
51267 +int grsec_enable_time;
51268 +int grsec_enable_audit_textrel;
51269 +int grsec_enable_group;
51270 +int grsec_audit_gid;
51271 +int grsec_enable_chdir;
51272 +int grsec_enable_mount;
51273 +int grsec_enable_rofs;
51274 +int grsec_enable_chroot_findtask;
51275 +int grsec_enable_chroot_mount;
51276 +int grsec_enable_chroot_shmat;
51277 +int grsec_enable_chroot_fchdir;
51278 +int grsec_enable_chroot_double;
51279 +int grsec_enable_chroot_pivot;
51280 +int grsec_enable_chroot_chdir;
51281 +int grsec_enable_chroot_chmod;
51282 +int grsec_enable_chroot_mknod;
51283 +int grsec_enable_chroot_nice;
51284 +int grsec_enable_chroot_execlog;
51285 +int grsec_enable_chroot_caps;
51286 +int grsec_enable_chroot_sysctl;
51287 +int grsec_enable_chroot_unix;
51288 +int grsec_enable_tpe;
51289 +int grsec_tpe_gid;
51290 +int grsec_enable_blackhole;
51291 +#ifdef CONFIG_IPV6_MODULE
51292 +EXPORT_SYMBOL(grsec_enable_blackhole);
51293 +#endif
51294 +int grsec_lastack_retries;
51295 +int grsec_enable_tpe_all;
51296 +int grsec_enable_tpe_invert;
51297 +int grsec_enable_socket_all;
51298 +int grsec_socket_all_gid;
51299 +int grsec_enable_socket_client;
51300 +int grsec_socket_client_gid;
51301 +int grsec_enable_socket_server;
51302 +int grsec_socket_server_gid;
51303 +int grsec_resource_logging;
51304 +int grsec_disable_privio;
51305 +int grsec_enable_log_rwxmaps;
51306 +int grsec_lock;
51307 +
51308 +DEFINE_SPINLOCK(grsec_alert_lock);
51309 +unsigned long grsec_alert_wtime = 0;
51310 +unsigned long grsec_alert_fyet = 0;
51311 +
51312 +DEFINE_SPINLOCK(grsec_audit_lock);
51313 +
51314 +DEFINE_RWLOCK(grsec_exec_file_lock);
51315 +
51316 +char *gr_shared_page[4];
51317 +
51318 +char *gr_alert_log_fmt;
51319 +char *gr_audit_log_fmt;
51320 +char *gr_alert_log_buf;
51321 +char *gr_audit_log_buf;
51322 +
51323 +extern struct gr_arg *gr_usermode;
51324 +extern unsigned char *gr_system_salt;
51325 +extern unsigned char *gr_system_sum;
51326 +
51327 +void __init
51328 +grsecurity_init(void)
51329 +{
51330 + int j;
51331 + /* create the per-cpu shared pages */
51332 +
51333 +#ifdef CONFIG_X86
51334 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
51335 +#endif
51336 +
51337 + for (j = 0; j < 4; j++) {
51338 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
51339 + if (gr_shared_page[j] == NULL) {
51340 + panic("Unable to allocate grsecurity shared page");
51341 + return;
51342 + }
51343 + }
51344 +
51345 + /* allocate log buffers */
51346 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
51347 + if (!gr_alert_log_fmt) {
51348 + panic("Unable to allocate grsecurity alert log format buffer");
51349 + return;
51350 + }
51351 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
51352 + if (!gr_audit_log_fmt) {
51353 + panic("Unable to allocate grsecurity audit log format buffer");
51354 + return;
51355 + }
51356 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51357 + if (!gr_alert_log_buf) {
51358 + panic("Unable to allocate grsecurity alert log buffer");
51359 + return;
51360 + }
51361 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51362 + if (!gr_audit_log_buf) {
51363 + panic("Unable to allocate grsecurity audit log buffer");
51364 + return;
51365 + }
51366 +
51367 + /* allocate memory for authentication structure */
51368 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
51369 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
51370 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
51371 +
51372 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
51373 + panic("Unable to allocate grsecurity authentication structure");
51374 + return;
51375 + }
51376 +
51377 +
51378 +#ifdef CONFIG_GRKERNSEC_IO
51379 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
51380 + grsec_disable_privio = 1;
51381 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51382 + grsec_disable_privio = 1;
51383 +#else
51384 + grsec_disable_privio = 0;
51385 +#endif
51386 +#endif
51387 +
51388 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51389 + /* for backward compatibility, tpe_invert always defaults to on if
51390 + enabled in the kernel
51391 + */
51392 + grsec_enable_tpe_invert = 1;
51393 +#endif
51394 +
51395 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51396 +#ifndef CONFIG_GRKERNSEC_SYSCTL
51397 + grsec_lock = 1;
51398 +#endif
51399 +
51400 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51401 + grsec_enable_audit_textrel = 1;
51402 +#endif
51403 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51404 + grsec_enable_log_rwxmaps = 1;
51405 +#endif
51406 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51407 + grsec_enable_group = 1;
51408 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
51409 +#endif
51410 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51411 + grsec_enable_chdir = 1;
51412 +#endif
51413 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51414 + grsec_enable_harden_ptrace = 1;
51415 +#endif
51416 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51417 + grsec_enable_mount = 1;
51418 +#endif
51419 +#ifdef CONFIG_GRKERNSEC_LINK
51420 + grsec_enable_link = 1;
51421 +#endif
51422 +#ifdef CONFIG_GRKERNSEC_BRUTE
51423 + grsec_enable_brute = 1;
51424 +#endif
51425 +#ifdef CONFIG_GRKERNSEC_DMESG
51426 + grsec_enable_dmesg = 1;
51427 +#endif
51428 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51429 + grsec_enable_blackhole = 1;
51430 + grsec_lastack_retries = 4;
51431 +#endif
51432 +#ifdef CONFIG_GRKERNSEC_FIFO
51433 + grsec_enable_fifo = 1;
51434 +#endif
51435 +#ifdef CONFIG_GRKERNSEC_EXECVE
51436 + grsec_enable_execve = 1;
51437 +#endif
51438 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51439 + grsec_enable_execlog = 1;
51440 +#endif
51441 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51442 + grsec_enable_signal = 1;
51443 +#endif
51444 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51445 + grsec_enable_forkfail = 1;
51446 +#endif
51447 +#ifdef CONFIG_GRKERNSEC_TIME
51448 + grsec_enable_time = 1;
51449 +#endif
51450 +#ifdef CONFIG_GRKERNSEC_RESLOG
51451 + grsec_resource_logging = 1;
51452 +#endif
51453 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51454 + grsec_enable_chroot_findtask = 1;
51455 +#endif
51456 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51457 + grsec_enable_chroot_unix = 1;
51458 +#endif
51459 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51460 + grsec_enable_chroot_mount = 1;
51461 +#endif
51462 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51463 + grsec_enable_chroot_fchdir = 1;
51464 +#endif
51465 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51466 + grsec_enable_chroot_shmat = 1;
51467 +#endif
51468 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51469 + grsec_enable_audit_ptrace = 1;
51470 +#endif
51471 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51472 + grsec_enable_chroot_double = 1;
51473 +#endif
51474 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51475 + grsec_enable_chroot_pivot = 1;
51476 +#endif
51477 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51478 + grsec_enable_chroot_chdir = 1;
51479 +#endif
51480 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51481 + grsec_enable_chroot_chmod = 1;
51482 +#endif
51483 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51484 + grsec_enable_chroot_mknod = 1;
51485 +#endif
51486 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51487 + grsec_enable_chroot_nice = 1;
51488 +#endif
51489 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51490 + grsec_enable_chroot_execlog = 1;
51491 +#endif
51492 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51493 + grsec_enable_chroot_caps = 1;
51494 +#endif
51495 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51496 + grsec_enable_chroot_sysctl = 1;
51497 +#endif
51498 +#ifdef CONFIG_GRKERNSEC_TPE
51499 + grsec_enable_tpe = 1;
51500 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51501 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
51502 + grsec_enable_tpe_all = 1;
51503 +#endif
51504 +#endif
51505 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51506 + grsec_enable_socket_all = 1;
51507 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51508 +#endif
51509 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51510 + grsec_enable_socket_client = 1;
51511 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51512 +#endif
51513 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51514 + grsec_enable_socket_server = 1;
51515 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51516 +#endif
51517 +#endif
51518 +
51519 + return;
51520 +}
51521 diff -urNp linux-2.6.32.43/grsecurity/grsec_link.c linux-2.6.32.43/grsecurity/grsec_link.c
51522 --- linux-2.6.32.43/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51523 +++ linux-2.6.32.43/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
51524 @@ -0,0 +1,43 @@
51525 +#include <linux/kernel.h>
51526 +#include <linux/sched.h>
51527 +#include <linux/fs.h>
51528 +#include <linux/file.h>
51529 +#include <linux/grinternal.h>
51530 +
51531 +int
51532 +gr_handle_follow_link(const struct inode *parent,
51533 + const struct inode *inode,
51534 + const struct dentry *dentry, const struct vfsmount *mnt)
51535 +{
51536 +#ifdef CONFIG_GRKERNSEC_LINK
51537 + const struct cred *cred = current_cred();
51538 +
51539 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51540 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51541 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51542 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51543 + return -EACCES;
51544 + }
51545 +#endif
51546 + return 0;
51547 +}
51548 +
51549 +int
51550 +gr_handle_hardlink(const struct dentry *dentry,
51551 + const struct vfsmount *mnt,
51552 + struct inode *inode, const int mode, const char *to)
51553 +{
51554 +#ifdef CONFIG_GRKERNSEC_LINK
51555 + const struct cred *cred = current_cred();
51556 +
51557 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51558 + (!S_ISREG(mode) || (mode & S_ISUID) ||
51559 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51560 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51561 + !capable(CAP_FOWNER) && cred->uid) {
51562 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51563 + return -EPERM;
51564 + }
51565 +#endif
51566 + return 0;
51567 +}
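
Not part of the patch: gr_handle_follow_link() is the sticky-directory symlink restriction (essentially the policy later adopted upstream as fs.protected_symlinks): in a sticky, world-writable directory, a symlink is only followed if it is owned by the directory's owner or by the process doing the lookup. A sketch of that rule with the grsec_enable_link and S_ISLNK checks omitted (the function name is illustrative):

/* Sketch of the sticky-symlink rule in gr_handle_follow_link(): in a
 * sticky, world-writable directory, only follow a symlink if it is
 * owned by the directory owner or by the process itself. */
#include <stdio.h>
#include <sys/stat.h>

static int may_follow(mode_t dir_mode, unsigned int dir_uid,
		      unsigned int link_uid, unsigned int fsuid)
{
	if ((dir_mode & S_ISVTX) && (dir_mode & S_IWOTH) &&
	    dir_uid != link_uid && fsuid != link_uid)
		return 0;	/* denied, logged as GR_SYMLINK_MSG */
	return 1;
}

int main(void)
{
	mode_t tmp = 01777;	/* typical /tmp: sticky and world-writable */

	printf("%d\n", may_follow(tmp, 0, 1000, 1001));		/* 0: foreign link */
	printf("%d\n", may_follow(tmp, 0, 1000, 1000));		/* 1: own link     */
	printf("%d\n", may_follow(0755, 0, 1000, 1001));	/* 1: not sticky   */
	return 0;
}
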
51568 diff -urNp linux-2.6.32.43/grsecurity/grsec_log.c linux-2.6.32.43/grsecurity/grsec_log.c
51569 --- linux-2.6.32.43/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51570 +++ linux-2.6.32.43/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
51571 @@ -0,0 +1,310 @@
51572 +#include <linux/kernel.h>
51573 +#include <linux/sched.h>
51574 +#include <linux/file.h>
51575 +#include <linux/tty.h>
51576 +#include <linux/fs.h>
51577 +#include <linux/grinternal.h>
51578 +
51579 +#ifdef CONFIG_TREE_PREEMPT_RCU
51580 +#define DISABLE_PREEMPT() preempt_disable()
51581 +#define ENABLE_PREEMPT() preempt_enable()
51582 +#else
51583 +#define DISABLE_PREEMPT()
51584 +#define ENABLE_PREEMPT()
51585 +#endif
51586 +
51587 +#define BEGIN_LOCKS(x) \
51588 + DISABLE_PREEMPT(); \
51589 + rcu_read_lock(); \
51590 + read_lock(&tasklist_lock); \
51591 + read_lock(&grsec_exec_file_lock); \
51592 + if (x != GR_DO_AUDIT) \
51593 + spin_lock(&grsec_alert_lock); \
51594 + else \
51595 + spin_lock(&grsec_audit_lock)
51596 +
51597 +#define END_LOCKS(x) \
51598 + if (x != GR_DO_AUDIT) \
51599 + spin_unlock(&grsec_alert_lock); \
51600 + else \
51601 + spin_unlock(&grsec_audit_lock); \
51602 + read_unlock(&grsec_exec_file_lock); \
51603 + read_unlock(&tasklist_lock); \
51604 + rcu_read_unlock(); \
51605 + ENABLE_PREEMPT(); \
51606 + if (x == GR_DONT_AUDIT) \
51607 + gr_handle_alertkill(current)
51608 +
51609 +enum {
51610 + FLOODING,
51611 + NO_FLOODING
51612 +};
51613 +
51614 +extern char *gr_alert_log_fmt;
51615 +extern char *gr_audit_log_fmt;
51616 +extern char *gr_alert_log_buf;
51617 +extern char *gr_audit_log_buf;
51618 +
51619 +static int gr_log_start(int audit)
51620 +{
51621 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51622 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51623 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51624 +
51625 + if (audit == GR_DO_AUDIT)
51626 + goto set_fmt;
51627 +
51628 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51629 + grsec_alert_wtime = jiffies;
51630 + grsec_alert_fyet = 0;
51631 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51632 + grsec_alert_fyet++;
51633 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51634 + grsec_alert_wtime = jiffies;
51635 + grsec_alert_fyet++;
51636 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51637 + return FLOODING;
51638 + } else return FLOODING;
51639 +
51640 +set_fmt:
51641 + memset(buf, 0, PAGE_SIZE);
51642 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
51643 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51644 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51645 + } else if (current->signal->curr_ip) {
51646 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51647 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51648 + } else if (gr_acl_is_enabled()) {
51649 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51650 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51651 + } else {
51652 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
51653 + strcpy(buf, fmt);
51654 + }
51655 +
51656 + return NO_FLOODING;
51657 +}
51658 +
51659 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51660 + __attribute__ ((format (printf, 2, 0)));
51661 +
51662 +static void gr_log_middle(int audit, const char *msg, va_list ap)
51663 +{
51664 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51665 + unsigned int len = strlen(buf);
51666 +
51667 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51668 +
51669 + return;
51670 +}
51671 +
51672 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51673 + __attribute__ ((format (printf, 2, 3)));
51674 +
51675 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
51676 +{
51677 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51678 + unsigned int len = strlen(buf);
51679 + va_list ap;
51680 +
51681 + va_start(ap, msg);
51682 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51683 + va_end(ap);
51684 +
51685 + return;
51686 +}
51687 +
51688 +static void gr_log_end(int audit)
51689 +{
51690 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51691 + unsigned int len = strlen(buf);
51692 +
51693 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51694 + printk("%s\n", buf);
51695 +
51696 + return;
51697 +}
51698 +
51699 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51700 +{
51701 + int logtype;
51702 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51703 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51704 + void *voidptr = NULL;
51705 + int num1 = 0, num2 = 0;
51706 + unsigned long ulong1 = 0, ulong2 = 0;
51707 + struct dentry *dentry = NULL;
51708 + struct vfsmount *mnt = NULL;
51709 + struct file *file = NULL;
51710 + struct task_struct *task = NULL;
51711 + const struct cred *cred, *pcred;
51712 + va_list ap;
51713 +
51714 + BEGIN_LOCKS(audit);
51715 + logtype = gr_log_start(audit);
51716 + if (logtype == FLOODING) {
51717 + END_LOCKS(audit);
51718 + return;
51719 + }
51720 + va_start(ap, argtypes);
51721 + switch (argtypes) {
51722 + case GR_TTYSNIFF:
51723 + task = va_arg(ap, struct task_struct *);
51724 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51725 + break;
51726 + case GR_SYSCTL_HIDDEN:
51727 + str1 = va_arg(ap, char *);
51728 + gr_log_middle_varargs(audit, msg, result, str1);
51729 + break;
51730 + case GR_RBAC:
51731 + dentry = va_arg(ap, struct dentry *);
51732 + mnt = va_arg(ap, struct vfsmount *);
51733 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51734 + break;
51735 + case GR_RBAC_STR:
51736 + dentry = va_arg(ap, struct dentry *);
51737 + mnt = va_arg(ap, struct vfsmount *);
51738 + str1 = va_arg(ap, char *);
51739 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51740 + break;
51741 + case GR_STR_RBAC:
51742 + str1 = va_arg(ap, char *);
51743 + dentry = va_arg(ap, struct dentry *);
51744 + mnt = va_arg(ap, struct vfsmount *);
51745 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51746 + break;
51747 + case GR_RBAC_MODE2:
51748 + dentry = va_arg(ap, struct dentry *);
51749 + mnt = va_arg(ap, struct vfsmount *);
51750 + str1 = va_arg(ap, char *);
51751 + str2 = va_arg(ap, char *);
51752 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51753 + break;
51754 + case GR_RBAC_MODE3:
51755 + dentry = va_arg(ap, struct dentry *);
51756 + mnt = va_arg(ap, struct vfsmount *);
51757 + str1 = va_arg(ap, char *);
51758 + str2 = va_arg(ap, char *);
51759 + str3 = va_arg(ap, char *);
51760 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51761 + break;
51762 + case GR_FILENAME:
51763 + dentry = va_arg(ap, struct dentry *);
51764 + mnt = va_arg(ap, struct vfsmount *);
51765 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51766 + break;
51767 + case GR_STR_FILENAME:
51768 + str1 = va_arg(ap, char *);
51769 + dentry = va_arg(ap, struct dentry *);
51770 + mnt = va_arg(ap, struct vfsmount *);
51771 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51772 + break;
51773 + case GR_FILENAME_STR:
51774 + dentry = va_arg(ap, struct dentry *);
51775 + mnt = va_arg(ap, struct vfsmount *);
51776 + str1 = va_arg(ap, char *);
51777 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51778 + break;
51779 + case GR_FILENAME_TWO_INT:
51780 + dentry = va_arg(ap, struct dentry *);
51781 + mnt = va_arg(ap, struct vfsmount *);
51782 + num1 = va_arg(ap, int);
51783 + num2 = va_arg(ap, int);
51784 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51785 + break;
51786 + case GR_FILENAME_TWO_INT_STR:
51787 + dentry = va_arg(ap, struct dentry *);
51788 + mnt = va_arg(ap, struct vfsmount *);
51789 + num1 = va_arg(ap, int);
51790 + num2 = va_arg(ap, int);
51791 + str1 = va_arg(ap, char *);
51792 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51793 + break;
51794 + case GR_TEXTREL:
51795 + file = va_arg(ap, struct file *);
51796 + ulong1 = va_arg(ap, unsigned long);
51797 + ulong2 = va_arg(ap, unsigned long);
51798 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51799 + break;
51800 + case GR_PTRACE:
51801 + task = va_arg(ap, struct task_struct *);
51802 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51803 + break;
51804 + case GR_RESOURCE:
51805 + task = va_arg(ap, struct task_struct *);
51806 + cred = __task_cred(task);
51807 + pcred = __task_cred(task->real_parent);
51808 + ulong1 = va_arg(ap, unsigned long);
51809 + str1 = va_arg(ap, char *);
51810 + ulong2 = va_arg(ap, unsigned long);
51811 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51812 + break;
51813 + case GR_CAP:
51814 + task = va_arg(ap, struct task_struct *);
51815 + cred = __task_cred(task);
51816 + pcred = __task_cred(task->real_parent);
51817 + str1 = va_arg(ap, char *);
51818 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51819 + break;
51820 + case GR_SIG:
51821 + str1 = va_arg(ap, char *);
51822 + voidptr = va_arg(ap, void *);
51823 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51824 + break;
51825 + case GR_SIG2:
51826 + task = va_arg(ap, struct task_struct *);
51827 + cred = __task_cred(task);
51828 + pcred = __task_cred(task->real_parent);
51829 + num1 = va_arg(ap, int);
51830 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51831 + break;
51832 + case GR_CRASH1:
51833 + task = va_arg(ap, struct task_struct *);
51834 + cred = __task_cred(task);
51835 + pcred = __task_cred(task->real_parent);
51836 + ulong1 = va_arg(ap, unsigned long);
51837 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51838 + break;
51839 + case GR_CRASH2:
51840 + task = va_arg(ap, struct task_struct *);
51841 + cred = __task_cred(task);
51842 + pcred = __task_cred(task->real_parent);
51843 + ulong1 = va_arg(ap, unsigned long);
51844 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51845 + break;
51846 + case GR_RWXMAP:
51847 + file = va_arg(ap, struct file *);
51848 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51849 + break;
51850 + case GR_PSACCT:
51851 + {
51852 + unsigned int wday, cday;
51853 + __u8 whr, chr;
51854 + __u8 wmin, cmin;
51855 + __u8 wsec, csec;
51856 + char cur_tty[64] = { 0 };
51857 + char parent_tty[64] = { 0 };
51858 +
51859 + task = va_arg(ap, struct task_struct *);
51860 + wday = va_arg(ap, unsigned int);
51861 + cday = va_arg(ap, unsigned int);
51862 + whr = va_arg(ap, int);
51863 + chr = va_arg(ap, int);
51864 + wmin = va_arg(ap, int);
51865 + cmin = va_arg(ap, int);
51866 + wsec = va_arg(ap, int);
51867 + csec = va_arg(ap, int);
51868 + ulong1 = va_arg(ap, unsigned long);
51869 + cred = __task_cred(task);
51870 + pcred = __task_cred(task->real_parent);
51871 +
51872 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51873 + }
51874 + break;
51875 + default:
51876 + gr_log_middle(audit, msg, ap);
51877 + }
51878 + va_end(ap);
51879 + gr_log_end(audit);
51880 + END_LOCKS(audit);
51881 +}
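
The gr_log_start() routine in grsec_log.c above implements a simple burst limiter: at most CONFIG_GRKERNSEC_FLOODBURST alerts are formatted per CONFIG_GRKERNSEC_FLOODTIME-second window, and once the burst is exhausted alerts are dropped until a full quiet window has elapsed. Below is a minimal userspace sketch of the same windowed limit; the constant values, function names, and the use of time() in place of jiffies are illustrative assumptions, not taken from the patch.

/* Illustrative userspace model of the alert rate limiting performed by
 * gr_log_start(): allow a burst of messages per window, then suppress
 * until a full quiet window has elapsed.  Constants are example values. */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window (assumed example value) */
#define FLOODBURST 4    /* extra messages allowed per window (assumed example value) */

static time_t window_start;
static unsigned int sent_in_window;

/* Returns 1 if the message may be emitted, 0 if it is suppressed. */
static int alert_allowed(time_t now)
{
    if (window_start == 0 || now - window_start > FLOODTIME) {
        /* quiet period elapsed: open a fresh window */
        window_start = now;
        sent_in_window = 0;
        return 1;
    }
    if (sent_in_window < FLOODBURST) {
        sent_in_window++;
        return 1;
    }
    if (sent_in_window == FLOODBURST) {
        /* first suppressed message: note it once, like the kernel's
         * "logging disabled for %d seconds" printk */
        window_start = now;
        sent_in_window++;
        printf("rate limit hit, suppressing for %d seconds\n", FLOODTIME);
    }
    return 0;
}

int main(void)
{
    for (int i = 0; i < 8; i++)
        printf("msg %d: %s\n", i, alert_allowed(time(NULL)) ? "logged" : "dropped");
    return 0;
}
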
51882 diff -urNp linux-2.6.32.43/grsecurity/grsec_mem.c linux-2.6.32.43/grsecurity/grsec_mem.c
51883 --- linux-2.6.32.43/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51884 +++ linux-2.6.32.43/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51885 @@ -0,0 +1,33 @@
51886 +#include <linux/kernel.h>
51887 +#include <linux/sched.h>
51888 +#include <linux/mm.h>
51889 +#include <linux/mman.h>
51890 +#include <linux/grinternal.h>
51891 +
51892 +void
51893 +gr_handle_ioperm(void)
51894 +{
51895 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51896 + return;
51897 +}
51898 +
51899 +void
51900 +gr_handle_iopl(void)
51901 +{
51902 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51903 + return;
51904 +}
51905 +
51906 +void
51907 +gr_handle_mem_readwrite(u64 from, u64 to)
51908 +{
51909 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51910 + return;
51911 +}
51912 +
51913 +void
51914 +gr_handle_vm86(void)
51915 +{
51916 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51917 + return;
51918 +}
51919 diff -urNp linux-2.6.32.43/grsecurity/grsec_mount.c linux-2.6.32.43/grsecurity/grsec_mount.c
51920 --- linux-2.6.32.43/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51921 +++ linux-2.6.32.43/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
51922 @@ -0,0 +1,62 @@
51923 +#include <linux/kernel.h>
51924 +#include <linux/sched.h>
51925 +#include <linux/mount.h>
51926 +#include <linux/grsecurity.h>
51927 +#include <linux/grinternal.h>
51928 +
51929 +void
51930 +gr_log_remount(const char *devname, const int retval)
51931 +{
51932 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51933 + if (grsec_enable_mount && (retval >= 0))
51934 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51935 +#endif
51936 + return;
51937 +}
51938 +
51939 +void
51940 +gr_log_unmount(const char *devname, const int retval)
51941 +{
51942 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51943 + if (grsec_enable_mount && (retval >= 0))
51944 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51945 +#endif
51946 + return;
51947 +}
51948 +
51949 +void
51950 +gr_log_mount(const char *from, const char *to, const int retval)
51951 +{
51952 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51953 + if (grsec_enable_mount && (retval >= 0))
51954 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51955 +#endif
51956 + return;
51957 +}
51958 +
51959 +int
51960 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51961 +{
51962 +#ifdef CONFIG_GRKERNSEC_ROFS
51963 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51964 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51965 + return -EPERM;
51966 + } else
51967 + return 0;
51968 +#endif
51969 + return 0;
51970 +}
51971 +
51972 +int
51973 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51974 +{
51975 +#ifdef CONFIG_GRKERNSEC_ROFS
51976 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51977 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51978 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51979 + return -EPERM;
51980 + } else
51981 + return 0;
51982 +#endif
51983 + return 0;
51984 +}
51985 diff -urNp linux-2.6.32.43/grsecurity/grsec_pax.c linux-2.6.32.43/grsecurity/grsec_pax.c
51986 --- linux-2.6.32.43/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51987 +++ linux-2.6.32.43/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51988 @@ -0,0 +1,36 @@
51989 +#include <linux/kernel.h>
51990 +#include <linux/sched.h>
51991 +#include <linux/mm.h>
51992 +#include <linux/file.h>
51993 +#include <linux/grinternal.h>
51994 +#include <linux/grsecurity.h>
51995 +
51996 +void
51997 +gr_log_textrel(struct vm_area_struct * vma)
51998 +{
51999 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52000 + if (grsec_enable_audit_textrel)
52001 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
52002 +#endif
52003 + return;
52004 +}
52005 +
52006 +void
52007 +gr_log_rwxmmap(struct file *file)
52008 +{
52009 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52010 + if (grsec_enable_log_rwxmaps)
52011 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
52012 +#endif
52013 + return;
52014 +}
52015 +
52016 +void
52017 +gr_log_rwxmprotect(struct file *file)
52018 +{
52019 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52020 + if (grsec_enable_log_rwxmaps)
52021 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
52022 +#endif
52023 + return;
52024 +}
52025 diff -urNp linux-2.6.32.43/grsecurity/grsec_ptrace.c linux-2.6.32.43/grsecurity/grsec_ptrace.c
52026 --- linux-2.6.32.43/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
52027 +++ linux-2.6.32.43/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
52028 @@ -0,0 +1,14 @@
52029 +#include <linux/kernel.h>
52030 +#include <linux/sched.h>
52031 +#include <linux/grinternal.h>
52032 +#include <linux/grsecurity.h>
52033 +
52034 +void
52035 +gr_audit_ptrace(struct task_struct *task)
52036 +{
52037 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52038 + if (grsec_enable_audit_ptrace)
52039 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
52040 +#endif
52041 + return;
52042 +}
52043 diff -urNp linux-2.6.32.43/grsecurity/grsec_sig.c linux-2.6.32.43/grsecurity/grsec_sig.c
52044 --- linux-2.6.32.43/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
52045 +++ linux-2.6.32.43/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
52046 @@ -0,0 +1,205 @@
52047 +#include <linux/kernel.h>
52048 +#include <linux/sched.h>
52049 +#include <linux/delay.h>
52050 +#include <linux/grsecurity.h>
52051 +#include <linux/grinternal.h>
52052 +#include <linux/hardirq.h>
52053 +
52054 +char *signames[] = {
52055 + [SIGSEGV] = "Segmentation fault",
52056 + [SIGILL] = "Illegal instruction",
52057 + [SIGABRT] = "Abort",
52058 + [SIGBUS] = "Invalid alignment/Bus error"
52059 +};
52060 +
52061 +void
52062 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
52063 +{
52064 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52065 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
52066 + (sig == SIGABRT) || (sig == SIGBUS))) {
52067 + if (t->pid == current->pid) {
52068 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
52069 + } else {
52070 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
52071 + }
52072 + }
52073 +#endif
52074 + return;
52075 +}
52076 +
52077 +int
52078 +gr_handle_signal(const struct task_struct *p, const int sig)
52079 +{
52080 +#ifdef CONFIG_GRKERNSEC
52081 + if (current->pid > 1 && gr_check_protected_task(p)) {
52082 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
52083 + return -EPERM;
52084 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
52085 + return -EPERM;
52086 + }
52087 +#endif
52088 + return 0;
52089 +}
52090 +
52091 +#ifdef CONFIG_GRKERNSEC
52092 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
52093 +
52094 +int gr_fake_force_sig(int sig, struct task_struct *t)
52095 +{
52096 + unsigned long int flags;
52097 + int ret, blocked, ignored;
52098 + struct k_sigaction *action;
52099 +
52100 + spin_lock_irqsave(&t->sighand->siglock, flags);
52101 + action = &t->sighand->action[sig-1];
52102 + ignored = action->sa.sa_handler == SIG_IGN;
52103 + blocked = sigismember(&t->blocked, sig);
52104 + if (blocked || ignored) {
52105 + action->sa.sa_handler = SIG_DFL;
52106 + if (blocked) {
52107 + sigdelset(&t->blocked, sig);
52108 + recalc_sigpending_and_wake(t);
52109 + }
52110 + }
52111 + if (action->sa.sa_handler == SIG_DFL)
52112 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
52113 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
52114 +
52115 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
52116 +
52117 + return ret;
52118 +}
52119 +#endif
52120 +
52121 +#ifdef CONFIG_GRKERNSEC_BRUTE
52122 +#define GR_USER_BAN_TIME (15 * 60)
52123 +
52124 +static int __get_dumpable(unsigned long mm_flags)
52125 +{
52126 + int ret;
52127 +
52128 + ret = mm_flags & MMF_DUMPABLE_MASK;
52129 + return (ret >= 2) ? 2 : ret;
52130 +}
52131 +#endif
52132 +
52133 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
52134 +{
52135 +#ifdef CONFIG_GRKERNSEC_BRUTE
52136 + uid_t uid = 0;
52137 +
52138 + if (!grsec_enable_brute)
52139 + return;
52140 +
52141 + rcu_read_lock();
52142 + read_lock(&tasklist_lock);
52143 + read_lock(&grsec_exec_file_lock);
52144 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
52145 + p->real_parent->brute = 1;
52146 + else {
52147 + const struct cred *cred = __task_cred(p), *cred2;
52148 + struct task_struct *tsk, *tsk2;
52149 +
52150 + if (!__get_dumpable(mm_flags) && cred->uid) {
52151 + struct user_struct *user;
52152 +
52153 + uid = cred->uid;
52154 +
52155 + /* this is put upon execution past expiration */
52156 + user = find_user(uid);
52157 + if (user == NULL)
52158 + goto unlock;
52159 + user->banned = 1;
52160 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
52161 + if (user->ban_expires == ~0UL)
52162 + user->ban_expires--;
52163 +
52164 + do_each_thread(tsk2, tsk) {
52165 + cred2 = __task_cred(tsk);
52166 + if (tsk != p && cred2->uid == uid)
52167 + gr_fake_force_sig(SIGKILL, tsk);
52168 + } while_each_thread(tsk2, tsk);
52169 + }
52170 + }
52171 +unlock:
52172 + read_unlock(&grsec_exec_file_lock);
52173 + read_unlock(&tasklist_lock);
52174 + rcu_read_unlock();
52175 +
52176 + if (uid)
52177 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
52178 +#endif
52179 + return;
52180 +}
52181 +
52182 +void gr_handle_brute_check(void)
52183 +{
52184 +#ifdef CONFIG_GRKERNSEC_BRUTE
52185 + if (current->brute)
52186 + msleep(30 * 1000);
52187 +#endif
52188 + return;
52189 +}
52190 +
52191 +void gr_handle_kernel_exploit(void)
52192 +{
52193 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
52194 + const struct cred *cred;
52195 + struct task_struct *tsk, *tsk2;
52196 + struct user_struct *user;
52197 + uid_t uid;
52198 +
52199 + if (in_irq() || in_serving_softirq() || in_nmi())
52200 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
52201 +
52202 + uid = current_uid();
52203 +
52204 + if (uid == 0)
52205 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
52206 + else {
52207 + /* kill all the processes of this user, hold a reference
52208 + to their creds struct, and prevent them from creating
52209 + another process until system reset
52210 + */
52211 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
52212 + /* we intentionally leak this ref */
52213 + user = get_uid(current->cred->user);
52214 + if (user) {
52215 + user->banned = 1;
52216 + user->ban_expires = ~0UL;
52217 + }
52218 +
52219 + read_lock(&tasklist_lock);
52220 + do_each_thread(tsk2, tsk) {
52221 + cred = __task_cred(tsk);
52222 + if (cred->uid == uid)
52223 + gr_fake_force_sig(SIGKILL, tsk);
52224 + } while_each_thread(tsk2, tsk);
52225 + read_unlock(&tasklist_lock);
52226 + }
52227 +#endif
52228 +}
52229 +
52230 +int __gr_process_user_ban(struct user_struct *user)
52231 +{
52232 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52233 + if (unlikely(user->banned)) {
52234 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
52235 + user->banned = 0;
52236 + user->ban_expires = 0;
52237 + free_uid(user);
52238 + } else
52239 + return -EPERM;
52240 + }
52241 +#endif
52242 + return 0;
52243 +}
52244 +
52245 +int gr_process_user_ban(void)
52246 +{
52247 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52248 + return __gr_process_user_ban(current->cred->user);
52249 +#endif
52250 + return 0;
52251 +}
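
The brute-force and kernel-exploit handlers in grsec_sig.c above share one piece of bookkeeping: a per-user ban that either expires after GR_USER_BAN_TIME seconds or, with the sentinel ~0UL, lasts until reboot, checked on each new process via __gr_process_user_ban(). The following is a hedged userspace model of that bookkeeping only; the fake_user struct and helper names are invented for illustration and do not reflect the kernel's user_struct.

/* Userspace model of the ban logic in gr_handle_brute_attach() /
 * __gr_process_user_ban(): timed ban or permanent sentinel. */
#include <stdio.h>
#include <time.h>

#define BAN_TIME    (15 * 60)   /* mirrors GR_USER_BAN_TIME */
#define BAN_FOREVER (~0UL)      /* sentinel: never expires */

struct fake_user {
    int banned;
    unsigned long ban_expires;  /* absolute time in seconds, or BAN_FOREVER */
};

static void ban_user(struct fake_user *u, int permanent)
{
    u->banned = 1;
    u->ban_expires = permanent ? BAN_FOREVER
                               : (unsigned long)time(NULL) + BAN_TIME;
    /* like the patch, keep a timed expiry from accidentally
     * equalling the "forever" sentinel */
    if (u->ban_expires == BAN_FOREVER)
        u->ban_expires--;
}

/* Returns 0 if the user may proceed, -1 if still banned (kernel uses -EPERM). */
static int process_user_ban(struct fake_user *u)
{
    if (u->banned) {
        if (u->ban_expires != BAN_FOREVER &&
            (unsigned long)time(NULL) >= u->ban_expires) {
            u->banned = 0;
            u->ban_expires = 0;
            return 0;
        }
        return -1;
    }
    return 0;
}

int main(void)
{
    struct fake_user u = { 0, 0 };

    ban_user(&u, 0);
    printf("just banned: %s\n", process_user_ban(&u) ? "blocked" : "allowed");
    u.ban_expires = (unsigned long)time(NULL);   /* pretend the ban ran out */
    printf("after expiry: %s\n", process_user_ban(&u) ? "blocked" : "allowed");
    return 0;
}
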
52252 diff -urNp linux-2.6.32.43/grsecurity/grsec_sock.c linux-2.6.32.43/grsecurity/grsec_sock.c
52253 --- linux-2.6.32.43/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
52254 +++ linux-2.6.32.43/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
52255 @@ -0,0 +1,275 @@
52256 +#include <linux/kernel.h>
52257 +#include <linux/module.h>
52258 +#include <linux/sched.h>
52259 +#include <linux/file.h>
52260 +#include <linux/net.h>
52261 +#include <linux/in.h>
52262 +#include <linux/ip.h>
52263 +#include <net/sock.h>
52264 +#include <net/inet_sock.h>
52265 +#include <linux/grsecurity.h>
52266 +#include <linux/grinternal.h>
52267 +#include <linux/gracl.h>
52268 +
52269 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
52270 +EXPORT_SYMBOL(gr_cap_rtnetlink);
52271 +
52272 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
52273 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
52274 +
52275 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
52276 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
52277 +
52278 +#ifdef CONFIG_UNIX_MODULE
52279 +EXPORT_SYMBOL(gr_acl_handle_unix);
52280 +EXPORT_SYMBOL(gr_acl_handle_mknod);
52281 +EXPORT_SYMBOL(gr_handle_chroot_unix);
52282 +EXPORT_SYMBOL(gr_handle_create);
52283 +#endif
52284 +
52285 +#ifdef CONFIG_GRKERNSEC
52286 +#define gr_conn_table_size 32749
52287 +struct conn_table_entry {
52288 + struct conn_table_entry *next;
52289 + struct signal_struct *sig;
52290 +};
52291 +
52292 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
52293 +DEFINE_SPINLOCK(gr_conn_table_lock);
52294 +
52295 +extern const char * gr_socktype_to_name(unsigned char type);
52296 +extern const char * gr_proto_to_name(unsigned char proto);
52297 +extern const char * gr_sockfamily_to_name(unsigned char family);
52298 +
52299 +static __inline__ int
52300 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
52301 +{
52302 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
52303 +}
52304 +
52305 +static __inline__ int
52306 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
52307 + __u16 sport, __u16 dport)
52308 +{
52309 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
52310 + sig->gr_sport == sport && sig->gr_dport == dport))
52311 + return 1;
52312 + else
52313 + return 0;
52314 +}
52315 +
52316 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
52317 +{
52318 + struct conn_table_entry **match;
52319 + unsigned int index;
52320 +
52321 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52322 + sig->gr_sport, sig->gr_dport,
52323 + gr_conn_table_size);
52324 +
52325 + newent->sig = sig;
52326 +
52327 + match = &gr_conn_table[index];
52328 + newent->next = *match;
52329 + *match = newent;
52330 +
52331 + return;
52332 +}
52333 +
52334 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
52335 +{
52336 + struct conn_table_entry *match, *last = NULL;
52337 + unsigned int index;
52338 +
52339 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52340 + sig->gr_sport, sig->gr_dport,
52341 + gr_conn_table_size);
52342 +
52343 + match = gr_conn_table[index];
52344 + while (match && !conn_match(match->sig,
52345 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
52346 + sig->gr_dport)) {
52347 + last = match;
52348 + match = match->next;
52349 + }
52350 +
52351 + if (match) {
52352 + if (last)
52353 + last->next = match->next;
52354 + else
52355 + gr_conn_table[index] = NULL;
52356 + kfree(match);
52357 + }
52358 +
52359 + return;
52360 +}
52361 +
52362 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
52363 + __u16 sport, __u16 dport)
52364 +{
52365 + struct conn_table_entry *match;
52366 + unsigned int index;
52367 +
52368 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
52369 +
52370 + match = gr_conn_table[index];
52371 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
52372 + match = match->next;
52373 +
52374 + if (match)
52375 + return match->sig;
52376 + else
52377 + return NULL;
52378 +}
52379 +
52380 +#endif
52381 +
52382 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
52383 +{
52384 +#ifdef CONFIG_GRKERNSEC
52385 + struct signal_struct *sig = task->signal;
52386 + struct conn_table_entry *newent;
52387 +
52388 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
52389 + if (newent == NULL)
52390 + return;
52391 + /* no bh lock needed since we are called with bh disabled */
52392 + spin_lock(&gr_conn_table_lock);
52393 + gr_del_task_from_ip_table_nolock(sig);
52394 + sig->gr_saddr = inet->rcv_saddr;
52395 + sig->gr_daddr = inet->daddr;
52396 + sig->gr_sport = inet->sport;
52397 + sig->gr_dport = inet->dport;
52398 + gr_add_to_task_ip_table_nolock(sig, newent);
52399 + spin_unlock(&gr_conn_table_lock);
52400 +#endif
52401 + return;
52402 +}
52403 +
52404 +void gr_del_task_from_ip_table(struct task_struct *task)
52405 +{
52406 +#ifdef CONFIG_GRKERNSEC
52407 + spin_lock_bh(&gr_conn_table_lock);
52408 + gr_del_task_from_ip_table_nolock(task->signal);
52409 + spin_unlock_bh(&gr_conn_table_lock);
52410 +#endif
52411 + return;
52412 +}
52413 +
52414 +void
52415 +gr_attach_curr_ip(const struct sock *sk)
52416 +{
52417 +#ifdef CONFIG_GRKERNSEC
52418 + struct signal_struct *p, *set;
52419 + const struct inet_sock *inet = inet_sk(sk);
52420 +
52421 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
52422 + return;
52423 +
52424 + set = current->signal;
52425 +
52426 + spin_lock_bh(&gr_conn_table_lock);
52427 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
52428 + inet->dport, inet->sport);
52429 + if (unlikely(p != NULL)) {
52430 + set->curr_ip = p->curr_ip;
52431 + set->used_accept = 1;
52432 + gr_del_task_from_ip_table_nolock(p);
52433 + spin_unlock_bh(&gr_conn_table_lock);
52434 + return;
52435 + }
52436 + spin_unlock_bh(&gr_conn_table_lock);
52437 +
52438 + set->curr_ip = inet->daddr;
52439 + set->used_accept = 1;
52440 +#endif
52441 + return;
52442 +}
52443 +
52444 +int
52445 +gr_handle_sock_all(const int family, const int type, const int protocol)
52446 +{
52447 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52448 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
52449 + (family != AF_UNIX)) {
52450 + if (family == AF_INET)
52451 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
52452 + else
52453 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
52454 + return -EACCES;
52455 + }
52456 +#endif
52457 + return 0;
52458 +}
52459 +
52460 +int
52461 +gr_handle_sock_server(const struct sockaddr *sck)
52462 +{
52463 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52464 + if (grsec_enable_socket_server &&
52465 + in_group_p(grsec_socket_server_gid) &&
52466 + sck && (sck->sa_family != AF_UNIX) &&
52467 + (sck->sa_family != AF_LOCAL)) {
52468 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52469 + return -EACCES;
52470 + }
52471 +#endif
52472 + return 0;
52473 +}
52474 +
52475 +int
52476 +gr_handle_sock_server_other(const struct sock *sck)
52477 +{
52478 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52479 + if (grsec_enable_socket_server &&
52480 + in_group_p(grsec_socket_server_gid) &&
52481 + sck && (sck->sk_family != AF_UNIX) &&
52482 + (sck->sk_family != AF_LOCAL)) {
52483 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52484 + return -EACCES;
52485 + }
52486 +#endif
52487 + return 0;
52488 +}
52489 +
52490 +int
52491 +gr_handle_sock_client(const struct sockaddr *sck)
52492 +{
52493 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52494 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52495 + sck && (sck->sa_family != AF_UNIX) &&
52496 + (sck->sa_family != AF_LOCAL)) {
52497 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52498 + return -EACCES;
52499 + }
52500 +#endif
52501 + return 0;
52502 +}
52503 +
52504 +kernel_cap_t
52505 +gr_cap_rtnetlink(struct sock *sock)
52506 +{
52507 +#ifdef CONFIG_GRKERNSEC
52508 + if (!gr_acl_is_enabled())
52509 + return current_cap();
52510 + else if (sock->sk_protocol == NETLINK_ISCSI &&
52511 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
52512 + gr_is_capable(CAP_SYS_ADMIN))
52513 + return current_cap();
52514 + else if (sock->sk_protocol == NETLINK_AUDIT &&
52515 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
52516 + gr_is_capable(CAP_AUDIT_WRITE) &&
52517 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
52518 + gr_is_capable(CAP_AUDIT_CONTROL))
52519 + return current_cap();
52520 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
52521 + ((sock->sk_protocol == NETLINK_ROUTE) ?
52522 + gr_is_capable_nolog(CAP_NET_ADMIN) :
52523 + gr_is_capable(CAP_NET_ADMIN)))
52524 + return current_cap();
52525 + else
52526 + return __cap_empty_set;
52527 +#else
52528 + return current_cap();
52529 +#endif
52530 +}
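
grsec_sock.c above keeps a fixed-size, prime-sized hash table keyed on the TCP 4-tuple so that an accepted connection can be traced back to the task that created it (gr_update_task_in_ip_table / gr_attach_curr_ip). Below is a minimal userspace sketch of that table: same table size and hash shape as conn_hash(), but the stored value is a plain int id rather than a signal_struct, and the locking is omitted; everything beyond the hash itself is an illustrative assumption.

/* Chained hash table over a (saddr, daddr, sport, dport) key,
 * modelled on gr_conn_table in the patch above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 32749   /* same prime as gr_conn_table_size */

struct entry {
    uint32_t saddr, daddr;
    uint16_t sport, dport;
    int id;
    struct entry *next;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
    return (daddr + saddr + ((uint32_t)sport << 8) +
            ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void add_conn(uint32_t saddr, uint32_t daddr,
                     uint16_t sport, uint16_t dport, int id)
{
    unsigned int idx = conn_hash(saddr, daddr, sport, dport);
    struct entry *e = malloc(sizeof(*e));

    if (!e)
        return;
    e->saddr = saddr; e->daddr = daddr;
    e->sport = sport; e->dport = dport;
    e->id = id;
    e->next = table[idx];      /* insert at chain head, like the patch */
    table[idx] = e;
}

static struct entry *lookup_conn(uint32_t saddr, uint32_t daddr,
                                 uint16_t sport, uint16_t dport)
{
    struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

    while (e && !(e->saddr == saddr && e->daddr == daddr &&
                  e->sport == sport && e->dport == dport))
        e = e->next;
    return e;
}

int main(void)
{
    add_conn(0x0a000001, 0x0a000002, 12345, 80, 42);
    struct entry *e = lookup_conn(0x0a000001, 0x0a000002, 12345, 80);
    printf("lookup: %s (id=%d)\n", e ? "hit" : "miss", e ? e->id : -1);
    return 0;
}
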
52531 diff -urNp linux-2.6.32.43/grsecurity/grsec_sysctl.c linux-2.6.32.43/grsecurity/grsec_sysctl.c
52532 --- linux-2.6.32.43/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52533 +++ linux-2.6.32.43/grsecurity/grsec_sysctl.c 2011-06-29 19:37:19.000000000 -0400
52534 @@ -0,0 +1,489 @@
52535 +#include <linux/kernel.h>
52536 +#include <linux/sched.h>
52537 +#include <linux/sysctl.h>
52538 +#include <linux/grsecurity.h>
52539 +#include <linux/grinternal.h>
52540 +
52541 +int
52542 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52543 +{
52544 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52545 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52546 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52547 + return -EACCES;
52548 + }
52549 +#endif
52550 + return 0;
52551 +}
52552 +
52553 +#ifdef CONFIG_GRKERNSEC_ROFS
52554 +static int __maybe_unused one = 1;
52555 +#endif
52556 +
52557 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52558 +ctl_table grsecurity_table[] = {
52559 +#ifdef CONFIG_GRKERNSEC_SYSCTL
52560 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52561 +#ifdef CONFIG_GRKERNSEC_IO
52562 + {
52563 + .ctl_name = CTL_UNNUMBERED,
52564 + .procname = "disable_priv_io",
52565 + .data = &grsec_disable_privio,
52566 + .maxlen = sizeof(int),
52567 + .mode = 0600,
52568 + .proc_handler = &proc_dointvec,
52569 + },
52570 +#endif
52571 +#endif
52572 +#ifdef CONFIG_GRKERNSEC_LINK
52573 + {
52574 + .ctl_name = CTL_UNNUMBERED,
52575 + .procname = "linking_restrictions",
52576 + .data = &grsec_enable_link,
52577 + .maxlen = sizeof(int),
52578 + .mode = 0600,
52579 + .proc_handler = &proc_dointvec,
52580 + },
52581 +#endif
52582 +#ifdef CONFIG_GRKERNSEC_BRUTE
52583 + {
52584 + .ctl_name = CTL_UNNUMBERED,
52585 + .procname = "deter_bruteforce",
52586 + .data = &grsec_enable_brute,
52587 + .maxlen = sizeof(int),
52588 + .mode = 0600,
52589 + .proc_handler = &proc_dointvec,
52590 + },
52591 +#endif
52592 +#ifdef CONFIG_GRKERNSEC_FIFO
52593 + {
52594 + .ctl_name = CTL_UNNUMBERED,
52595 + .procname = "fifo_restrictions",
52596 + .data = &grsec_enable_fifo,
52597 + .maxlen = sizeof(int),
52598 + .mode = 0600,
52599 + .proc_handler = &proc_dointvec,
52600 + },
52601 +#endif
52602 +#ifdef CONFIG_GRKERNSEC_EXECVE
52603 + {
52604 + .ctl_name = CTL_UNNUMBERED,
52605 + .procname = "execve_limiting",
52606 + .data = &grsec_enable_execve,
52607 + .maxlen = sizeof(int),
52608 + .mode = 0600,
52609 + .proc_handler = &proc_dointvec,
52610 + },
52611 +#endif
52612 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52613 + {
52614 + .ctl_name = CTL_UNNUMBERED,
52615 + .procname = "ip_blackhole",
52616 + .data = &grsec_enable_blackhole,
52617 + .maxlen = sizeof(int),
52618 + .mode = 0600,
52619 + .proc_handler = &proc_dointvec,
52620 + },
52621 + {
52622 + .ctl_name = CTL_UNNUMBERED,
52623 + .procname = "lastack_retries",
52624 + .data = &grsec_lastack_retries,
52625 + .maxlen = sizeof(int),
52626 + .mode = 0600,
52627 + .proc_handler = &proc_dointvec,
52628 + },
52629 +#endif
52630 +#ifdef CONFIG_GRKERNSEC_EXECLOG
52631 + {
52632 + .ctl_name = CTL_UNNUMBERED,
52633 + .procname = "exec_logging",
52634 + .data = &grsec_enable_execlog,
52635 + .maxlen = sizeof(int),
52636 + .mode = 0600,
52637 + .proc_handler = &proc_dointvec,
52638 + },
52639 +#endif
52640 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52641 + {
52642 + .ctl_name = CTL_UNNUMBERED,
52643 + .procname = "rwxmap_logging",
52644 + .data = &grsec_enable_log_rwxmaps,
52645 + .maxlen = sizeof(int),
52646 + .mode = 0600,
52647 + .proc_handler = &proc_dointvec,
52648 + },
52649 +#endif
52650 +#ifdef CONFIG_GRKERNSEC_SIGNAL
52651 + {
52652 + .ctl_name = CTL_UNNUMBERED,
52653 + .procname = "signal_logging",
52654 + .data = &grsec_enable_signal,
52655 + .maxlen = sizeof(int),
52656 + .mode = 0600,
52657 + .proc_handler = &proc_dointvec,
52658 + },
52659 +#endif
52660 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
52661 + {
52662 + .ctl_name = CTL_UNNUMBERED,
52663 + .procname = "forkfail_logging",
52664 + .data = &grsec_enable_forkfail,
52665 + .maxlen = sizeof(int),
52666 + .mode = 0600,
52667 + .proc_handler = &proc_dointvec,
52668 + },
52669 +#endif
52670 +#ifdef CONFIG_GRKERNSEC_TIME
52671 + {
52672 + .ctl_name = CTL_UNNUMBERED,
52673 + .procname = "timechange_logging",
52674 + .data = &grsec_enable_time,
52675 + .maxlen = sizeof(int),
52676 + .mode = 0600,
52677 + .proc_handler = &proc_dointvec,
52678 + },
52679 +#endif
52680 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52681 + {
52682 + .ctl_name = CTL_UNNUMBERED,
52683 + .procname = "chroot_deny_shmat",
52684 + .data = &grsec_enable_chroot_shmat,
52685 + .maxlen = sizeof(int),
52686 + .mode = 0600,
52687 + .proc_handler = &proc_dointvec,
52688 + },
52689 +#endif
52690 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52691 + {
52692 + .ctl_name = CTL_UNNUMBERED,
52693 + .procname = "chroot_deny_unix",
52694 + .data = &grsec_enable_chroot_unix,
52695 + .maxlen = sizeof(int),
52696 + .mode = 0600,
52697 + .proc_handler = &proc_dointvec,
52698 + },
52699 +#endif
52700 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52701 + {
52702 + .ctl_name = CTL_UNNUMBERED,
52703 + .procname = "chroot_deny_mount",
52704 + .data = &grsec_enable_chroot_mount,
52705 + .maxlen = sizeof(int),
52706 + .mode = 0600,
52707 + .proc_handler = &proc_dointvec,
52708 + },
52709 +#endif
52710 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52711 + {
52712 + .ctl_name = CTL_UNNUMBERED,
52713 + .procname = "chroot_deny_fchdir",
52714 + .data = &grsec_enable_chroot_fchdir,
52715 + .maxlen = sizeof(int),
52716 + .mode = 0600,
52717 + .proc_handler = &proc_dointvec,
52718 + },
52719 +#endif
52720 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52721 + {
52722 + .ctl_name = CTL_UNNUMBERED,
52723 + .procname = "chroot_deny_chroot",
52724 + .data = &grsec_enable_chroot_double,
52725 + .maxlen = sizeof(int),
52726 + .mode = 0600,
52727 + .proc_handler = &proc_dointvec,
52728 + },
52729 +#endif
52730 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52731 + {
52732 + .ctl_name = CTL_UNNUMBERED,
52733 + .procname = "chroot_deny_pivot",
52734 + .data = &grsec_enable_chroot_pivot,
52735 + .maxlen = sizeof(int),
52736 + .mode = 0600,
52737 + .proc_handler = &proc_dointvec,
52738 + },
52739 +#endif
52740 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52741 + {
52742 + .ctl_name = CTL_UNNUMBERED,
52743 + .procname = "chroot_enforce_chdir",
52744 + .data = &grsec_enable_chroot_chdir,
52745 + .maxlen = sizeof(int),
52746 + .mode = 0600,
52747 + .proc_handler = &proc_dointvec,
52748 + },
52749 +#endif
52750 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52751 + {
52752 + .ctl_name = CTL_UNNUMBERED,
52753 + .procname = "chroot_deny_chmod",
52754 + .data = &grsec_enable_chroot_chmod,
52755 + .maxlen = sizeof(int),
52756 + .mode = 0600,
52757 + .proc_handler = &proc_dointvec,
52758 + },
52759 +#endif
52760 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52761 + {
52762 + .ctl_name = CTL_UNNUMBERED,
52763 + .procname = "chroot_deny_mknod",
52764 + .data = &grsec_enable_chroot_mknod,
52765 + .maxlen = sizeof(int),
52766 + .mode = 0600,
52767 + .proc_handler = &proc_dointvec,
52768 + },
52769 +#endif
52770 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52771 + {
52772 + .ctl_name = CTL_UNNUMBERED,
52773 + .procname = "chroot_restrict_nice",
52774 + .data = &grsec_enable_chroot_nice,
52775 + .maxlen = sizeof(int),
52776 + .mode = 0600,
52777 + .proc_handler = &proc_dointvec,
52778 + },
52779 +#endif
52780 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52781 + {
52782 + .ctl_name = CTL_UNNUMBERED,
52783 + .procname = "chroot_execlog",
52784 + .data = &grsec_enable_chroot_execlog,
52785 + .maxlen = sizeof(int),
52786 + .mode = 0600,
52787 + .proc_handler = &proc_dointvec,
52788 + },
52789 +#endif
52790 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52791 + {
52792 + .ctl_name = CTL_UNNUMBERED,
52793 + .procname = "chroot_caps",
52794 + .data = &grsec_enable_chroot_caps,
52795 + .maxlen = sizeof(int),
52796 + .mode = 0600,
52797 + .proc_handler = &proc_dointvec,
52798 + },
52799 +#endif
52800 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52801 + {
52802 + .ctl_name = CTL_UNNUMBERED,
52803 + .procname = "chroot_deny_sysctl",
52804 + .data = &grsec_enable_chroot_sysctl,
52805 + .maxlen = sizeof(int),
52806 + .mode = 0600,
52807 + .proc_handler = &proc_dointvec,
52808 + },
52809 +#endif
52810 +#ifdef CONFIG_GRKERNSEC_TPE
52811 + {
52812 + .ctl_name = CTL_UNNUMBERED,
52813 + .procname = "tpe",
52814 + .data = &grsec_enable_tpe,
52815 + .maxlen = sizeof(int),
52816 + .mode = 0600,
52817 + .proc_handler = &proc_dointvec,
52818 + },
52819 + {
52820 + .ctl_name = CTL_UNNUMBERED,
52821 + .procname = "tpe_gid",
52822 + .data = &grsec_tpe_gid,
52823 + .maxlen = sizeof(int),
52824 + .mode = 0600,
52825 + .proc_handler = &proc_dointvec,
52826 + },
52827 +#endif
52828 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52829 + {
52830 + .ctl_name = CTL_UNNUMBERED,
52831 + .procname = "tpe_invert",
52832 + .data = &grsec_enable_tpe_invert,
52833 + .maxlen = sizeof(int),
52834 + .mode = 0600,
52835 + .proc_handler = &proc_dointvec,
52836 + },
52837 +#endif
52838 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52839 + {
52840 + .ctl_name = CTL_UNNUMBERED,
52841 + .procname = "tpe_restrict_all",
52842 + .data = &grsec_enable_tpe_all,
52843 + .maxlen = sizeof(int),
52844 + .mode = 0600,
52845 + .proc_handler = &proc_dointvec,
52846 + },
52847 +#endif
52848 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52849 + {
52850 + .ctl_name = CTL_UNNUMBERED,
52851 + .procname = "socket_all",
52852 + .data = &grsec_enable_socket_all,
52853 + .maxlen = sizeof(int),
52854 + .mode = 0600,
52855 + .proc_handler = &proc_dointvec,
52856 + },
52857 + {
52858 + .ctl_name = CTL_UNNUMBERED,
52859 + .procname = "socket_all_gid",
52860 + .data = &grsec_socket_all_gid,
52861 + .maxlen = sizeof(int),
52862 + .mode = 0600,
52863 + .proc_handler = &proc_dointvec,
52864 + },
52865 +#endif
52866 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52867 + {
52868 + .ctl_name = CTL_UNNUMBERED,
52869 + .procname = "socket_client",
52870 + .data = &grsec_enable_socket_client,
52871 + .maxlen = sizeof(int),
52872 + .mode = 0600,
52873 + .proc_handler = &proc_dointvec,
52874 + },
52875 + {
52876 + .ctl_name = CTL_UNNUMBERED,
52877 + .procname = "socket_client_gid",
52878 + .data = &grsec_socket_client_gid,
52879 + .maxlen = sizeof(int),
52880 + .mode = 0600,
52881 + .proc_handler = &proc_dointvec,
52882 + },
52883 +#endif
52884 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52885 + {
52886 + .ctl_name = CTL_UNNUMBERED,
52887 + .procname = "socket_server",
52888 + .data = &grsec_enable_socket_server,
52889 + .maxlen = sizeof(int),
52890 + .mode = 0600,
52891 + .proc_handler = &proc_dointvec,
52892 + },
52893 + {
52894 + .ctl_name = CTL_UNNUMBERED,
52895 + .procname = "socket_server_gid",
52896 + .data = &grsec_socket_server_gid,
52897 + .maxlen = sizeof(int),
52898 + .mode = 0600,
52899 + .proc_handler = &proc_dointvec,
52900 + },
52901 +#endif
52902 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52903 + {
52904 + .ctl_name = CTL_UNNUMBERED,
52905 + .procname = "audit_group",
52906 + .data = &grsec_enable_group,
52907 + .maxlen = sizeof(int),
52908 + .mode = 0600,
52909 + .proc_handler = &proc_dointvec,
52910 + },
52911 + {
52912 + .ctl_name = CTL_UNNUMBERED,
52913 + .procname = "audit_gid",
52914 + .data = &grsec_audit_gid,
52915 + .maxlen = sizeof(int),
52916 + .mode = 0600,
52917 + .proc_handler = &proc_dointvec,
52918 + },
52919 +#endif
52920 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52921 + {
52922 + .ctl_name = CTL_UNNUMBERED,
52923 + .procname = "audit_chdir",
52924 + .data = &grsec_enable_chdir,
52925 + .maxlen = sizeof(int),
52926 + .mode = 0600,
52927 + .proc_handler = &proc_dointvec,
52928 + },
52929 +#endif
52930 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52931 + {
52932 + .ctl_name = CTL_UNNUMBERED,
52933 + .procname = "audit_mount",
52934 + .data = &grsec_enable_mount,
52935 + .maxlen = sizeof(int),
52936 + .mode = 0600,
52937 + .proc_handler = &proc_dointvec,
52938 + },
52939 +#endif
52940 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52941 + {
52942 + .ctl_name = CTL_UNNUMBERED,
52943 + .procname = "audit_textrel",
52944 + .data = &grsec_enable_audit_textrel,
52945 + .maxlen = sizeof(int),
52946 + .mode = 0600,
52947 + .proc_handler = &proc_dointvec,
52948 + },
52949 +#endif
52950 +#ifdef CONFIG_GRKERNSEC_DMESG
52951 + {
52952 + .ctl_name = CTL_UNNUMBERED,
52953 + .procname = "dmesg",
52954 + .data = &grsec_enable_dmesg,
52955 + .maxlen = sizeof(int),
52956 + .mode = 0600,
52957 + .proc_handler = &proc_dointvec,
52958 + },
52959 +#endif
52960 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52961 + {
52962 + .ctl_name = CTL_UNNUMBERED,
52963 + .procname = "chroot_findtask",
52964 + .data = &grsec_enable_chroot_findtask,
52965 + .maxlen = sizeof(int),
52966 + .mode = 0600,
52967 + .proc_handler = &proc_dointvec,
52968 + },
52969 +#endif
52970 +#ifdef CONFIG_GRKERNSEC_RESLOG
52971 + {
52972 + .ctl_name = CTL_UNNUMBERED,
52973 + .procname = "resource_logging",
52974 + .data = &grsec_resource_logging,
52975 + .maxlen = sizeof(int),
52976 + .mode = 0600,
52977 + .proc_handler = &proc_dointvec,
52978 + },
52979 +#endif
52980 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52981 + {
52982 + .ctl_name = CTL_UNNUMBERED,
52983 + .procname = "audit_ptrace",
52984 + .data = &grsec_enable_audit_ptrace,
52985 + .maxlen = sizeof(int),
52986 + .mode = 0600,
52987 + .proc_handler = &proc_dointvec,
52988 + },
52989 +#endif
52990 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52991 + {
52992 + .ctl_name = CTL_UNNUMBERED,
52993 + .procname = "harden_ptrace",
52994 + .data = &grsec_enable_harden_ptrace,
52995 + .maxlen = sizeof(int),
52996 + .mode = 0600,
52997 + .proc_handler = &proc_dointvec,
52998 + },
52999 +#endif
53000 + {
53001 + .ctl_name = CTL_UNNUMBERED,
53002 + .procname = "grsec_lock",
53003 + .data = &grsec_lock,
53004 + .maxlen = sizeof(int),
53005 + .mode = 0600,
53006 + .proc_handler = &proc_dointvec,
53007 + },
53008 +#endif
53009 +#ifdef CONFIG_GRKERNSEC_ROFS
53010 + {
53011 + .ctl_name = CTL_UNNUMBERED,
53012 + .procname = "romount_protect",
53013 + .data = &grsec_enable_rofs,
53014 + .maxlen = sizeof(int),
53015 + .mode = 0600,
53016 + .proc_handler = &proc_dointvec_minmax,
53017 + .extra1 = &one,
53018 + .extra2 = &one,
53019 + },
53020 +#endif
53021 + { .ctl_name = 0 }
53022 +};
53023 +#endif
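
Each entry in grsecurity_table above is a plain integer sysctl, and gr_handle_sysctl_mod() rejects further writes once grsec_lock is set. The sketch below shows how an administrator might flip one of these knobs and then lock the set from userland. It assumes the table is registered under the "kernel" sysctl directory, giving paths of the form /proc/sys/kernel/grsecurity/<name>; if your tree registers it elsewhere, the paths differ. Requires root and CONFIG_GRKERNSEC_SYSCTL.

/* Toggle a grsecurity sysctl, then lock further changes. */
#include <stdio.h>

static int write_sysctl(const char *path, const char *value)
{
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return -1;
    }
    fputs(value, f);
    fclose(f);
    return 0;
}

int main(void)
{
    /* enable audit of (un)mount events, then set grsec_lock: after that,
     * gr_handle_sysctl_mod() above returns -EACCES for any further write
     * to the grsecurity directory. */
    write_sysctl("/proc/sys/kernel/grsecurity/audit_mount", "1");
    write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
    return 0;
}
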
53024 diff -urNp linux-2.6.32.43/grsecurity/grsec_time.c linux-2.6.32.43/grsecurity/grsec_time.c
53025 --- linux-2.6.32.43/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
53026 +++ linux-2.6.32.43/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
53027 @@ -0,0 +1,16 @@
53028 +#include <linux/kernel.h>
53029 +#include <linux/sched.h>
53030 +#include <linux/grinternal.h>
53031 +#include <linux/module.h>
53032 +
53033 +void
53034 +gr_log_timechange(void)
53035 +{
53036 +#ifdef CONFIG_GRKERNSEC_TIME
53037 + if (grsec_enable_time)
53038 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
53039 +#endif
53040 + return;
53041 +}
53042 +
53043 +EXPORT_SYMBOL(gr_log_timechange);
53044 diff -urNp linux-2.6.32.43/grsecurity/grsec_tpe.c linux-2.6.32.43/grsecurity/grsec_tpe.c
53045 --- linux-2.6.32.43/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
53046 +++ linux-2.6.32.43/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
53047 @@ -0,0 +1,39 @@
53048 +#include <linux/kernel.h>
53049 +#include <linux/sched.h>
53050 +#include <linux/file.h>
53051 +#include <linux/fs.h>
53052 +#include <linux/grinternal.h>
53053 +
53054 +extern int gr_acl_tpe_check(void);
53055 +
53056 +int
53057 +gr_tpe_allow(const struct file *file)
53058 +{
53059 +#ifdef CONFIG_GRKERNSEC
53060 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
53061 + const struct cred *cred = current_cred();
53062 +
53063 + if (cred->uid && ((grsec_enable_tpe &&
53064 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53065 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
53066 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
53067 +#else
53068 + in_group_p(grsec_tpe_gid)
53069 +#endif
53070 + ) || gr_acl_tpe_check()) &&
53071 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
53072 + (inode->i_mode & S_IWOTH))))) {
53073 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53074 + return 0;
53075 + }
53076 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
53077 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
53078 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
53079 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
53080 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53081 + return 0;
53082 + }
53083 +#endif
53084 +#endif
53085 + return 1;
53086 +}
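
gr_tpe_allow() above decides whether an untrusted-path execution should be blocked: for a non-root user subject to TPE, the binary's parent directory must be root-owned and not group- or world-writable, and the TPE_ALL variant additionally tolerates directories owned by the invoking user. The following userspace predicate models the TPE_ALL-style directory check by stat()ing the parent directory; it stands in for the dentry->d_parent->d_inode the kernel already holds and is purely illustrative.

/* Model of the directory-trust test in gr_tpe_allow() (TPE_ALL flavour). */
#include <libgen.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* Returns 1 if the parent directory of 'path' would be considered trusted
 * for the given uid, else 0. */
static int tpe_dir_trusted(const char *path, uid_t uid)
{
    char buf[4096];
    struct stat st;

    strncpy(buf, path, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    if (stat(dirname(buf), &st) != 0)
        return 0;
    if (st.st_mode & (S_IWGRP | S_IWOTH))   /* group/world writable: never trusted */
        return 0;
    if (st.st_uid != 0 && st.st_uid != uid) /* must be root- or self-owned */
        return 0;
    return 1;
}

int main(void)
{
    printf("/usr/bin/ls: %s\n",
           tpe_dir_trusted("/usr/bin/ls", getuid()) ? "trusted" : "untrusted");
    printf("binary in /tmp: %s\n",
           tpe_dir_trusted("/tmp/evil", getuid()) ? "trusted" : "untrusted");
    return 0;
}
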
53087 diff -urNp linux-2.6.32.43/grsecurity/grsum.c linux-2.6.32.43/grsecurity/grsum.c
53088 --- linux-2.6.32.43/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
53089 +++ linux-2.6.32.43/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
53090 @@ -0,0 +1,61 @@
53091 +#include <linux/err.h>
53092 +#include <linux/kernel.h>
53093 +#include <linux/sched.h>
53094 +#include <linux/mm.h>
53095 +#include <linux/scatterlist.h>
53096 +#include <linux/crypto.h>
53097 +#include <linux/gracl.h>
53098 +
53099 +
53100 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
53101 +#error "crypto and sha256 must be built into the kernel"
53102 +#endif
53103 +
53104 +int
53105 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
53106 +{
53107 + char *p;
53108 + struct crypto_hash *tfm;
53109 + struct hash_desc desc;
53110 + struct scatterlist sg;
53111 + unsigned char temp_sum[GR_SHA_LEN];
53112 + volatile int retval = 0;
53113 + volatile int dummy = 0;
53114 + unsigned int i;
53115 +
53116 + sg_init_table(&sg, 1);
53117 +
53118 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
53119 + if (IS_ERR(tfm)) {
53120 + /* should never happen, since sha256 should be built in */
53121 + return 1;
53122 + }
53123 +
53124 + desc.tfm = tfm;
53125 + desc.flags = 0;
53126 +
53127 + crypto_hash_init(&desc);
53128 +
53129 + p = salt;
53130 + sg_set_buf(&sg, p, GR_SALT_LEN);
53131 + crypto_hash_update(&desc, &sg, sg.length);
53132 +
53133 + p = entry->pw;
53134 + sg_set_buf(&sg, p, strlen(p));
53135 +
53136 + crypto_hash_update(&desc, &sg, sg.length);
53137 +
53138 + crypto_hash_final(&desc, temp_sum);
53139 +
53140 + memset(entry->pw, 0, GR_PW_LEN);
53141 +
53142 + for (i = 0; i < GR_SHA_LEN; i++)
53143 + if (sum[i] != temp_sum[i])
53144 + retval = 1;
53145 + else
53146 + dummy = 1; // waste a cycle
53147 +
53148 + crypto_free_hash(tfm);
53149 +
53150 + return retval;
53151 +}
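
chkpw() above compares the computed SHA-256 digest against the stored one byte by byte without breaking out of the loop, using a volatile dummy assignment so the comparison takes roughly the same time whether the first or the last byte differs. A minimal sketch of the same idea follows, written with the common OR-of-XOR accumulation rather than the patch's dummy branch; lengths and names are illustrative.

/* Fixed-iteration digest comparison: time is independent of where the
 * first mismatch occurs. */
#include <stddef.h>
#include <stdio.h>

/* Returns 0 if equal, nonzero otherwise. */
static int fixed_time_memcmp(const unsigned char *a, const unsigned char *b,
                             size_t len)
{
    unsigned char diff = 0;

    for (size_t i = 0; i < len; i++)
        diff |= a[i] ^ b[i];    /* never break out early */
    return diff;
}

int main(void)
{
    unsigned char stored[32]   = { 0xde, 0xad, 0xbe, 0xef };
    unsigned char computed[32] = { 0xde, 0xad, 0xbe, 0xef };

    printf("match: %s\n", fixed_time_memcmp(stored, computed, 32) ? "no" : "yes");
    computed[31] ^= 1;
    printf("match after tweak: %s\n", fixed_time_memcmp(stored, computed, 32) ? "no" : "yes");
    return 0;
}
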
53152 diff -urNp linux-2.6.32.43/grsecurity/Kconfig linux-2.6.32.43/grsecurity/Kconfig
53153 --- linux-2.6.32.43/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
53154 +++ linux-2.6.32.43/grsecurity/Kconfig 2011-07-06 19:57:57.000000000 -0400
53155 @@ -0,0 +1,1047 @@
53156 +#
53157 +# grsecurity configuration
53158 +#
53159 +
53160 +menu "Grsecurity"
53161 +
53162 +config GRKERNSEC
53163 + bool "Grsecurity"
53164 + select CRYPTO
53165 + select CRYPTO_SHA256
53166 + help
53167 + If you say Y here, you will be able to configure many features
53168 + that will enhance the security of your system. It is highly
53169 + recommended that you say Y here and read through the help
53170 + for each option so that you fully understand the features and
53171 + can evaluate their usefulness for your machine.
53172 +
53173 +choice
53174 + prompt "Security Level"
53175 + depends on GRKERNSEC
53176 + default GRKERNSEC_CUSTOM
53177 +
53178 +config GRKERNSEC_LOW
53179 + bool "Low"
53180 + select GRKERNSEC_LINK
53181 + select GRKERNSEC_FIFO
53182 + select GRKERNSEC_EXECVE
53183 + select GRKERNSEC_RANDNET
53184 + select GRKERNSEC_DMESG
53185 + select GRKERNSEC_CHROOT
53186 + select GRKERNSEC_CHROOT_CHDIR
53187 +
53188 + help
53189 + If you choose this option, several of the grsecurity options will
53190 + be enabled that will give you greater protection against a number
53191 + of attacks, while assuring that none of your software will have any
53192 + conflicts with the additional security measures. If you run a lot
53193 + of unusual software, or you are having problems with the higher
53194 + security levels, you should say Y here. With this option, the
53195 + following features are enabled:
53196 +
53197 + - Linking restrictions
53198 + - FIFO restrictions
53199 + - Enforcing RLIMIT_NPROC on execve
53200 + - Restricted dmesg
53201 + - Enforced chdir("/") on chroot
53202 + - Runtime module disabling
53203 +
53204 +config GRKERNSEC_MEDIUM
53205 + bool "Medium"
53206 + select PAX
53207 + select PAX_EI_PAX
53208 + select PAX_PT_PAX_FLAGS
53209 + select PAX_HAVE_ACL_FLAGS
53210 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53211 + select GRKERNSEC_CHROOT
53212 + select GRKERNSEC_CHROOT_SYSCTL
53213 + select GRKERNSEC_LINK
53214 + select GRKERNSEC_FIFO
53215 + select GRKERNSEC_EXECVE
53216 + select GRKERNSEC_DMESG
53217 + select GRKERNSEC_RANDNET
53218 + select GRKERNSEC_FORKFAIL
53219 + select GRKERNSEC_TIME
53220 + select GRKERNSEC_SIGNAL
53221 + select GRKERNSEC_CHROOT
53222 + select GRKERNSEC_CHROOT_UNIX
53223 + select GRKERNSEC_CHROOT_MOUNT
53224 + select GRKERNSEC_CHROOT_PIVOT
53225 + select GRKERNSEC_CHROOT_DOUBLE
53226 + select GRKERNSEC_CHROOT_CHDIR
53227 + select GRKERNSEC_CHROOT_MKNOD
53228 + select GRKERNSEC_PROC
53229 + select GRKERNSEC_PROC_USERGROUP
53230 + select PAX_RANDUSTACK
53231 + select PAX_ASLR
53232 + select PAX_RANDMMAP
53233 + select PAX_REFCOUNT if (X86 || SPARC64)
53234 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53235 +
53236 + help
53237 + If you say Y here, several features in addition to those included
53238 + in the low additional security level will be enabled. These
53239 + features provide even more security to your system, though in rare
53240 + cases they may be incompatible with very old or poorly written
53241 + software. If you enable this option, make sure that your auth
53242 + service (identd) is running as gid 1001. With this option,
53243 + the following features (in addition to those provided in the
53244 + low additional security level) will be enabled:
53245 +
53246 + - Failed fork logging
53247 + - Time change logging
53248 + - Signal logging
53249 + - Deny mounts in chroot
53250 + - Deny double chrooting
53251 + - Deny sysctl writes in chroot
53252 + - Deny mknod in chroot
53253 + - Deny access to abstract AF_UNIX sockets out of chroot
53254 + - Deny pivot_root in chroot
53255 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
53256 + - /proc restrictions with special GID set to 10 (usually wheel)
53257 + - Address Space Layout Randomization (ASLR)
53258 + - Prevent exploitation of most refcount overflows
53259 + - Bounds checking of copying between the kernel and userland
53260 +
53261 +config GRKERNSEC_HIGH
53262 + bool "High"
53263 + select GRKERNSEC_LINK
53264 + select GRKERNSEC_FIFO
53265 + select GRKERNSEC_EXECVE
53266 + select GRKERNSEC_DMESG
53267 + select GRKERNSEC_FORKFAIL
53268 + select GRKERNSEC_TIME
53269 + select GRKERNSEC_SIGNAL
53270 + select GRKERNSEC_CHROOT
53271 + select GRKERNSEC_CHROOT_SHMAT
53272 + select GRKERNSEC_CHROOT_UNIX
53273 + select GRKERNSEC_CHROOT_MOUNT
53274 + select GRKERNSEC_CHROOT_FCHDIR
53275 + select GRKERNSEC_CHROOT_PIVOT
53276 + select GRKERNSEC_CHROOT_DOUBLE
53277 + select GRKERNSEC_CHROOT_CHDIR
53278 + select GRKERNSEC_CHROOT_MKNOD
53279 + select GRKERNSEC_CHROOT_CAPS
53280 + select GRKERNSEC_CHROOT_SYSCTL
53281 + select GRKERNSEC_CHROOT_FINDTASK
53282 + select GRKERNSEC_SYSFS_RESTRICT
53283 + select GRKERNSEC_PROC
53284 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53285 + select GRKERNSEC_HIDESYM
53286 + select GRKERNSEC_BRUTE
53287 + select GRKERNSEC_PROC_USERGROUP
53288 + select GRKERNSEC_KMEM
53289 + select GRKERNSEC_RESLOG
53290 + select GRKERNSEC_RANDNET
53291 + select GRKERNSEC_PROC_ADD
53292 + select GRKERNSEC_CHROOT_CHMOD
53293 + select GRKERNSEC_CHROOT_NICE
53294 + select GRKERNSEC_AUDIT_MOUNT
53295 + select GRKERNSEC_MODHARDEN if (MODULES)
53296 + select GRKERNSEC_HARDEN_PTRACE
53297 + select GRKERNSEC_VM86 if (X86_32)
53298 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
53299 + select PAX
53300 + select PAX_RANDUSTACK
53301 + select PAX_ASLR
53302 + select PAX_RANDMMAP
53303 + select PAX_NOEXEC
53304 + select PAX_MPROTECT
53305 + select PAX_EI_PAX
53306 + select PAX_PT_PAX_FLAGS
53307 + select PAX_HAVE_ACL_FLAGS
53308 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
53309 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
53310 + select PAX_RANDKSTACK if (X86_TSC && X86)
53311 + select PAX_SEGMEXEC if (X86_32)
53312 + select PAX_PAGEEXEC
53313 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
53314 + select PAX_EMUTRAMP if (PARISC)
53315 + select PAX_EMUSIGRT if (PARISC)
53316 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
53317 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
53318 + select PAX_REFCOUNT if (X86 || SPARC64)
53319 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53320 + help
53321 + If you say Y here, many of the features of grsecurity will be
53322 + enabled, which will protect you against many kinds of attacks
53323 + against your system. The heightened security comes at a cost
53324 + of an increased chance of incompatibilities with rare software
53325 + on your machine. Since this security level enables PaX, you should
53326 + visit <http://pax.grsecurity.net> and read about the PaX
53327 + project. While you are there, download chpax and run it on
53328 + binaries that cause problems with PaX. Also remember that
53329 + since the /proc restrictions are enabled, you must run your
53330 + identd as gid 1001. This security level enables the following
53331 + features in addition to those listed in the low and medium
53332 + security levels:
53333 +
53334 + - Additional /proc restrictions
53335 + - Chmod restrictions in chroot
53336 + - No signals, ptrace, or viewing of processes outside of chroot
53337 + - Capability restrictions in chroot
53338 + - Deny fchdir out of chroot
53339 + - Priority restrictions in chroot
53340 + - Segmentation-based implementation of PaX
53341 + - Mprotect restrictions
53342 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
53343 + - Kernel stack randomization
53344 + - Mount/unmount/remount logging
53345 + - Kernel symbol hiding
53346 + - Prevention of memory exhaustion-based exploits
53347 + - Hardening of module auto-loading
53348 + - Ptrace restrictions
53349 + - Restricted vm86 mode
53350 + - Restricted sysfs/debugfs
53351 + - Active kernel exploit response
53352 +
53353 +config GRKERNSEC_CUSTOM
53354 + bool "Custom"
53355 + help
53356 + If you say Y here, you will be able to configure every grsecurity
53357 + option, which allows you to enable many more features that aren't
53358 + covered in the basic security levels. These additional features
53359 + include TPE, socket restrictions, and the sysctl system for
53360 + grsecurity. It is advised that you read through the help for
53361 + each option to determine its usefulness in your situation.
53362 +
53363 +endchoice
53364 +
53365 +menu "Address Space Protection"
53366 +depends on GRKERNSEC
53367 +
53368 +config GRKERNSEC_KMEM
53369 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
53370 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
53371 + help
53372 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
53373 + be written to via mmap or otherwise to modify the running kernel.
53374 + /dev/port will also not be allowed to be opened. If you have module
53375 + support disabled, enabling this will close up four ways that are
53376 + currently used to insert malicious code into the running kernel.
53377 + Even with all these features enabled, we still highly recommend that
53378 + you use the RBAC system, as it is still possible for an attacker to
53379 + modify the running kernel through privileged I/O granted by ioperm/iopl.
53380 + If you are not using XFree86, you may be able to stop this additional
53381 + case by enabling the 'Disable privileged I/O' option. Though nothing
53382 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
53383 + but only to video memory, which is the only writing we allow in this
53384 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
53385 + mapping will not be allowed to be mprotected with PROT_WRITE later.
53386 + It is highly recommended that you say Y here if you meet all the
53387 + conditions above.
53388 +
53389 +config GRKERNSEC_VM86
53390 + bool "Restrict VM86 mode"
53391 + depends on X86_32
53392 +
53393 + help
53394 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
53395 + make use of a special execution mode on 32bit x86 processors called
53396 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
53397 + video cards and will still work with this option enabled. The purpose
53398 + of the option is to prevent exploitation of emulation errors in
53399 + virtualization of vm86 mode like the one discovered in VMware in 2009.
53400 + Nearly all users should be able to enable this option.
53401 +
53402 +config GRKERNSEC_IO
53403 + bool "Disable privileged I/O"
53404 + depends on X86
53405 + select RTC_CLASS
53406 + select RTC_INTF_DEV
53407 + select RTC_DRV_CMOS
53408 +
53409 + help
53410 + If you say Y here, all ioperm and iopl calls will return an error.
53411 + Ioperm and iopl can be used to modify the running kernel.
53412 + Unfortunately, some programs need this access to operate properly,
53413 + the most notable of which are XFree86 and hwclock. hwclock can be
53414 + remedied by having RTC support in the kernel, so real-time
53415 + clock support is enabled if this option is enabled, to ensure
53416 + that hwclock operates correctly. XFree86 still will not
53417 + operate correctly with this option enabled, so DO NOT CHOOSE Y
53418 + IF YOU USE XFree86. If you use XFree86 and you still want to
53419 + protect your kernel against modification, use the RBAC system.
53420 +
53421 +config GRKERNSEC_PROC_MEMMAP
53422 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
53423 + default y if (PAX_NOEXEC || PAX_ASLR)
53424 + depends on PAX_NOEXEC || PAX_ASLR
53425 + help
53426 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
53427 + give no information about the addresses of a task's mappings if
53428 + PaX features that rely on random addresses are enabled on the task.
53429 + If you use PaX it is strongly recommended that you say Y here as it
53430 + closes up a hole that makes the full ASLR useless for suid
53431 + binaries.
53432 +
53433 +config GRKERNSEC_BRUTE
53434 + bool "Deter exploit bruteforcing"
53435 + help
53436 + If you say Y here, attempts to bruteforce exploits against forking
53437 + daemons such as apache or sshd, as well as against suid/sgid binaries
53438 + will be deterred. When a child of a forking daemon is killed by PaX
53439 + or crashes due to an illegal instruction or other suspicious signal,
53440 + the parent process will be delayed 30 seconds upon every subsequent
53441 + fork until the administrator is able to assess the situation and
53442 + restart the daemon.
53443 + In the suid/sgid case, the attempt is logged, the user has all their
53444 + processes terminated, and they are prevented from executing any further
53445 + processes for 15 minutes.
53446 + It is recommended that you also enable signal logging in the auditing
53447 + section so that logs are generated when a process triggers a suspicious
53448 + signal.
53449 + If the sysctl option is enabled, a sysctl option with name
53450 + "deter_bruteforce" is created.
53451 +
53452 +config GRKERNSEC_MODHARDEN
53453 + bool "Harden module auto-loading"
53454 + depends on MODULES
53455 + help
53456 + If you say Y here, module auto-loading in response to use of some
53457 + feature implemented by an unloaded module will be restricted to
53458 + root users. Enabling this option helps defend against attacks
53459 + by unprivileged users who abuse the auto-loading behavior to
53460 + cause a vulnerable module to load that is then exploited.
53461 +
53462 + If this option prevents a legitimate use of auto-loading for a
53463 + non-root user, the administrator can execute modprobe manually
53464 + with the exact name of the module mentioned in the alert log.
53465 + Alternatively, the administrator can add the module to the list
53466 + of modules loaded at boot by modifying init scripts.
53467 +
53468 + Modification of init scripts will most likely be needed on
53469 + Ubuntu servers with encrypted home directory support enabled,
53470 + as the first non-root user logging in will cause the ecb(aes),
53471 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
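As a rough illustration only (the module name and the /etc/modules path are distro-dependent assumptions, not something this option creates), an administrator could handle a blocked legitimate auto-load like this:

    # load the module once, using the exact name reported in the grsec alert
    modprobe 'ecb(aes)'
    # or have it loaded on every boot (Debian-style /etc/modules shown here)
    echo 'ecb(aes)' >> /etc/modules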
53472 +
53473 +config GRKERNSEC_HIDESYM
53474 + bool "Hide kernel symbols"
53475 + help
53476 + If you say Y here, getting information on loaded modules, and
53477 + displaying all kernel symbols through a syscall will be restricted
53478 + to users with CAP_SYS_MODULE. For software compatibility reasons,
53479 + /proc/kallsyms will be restricted to the root user. The RBAC
53480 + system can hide that entry even from root.
53481 +
53482 + This option also prevents leaking of kernel addresses through
53483 + several /proc entries.
53484 +
53485 + Note that this option is only effective provided the following
53486 + conditions are met:
53487 + 1) The kernel using grsecurity is not precompiled by some distribution
53488 + 2) You have also enabled GRKERNSEC_DMESG
53489 + 3) You are using the RBAC system and hiding other files such as your
53490 + kernel image and System.map. Alternatively, enabling this option
53491 + causes the permissions on /boot, /lib/modules, and the kernel
53492 + source directory to change at compile time to prevent
53493 + reading by non-root users.
53494 + If the above conditions are met, this option will aid in providing a
53495 + useful protection against local kernel exploitation of overflows
53496 + and arbitrary read/write vulnerabilities.
53497 +
53498 +config GRKERNSEC_KERN_LOCKOUT
53499 + bool "Active kernel exploit response"
53500 + depends on X86 || ARM || PPC || SPARC
53501 + help
53502 + If you say Y here, when a PaX alert is triggered due to suspicious
53503 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
53504 + or an OOPS occurs due to bad memory accesses, instead of just
53505 + terminating the offending process (and potentially allowing
53506 + a subsequent exploit from the same user), we will take one of two
53507 + actions:
53508 + If the user was root, we will panic the system
53509 + If the user was non-root, we will log the attempt, terminate
53510 + all processes owned by the user, then prevent them from creating
53511 + any new processes until the system is restarted
53512 + This deters repeated kernel exploitation/bruteforcing attempts
53513 + and is useful for later forensics.
53514 +
53515 +endmenu
53516 +menu "Role Based Access Control Options"
53517 +depends on GRKERNSEC
53518 +
53519 +config GRKERNSEC_RBAC_DEBUG
53520 + bool
53521 +
53522 +config GRKERNSEC_NO_RBAC
53523 + bool "Disable RBAC system"
53524 + help
53525 + If you say Y here, the /dev/grsec device will be removed from the kernel,
53526 + preventing the RBAC system from being enabled. You should only say Y
53527 + here if you have no intention of using the RBAC system, so as to prevent
53528 + an attacker with root access from misusing the RBAC system to hide files
53529 + and processes when loadable module support and /dev/[k]mem have been
53530 + locked down.
53531 +
53532 +config GRKERNSEC_ACL_HIDEKERN
53533 + bool "Hide kernel processes"
53534 + help
53535 + If you say Y here, all kernel threads will be hidden from all
53536 + processes but those whose subject has the "view hidden processes"
53537 + flag.
53538 +
53539 +config GRKERNSEC_ACL_MAXTRIES
53540 + int "Maximum tries before password lockout"
53541 + default 3
53542 + help
53543 + This option enforces the maximum number of times a user can attempt
53544 + to authorize themselves with the grsecurity RBAC system before being
53545 + denied the ability to attempt authorization again for a specified time.
53546 + The lower the number, the harder it will be to brute-force a password.
53547 +
53548 +config GRKERNSEC_ACL_TIMEOUT
53549 + int "Time to wait after max password tries, in seconds"
53550 + default 30
53551 + help
53552 + This option specifies the time the user must wait after attempting to
53553 + authorize to the RBAC system with the maximum number of invalid
53554 + passwords. The higher the number, the harder it will be to brute-force
53555 + a password.
53556 +
53557 +endmenu
53558 +menu "Filesystem Protections"
53559 +depends on GRKERNSEC
53560 +
53561 +config GRKERNSEC_PROC
53562 + bool "Proc restrictions"
53563 + help
53564 + If you say Y here, the permissions of the /proc filesystem
53565 + will be altered to enhance system security and privacy. You MUST
53566 + choose either a user only restriction or a user and group restriction.
53567 + Depending upon the option you choose, you can either restrict users to
53568 + seeing only the processes they themselves run (the "restrict to user
53569 + only" option), or additionally choose a group that can view all
53570 + processes and files normally restricted to root. NOTE: If you're running identd as
53571 + a non-root user, you will have to run it as the group you specify here.
53572 +
53573 +config GRKERNSEC_PROC_USER
53574 + bool "Restrict /proc to user only"
53575 + depends on GRKERNSEC_PROC
53576 + help
53577 + If you say Y here, non-root users will only be able to view their own
53578 + processes, and will be restricted from viewing network-related
53579 + information as well as kernel symbol and module information.
53580 +
53581 +config GRKERNSEC_PROC_USERGROUP
53582 + bool "Allow special group"
53583 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53584 + help
53585 + If you say Y here, you will be able to select a group that will be
53586 + able to view all processes and network-related information. If you've
53587 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53588 + remain hidden. This option is useful if you want to run identd as
53589 + a non-root user.
53590 +
53591 +config GRKERNSEC_PROC_GID
53592 + int "GID for special group"
53593 + depends on GRKERNSEC_PROC_USERGROUP
53594 + default 1001
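Purely as an illustrative sketch (the group name and the identd account are assumptions; only the GID value comes from this option), the special group could be prepared like this:

    # create a group whose GID matches GRKERNSEC_PROC_GID
    groupadd -g 1001 procview
    # let the identd service account read /proc via that group
    usermod -aG procview identd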
53595 +
53596 +config GRKERNSEC_PROC_ADD
53597 + bool "Additional restrictions"
53598 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53599 + help
53600 + If you say Y here, additional restrictions will be placed on
53601 + /proc that keep normal users from viewing device information and
53602 + slabinfo information that could be useful for exploits.
53603 +
53604 +config GRKERNSEC_LINK
53605 + bool "Linking restrictions"
53606 + help
53607 + If you say Y here, /tmp race exploits will be prevented, since users
53608 + will no longer be able to follow symlinks owned by other users in
53609 + world-writable +t directories (e.g. /tmp), unless the owner of the
53610 + symlink is the owner of the directory. Users will also not be
53611 + able to hardlink to files they do not own. If the sysctl option is
53612 + enabled, a sysctl option with name "linking_restrictions" is created.
53613 +
53614 +config GRKERNSEC_FIFO
53615 + bool "FIFO restrictions"
53616 + help
53617 + If you say Y here, users will not be able to write to FIFOs they don't
53618 + own in world-writable +t directories (e.g. /tmp), unless the owner of
53619 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
53620 + option is enabled, a sysctl option with name "fifo_restrictions" is
53621 + created.
53622 +
53623 +config GRKERNSEC_SYSFS_RESTRICT
53624 + bool "Sysfs/debugfs restriction"
53625 + depends on SYSFS
53626 + help
53627 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53628 + any filesystem normally mounted under it (e.g. debugfs) will only
53629 + be accessible by root. These filesystems generally provide access
53630 + to hardware and debug information that isn't appropriate for unprivileged
53631 + users of the system. Sysfs and debugfs have also become a large source
53632 + of new vulnerabilities, ranging from infoleaks to local compromise.
53633 + Very little security oversight has been involved in adding new
53634 + exporters of information to these filesystems, so their
53635 + use is discouraged.
53636 + This option is equivalent to a chmod 0700 of the mount paths.
53637 +
53638 +config GRKERNSEC_ROFS
53639 + bool "Runtime read-only mount protection"
53640 + help
53641 + If you say Y here, a sysctl option with name "romount_protect" will
53642 + be created. By setting this option to 1 at runtime, filesystems
53643 + will be protected in the following ways:
53644 + * No new writable mounts will be allowed
53645 + * Existing read-only mounts won't be able to be remounted read/write
53646 + * Write operations will be denied on all block devices
53647 + This option acts independently of grsec_lock: once it is set to 1,
53648 + it cannot be turned off. Therefore, please be mindful of the resulting
53649 + behavior if this option is enabled in an init script on a read-only
53650 + filesystem. This feature is mainly intended for secure embedded systems.
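A minimal usage sketch, assuming the kernel was built with this option and with GRKERNSEC_SYSCTL enabled:

    # lock down mounts and block-device writes; this cannot be undone
    # without a reboot
    echo 1 > /proc/sys/kernel/grsecurity/romount_protect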
53651 +
53652 +config GRKERNSEC_CHROOT
53653 + bool "Chroot jail restrictions"
53654 + help
53655 + If you say Y here, you will be able to choose several options that will
53656 + make breaking out of a chrooted jail much more difficult. If you
53657 + encounter no software incompatibilities with the following options, it
53658 + is recommended that you enable each one.
53659 +
53660 +config GRKERNSEC_CHROOT_MOUNT
53661 + bool "Deny mounts"
53662 + depends on GRKERNSEC_CHROOT
53663 + help
53664 + If you say Y here, processes inside a chroot will not be able to
53665 + mount or remount filesystems. If the sysctl option is enabled, a
53666 + sysctl option with name "chroot_deny_mount" is created.
53667 +
53668 +config GRKERNSEC_CHROOT_DOUBLE
53669 + bool "Deny double-chroots"
53670 + depends on GRKERNSEC_CHROOT
53671 + help
53672 + If you say Y here, processes inside a chroot will not be able to chroot
53673 + again outside the chroot. This is a widely used method of breaking
53674 + out of a chroot jail and should not be allowed. If the sysctl
53675 + option is enabled, a sysctl option with name
53676 + "chroot_deny_chroot" is created.
53677 +
53678 +config GRKERNSEC_CHROOT_PIVOT
53679 + bool "Deny pivot_root in chroot"
53680 + depends on GRKERNSEC_CHROOT
53681 + help
53682 + If you say Y here, processes inside a chroot will not be able to use
53683 + a function called pivot_root() that was introduced in Linux 2.3.41. It
53684 + works similarly to chroot in that it changes the root filesystem. This
53685 + function could be misused in a chrooted process to attempt to break out
53686 + of the chroot, and therefore should not be allowed. If the sysctl
53687 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
53688 + created.
53689 +
53690 +config GRKERNSEC_CHROOT_CHDIR
53691 + bool "Enforce chdir(\"/\") on all chroots"
53692 + depends on GRKERNSEC_CHROOT
53693 + help
53694 + If you say Y here, the current working directory of all newly-chrooted
53695 + applications will be set to the root directory of the chroot.
53696 + The man page on chroot(2) states:
53697 + Note that this call does not change the current working
53698 + directory, so that `.' can be outside the tree rooted at
53699 + `/'. In particular, the super-user can escape from a
53700 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53701 +
53702 + It is recommended that you say Y here, since it's not known to break
53703 + any software. If the sysctl option is enabled, a sysctl option with
53704 + name "chroot_enforce_chdir" is created.
53705 +
53706 +config GRKERNSEC_CHROOT_CHMOD
53707 + bool "Deny (f)chmod +s"
53708 + depends on GRKERNSEC_CHROOT
53709 + help
53710 + If you say Y here, processes inside a chroot will not be able to chmod
53711 + or fchmod files to make them have suid or sgid bits. This protects
53712 + against another published method of breaking a chroot. If the sysctl
53713 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
53714 + created.
53715 +
53716 +config GRKERNSEC_CHROOT_FCHDIR
53717 + bool "Deny fchdir out of chroot"
53718 + depends on GRKERNSEC_CHROOT
53719 + help
53720 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53721 + to a file descriptor of the chrooting process that points to a directory
53722 + outside the filesystem will be stopped. If the sysctl option
53723 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53724 +
53725 +config GRKERNSEC_CHROOT_MKNOD
53726 + bool "Deny mknod"
53727 + depends on GRKERNSEC_CHROOT
53728 + help
53729 + If you say Y here, processes inside a chroot will not be allowed to
53730 + mknod. The problem with using mknod inside a chroot is that it
53731 + would allow an attacker to create a device entry that is the same
53732 + as one on the physical root of your system, which could be
53733 + anything from the console device to a device for your hard drive (which
53734 + they could then use to wipe the drive or steal data). It is recommended
53735 + that you say Y here, unless you run into software incompatibilities.
53736 + If the sysctl option is enabled, a sysctl option with name
53737 + "chroot_deny_mknod" is created.
53738 +
53739 +config GRKERNSEC_CHROOT_SHMAT
53740 + bool "Deny shmat() out of chroot"
53741 + depends on GRKERNSEC_CHROOT
53742 + help
53743 + If you say Y here, processes inside a chroot will not be able to attach
53744 + to shared memory segments that were created outside of the chroot jail.
53745 + It is recommended that you say Y here. If the sysctl option is enabled,
53746 + a sysctl option with name "chroot_deny_shmat" is created.
53747 +
53748 +config GRKERNSEC_CHROOT_UNIX
53749 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53750 + depends on GRKERNSEC_CHROOT
53751 + help
53752 + If you say Y here, processes inside a chroot will not be able to
53753 + connect to abstract (meaning not belonging to a filesystem) Unix
53754 + domain sockets that were bound outside of a chroot. It is recommended
53755 + that you say Y here. If the sysctl option is enabled, a sysctl option
53756 + with name "chroot_deny_unix" is created.
53757 +
53758 +config GRKERNSEC_CHROOT_FINDTASK
53759 + bool "Protect outside processes"
53760 + depends on GRKERNSEC_CHROOT
53761 + help
53762 + If you say Y here, processes inside a chroot will not be able to
53763 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53764 + getsid, or view any process outside of the chroot. If the sysctl
53765 + option is enabled, a sysctl option with name "chroot_findtask" is
53766 + created.
53767 +
53768 +config GRKERNSEC_CHROOT_NICE
53769 + bool "Restrict priority changes"
53770 + depends on GRKERNSEC_CHROOT
53771 + help
53772 + If you say Y here, processes inside a chroot will not be able to raise
53773 + the priority of processes in the chroot, or alter the priority of
53774 + processes outside the chroot. This provides more security than simply
53775 + removing CAP_SYS_NICE from the process' capability set. If the
53776 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53777 + is created.
53778 +
53779 +config GRKERNSEC_CHROOT_SYSCTL
53780 + bool "Deny sysctl writes"
53781 + depends on GRKERNSEC_CHROOT
53782 + help
53783 + If you say Y here, an attacker in a chroot will not be able to
53784 + write to sysctl entries, either by sysctl(2) or through a /proc
53785 + interface. It is strongly recommended that you say Y here. If the
53786 + sysctl option is enabled, a sysctl option with name
53787 + "chroot_deny_sysctl" is created.
53788 +
53789 +config GRKERNSEC_CHROOT_CAPS
53790 + bool "Capability restrictions"
53791 + depends on GRKERNSEC_CHROOT
53792 + help
53793 + If you say Y here, the capabilities on all root processes within a
53794 + chroot jail will be lowered to stop module insertion, raw i/o,
53795 + system and net admin tasks, rebooting the system, modifying immutable
53796 + files, modifying IPC owned by another, and changing the system time.
53797 + This is left as an option because it can break some apps. Disable this
53798 + if your chrooted apps are having problems performing those kinds of
53799 + tasks. If the sysctl option is enabled, a sysctl option with
53800 + name "chroot_caps" is created.
53801 +
53802 +endmenu
53803 +menu "Kernel Auditing"
53804 +depends on GRKERNSEC
53805 +
53806 +config GRKERNSEC_AUDIT_GROUP
53807 + bool "Single group for auditing"
53808 + help
53809 + If you say Y here, the exec, chdir, and (un)mount logging features
53810 + will only operate on a group you specify. This option is recommended
53811 + if you only want to watch certain users instead of having a large
53812 + amount of logs from the entire system. If the sysctl option is enabled,
53813 + a sysctl option with name "audit_group" is created.
53814 +
53815 +config GRKERNSEC_AUDIT_GID
53816 + int "GID for auditing"
53817 + depends on GRKERNSEC_AUDIT_GROUP
53818 + default 1007
53819 +
53820 +config GRKERNSEC_EXECLOG
53821 + bool "Exec logging"
53822 + help
53823 + If you say Y here, all execve() calls will be logged (since the
53824 + other exec*() calls are frontends to execve(), all execution
53825 + will be logged). Useful for shell-servers that like to keep track
53826 + of their users. If the sysctl option is enabled, a sysctl option with
53827 + name "exec_logging" is created.
53828 + WARNING: When enabled, this option will produce a LOT of logs,
53829 + especially on an active system.
53830 +
53831 +config GRKERNSEC_RESLOG
53832 + bool "Resource logging"
53833 + help
53834 + If you say Y here, all attempts to overstep resource limits will
53835 + be logged with the resource name, the requested size, and the current
53836 + limit. It is highly recommended that you say Y here. If the sysctl
53837 + option is enabled, a sysctl option with name "resource_logging" is
53838 + created. If the RBAC system is enabled, the sysctl value is ignored.
53839 +
53840 +config GRKERNSEC_CHROOT_EXECLOG
53841 + bool "Log execs within chroot"
53842 + help
53843 + If you say Y here, all executions inside a chroot jail will be logged
53844 + to syslog. This can cause a large amount of logs if certain
53845 + applications (e.g. djb's daemontools) are installed on the system, and
53846 + is therefore left as an option. If the sysctl option is enabled, a
53847 + sysctl option with name "chroot_execlog" is created.
53848 +
53849 +config GRKERNSEC_AUDIT_PTRACE
53850 + bool "Ptrace logging"
53851 + help
53852 + If you say Y here, all attempts to attach to a process via ptrace
53853 + will be logged. If the sysctl option is enabled, a sysctl option
53854 + with name "audit_ptrace" is created.
53855 +
53856 +config GRKERNSEC_AUDIT_CHDIR
53857 + bool "Chdir logging"
53858 + help
53859 + If you say Y here, all chdir() calls will be logged. If the sysctl
53860 + option is enabled, a sysctl option with name "audit_chdir" is created.
53861 +
53862 +config GRKERNSEC_AUDIT_MOUNT
53863 + bool "(Un)Mount logging"
53864 + help
53865 + If you say Y here, all mounts and unmounts will be logged. If the
53866 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53867 + created.
53868 +
53869 +config GRKERNSEC_SIGNAL
53870 + bool "Signal logging"
53871 + help
53872 + If you say Y here, certain important signals will be logged, such as
53873 + SIGSEGV, which will as a result inform you when an error in a program
53874 + occurred, which in some cases could indicate a possible exploit attempt.
53875 + If the sysctl option is enabled, a sysctl option with name
53876 + "signal_logging" is created.
53877 +
53878 +config GRKERNSEC_FORKFAIL
53879 + bool "Fork failure logging"
53880 + help
53881 + If you say Y here, all failed fork() attempts will be logged.
53882 + This could suggest a fork bomb, or someone attempting to overstep
53883 + their process limit. If the sysctl option is enabled, a sysctl option
53884 + with name "forkfail_logging" is created.
53885 +
53886 +config GRKERNSEC_TIME
53887 + bool "Time change logging"
53888 + help
53889 + If you say Y here, any changes of the system clock will be logged.
53890 + If the sysctl option is enabled, a sysctl option with name
53891 + "timechange_logging" is created.
53892 +
53893 +config GRKERNSEC_PROC_IPADDR
53894 + bool "/proc/<pid>/ipaddr support"
53895 + help
53896 + If you say Y here, a new entry will be added to each /proc/<pid>
53897 + directory that contains the IP address of the person using the task.
53898 + The IP is carried across local TCP and AF_UNIX stream sockets.
53899 + This information can be useful for IDS/IPSes to perform remote response
53900 + to a local attack. The entry is readable by only the owner of the
53901 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53902 + the RBAC system), and thus does not create privacy concerns.
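A small sketch of how the entry might be consumed (the PID is hypothetical; the /proc/<pid>/ipaddr path comes from this option):

    # as the owner of PID 1234 (or root), show the IP recorded for the task
    cat /proc/1234/ipaddr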
53903 +
53904 +config GRKERNSEC_RWXMAP_LOG
53905 + bool 'Denied RWX mmap/mprotect logging'
53906 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53907 + help
53908 + If you say Y here, calls to mmap() and mprotect() with explicit
53909 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53910 + denied by the PAX_MPROTECT feature. If the sysctl option is
53911 + enabled, a sysctl option with name "rwxmap_logging" is created.
53912 +
53913 +config GRKERNSEC_AUDIT_TEXTREL
53914 + bool 'ELF text relocations logging (READ HELP)'
53915 + depends on PAX_MPROTECT
53916 + help
53917 + If you say Y here, text relocations will be logged with the filename
53918 + of the offending library or binary. The purpose of the feature is
53919 + to help Linux distribution developers get rid of libraries and
53920 + binaries that need text relocations which hinder the future progress
53921 + of PaX. Only Linux distribution developers should say Y here, and
53922 + never on a production machine, as this option creates an information
53923 + leak that could aid an attacker in defeating the randomization of
53924 + a single memory region. If the sysctl option is enabled, a sysctl
53925 + option with name "audit_textrel" is created.
53926 +
53927 +endmenu
53928 +
53929 +menu "Executable Protections"
53930 +depends on GRKERNSEC
53931 +
53932 +config GRKERNSEC_EXECVE
53933 + bool "Enforce RLIMIT_NPROC on execs"
53934 + help
53935 + If you say Y here, users with a resource limit on processes will
53936 + have the value checked during execve() calls. The current system
53937 + only checks the system limit during fork() calls. If the sysctl option
53938 + is enabled, a sysctl option with name "execve_limiting" is created.
53939 +
53940 +config GRKERNSEC_DMESG
53941 + bool "Dmesg(8) restriction"
53942 + help
53943 + If you say Y here, non-root users will not be able to use dmesg(8)
53944 + to view up to the last 4KB of messages in the kernel's log buffer.
53945 + The kernel's log buffer often contains kernel addresses and other
53946 + identifying information useful to an attacker in fingerprinting a
53947 + system for a targeted exploit.
53948 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53949 + created.
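With the restriction active, the effect for a non-root user is simply that reading the log buffer is refused, roughly:

    dmesg    # expected to fail with an "Operation not permitted" style error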
53950 +
53951 +config GRKERNSEC_HARDEN_PTRACE
53952 + bool "Deter ptrace-based process snooping"
53953 + help
53954 + If you say Y here, TTY sniffers and other malicious monitoring
53955 + programs implemented through ptrace will be defeated. If you
53956 + have been using the RBAC system, this option has already been
53957 + enabled for several years for all users, with the ability to make
53958 + fine-grained exceptions.
53959 +
53960 + This option only affects the ability of non-root users to ptrace
53961 + processes that are not a descendant of the ptracing process.
53962 + This means that strace ./binary and gdb ./binary will still work,
53963 + but attaching to arbitrary processes will not. If the sysctl
53964 + option is enabled, a sysctl option with name "harden_ptrace" is
53965 + created.
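Assuming the option is active and you are an unprivileged user, the practical difference looks roughly like this (the PID is hypothetical):

    # tracing a program you start yourself still works
    strace ./binary
    # attaching to an unrelated (non-descendant) process is expected to be
    # denied with a permission error
    strace -p 1234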
53966 +
53967 +config GRKERNSEC_TPE
53968 + bool "Trusted Path Execution (TPE)"
53969 + help
53970 + If you say Y here, you will be able to choose a GID to add to the
53971 + supplementary groups of users you want to mark as "untrusted."
53972 + These users will not be able to execute any files that are not in
53973 + root-owned directories writable only by root. If the sysctl option
53974 + is enabled, a sysctl option with name "tpe" is created.
53975 +
53976 +config GRKERNSEC_TPE_ALL
53977 + bool "Partially restrict all non-root users"
53978 + depends on GRKERNSEC_TPE
53979 + help
53980 + If you say Y here, all non-root users will be covered under
53981 + a weaker TPE restriction. This is separate from, and in addition to,
53982 + the main TPE options that you have selected elsewhere. Thus, if a
53983 + "trusted" GID is chosen, this restriction applies to even that GID.
53984 + Under this restriction, all non-root users will only be allowed to
53985 + execute files in directories they own that are not group or
53986 + world-writable, or in directories owned by root and writable only by
53987 + root. If the sysctl option is enabled, a sysctl option with name
53988 + "tpe_restrict_all" is created.
53989 +
53990 +config GRKERNSEC_TPE_INVERT
53991 + bool "Invert GID option"
53992 + depends on GRKERNSEC_TPE
53993 + help
53994 + If you say Y here, the group you specify in the TPE configuration will
53995 + decide what group TPE restrictions will be *disabled* for. This
53996 + option is useful if you want TPE restrictions to be applied to most
53997 + users on the system. If the sysctl option is enabled, a sysctl option
53998 + with name "tpe_invert" is created. Unlike other sysctl options, this
53999 + entry will default to on for backward-compatibility.
54000 +
54001 +config GRKERNSEC_TPE_GID
54002 + int "GID for untrusted users"
54003 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
54004 + default 1005
54005 + help
54006 + Setting this GID determines what group TPE restrictions will be
54007 + *enabled* for. If the sysctl option is enabled, a sysctl option
54008 + with name "tpe_gid" is created.
54009 +
54010 +config GRKERNSEC_TPE_GID
54011 + int "GID for trusted users"
54012 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
54013 + default 1005
54014 + help
54015 + Setting this GID determines what group TPE restrictions will be
54016 + *disabled* for. If the sysctl option is enabled, a sysctl option
54017 + with name "tpe_gid" is created.
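For illustration only (the group name and user are assumptions; the GID must match whichever tpe_gid variant you configured above):

    # create the TPE group and mark a user as untrusted (or trusted, with
    # the invert option) by adding them to it
    groupadd -g 1005 tpe
    usermod -aG tpe someuser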
54018 +
54019 +endmenu
54020 +menu "Network Protections"
54021 +depends on GRKERNSEC
54022 +
54023 +config GRKERNSEC_RANDNET
54024 + bool "Larger entropy pools"
54025 + help
54026 + If you say Y here, the entropy pools used for many features of Linux
54027 + and grsecurity will be doubled in size. Since several grsecurity
54028 + features use additional randomness, it is recommended that you say Y
54029 + here. Saying Y here has a similar effect as modifying
54030 + /proc/sys/kernel/random/poolsize.
54031 +
54032 +config GRKERNSEC_BLACKHOLE
54033 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
54034 + help
54035 + If you say Y here, neither TCP resets nor ICMP
54036 + destination-unreachable packets will be sent in response to packets
54037 + sent to ports for which no associated listening process exists.
54038 + This feature supports both IPv4 and IPv6 and exempts the
54039 + loopback interface from blackholing. Enabling this feature
54040 + makes a host more resilient to DoS attacks and reduces network
54041 + visibility against scanners.
54042 +
54043 + The blackhole feature as-implemented is equivalent to the FreeBSD
54044 + blackhole feature, as it prevents RST responses to all packets, not
54045 + just SYNs. Under most application behavior this causes no
54046 + problems, but applications (like haproxy) may not close certain
54047 + connections in a way that cleanly terminates them on the remote
54048 + end, leaving the remote host in LAST_ACK state. Because of this
54049 + side-effect and to prevent intentional LAST_ACK DoSes, this
54050 + feature also adds automatic mitigation against such attacks.
54051 + The mitigation drastically reduces the amount of time a socket
54052 + can spend in LAST_ACK state. If you're using haproxy and not
54053 + all servers it connects to have this option enabled, consider
54054 + disabling this feature on the haproxy host.
54055 +
54056 + If the sysctl option is enabled, two sysctl options with names
54057 + "ip_blackhole" and "lastack_retries" will be created.
54058 + While "ip_blackhole" takes the standard zero/non-zero on/off
54059 + toggle, "lastack_retries" uses the same kinds of values as
54060 + "tcp_retries1" and "tcp_retries2". The default value of 4
54061 + prevents a socket from lasting more than 45 seconds in LAST_ACK
54062 + state.
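A minimal tuning sketch, assuming the sysctl interface (GRKERNSEC_SYSCTL) is enabled:

    # turn on blackholing and keep the default LAST_ACK retry limit
    echo 1 > /proc/sys/kernel/grsecurity/ip_blackhole
    echo 4 > /proc/sys/kernel/grsecurity/lastack_retries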
54063 +
54064 +config GRKERNSEC_SOCKET
54065 + bool "Socket restrictions"
54066 + help
54067 + If you say Y here, you will be able to choose from several options.
54068 + If you assign a GID on your system and add it to the supplementary
54069 + groups of users you want to restrict socket access to, this patch
54070 + will perform up to three things, based on the option(s) you choose.
54071 +
54072 +config GRKERNSEC_SOCKET_ALL
54073 + bool "Deny any sockets to group"
54074 + depends on GRKERNSEC_SOCKET
54075 + help
54076 + If you say Y here, you will be able to choose a GID whose users will
54077 + be unable to connect to other hosts from your machine or run server
54078 + applications from your machine. If the sysctl option is enabled, a
54079 + sysctl option with name "socket_all" is created.
54080 +
54081 +config GRKERNSEC_SOCKET_ALL_GID
54082 + int "GID to deny all sockets for"
54083 + depends on GRKERNSEC_SOCKET_ALL
54084 + default 1004
54085 + help
54086 + Here you can choose the GID to disable socket access for. Remember to
54087 + add the users you want socket access disabled for to the GID
54088 + specified here. If the sysctl option is enabled, a sysctl option
54089 + with name "socket_all_gid" is created.
54090 +
54091 +config GRKERNSEC_SOCKET_CLIENT
54092 + bool "Deny client sockets to group"
54093 + depends on GRKERNSEC_SOCKET
54094 + help
54095 + If you say Y here, you will be able to choose a GID whose users will
54096 + be unable to connect to other hosts from your machine, but will be
54097 + able to run servers. If this option is enabled, all users in the group
54098 + you specify will have to use passive mode when initiating ftp transfers
54099 + from the shell on your machine. If the sysctl option is enabled, a
54100 + sysctl option with name "socket_client" is created.
54101 +
54102 +config GRKERNSEC_SOCKET_CLIENT_GID
54103 + int "GID to deny client sockets for"
54104 + depends on GRKERNSEC_SOCKET_CLIENT
54105 + default 1003
54106 + help
54107 + Here you can choose the GID to disable client socket access for.
54108 + Remember to add the users you want client socket access disabled for to
54109 + the GID specified here. If the sysctl option is enabled, a sysctl
54110 + option with name "socket_client_gid" is created.
54111 +
54112 +config GRKERNSEC_SOCKET_SERVER
54113 + bool "Deny server sockets to group"
54114 + depends on GRKERNSEC_SOCKET
54115 + help
54116 + If you say Y here, you will be able to choose a GID whose users will
54117 + be unable to run server applications from your machine. If the sysctl
54118 + option is enabled, a sysctl option with name "socket_server" is created.
54119 +
54120 +config GRKERNSEC_SOCKET_SERVER_GID
54121 + int "GID to deny server sockets for"
54122 + depends on GRKERNSEC_SOCKET_SERVER
54123 + default 1002
54124 + help
54125 + Here you can choose the GID to disable server socket access for.
54126 + Remember to add the users you want server socket access disabled for to
54127 + the GID specified here. If the sysctl option is enabled, a sysctl
54128 + option with name "socket_server_gid" is created.
54129 +
54130 +endmenu
54131 +menu "Sysctl support"
54132 +depends on GRKERNSEC && SYSCTL
54133 +
54134 +config GRKERNSEC_SYSCTL
54135 + bool "Sysctl support"
54136 + help
54137 + If you say Y here, you will be able to change the options that
54138 + grsecurity runs with at bootup, without having to recompile your
54139 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
54140 + to enable (1) or disable (0) various features. All the sysctl entries
54141 + are mutable until the "grsec_lock" entry is set to a non-zero value.
54142 + All features enabled in the kernel configuration are disabled at boot
54143 + if you do not say Y to the "Turn on features by default" option.
54144 + All options should be set at startup, and the grsec_lock entry should
54145 + be set to a non-zero value after all the options are set.
54146 + *THIS IS EXTREMELY IMPORTANT*
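A typical boot-script sketch (which features are toggled here is only an example; the /proc/sys/kernel/grsecurity paths and the final grsec_lock step come from this option):

    # enable the desired features at startup...
    echo 1 > /proc/sys/kernel/grsecurity/linking_restrictions
    echo 1 > /proc/sys/kernel/grsecurity/fifo_restrictions
    # ...then lock the settings so they cannot be changed again until reboot
    echo 1 > /proc/sys/kernel/grsecurity/grsec_lock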
54147 +
54148 +config GRKERNSEC_SYSCTL_DISTRO
54149 + bool "Extra sysctl support for distro makers (READ HELP)"
54150 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
54151 + help
54152 + If you say Y here, additional sysctl options will be created
54153 + for features that affect processes running as root. Therefore,
54154 + it is critical when using this option that the grsec_lock entry be
54155 + enabled after boot. Only distros that ship prebuilt kernel packages
54156 + with this option enabled and that can ensure grsec_lock is set
54157 + after boot should use this option.
54158 + *Failure to set grsec_lock after boot makes all grsec features
54159 + this option covers useless*
54160 +
54161 + Currently this option creates the following sysctl entries:
54162 + "Disable Privileged I/O": "disable_priv_io"
54163 +
54164 +config GRKERNSEC_SYSCTL_ON
54165 + bool "Turn on features by default"
54166 + depends on GRKERNSEC_SYSCTL
54167 + help
54168 + If you say Y here, instead of having all features enabled in the
54169 + kernel configuration disabled at boot time, the features will be
54170 + enabled at boot time. It is recommended you say Y here unless
54171 + there is some reason you would want all sysctl-tunable features to
54172 + be disabled by default. As mentioned elsewhere, it is important
54173 + to enable the grsec_lock entry once you have finished modifying
54174 + the sysctl entries.
54175 +
54176 +endmenu
54177 +menu "Logging Options"
54178 +depends on GRKERNSEC
54179 +
54180 +config GRKERNSEC_FLOODTIME
54181 + int "Seconds in between log messages (minimum)"
54182 + default 10
54183 + help
54184 + This option allows you to enforce the number of seconds between
54185 + grsecurity log messages. The default should be suitable for most
54186 + people; however, if you choose to change it, choose a value small enough
54187 + to allow informative logs to be produced, but large enough to
54188 + prevent flooding.
54189 +
54190 +config GRKERNSEC_FLOODBURST
54191 + int "Number of messages in a burst (maximum)"
54192 + default 4
54193 + help
54194 + This option allows you to choose the maximum number of messages allowed
54195 + within the flood time interval you chose in a separate option. The
54196 + default should be suitable for most people; however, if you find that
54197 + many of your logs are being interpreted as flooding, you may want to
54198 + raise this value.
54199 +
54200 +endmenu
54201 +
54202 +endmenu
54203 diff -urNp linux-2.6.32.43/grsecurity/Makefile linux-2.6.32.43/grsecurity/Makefile
54204 --- linux-2.6.32.43/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
54205 +++ linux-2.6.32.43/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
54206 @@ -0,0 +1,33 @@
54207 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54208 +# during 2001-2009 it was completely redesigned by Brad Spengler
54209 +# into an RBAC system
54210 +#
54211 +# All code in this directory and various hooks inserted throughout the kernel
54212 +# are copyright Brad Spengler - Open Source Security, Inc., and released
54213 +# under the GPL v2 or higher
54214 +
54215 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54216 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
54217 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54218 +
54219 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54220 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54221 + gracl_learn.o grsec_log.o
54222 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54223 +
54224 +ifdef CONFIG_NET
54225 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54226 +endif
54227 +
54228 +ifndef CONFIG_GRKERNSEC
54229 +obj-y += grsec_disabled.o
54230 +endif
54231 +
54232 +ifdef CONFIG_GRKERNSEC_HIDESYM
54233 +extra-y := grsec_hidesym.o
54234 +$(obj)/grsec_hidesym.o:
54235 + @-chmod -f 500 /boot
54236 + @-chmod -f 500 /lib/modules
54237 + @-chmod -f 700 .
54238 + @echo ' grsec: protected kernel image paths'
54239 +endif
54240 diff -urNp linux-2.6.32.43/include/acpi/acpi_drivers.h linux-2.6.32.43/include/acpi/acpi_drivers.h
54241 --- linux-2.6.32.43/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
54242 +++ linux-2.6.32.43/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
54243 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
54244 Dock Station
54245 -------------------------------------------------------------------------- */
54246 struct acpi_dock_ops {
54247 - acpi_notify_handler handler;
54248 - acpi_notify_handler uevent;
54249 + const acpi_notify_handler handler;
54250 + const acpi_notify_handler uevent;
54251 };
54252
54253 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
54254 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
54255 extern int register_dock_notifier(struct notifier_block *nb);
54256 extern void unregister_dock_notifier(struct notifier_block *nb);
54257 extern int register_hotplug_dock_device(acpi_handle handle,
54258 - struct acpi_dock_ops *ops,
54259 + const struct acpi_dock_ops *ops,
54260 void *context);
54261 extern void unregister_hotplug_dock_device(acpi_handle handle);
54262 #else
54263 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
54264 {
54265 }
54266 static inline int register_hotplug_dock_device(acpi_handle handle,
54267 - struct acpi_dock_ops *ops,
54268 + const struct acpi_dock_ops *ops,
54269 void *context)
54270 {
54271 return -ENODEV;
54272 diff -urNp linux-2.6.32.43/include/asm-generic/atomic-long.h linux-2.6.32.43/include/asm-generic/atomic-long.h
54273 --- linux-2.6.32.43/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
54274 +++ linux-2.6.32.43/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
54275 @@ -22,6 +22,12 @@
54276
54277 typedef atomic64_t atomic_long_t;
54278
54279 +#ifdef CONFIG_PAX_REFCOUNT
54280 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
54281 +#else
54282 +typedef atomic64_t atomic_long_unchecked_t;
54283 +#endif
54284 +
54285 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
54286
54287 static inline long atomic_long_read(atomic_long_t *l)
54288 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
54289 return (long)atomic64_read(v);
54290 }
54291
54292 +#ifdef CONFIG_PAX_REFCOUNT
54293 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54294 +{
54295 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54296 +
54297 + return (long)atomic64_read_unchecked(v);
54298 +}
54299 +#endif
54300 +
54301 static inline void atomic_long_set(atomic_long_t *l, long i)
54302 {
54303 atomic64_t *v = (atomic64_t *)l;
54304 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
54305 atomic64_set(v, i);
54306 }
54307
54308 +#ifdef CONFIG_PAX_REFCOUNT
54309 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54310 +{
54311 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54312 +
54313 + atomic64_set_unchecked(v, i);
54314 +}
54315 +#endif
54316 +
54317 static inline void atomic_long_inc(atomic_long_t *l)
54318 {
54319 atomic64_t *v = (atomic64_t *)l;
54320 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
54321 atomic64_inc(v);
54322 }
54323
54324 +#ifdef CONFIG_PAX_REFCOUNT
54325 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54326 +{
54327 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54328 +
54329 + atomic64_inc_unchecked(v);
54330 +}
54331 +#endif
54332 +
54333 static inline void atomic_long_dec(atomic_long_t *l)
54334 {
54335 atomic64_t *v = (atomic64_t *)l;
54336 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
54337 atomic64_dec(v);
54338 }
54339
54340 +#ifdef CONFIG_PAX_REFCOUNT
54341 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54342 +{
54343 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54344 +
54345 + atomic64_dec_unchecked(v);
54346 +}
54347 +#endif
54348 +
54349 static inline void atomic_long_add(long i, atomic_long_t *l)
54350 {
54351 atomic64_t *v = (atomic64_t *)l;
54352 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
54353 atomic64_add(i, v);
54354 }
54355
54356 +#ifdef CONFIG_PAX_REFCOUNT
54357 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54358 +{
54359 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54360 +
54361 + atomic64_add_unchecked(i, v);
54362 +}
54363 +#endif
54364 +
54365 static inline void atomic_long_sub(long i, atomic_long_t *l)
54366 {
54367 atomic64_t *v = (atomic64_t *)l;
54368 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
54369 return (long)atomic64_inc_return(v);
54370 }
54371
54372 +#ifdef CONFIG_PAX_REFCOUNT
54373 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54374 +{
54375 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54376 +
54377 + return (long)atomic64_inc_return_unchecked(v);
54378 +}
54379 +#endif
54380 +
54381 static inline long atomic_long_dec_return(atomic_long_t *l)
54382 {
54383 atomic64_t *v = (atomic64_t *)l;
54384 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
54385
54386 typedef atomic_t atomic_long_t;
54387
54388 +#ifdef CONFIG_PAX_REFCOUNT
54389 +typedef atomic_unchecked_t atomic_long_unchecked_t;
54390 +#else
54391 +typedef atomic_t atomic_long_unchecked_t;
54392 +#endif
54393 +
54394 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
54395 static inline long atomic_long_read(atomic_long_t *l)
54396 {
54397 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
54398 return (long)atomic_read(v);
54399 }
54400
54401 +#ifdef CONFIG_PAX_REFCOUNT
54402 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54403 +{
54404 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54405 +
54406 + return (long)atomic_read_unchecked(v);
54407 +}
54408 +#endif
54409 +
54410 static inline void atomic_long_set(atomic_long_t *l, long i)
54411 {
54412 atomic_t *v = (atomic_t *)l;
54413 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
54414 atomic_set(v, i);
54415 }
54416
54417 +#ifdef CONFIG_PAX_REFCOUNT
54418 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54419 +{
54420 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54421 +
54422 + atomic_set_unchecked(v, i);
54423 +}
54424 +#endif
54425 +
54426 static inline void atomic_long_inc(atomic_long_t *l)
54427 {
54428 atomic_t *v = (atomic_t *)l;
54429 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
54430 atomic_inc(v);
54431 }
54432
54433 +#ifdef CONFIG_PAX_REFCOUNT
54434 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54435 +{
54436 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54437 +
54438 + atomic_inc_unchecked(v);
54439 +}
54440 +#endif
54441 +
54442 static inline void atomic_long_dec(atomic_long_t *l)
54443 {
54444 atomic_t *v = (atomic_t *)l;
54445 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
54446 atomic_dec(v);
54447 }
54448
54449 +#ifdef CONFIG_PAX_REFCOUNT
54450 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54451 +{
54452 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54453 +
54454 + atomic_dec_unchecked(v);
54455 +}
54456 +#endif
54457 +
54458 static inline void atomic_long_add(long i, atomic_long_t *l)
54459 {
54460 atomic_t *v = (atomic_t *)l;
54461 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
54462 atomic_add(i, v);
54463 }
54464
54465 +#ifdef CONFIG_PAX_REFCOUNT
54466 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54467 +{
54468 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54469 +
54470 + atomic_add_unchecked(i, v);
54471 +}
54472 +#endif
54473 +
54474 static inline void atomic_long_sub(long i, atomic_long_t *l)
54475 {
54476 atomic_t *v = (atomic_t *)l;
54477 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
54478 return (long)atomic_inc_return(v);
54479 }
54480
54481 +#ifdef CONFIG_PAX_REFCOUNT
54482 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54483 +{
54484 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54485 +
54486 + return (long)atomic_inc_return_unchecked(v);
54487 +}
54488 +#endif
54489 +
54490 static inline long atomic_long_dec_return(atomic_long_t *l)
54491 {
54492 atomic_t *v = (atomic_t *)l;
54493 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
54494
54495 #endif /* BITS_PER_LONG == 64 */
54496
54497 +#ifdef CONFIG_PAX_REFCOUNT
54498 +static inline void pax_refcount_needs_these_functions(void)
54499 +{
54500 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
54501 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
54502 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
54503 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
54504 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
54505 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
54506 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
54507 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
54508 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
54509 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
54510 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
54511 +
54512 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
54513 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
54514 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
54515 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
54516 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
54517 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
54518 +}
54519 +#else
54520 +#define atomic_read_unchecked(v) atomic_read(v)
54521 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
54522 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
54523 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
54524 +#define atomic_inc_unchecked(v) atomic_inc(v)
54525 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
54526 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
54527 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
54528 +#define atomic_dec_unchecked(v) atomic_dec(v)
54529 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
54530 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
54531 +
54532 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
54533 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
54534 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
54535 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
54536 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
54537 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
54538 +#endif
54539 +
54540 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
54541 diff -urNp linux-2.6.32.43/include/asm-generic/cache.h linux-2.6.32.43/include/asm-generic/cache.h
54542 --- linux-2.6.32.43/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
54543 +++ linux-2.6.32.43/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
54544 @@ -6,7 +6,7 @@
54545 * cache lines need to provide their own cache.h.
54546 */
54547
54548 -#define L1_CACHE_SHIFT 5
54549 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54550 +#define L1_CACHE_SHIFT 5UL
54551 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
54552
54553 #endif /* __ASM_GENERIC_CACHE_H */
54554 diff -urNp linux-2.6.32.43/include/asm-generic/dma-mapping-common.h linux-2.6.32.43/include/asm-generic/dma-mapping-common.h
54555 --- linux-2.6.32.43/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
54556 +++ linux-2.6.32.43/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
54557 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
54558 enum dma_data_direction dir,
54559 struct dma_attrs *attrs)
54560 {
54561 - struct dma_map_ops *ops = get_dma_ops(dev);
54562 + const struct dma_map_ops *ops = get_dma_ops(dev);
54563 dma_addr_t addr;
54564
54565 kmemcheck_mark_initialized(ptr, size);
54566 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
54567 enum dma_data_direction dir,
54568 struct dma_attrs *attrs)
54569 {
54570 - struct dma_map_ops *ops = get_dma_ops(dev);
54571 + const struct dma_map_ops *ops = get_dma_ops(dev);
54572
54573 BUG_ON(!valid_dma_direction(dir));
54574 if (ops->unmap_page)
54575 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
54576 int nents, enum dma_data_direction dir,
54577 struct dma_attrs *attrs)
54578 {
54579 - struct dma_map_ops *ops = get_dma_ops(dev);
54580 + const struct dma_map_ops *ops = get_dma_ops(dev);
54581 int i, ents;
54582 struct scatterlist *s;
54583
54584 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
54585 int nents, enum dma_data_direction dir,
54586 struct dma_attrs *attrs)
54587 {
54588 - struct dma_map_ops *ops = get_dma_ops(dev);
54589 + const struct dma_map_ops *ops = get_dma_ops(dev);
54590
54591 BUG_ON(!valid_dma_direction(dir));
54592 debug_dma_unmap_sg(dev, sg, nents, dir);
54593 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
54594 size_t offset, size_t size,
54595 enum dma_data_direction dir)
54596 {
54597 - struct dma_map_ops *ops = get_dma_ops(dev);
54598 + const struct dma_map_ops *ops = get_dma_ops(dev);
54599 dma_addr_t addr;
54600
54601 kmemcheck_mark_initialized(page_address(page) + offset, size);
54602 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
54603 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
54604 size_t size, enum dma_data_direction dir)
54605 {
54606 - struct dma_map_ops *ops = get_dma_ops(dev);
54607 + const struct dma_map_ops *ops = get_dma_ops(dev);
54608
54609 BUG_ON(!valid_dma_direction(dir));
54610 if (ops->unmap_page)
54611 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
54612 size_t size,
54613 enum dma_data_direction dir)
54614 {
54615 - struct dma_map_ops *ops = get_dma_ops(dev);
54616 + const struct dma_map_ops *ops = get_dma_ops(dev);
54617
54618 BUG_ON(!valid_dma_direction(dir));
54619 if (ops->sync_single_for_cpu)
54620 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
54621 dma_addr_t addr, size_t size,
54622 enum dma_data_direction dir)
54623 {
54624 - struct dma_map_ops *ops = get_dma_ops(dev);
54625 + const struct dma_map_ops *ops = get_dma_ops(dev);
54626
54627 BUG_ON(!valid_dma_direction(dir));
54628 if (ops->sync_single_for_device)
54629 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
54630 size_t size,
54631 enum dma_data_direction dir)
54632 {
54633 - struct dma_map_ops *ops = get_dma_ops(dev);
54634 + const struct dma_map_ops *ops = get_dma_ops(dev);
54635
54636 BUG_ON(!valid_dma_direction(dir));
54637 if (ops->sync_single_range_for_cpu) {
54638 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
54639 size_t size,
54640 enum dma_data_direction dir)
54641 {
54642 - struct dma_map_ops *ops = get_dma_ops(dev);
54643 + const struct dma_map_ops *ops = get_dma_ops(dev);
54644
54645 BUG_ON(!valid_dma_direction(dir));
54646 if (ops->sync_single_range_for_device) {
54647 @@ -155,7 +155,7 @@ static inline void
54648 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
54649 int nelems, enum dma_data_direction dir)
54650 {
54651 - struct dma_map_ops *ops = get_dma_ops(dev);
54652 + const struct dma_map_ops *ops = get_dma_ops(dev);
54653
54654 BUG_ON(!valid_dma_direction(dir));
54655 if (ops->sync_sg_for_cpu)
54656 @@ -167,7 +167,7 @@ static inline void
54657 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
54658 int nelems, enum dma_data_direction dir)
54659 {
54660 - struct dma_map_ops *ops = get_dma_ops(dev);
54661 + const struct dma_map_ops *ops = get_dma_ops(dev);
54662
54663 BUG_ON(!valid_dma_direction(dir));
54664 if (ops->sync_sg_for_device)
54665 diff -urNp linux-2.6.32.43/include/asm-generic/futex.h linux-2.6.32.43/include/asm-generic/futex.h
54666 --- linux-2.6.32.43/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
54667 +++ linux-2.6.32.43/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
54668 @@ -6,7 +6,7 @@
54669 #include <asm/errno.h>
54670
54671 static inline int
54672 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
54673 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
54674 {
54675 int op = (encoded_op >> 28) & 7;
54676 int cmp = (encoded_op >> 24) & 15;
54677 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
54678 }
54679
54680 static inline int
54681 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
54682 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
54683 {
54684 return -ENOSYS;
54685 }
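The hunk only widens the user pointer to u32 __user *; the packed encoded_op word keeps the layout assumed by the surrounding code. For reference, a sketch of that decoding (mirroring the in-tree logic, not new behaviour):

/* encoded_op layout, high to low bits: op:4 | cmp:4 | oparg:12 | cmparg:12 */
static inline void example_decode_op(int encoded_op)
{
	int op     = (encoded_op >> 28) & 7;	/* FUTEX_OP_SET, ADD, OR, ANDN, XOR */
	int cmp    = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_EQ, NE, LT, LE, GT, GE */
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extended 12-bit operand */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12-bit comparand */

	(void)op; (void)cmp; (void)oparg; (void)cmparg;
}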
54686 diff -urNp linux-2.6.32.43/include/asm-generic/int-l64.h linux-2.6.32.43/include/asm-generic/int-l64.h
54687 --- linux-2.6.32.43/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
54688 +++ linux-2.6.32.43/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
54689 @@ -46,6 +46,8 @@ typedef unsigned int u32;
54690 typedef signed long s64;
54691 typedef unsigned long u64;
54692
54693 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54694 +
54695 #define S8_C(x) x
54696 #define U8_C(x) x ## U
54697 #define S16_C(x) x
54698 diff -urNp linux-2.6.32.43/include/asm-generic/int-ll64.h linux-2.6.32.43/include/asm-generic/int-ll64.h
54699 --- linux-2.6.32.43/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
54700 +++ linux-2.6.32.43/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
54701 @@ -51,6 +51,8 @@ typedef unsigned int u32;
54702 typedef signed long long s64;
54703 typedef unsigned long long u64;
54704
54705 +typedef unsigned long long intoverflow_t;
54706 +
54707 #define S8_C(x) x
54708 #define U8_C(x) x ## U
54709 #define S16_C(x) x
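intoverflow_t is a helper type intended to be wider than size_t so that a size computation can be checked before it reaches an allocator: a 128-bit (TImode) integer in the l64 variant above, unsigned long long here. One way such a type can be used (illustrative only; the helper name is invented):

static inline void *example_calloc(size_t n, size_t size)
{
	if ((intoverflow_t)n * size > ULONG_MAX)	/* product evaluated in the wider type */
		return NULL;
	return kmalloc(n * size, GFP_KERNEL);		/* needs <linux/slab.h> */
}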
54710 diff -urNp linux-2.6.32.43/include/asm-generic/kmap_types.h linux-2.6.32.43/include/asm-generic/kmap_types.h
54711 --- linux-2.6.32.43/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
54712 +++ linux-2.6.32.43/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
54713 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
54714 KMAP_D(16) KM_IRQ_PTE,
54715 KMAP_D(17) KM_NMI,
54716 KMAP_D(18) KM_NMI_PTE,
54717 -KMAP_D(19) KM_TYPE_NR
54718 +KMAP_D(19) KM_CLEARPAGE,
54719 +KMAP_D(20) KM_TYPE_NR
54720 };
54721
54722 #undef KMAP_D
54723 diff -urNp linux-2.6.32.43/include/asm-generic/pgtable.h linux-2.6.32.43/include/asm-generic/pgtable.h
54724 --- linux-2.6.32.43/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
54725 +++ linux-2.6.32.43/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
54726 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
54727 unsigned long size);
54728 #endif
54729
54730 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54731 +static inline unsigned long pax_open_kernel(void) { return 0; }
54732 +#endif
54733 +
54734 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54735 +static inline unsigned long pax_close_kernel(void) { return 0; }
54736 +#endif
54737 +
54738 #endif /* !__ASSEMBLY__ */
54739
54740 #endif /* _ASM_GENERIC_PGTABLE_H */
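These stubs give every architecture a pax_open_kernel()/pax_close_kernel() pair; architectures that actually write-protect kernel data override them via the __HAVE_ARCH_PAX_*_KERNEL guards. The usual calling pattern around a write to otherwise read-only data (the variable and function are invented):

static int example_setting __read_only = 0;	/* hypothetical write-protected variable */

static void example_set(int val)
{
	pax_open_kernel();	/* lift write protection; a no-op with the generic stubs above */
	example_setting = val;
	pax_close_kernel();	/* restore write protection */
}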
54741 diff -urNp linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h
54742 --- linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
54743 +++ linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
54744 @@ -1,14 +1,19 @@
54745 #ifndef _PGTABLE_NOPMD_H
54746 #define _PGTABLE_NOPMD_H
54747
54748 -#ifndef __ASSEMBLY__
54749 -
54750 #include <asm-generic/pgtable-nopud.h>
54751
54752 -struct mm_struct;
54753 -
54754 #define __PAGETABLE_PMD_FOLDED
54755
54756 +#define PMD_SHIFT PUD_SHIFT
54757 +#define PTRS_PER_PMD 1
54758 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54759 +#define PMD_MASK (~(PMD_SIZE-1))
54760 +
54761 +#ifndef __ASSEMBLY__
54762 +
54763 +struct mm_struct;
54764 +
54765 /*
54766 * Having the pmd type consist of a pud gets the size right, and allows
54767 * us to conceptually access the pud entry that this pmd is folded into
54768 @@ -16,11 +21,6 @@ struct mm_struct;
54769 */
54770 typedef struct { pud_t pud; } pmd_t;
54771
54772 -#define PMD_SHIFT PUD_SHIFT
54773 -#define PTRS_PER_PMD 1
54774 -#define PMD_SIZE (1UL << PMD_SHIFT)
54775 -#define PMD_MASK (~(PMD_SIZE-1))
54776 -
54777 /*
54778 * The "pud_xxx()" functions here are trivial for a folded two-level
54779 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54780 diff -urNp linux-2.6.32.43/include/asm-generic/pgtable-nopud.h linux-2.6.32.43/include/asm-generic/pgtable-nopud.h
54781 --- linux-2.6.32.43/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
54782 +++ linux-2.6.32.43/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
54783 @@ -1,10 +1,15 @@
54784 #ifndef _PGTABLE_NOPUD_H
54785 #define _PGTABLE_NOPUD_H
54786
54787 -#ifndef __ASSEMBLY__
54788 -
54789 #define __PAGETABLE_PUD_FOLDED
54790
54791 +#define PUD_SHIFT PGDIR_SHIFT
54792 +#define PTRS_PER_PUD 1
54793 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54794 +#define PUD_MASK (~(PUD_SIZE-1))
54795 +
54796 +#ifndef __ASSEMBLY__
54797 +
54798 /*
54799 * Having the pud type consist of a pgd gets the size right, and allows
54800 * us to conceptually access the pgd entry that this pud is folded into
54801 @@ -12,11 +17,6 @@
54802 */
54803 typedef struct { pgd_t pgd; } pud_t;
54804
54805 -#define PUD_SHIFT PGDIR_SHIFT
54806 -#define PTRS_PER_PUD 1
54807 -#define PUD_SIZE (1UL << PUD_SHIFT)
54808 -#define PUD_MASK (~(PUD_SIZE-1))
54809 -
54810 /*
54811 * The "pgd_xxx()" functions here are trivial for a folded two-level
54812 * setup: the pud is never bad, and a pud always exists (as it's folded
54813 diff -urNp linux-2.6.32.43/include/asm-generic/vmlinux.lds.h linux-2.6.32.43/include/asm-generic/vmlinux.lds.h
54814 --- linux-2.6.32.43/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54815 +++ linux-2.6.32.43/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54816 @@ -199,6 +199,7 @@
54817 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54818 VMLINUX_SYMBOL(__start_rodata) = .; \
54819 *(.rodata) *(.rodata.*) \
54820 + *(.data.read_only) \
54821 *(__vermagic) /* Kernel version magic */ \
54822 *(__markers_strings) /* Markers: strings */ \
54823 *(__tracepoints_strings)/* Tracepoints: strings */ \
54824 @@ -656,22 +657,24 @@
54825 * section in the linker script will go there too. @phdr should have
54826 * a leading colon.
54827 *
54828 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54829 + * Note that this macros defines per_cpu_load as an absolute symbol.
54830 * If there is no need to put the percpu section at a predetermined
54831 * address, use PERCPU().
54832 */
54833 #define PERCPU_VADDR(vaddr, phdr) \
54834 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54835 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54836 + per_cpu_load = .; \
54837 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54838 - LOAD_OFFSET) { \
54839 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54840 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54841 *(.data.percpu.first) \
54842 - *(.data.percpu.page_aligned) \
54843 *(.data.percpu) \
54844 + . = ALIGN(PAGE_SIZE); \
54845 + *(.data.percpu.page_aligned) \
54846 *(.data.percpu.shared_aligned) \
54847 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54848 } phdr \
54849 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54850 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54851
54852 /**
54853 * PERCPU - define output section for percpu area, simple version
54854 diff -urNp linux-2.6.32.43/include/drm/drmP.h linux-2.6.32.43/include/drm/drmP.h
54855 --- linux-2.6.32.43/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54856 +++ linux-2.6.32.43/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54857 @@ -71,6 +71,7 @@
54858 #include <linux/workqueue.h>
54859 #include <linux/poll.h>
54860 #include <asm/pgalloc.h>
54861 +#include <asm/local.h>
54862 #include "drm.h"
54863
54864 #include <linux/idr.h>
54865 @@ -814,7 +815,7 @@ struct drm_driver {
54866 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54867
54868 /* Driver private ops for this object */
54869 - struct vm_operations_struct *gem_vm_ops;
54870 + const struct vm_operations_struct *gem_vm_ops;
54871
54872 int major;
54873 int minor;
54874 @@ -917,7 +918,7 @@ struct drm_device {
54875
54876 /** \name Usage Counters */
54877 /*@{ */
54878 - int open_count; /**< Outstanding files open */
54879 + local_t open_count; /**< Outstanding files open */
54880 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54881 atomic_t vma_count; /**< Outstanding vma areas open */
54882 int buf_use; /**< Buffers in use -- cannot alloc */
54883 @@ -928,7 +929,7 @@ struct drm_device {
54884 /*@{ */
54885 unsigned long counters;
54886 enum drm_stat_type types[15];
54887 - atomic_t counts[15];
54888 + atomic_unchecked_t counts[15];
54889 /*@} */
54890
54891 struct list_head filelist;
54892 @@ -1016,7 +1017,7 @@ struct drm_device {
54893 struct pci_controller *hose;
54894 #endif
54895 struct drm_sg_mem *sg; /**< Scatter gather memory */
54896 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
54897 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
54898 void *dev_private; /**< device private data */
54899 void *mm_private;
54900 struct address_space *dev_mapping;
54901 @@ -1042,11 +1043,11 @@ struct drm_device {
54902 spinlock_t object_name_lock;
54903 struct idr object_name_idr;
54904 atomic_t object_count;
54905 - atomic_t object_memory;
54906 + atomic_unchecked_t object_memory;
54907 atomic_t pin_count;
54908 - atomic_t pin_memory;
54909 + atomic_unchecked_t pin_memory;
54910 atomic_t gtt_count;
54911 - atomic_t gtt_memory;
54912 + atomic_unchecked_t gtt_memory;
54913 uint32_t gtt_total;
54914 uint32_t invalidate_domains; /* domains pending invalidation */
54915 uint32_t flush_domains; /* domains pending flush */
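Turning open_count into a local_t (hence the new <asm/local.h> include) means the DRM core has to use the local_* accessors rather than plain increments; a sketch of the corresponding call-site change (the function is invented):

static int example_drm_open(struct drm_device *dev)
{
	local_inc(&dev->open_count);			/* was: dev->open_count++ */
	return local_read(&dev->open_count) == 1;	/* non-zero for the first opener */
}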
54916 diff -urNp linux-2.6.32.43/include/linux/a.out.h linux-2.6.32.43/include/linux/a.out.h
54917 --- linux-2.6.32.43/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54918 +++ linux-2.6.32.43/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54919 @@ -39,6 +39,14 @@ enum machine_type {
54920 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54921 };
54922
54923 +/* Constants for the N_FLAGS field */
54924 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54925 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54926 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54927 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54928 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54929 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54930 +
54931 #if !defined (N_MAGIC)
54932 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54933 #endif
54934 diff -urNp linux-2.6.32.43/include/linux/atmdev.h linux-2.6.32.43/include/linux/atmdev.h
54935 --- linux-2.6.32.43/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54936 +++ linux-2.6.32.43/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54937 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54938 #endif
54939
54940 struct k_atm_aal_stats {
54941 -#define __HANDLE_ITEM(i) atomic_t i
54942 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54943 __AAL_STAT_ITEMS
54944 #undef __HANDLE_ITEM
54945 };
54946 diff -urNp linux-2.6.32.43/include/linux/backlight.h linux-2.6.32.43/include/linux/backlight.h
54947 --- linux-2.6.32.43/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54948 +++ linux-2.6.32.43/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54949 @@ -36,18 +36,18 @@ struct backlight_device;
54950 struct fb_info;
54951
54952 struct backlight_ops {
54953 - unsigned int options;
54954 + const unsigned int options;
54955
54956 #define BL_CORE_SUSPENDRESUME (1 << 0)
54957
54958 /* Notify the backlight driver some property has changed */
54959 - int (*update_status)(struct backlight_device *);
54960 + int (* const update_status)(struct backlight_device *);
54961 /* Return the current backlight brightness (accounting for power,
54962 fb_blank etc.) */
54963 - int (*get_brightness)(struct backlight_device *);
54964 + int (* const get_brightness)(struct backlight_device *);
54965 /* Check if given framebuffer device is the one bound to this backlight;
54966 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54967 - int (*check_fb)(struct fb_info *);
54968 + int (* const check_fb)(struct fb_info *);
54969 };
54970
54971 /* This structure defines all the properties of a backlight */
54972 @@ -86,7 +86,7 @@ struct backlight_device {
54973 registered this device has been unloaded, and if class_get_devdata()
54974 points to something in the body of that driver, it is also invalid. */
54975 struct mutex ops_lock;
54976 - struct backlight_ops *ops;
54977 + const struct backlight_ops *ops;
54978
54979 /* The framebuffer notifier block */
54980 struct notifier_block fb_notif;
54981 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54982 }
54983
54984 extern struct backlight_device *backlight_device_register(const char *name,
54985 - struct device *dev, void *devdata, struct backlight_ops *ops);
54986 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54987 extern void backlight_device_unregister(struct backlight_device *bd);
54988 extern void backlight_force_update(struct backlight_device *bd,
54989 enum backlight_update_reason reason);
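With the members const-qualified and backlight_device_register() taking a const pointer, a driver's ops table is declared const and filled in once at definition time (the handlers are placeholders):

static int example_update_status(struct backlight_device *bd)
{
	return 0;	/* placeholder */
}

static int example_get_brightness(struct backlight_device *bd)
{
	return 0;	/* placeholder */
}

static const struct backlight_ops example_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= example_update_status,
	.get_brightness	= example_get_brightness,
};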
54990 diff -urNp linux-2.6.32.43/include/linux/binfmts.h linux-2.6.32.43/include/linux/binfmts.h
54991 --- linux-2.6.32.43/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54992 +++ linux-2.6.32.43/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54993 @@ -83,6 +83,7 @@ struct linux_binfmt {
54994 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54995 int (*load_shlib)(struct file *);
54996 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54997 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54998 unsigned long min_coredump; /* minimal dump size */
54999 int hasvdso;
55000 };
55001 diff -urNp linux-2.6.32.43/include/linux/blkdev.h linux-2.6.32.43/include/linux/blkdev.h
55002 --- linux-2.6.32.43/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
55003 +++ linux-2.6.32.43/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
55004 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
55005 #endif /* CONFIG_BLK_DEV_INTEGRITY */
55006
55007 struct block_device_operations {
55008 - int (*open) (struct block_device *, fmode_t);
55009 - int (*release) (struct gendisk *, fmode_t);
55010 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55011 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55012 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55013 - int (*direct_access) (struct block_device *, sector_t,
55014 + int (* const open) (struct block_device *, fmode_t);
55015 + int (* const release) (struct gendisk *, fmode_t);
55016 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55017 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55018 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55019 + int (* const direct_access) (struct block_device *, sector_t,
55020 void **, unsigned long *);
55021 - int (*media_changed) (struct gendisk *);
55022 - unsigned long long (*set_capacity) (struct gendisk *,
55023 + int (* const media_changed) (struct gendisk *);
55024 + unsigned long long (* const set_capacity) (struct gendisk *,
55025 unsigned long long);
55026 - int (*revalidate_disk) (struct gendisk *);
55027 - int (*getgeo)(struct block_device *, struct hd_geometry *);
55028 - struct module *owner;
55029 + int (* const revalidate_disk) (struct gendisk *);
55030 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
55031 + struct module * const owner;
55032 };
55033
55034 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
55035 diff -urNp linux-2.6.32.43/include/linux/blktrace_api.h linux-2.6.32.43/include/linux/blktrace_api.h
55036 --- linux-2.6.32.43/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
55037 +++ linux-2.6.32.43/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
55038 @@ -160,7 +160,7 @@ struct blk_trace {
55039 struct dentry *dir;
55040 struct dentry *dropped_file;
55041 struct dentry *msg_file;
55042 - atomic_t dropped;
55043 + atomic_unchecked_t dropped;
55044 };
55045
55046 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
55047 diff -urNp linux-2.6.32.43/include/linux/byteorder/little_endian.h linux-2.6.32.43/include/linux/byteorder/little_endian.h
55048 --- linux-2.6.32.43/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
55049 +++ linux-2.6.32.43/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
55050 @@ -42,51 +42,51 @@
55051
55052 static inline __le64 __cpu_to_le64p(const __u64 *p)
55053 {
55054 - return (__force __le64)*p;
55055 + return (__force const __le64)*p;
55056 }
55057 static inline __u64 __le64_to_cpup(const __le64 *p)
55058 {
55059 - return (__force __u64)*p;
55060 + return (__force const __u64)*p;
55061 }
55062 static inline __le32 __cpu_to_le32p(const __u32 *p)
55063 {
55064 - return (__force __le32)*p;
55065 + return (__force const __le32)*p;
55066 }
55067 static inline __u32 __le32_to_cpup(const __le32 *p)
55068 {
55069 - return (__force __u32)*p;
55070 + return (__force const __u32)*p;
55071 }
55072 static inline __le16 __cpu_to_le16p(const __u16 *p)
55073 {
55074 - return (__force __le16)*p;
55075 + return (__force const __le16)*p;
55076 }
55077 static inline __u16 __le16_to_cpup(const __le16 *p)
55078 {
55079 - return (__force __u16)*p;
55080 + return (__force const __u16)*p;
55081 }
55082 static inline __be64 __cpu_to_be64p(const __u64 *p)
55083 {
55084 - return (__force __be64)__swab64p(p);
55085 + return (__force const __be64)__swab64p(p);
55086 }
55087 static inline __u64 __be64_to_cpup(const __be64 *p)
55088 {
55089 - return __swab64p((__u64 *)p);
55090 + return __swab64p((const __u64 *)p);
55091 }
55092 static inline __be32 __cpu_to_be32p(const __u32 *p)
55093 {
55094 - return (__force __be32)__swab32p(p);
55095 + return (__force const __be32)__swab32p(p);
55096 }
55097 static inline __u32 __be32_to_cpup(const __be32 *p)
55098 {
55099 - return __swab32p((__u32 *)p);
55100 + return __swab32p((const __u32 *)p);
55101 }
55102 static inline __be16 __cpu_to_be16p(const __u16 *p)
55103 {
55104 - return (__force __be16)__swab16p(p);
55105 + return (__force const __be16)__swab16p(p);
55106 }
55107 static inline __u16 __be16_to_cpup(const __be16 *p)
55108 {
55109 - return __swab16p((__u16 *)p);
55110 + return __swab16p((const __u16 *)p);
55111 }
55112 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
55113 #define __le64_to_cpus(x) do { (void)(x); } while (0)
55114 diff -urNp linux-2.6.32.43/include/linux/cache.h linux-2.6.32.43/include/linux/cache.h
55115 --- linux-2.6.32.43/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
55116 +++ linux-2.6.32.43/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
55117 @@ -16,6 +16,10 @@
55118 #define __read_mostly
55119 #endif
55120
55121 +#ifndef __read_only
55122 +#define __read_only __read_mostly
55123 +#endif
55124 +
55125 #ifndef ____cacheline_aligned
55126 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
55127 #endif
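The fallback maps __read_only to __read_mostly; the vmlinux.lds.h hunk earlier in this patch collects a .data.read_only section into .rodata for architectures that define __read_only as a section attribute instead. Typical usage (the variable is invented):

static unsigned long example_limit __read_only = 128;	/* updated only between pax_open_kernel()/pax_close_kernel() */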
55128 diff -urNp linux-2.6.32.43/include/linux/capability.h linux-2.6.32.43/include/linux/capability.h
55129 --- linux-2.6.32.43/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
55130 +++ linux-2.6.32.43/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
55131 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
55132 (security_real_capable_noaudit((t), (cap)) == 0)
55133
55134 extern int capable(int cap);
55135 +int capable_nolog(int cap);
55136
55137 /* audit system wants to get cap info from files as well */
55138 struct dentry;
55139 diff -urNp linux-2.6.32.43/include/linux/compiler-gcc4.h linux-2.6.32.43/include/linux/compiler-gcc4.h
55140 --- linux-2.6.32.43/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
55141 +++ linux-2.6.32.43/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
55142 @@ -36,4 +36,8 @@
55143 the kernel context */
55144 #define __cold __attribute__((__cold__))
55145
55146 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
55147 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
55148 +#define __bos0(ptr) __bos((ptr), 0)
55149 +#define __bos1(ptr) __bos((ptr), 1)
55150 #endif
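__alloc_size marks which argument of an allocator carries the object size, and __bos/__bos0/__bos1 wrap __builtin_object_size so fortified helpers can bound-check copies. A hedged sketch (the allocator and caller are invented; the exact result depends on gcc optimization):

void *example_alloc(size_t n) __alloc_size(1);	/* gcc may now track the returned object's size */

static void example_fill(void)
{
	char *p = example_alloc(16);

	/* __bos0(p) typically folds to 16 here, or (size_t)-1 when the size is unknown */
	if (__bos0(p) >= 8)
		memset(p, 0, 8);
}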
55151 diff -urNp linux-2.6.32.43/include/linux/compiler.h linux-2.6.32.43/include/linux/compiler.h
55152 --- linux-2.6.32.43/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
55153 +++ linux-2.6.32.43/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
55154 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
55155 #define __cold
55156 #endif
55157
55158 +#ifndef __alloc_size
55159 +#define __alloc_size
55160 +#endif
55161 +
55162 +#ifndef __bos
55163 +#define __bos
55164 +#endif
55165 +
55166 +#ifndef __bos0
55167 +#define __bos0
55168 +#endif
55169 +
55170 +#ifndef __bos1
55171 +#define __bos1
55172 +#endif
55173 +
55174 /* Simple shorthand for a section definition */
55175 #ifndef __section
55176 # define __section(S) __attribute__ ((__section__(#S)))
55177 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
55178 * use is to mediate communication between process-level code and irq/NMI
55179 * handlers, all running on the same CPU.
55180 */
55181 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
55182 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
55183 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
55184
55185 #endif /* __LINUX_COMPILER_H */
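Constifying ACCESS_ONCE() makes it read-only; code that needs a volatile write must use the new ACCESS_ONCE_RW(). Illustration (the flag is invented):

static int example_flag;

static void example_poll_and_set(void)
{
	int seen = ACCESS_ONCE(example_flag);	/* volatile read; assigning through this form no longer compiles */

	if (!seen)
		ACCESS_ONCE_RW(example_flag) = 1;	/* explicit volatile write */
}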
55186 diff -urNp linux-2.6.32.43/include/linux/dcache.h linux-2.6.32.43/include/linux/dcache.h
55187 --- linux-2.6.32.43/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
55188 +++ linux-2.6.32.43/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
55189 @@ -119,6 +119,8 @@ struct dentry {
55190 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
55191 };
55192
55193 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
55194 +
55195 /*
55196 * dentry->d_lock spinlock nesting subclasses:
55197 *
55198 diff -urNp linux-2.6.32.43/include/linux/decompress/mm.h linux-2.6.32.43/include/linux/decompress/mm.h
55199 --- linux-2.6.32.43/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
55200 +++ linux-2.6.32.43/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
55201 @@ -78,7 +78,7 @@ static void free(void *where)
55202 * warnings when not needed (indeed large_malloc / large_free are not
55203 * needed by inflate */
55204
55205 -#define malloc(a) kmalloc(a, GFP_KERNEL)
55206 +#define malloc(a) kmalloc((a), GFP_KERNEL)
55207 #define free(a) kfree(a)
55208
55209 #define large_malloc(a) vmalloc(a)
55210 diff -urNp linux-2.6.32.43/include/linux/dma-mapping.h linux-2.6.32.43/include/linux/dma-mapping.h
55211 --- linux-2.6.32.43/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
55212 +++ linux-2.6.32.43/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
55213 @@ -16,50 +16,50 @@ enum dma_data_direction {
55214 };
55215
55216 struct dma_map_ops {
55217 - void* (*alloc_coherent)(struct device *dev, size_t size,
55218 + void* (* const alloc_coherent)(struct device *dev, size_t size,
55219 dma_addr_t *dma_handle, gfp_t gfp);
55220 - void (*free_coherent)(struct device *dev, size_t size,
55221 + void (* const free_coherent)(struct device *dev, size_t size,
55222 void *vaddr, dma_addr_t dma_handle);
55223 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
55224 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
55225 unsigned long offset, size_t size,
55226 enum dma_data_direction dir,
55227 struct dma_attrs *attrs);
55228 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
55229 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
55230 size_t size, enum dma_data_direction dir,
55231 struct dma_attrs *attrs);
55232 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
55233 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
55234 int nents, enum dma_data_direction dir,
55235 struct dma_attrs *attrs);
55236 - void (*unmap_sg)(struct device *dev,
55237 + void (* const unmap_sg)(struct device *dev,
55238 struct scatterlist *sg, int nents,
55239 enum dma_data_direction dir,
55240 struct dma_attrs *attrs);
55241 - void (*sync_single_for_cpu)(struct device *dev,
55242 + void (* const sync_single_for_cpu)(struct device *dev,
55243 dma_addr_t dma_handle, size_t size,
55244 enum dma_data_direction dir);
55245 - void (*sync_single_for_device)(struct device *dev,
55246 + void (* const sync_single_for_device)(struct device *dev,
55247 dma_addr_t dma_handle, size_t size,
55248 enum dma_data_direction dir);
55249 - void (*sync_single_range_for_cpu)(struct device *dev,
55250 + void (* const sync_single_range_for_cpu)(struct device *dev,
55251 dma_addr_t dma_handle,
55252 unsigned long offset,
55253 size_t size,
55254 enum dma_data_direction dir);
55255 - void (*sync_single_range_for_device)(struct device *dev,
55256 + void (* const sync_single_range_for_device)(struct device *dev,
55257 dma_addr_t dma_handle,
55258 unsigned long offset,
55259 size_t size,
55260 enum dma_data_direction dir);
55261 - void (*sync_sg_for_cpu)(struct device *dev,
55262 + void (* const sync_sg_for_cpu)(struct device *dev,
55263 struct scatterlist *sg, int nents,
55264 enum dma_data_direction dir);
55265 - void (*sync_sg_for_device)(struct device *dev,
55266 + void (* const sync_sg_for_device)(struct device *dev,
55267 struct scatterlist *sg, int nents,
55268 enum dma_data_direction dir);
55269 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
55270 - int (*dma_supported)(struct device *dev, u64 mask);
55271 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
55272 + int (* const dma_supported)(struct device *dev, u64 mask);
55273 int (*set_dma_mask)(struct device *dev, u64 mask);
55274 - int is_phys;
55275 + const int is_phys;
55276 };
55277
55278 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
55279 diff -urNp linux-2.6.32.43/include/linux/dst.h linux-2.6.32.43/include/linux/dst.h
55280 --- linux-2.6.32.43/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
55281 +++ linux-2.6.32.43/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
55282 @@ -380,7 +380,7 @@ struct dst_node
55283 struct thread_pool *pool;
55284
55285 /* Transaction IDs live here */
55286 - atomic_long_t gen;
55287 + atomic_long_unchecked_t gen;
55288
55289 /*
55290 * How frequently and how many times transaction
55291 diff -urNp linux-2.6.32.43/include/linux/elf.h linux-2.6.32.43/include/linux/elf.h
55292 --- linux-2.6.32.43/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
55293 +++ linux-2.6.32.43/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
55294 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
55295 #define PT_GNU_EH_FRAME 0x6474e550
55296
55297 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
55298 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
55299 +
55300 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
55301 +
55302 +/* Constants for the e_flags field */
55303 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55304 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
55305 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
55306 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
55307 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55308 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55309
55310 /* These constants define the different elf file types */
55311 #define ET_NONE 0
55312 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
55313 #define DT_DEBUG 21
55314 #define DT_TEXTREL 22
55315 #define DT_JMPREL 23
55316 +#define DT_FLAGS 30
55317 + #define DF_TEXTREL 0x00000004
55318 #define DT_ENCODING 32
55319 #define OLD_DT_LOOS 0x60000000
55320 #define DT_LOOS 0x6000000d
55321 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
55322 #define PF_W 0x2
55323 #define PF_X 0x1
55324
55325 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
55326 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
55327 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
55328 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
55329 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
55330 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
55331 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
55332 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
55333 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
55334 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
55335 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
55336 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
55337 +
55338 typedef struct elf32_phdr{
55339 Elf32_Word p_type;
55340 Elf32_Off p_offset;
55341 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
55342 #define EI_OSABI 7
55343 #define EI_PAD 8
55344
55345 +#define EI_PAX 14
55346 +
55347 #define ELFMAG0 0x7f /* EI_MAG */
55348 #define ELFMAG1 'E'
55349 #define ELFMAG2 'L'
55350 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
55351 #define elf_phdr elf32_phdr
55352 #define elf_note elf32_note
55353 #define elf_addr_t Elf32_Off
55354 +#define elf_dyn Elf32_Dyn
55355
55356 #else
55357
55358 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
55359 #define elf_phdr elf64_phdr
55360 #define elf_note elf64_note
55361 #define elf_addr_t Elf64_Off
55362 +#define elf_dyn Elf64_Dyn
55363
55364 #endif
55365
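PT_PAX_FLAGS adds a dedicated program header whose p_flags field carries the PF_*/PF_NO* bits defined in the same hunk. A hedged sketch of how a loader could pick those flags out of the phdr table (the function is invented):

static unsigned long example_pax_flags(const struct elf_phdr *phdr, unsigned int phnum)
{
	unsigned int i;

	for (i = 0; i < phnum; i++)
		if (phdr[i].p_type == PT_PAX_FLAGS)
			return phdr[i].p_flags;	/* e.g. PF_PAGEEXEC | PF_NOEMUTRAMP */

	return 0UL;
}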
55366 diff -urNp linux-2.6.32.43/include/linux/fscache-cache.h linux-2.6.32.43/include/linux/fscache-cache.h
55367 --- linux-2.6.32.43/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
55368 +++ linux-2.6.32.43/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
55369 @@ -116,7 +116,7 @@ struct fscache_operation {
55370 #endif
55371 };
55372
55373 -extern atomic_t fscache_op_debug_id;
55374 +extern atomic_unchecked_t fscache_op_debug_id;
55375 extern const struct slow_work_ops fscache_op_slow_work_ops;
55376
55377 extern void fscache_enqueue_operation(struct fscache_operation *);
55378 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
55379 fscache_operation_release_t release)
55380 {
55381 atomic_set(&op->usage, 1);
55382 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
55383 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
55384 op->release = release;
55385 INIT_LIST_HEAD(&op->pend_link);
55386 fscache_set_op_state(op, "Init");
55387 diff -urNp linux-2.6.32.43/include/linux/fs.h linux-2.6.32.43/include/linux/fs.h
55388 --- linux-2.6.32.43/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
55389 +++ linux-2.6.32.43/include/linux/fs.h 2011-07-13 17:23:19.000000000 -0400
55390 @@ -90,6 +90,11 @@ struct inodes_stat_t {
55391 /* Expect random access pattern */
55392 #define FMODE_RANDOM ((__force fmode_t)4096)
55393
55394 +/* Hack for grsec so as not to require read permission simply to execute
55395 + * a binary
55396 + */
55397 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
55398 +
55399 /*
55400 * The below are the various read and write types that we support. Some of
55401 * them include behavioral modifiers that send information down to the
55402 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
55403 unsigned long, unsigned long);
55404
55405 struct address_space_operations {
55406 - int (*writepage)(struct page *page, struct writeback_control *wbc);
55407 - int (*readpage)(struct file *, struct page *);
55408 - void (*sync_page)(struct page *);
55409 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
55410 + int (* const readpage)(struct file *, struct page *);
55411 + void (* const sync_page)(struct page *);
55412
55413 /* Write back some dirty pages from this mapping. */
55414 - int (*writepages)(struct address_space *, struct writeback_control *);
55415 + int (* const writepages)(struct address_space *, struct writeback_control *);
55416
55417 /* Set a page dirty. Return true if this dirtied it */
55418 - int (*set_page_dirty)(struct page *page);
55419 + int (* const set_page_dirty)(struct page *page);
55420
55421 - int (*readpages)(struct file *filp, struct address_space *mapping,
55422 + int (* const readpages)(struct file *filp, struct address_space *mapping,
55423 struct list_head *pages, unsigned nr_pages);
55424
55425 - int (*write_begin)(struct file *, struct address_space *mapping,
55426 + int (* const write_begin)(struct file *, struct address_space *mapping,
55427 loff_t pos, unsigned len, unsigned flags,
55428 struct page **pagep, void **fsdata);
55429 - int (*write_end)(struct file *, struct address_space *mapping,
55430 + int (* const write_end)(struct file *, struct address_space *mapping,
55431 loff_t pos, unsigned len, unsigned copied,
55432 struct page *page, void *fsdata);
55433
55434 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
55435 - sector_t (*bmap)(struct address_space *, sector_t);
55436 - void (*invalidatepage) (struct page *, unsigned long);
55437 - int (*releasepage) (struct page *, gfp_t);
55438 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
55439 + sector_t (* const bmap)(struct address_space *, sector_t);
55440 + void (* const invalidatepage) (struct page *, unsigned long);
55441 + int (* const releasepage) (struct page *, gfp_t);
55442 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
55443 loff_t offset, unsigned long nr_segs);
55444 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
55445 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
55446 void **, unsigned long *);
55447 /* migrate the contents of a page to the specified target */
55448 - int (*migratepage) (struct address_space *,
55449 + int (* const migratepage) (struct address_space *,
55450 struct page *, struct page *);
55451 - int (*launder_page) (struct page *);
55452 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
55453 + int (* const launder_page) (struct page *);
55454 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
55455 unsigned long);
55456 - int (*error_remove_page)(struct address_space *, struct page *);
55457 + int (* const error_remove_page)(struct address_space *, struct page *);
55458 };
55459
55460 /*
55461 @@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
55462 typedef struct files_struct *fl_owner_t;
55463
55464 struct file_lock_operations {
55465 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55466 - void (*fl_release_private)(struct file_lock *);
55467 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55468 + void (* const fl_release_private)(struct file_lock *);
55469 };
55470
55471 struct lock_manager_operations {
55472 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
55473 - void (*fl_notify)(struct file_lock *); /* unblock callback */
55474 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
55475 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55476 - void (*fl_release_private)(struct file_lock *);
55477 - void (*fl_break)(struct file_lock *);
55478 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
55479 - int (*fl_change)(struct file_lock **, int);
55480 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
55481 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
55482 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
55483 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55484 + void (* const fl_release_private)(struct file_lock *);
55485 + void (* const fl_break)(struct file_lock *);
55486 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
55487 + int (* const fl_change)(struct file_lock **, int);
55488 };
55489
55490 struct lock_manager {
55491 @@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
55492 unsigned int fi_flags; /* Flags as passed from user */
55493 unsigned int fi_extents_mapped; /* Number of mapped extents */
55494 unsigned int fi_extents_max; /* Size of fiemap_extent array */
55495 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
55496 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
55497 * array */
55498 };
55499 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
55500 @@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
55501 unsigned long, loff_t *);
55502
55503 struct super_operations {
55504 - struct inode *(*alloc_inode)(struct super_block *sb);
55505 - void (*destroy_inode)(struct inode *);
55506 + struct inode *(* const alloc_inode)(struct super_block *sb);
55507 + void (* const destroy_inode)(struct inode *);
55508
55509 - void (*dirty_inode) (struct inode *);
55510 - int (*write_inode) (struct inode *, int);
55511 - void (*drop_inode) (struct inode *);
55512 - void (*delete_inode) (struct inode *);
55513 - void (*put_super) (struct super_block *);
55514 - void (*write_super) (struct super_block *);
55515 - int (*sync_fs)(struct super_block *sb, int wait);
55516 - int (*freeze_fs) (struct super_block *);
55517 - int (*unfreeze_fs) (struct super_block *);
55518 - int (*statfs) (struct dentry *, struct kstatfs *);
55519 - int (*remount_fs) (struct super_block *, int *, char *);
55520 - void (*clear_inode) (struct inode *);
55521 - void (*umount_begin) (struct super_block *);
55522 + void (* const dirty_inode) (struct inode *);
55523 + int (* const write_inode) (struct inode *, int);
55524 + void (* const drop_inode) (struct inode *);
55525 + void (* const delete_inode) (struct inode *);
55526 + void (* const put_super) (struct super_block *);
55527 + void (* const write_super) (struct super_block *);
55528 + int (* const sync_fs)(struct super_block *sb, int wait);
55529 + int (* const freeze_fs) (struct super_block *);
55530 + int (* const unfreeze_fs) (struct super_block *);
55531 + int (* const statfs) (struct dentry *, struct kstatfs *);
55532 + int (* const remount_fs) (struct super_block *, int *, char *);
55533 + void (* const clear_inode) (struct inode *);
55534 + void (* const umount_begin) (struct super_block *);
55535
55536 - int (*show_options)(struct seq_file *, struct vfsmount *);
55537 - int (*show_stats)(struct seq_file *, struct vfsmount *);
55538 + int (* const show_options)(struct seq_file *, struct vfsmount *);
55539 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
55540 #ifdef CONFIG_QUOTA
55541 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
55542 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55543 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
55544 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55545 #endif
55546 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55547 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55548 };
55549
55550 /*
55551 diff -urNp linux-2.6.32.43/include/linux/fs_struct.h linux-2.6.32.43/include/linux/fs_struct.h
55552 --- linux-2.6.32.43/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
55553 +++ linux-2.6.32.43/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
55554 @@ -4,7 +4,7 @@
55555 #include <linux/path.h>
55556
55557 struct fs_struct {
55558 - int users;
55559 + atomic_t users;
55560 rwlock_t lock;
55561 int umask;
55562 int in_exec;
55563 diff -urNp linux-2.6.32.43/include/linux/ftrace_event.h linux-2.6.32.43/include/linux/ftrace_event.h
55564 --- linux-2.6.32.43/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
55565 +++ linux-2.6.32.43/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
55566 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
55567 int filter_type);
55568 extern int trace_define_common_fields(struct ftrace_event_call *call);
55569
55570 -#define is_signed_type(type) (((type)(-1)) < 0)
55571 +#define is_signed_type(type) (((type)(-1)) < (type)1)
55572
55573 int trace_set_clr_event(const char *system, const char *event, int set);
55574
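Comparing against (type)1 instead of 0 gives the same truth value for every integer type while silencing gcc's "comparison of unsigned expression < 0 is always false" warning. A standalone userspace check of the reworked macro, for illustration:

#include <assert.h>

#define is_signed_type(type)	(((type)(-1)) < (type)1)

int main(void)
{
	assert(is_signed_type(int) == 1);		/* (int)-1 < 1 holds */
	assert(is_signed_type(unsigned int) == 0);	/* UINT_MAX < 1u never holds */
	return 0;
}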
55575 diff -urNp linux-2.6.32.43/include/linux/genhd.h linux-2.6.32.43/include/linux/genhd.h
55576 --- linux-2.6.32.43/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
55577 +++ linux-2.6.32.43/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
55578 @@ -161,7 +161,7 @@ struct gendisk {
55579
55580 struct timer_rand_state *random;
55581
55582 - atomic_t sync_io; /* RAID */
55583 + atomic_unchecked_t sync_io; /* RAID */
55584 struct work_struct async_notify;
55585 #ifdef CONFIG_BLK_DEV_INTEGRITY
55586 struct blk_integrity *integrity;
55587 diff -urNp linux-2.6.32.43/include/linux/gracl.h linux-2.6.32.43/include/linux/gracl.h
55588 --- linux-2.6.32.43/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
55589 +++ linux-2.6.32.43/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
55590 @@ -0,0 +1,317 @@
55591 +#ifndef GR_ACL_H
55592 +#define GR_ACL_H
55593 +
55594 +#include <linux/grdefs.h>
55595 +#include <linux/resource.h>
55596 +#include <linux/capability.h>
55597 +#include <linux/dcache.h>
55598 +#include <asm/resource.h>
55599 +
55600 +/* Major status information */
55601 +
55602 +#define GR_VERSION "grsecurity 2.2.2"
55603 +#define GRSECURITY_VERSION 0x2202
55604 +
55605 +enum {
55606 + GR_SHUTDOWN = 0,
55607 + GR_ENABLE = 1,
55608 + GR_SPROLE = 2,
55609 + GR_RELOAD = 3,
55610 + GR_SEGVMOD = 4,
55611 + GR_STATUS = 5,
55612 + GR_UNSPROLE = 6,
55613 + GR_PASSSET = 7,
55614 + GR_SPROLEPAM = 8,
55615 +};
55616 +
55617 +/* Password setup definitions
55618 + * kernel/grhash.c */
55619 +enum {
55620 + GR_PW_LEN = 128,
55621 + GR_SALT_LEN = 16,
55622 + GR_SHA_LEN = 32,
55623 +};
55624 +
55625 +enum {
55626 + GR_SPROLE_LEN = 64,
55627 +};
55628 +
55629 +enum {
55630 + GR_NO_GLOB = 0,
55631 + GR_REG_GLOB,
55632 + GR_CREATE_GLOB
55633 +};
55634 +
55635 +#define GR_NLIMITS 32
55636 +
55637 +/* Begin Data Structures */
55638 +
55639 +struct sprole_pw {
55640 + unsigned char *rolename;
55641 + unsigned char salt[GR_SALT_LEN];
55642 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
55643 +};
55644 +
55645 +struct name_entry {
55646 + __u32 key;
55647 + ino_t inode;
55648 + dev_t device;
55649 + char *name;
55650 + __u16 len;
55651 + __u8 deleted;
55652 + struct name_entry *prev;
55653 + struct name_entry *next;
55654 +};
55655 +
55656 +struct inodev_entry {
55657 + struct name_entry *nentry;
55658 + struct inodev_entry *prev;
55659 + struct inodev_entry *next;
55660 +};
55661 +
55662 +struct acl_role_db {
55663 + struct acl_role_label **r_hash;
55664 + __u32 r_size;
55665 +};
55666 +
55667 +struct inodev_db {
55668 + struct inodev_entry **i_hash;
55669 + __u32 i_size;
55670 +};
55671 +
55672 +struct name_db {
55673 + struct name_entry **n_hash;
55674 + __u32 n_size;
55675 +};
55676 +
55677 +struct crash_uid {
55678 + uid_t uid;
55679 + unsigned long expires;
55680 +};
55681 +
55682 +struct gr_hash_struct {
55683 + void **table;
55684 + void **nametable;
55685 + void *first;
55686 + __u32 table_size;
55687 + __u32 used_size;
55688 + int type;
55689 +};
55690 +
55691 +/* Userspace Grsecurity ACL data structures */
55692 +
55693 +struct acl_subject_label {
55694 + char *filename;
55695 + ino_t inode;
55696 + dev_t device;
55697 + __u32 mode;
55698 + kernel_cap_t cap_mask;
55699 + kernel_cap_t cap_lower;
55700 + kernel_cap_t cap_invert_audit;
55701 +
55702 + struct rlimit res[GR_NLIMITS];
55703 + __u32 resmask;
55704 +
55705 + __u8 user_trans_type;
55706 + __u8 group_trans_type;
55707 + uid_t *user_transitions;
55708 + gid_t *group_transitions;
55709 + __u16 user_trans_num;
55710 + __u16 group_trans_num;
55711 +
55712 + __u32 sock_families[2];
55713 + __u32 ip_proto[8];
55714 + __u32 ip_type;
55715 + struct acl_ip_label **ips;
55716 + __u32 ip_num;
55717 + __u32 inaddr_any_override;
55718 +
55719 + __u32 crashes;
55720 + unsigned long expires;
55721 +
55722 + struct acl_subject_label *parent_subject;
55723 + struct gr_hash_struct *hash;
55724 + struct acl_subject_label *prev;
55725 + struct acl_subject_label *next;
55726 +
55727 + struct acl_object_label **obj_hash;
55728 + __u32 obj_hash_size;
55729 + __u16 pax_flags;
55730 +};
55731 +
55732 +struct role_allowed_ip {
55733 + __u32 addr;
55734 + __u32 netmask;
55735 +
55736 + struct role_allowed_ip *prev;
55737 + struct role_allowed_ip *next;
55738 +};
55739 +
55740 +struct role_transition {
55741 + char *rolename;
55742 +
55743 + struct role_transition *prev;
55744 + struct role_transition *next;
55745 +};
55746 +
55747 +struct acl_role_label {
55748 + char *rolename;
55749 + uid_t uidgid;
55750 + __u16 roletype;
55751 +
55752 + __u16 auth_attempts;
55753 + unsigned long expires;
55754 +
55755 + struct acl_subject_label *root_label;
55756 + struct gr_hash_struct *hash;
55757 +
55758 + struct acl_role_label *prev;
55759 + struct acl_role_label *next;
55760 +
55761 + struct role_transition *transitions;
55762 + struct role_allowed_ip *allowed_ips;
55763 + uid_t *domain_children;
55764 + __u16 domain_child_num;
55765 +
55766 + struct acl_subject_label **subj_hash;
55767 + __u32 subj_hash_size;
55768 +};
55769 +
55770 +struct user_acl_role_db {
55771 + struct acl_role_label **r_table;
55772 + __u32 num_pointers; /* Number of allocations to track */
55773 + __u32 num_roles; /* Number of roles */
55774 + __u32 num_domain_children; /* Number of domain children */
55775 + __u32 num_subjects; /* Number of subjects */
55776 + __u32 num_objects; /* Number of objects */
55777 +};
55778 +
55779 +struct acl_object_label {
55780 + char *filename;
55781 + ino_t inode;
55782 + dev_t device;
55783 + __u32 mode;
55784 +
55785 + struct acl_subject_label *nested;
55786 + struct acl_object_label *globbed;
55787 +
55788 + /* next two structures not used */
55789 +
55790 + struct acl_object_label *prev;
55791 + struct acl_object_label *next;
55792 +};
55793 +
55794 +struct acl_ip_label {
55795 + char *iface;
55796 + __u32 addr;
55797 + __u32 netmask;
55798 + __u16 low, high;
55799 + __u8 mode;
55800 + __u32 type;
55801 + __u32 proto[8];
55802 +
55803 + /* next two structures not used */
55804 +
55805 + struct acl_ip_label *prev;
55806 + struct acl_ip_label *next;
55807 +};
55808 +
55809 +struct gr_arg {
55810 + struct user_acl_role_db role_db;
55811 + unsigned char pw[GR_PW_LEN];
55812 + unsigned char salt[GR_SALT_LEN];
55813 + unsigned char sum[GR_SHA_LEN];
55814 + unsigned char sp_role[GR_SPROLE_LEN];
55815 + struct sprole_pw *sprole_pws;
55816 + dev_t segv_device;
55817 + ino_t segv_inode;
55818 + uid_t segv_uid;
55819 + __u16 num_sprole_pws;
55820 + __u16 mode;
55821 +};
55822 +
55823 +struct gr_arg_wrapper {
55824 + struct gr_arg *arg;
55825 + __u32 version;
55826 + __u32 size;
55827 +};
55828 +
55829 +struct subject_map {
55830 + struct acl_subject_label *user;
55831 + struct acl_subject_label *kernel;
55832 + struct subject_map *prev;
55833 + struct subject_map *next;
55834 +};
55835 +
55836 +struct acl_subj_map_db {
55837 + struct subject_map **s_hash;
55838 + __u32 s_size;
55839 +};
55840 +
55841 +/* End Data Structures Section */
55842 +
55843 +/* Hash functions generated by empirical testing by Brad Spengler
55844 + Makes good use of the low bits of the inode. Generally 0-1 times
55845 + in loop for successful match. 0-3 for unsuccessful match.
55846 + Shift/add algorithm with modulus of table size and an XOR*/
55847 +
55848 +static __inline__ unsigned int
55849 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55850 +{
55851 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55852 +}
55853 +
55854 + static __inline__ unsigned int
55855 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55856 +{
55857 + return ((const unsigned long)userp % sz);
55858 +}
55859 +
55860 +static __inline__ unsigned int
55861 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55862 +{
55863 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55864 +}
55865 +
55866 +static __inline__ unsigned int
55867 +nhash(const char *name, const __u16 len, const unsigned int sz)
55868 +{
55869 + return full_name_hash((const unsigned char *)name, len) % sz;
55870 +}
55871 +
55872 +#define FOR_EACH_ROLE_START(role) \
55873 + role = role_list; \
55874 + while (role) {
55875 +
55876 +#define FOR_EACH_ROLE_END(role) \
55877 + role = role->prev; \
55878 + }
55879 +
55880 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55881 + subj = NULL; \
55882 + iter = 0; \
55883 + while (iter < role->subj_hash_size) { \
55884 + if (subj == NULL) \
55885 + subj = role->subj_hash[iter]; \
55886 + if (subj == NULL) { \
55887 + iter++; \
55888 + continue; \
55889 + }
55890 +
55891 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55892 + subj = subj->next; \
55893 + if (subj == NULL) \
55894 + iter++; \
55895 + }
55896 +
55897 +
55898 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55899 + subj = role->hash->first; \
55900 + while (subj != NULL) {
55901 +
55902 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55903 + subj = subj->next; \
55904 + }
55905 +
55906 +#endif
55907 +
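The inline hash helpers above feed the fixed-size tables declared in this header (subj_hash, obj_hash, the inodev and name databases). A hedged sketch of a bucket lookup through fhash(); chaining via ->next is an assumption made for the example, not something this header guarantees:

static struct acl_object_label *
example_lookup_object(const struct acl_subject_label *subj, const ino_t ino, const dev_t dev)
{
	struct acl_object_label *obj = subj->obj_hash[fhash(ino, dev, subj->obj_hash_size)];

	while (obj && (obj->inode != ino || obj->device != dev))
		obj = obj->next;	/* assumption: bucket entries are chained through ->next */

	return obj;
}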
55908 diff -urNp linux-2.6.32.43/include/linux/gralloc.h linux-2.6.32.43/include/linux/gralloc.h
55909 --- linux-2.6.32.43/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55910 +++ linux-2.6.32.43/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55911 @@ -0,0 +1,9 @@
55912 +#ifndef __GRALLOC_H
55913 +#define __GRALLOC_H
55914 +
55915 +void acl_free_all(void);
55916 +int acl_alloc_stack_init(unsigned long size);
55917 +void *acl_alloc(unsigned long len);
55918 +void *acl_alloc_num(unsigned long num, unsigned long len);
55919 +
55920 +#endif
55921 diff -urNp linux-2.6.32.43/include/linux/grdefs.h linux-2.6.32.43/include/linux/grdefs.h
55922 --- linux-2.6.32.43/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55923 +++ linux-2.6.32.43/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55924 @@ -0,0 +1,140 @@
55925 +#ifndef GRDEFS_H
55926 +#define GRDEFS_H
55927 +
55928 +/* Begin grsecurity status declarations */
55929 +
55930 +enum {
55931 + GR_READY = 0x01,
55932 + GR_STATUS_INIT = 0x00 // disabled state
55933 +};
55934 +
55935 +/* Begin ACL declarations */
55936 +
55937 +/* Role flags */
55938 +
55939 +enum {
55940 + GR_ROLE_USER = 0x0001,
55941 + GR_ROLE_GROUP = 0x0002,
55942 + GR_ROLE_DEFAULT = 0x0004,
55943 + GR_ROLE_SPECIAL = 0x0008,
55944 + GR_ROLE_AUTH = 0x0010,
55945 + GR_ROLE_NOPW = 0x0020,
55946 + GR_ROLE_GOD = 0x0040,
55947 + GR_ROLE_LEARN = 0x0080,
55948 + GR_ROLE_TPE = 0x0100,
55949 + GR_ROLE_DOMAIN = 0x0200,
55950 + GR_ROLE_PAM = 0x0400,
55951 + GR_ROLE_PERSIST = 0x800
55952 +};
55953 +
55954 +/* ACL Subject and Object mode flags */
55955 +enum {
55956 + GR_DELETED = 0x80000000
55957 +};
55958 +
55959 +/* ACL Object-only mode flags */
55960 +enum {
55961 + GR_READ = 0x00000001,
55962 + GR_APPEND = 0x00000002,
55963 + GR_WRITE = 0x00000004,
55964 + GR_EXEC = 0x00000008,
55965 + GR_FIND = 0x00000010,
55966 + GR_INHERIT = 0x00000020,
55967 + GR_SETID = 0x00000040,
55968 + GR_CREATE = 0x00000080,
55969 + GR_DELETE = 0x00000100,
55970 + GR_LINK = 0x00000200,
55971 + GR_AUDIT_READ = 0x00000400,
55972 + GR_AUDIT_APPEND = 0x00000800,
55973 + GR_AUDIT_WRITE = 0x00001000,
55974 + GR_AUDIT_EXEC = 0x00002000,
55975 + GR_AUDIT_FIND = 0x00004000,
55976 + GR_AUDIT_INHERIT= 0x00008000,
55977 + GR_AUDIT_SETID = 0x00010000,
55978 + GR_AUDIT_CREATE = 0x00020000,
55979 + GR_AUDIT_DELETE = 0x00040000,
55980 + GR_AUDIT_LINK = 0x00080000,
55981 + GR_PTRACERD = 0x00100000,
55982 + GR_NOPTRACE = 0x00200000,
55983 + GR_SUPPRESS = 0x00400000,
55984 + GR_NOLEARN = 0x00800000,
55985 + GR_INIT_TRANSFER= 0x01000000
55986 +};
55987 +
55988 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55989 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55990 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55991 +
55992 +/* ACL subject-only mode flags */
55993 +enum {
55994 + GR_KILL = 0x00000001,
55995 + GR_VIEW = 0x00000002,
55996 + GR_PROTECTED = 0x00000004,
55997 + GR_LEARN = 0x00000008,
55998 + GR_OVERRIDE = 0x00000010,
55999 + /* just a placeholder, this mode is only used in userspace */
56000 + GR_DUMMY = 0x00000020,
56001 + GR_PROTSHM = 0x00000040,
56002 + GR_KILLPROC = 0x00000080,
56003 + GR_KILLIPPROC = 0x00000100,
56004 + /* just a placeholder, this mode is only used in userspace */
56005 + GR_NOTROJAN = 0x00000200,
56006 + GR_PROTPROCFD = 0x00000400,
56007 + GR_PROCACCT = 0x00000800,
56008 + GR_RELAXPTRACE = 0x00001000,
56009 + GR_NESTED = 0x00002000,
56010 + GR_INHERITLEARN = 0x00004000,
56011 + GR_PROCFIND = 0x00008000,
56012 + GR_POVERRIDE = 0x00010000,
56013 + GR_KERNELAUTH = 0x00020000,
56014 + GR_ATSECURE = 0x00040000,
56015 + GR_SHMEXEC = 0x00080000
56016 +};
56017 +
56018 +enum {
56019 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
56020 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
56021 + GR_PAX_ENABLE_MPROTECT = 0x0004,
56022 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
56023 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
56024 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
56025 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
56026 + GR_PAX_DISABLE_MPROTECT = 0x0400,
56027 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
56028 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
56029 +};
56030 +
56031 +enum {
56032 + GR_ID_USER = 0x01,
56033 + GR_ID_GROUP = 0x02,
56034 +};
56035 +
56036 +enum {
56037 + GR_ID_ALLOW = 0x01,
56038 + GR_ID_DENY = 0x02,
56039 +};
56040 +
56041 +#define GR_CRASH_RES 31
56042 +#define GR_UIDTABLE_MAX 500
56043 +
56044 +/* begin resource learning section */
56045 +enum {
56046 + GR_RLIM_CPU_BUMP = 60,
56047 + GR_RLIM_FSIZE_BUMP = 50000,
56048 + GR_RLIM_DATA_BUMP = 10000,
56049 + GR_RLIM_STACK_BUMP = 1000,
56050 + GR_RLIM_CORE_BUMP = 10000,
56051 + GR_RLIM_RSS_BUMP = 500000,
56052 + GR_RLIM_NPROC_BUMP = 1,
56053 + GR_RLIM_NOFILE_BUMP = 5,
56054 + GR_RLIM_MEMLOCK_BUMP = 50000,
56055 + GR_RLIM_AS_BUMP = 500000,
56056 + GR_RLIM_LOCKS_BUMP = 2,
56057 + GR_RLIM_SIGPENDING_BUMP = 5,
56058 + GR_RLIM_MSGQUEUE_BUMP = 10000,
56059 + GR_RLIM_NICE_BUMP = 1,
56060 + GR_RLIM_RTPRIO_BUMP = 1,
56061 + GR_RLIM_RTTIME_BUMP = 1000000
56062 +};
56063 +
56064 +#endif
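A side note on the object-mode flags added in grdefs.h above: each GR_AUDIT_* value sits exactly ten bits above its base flag (GR_READ 0x1 -> GR_AUDIT_READ 0x400, GR_WRITE 0x4 -> GR_AUDIT_WRITE 0x1000, and so on), so a requested access mode maps onto its audit mask with a single shift. The following is a minimal user-space sketch of that relationship only; it is not the patch's to_gr_audit() implementation (declared later in grinternal.h), and demo_to_audit() is a made-up name for the illustration.

/* Illustrative only: the audit flags in grdefs.h are the base object
 * flags shifted left by 10 bits, so a request mode maps onto its audit
 * counterpart with one shift, masked to the defined audit bits. */
#include <assert.h>

#define GR_READ        0x00000001u
#define GR_WRITE       0x00000004u
#define GR_AUDIT_READ  0x00000400u
#define GR_AUDIT_WRITE 0x00001000u
#define GR_AUDITS      (GR_AUDIT_READ | GR_AUDIT_WRITE) /* subset, demo only */

static unsigned int demo_to_audit(unsigned int reqmode)
{
	return (reqmode << 10) & GR_AUDITS;
}

int main(void)
{
	assert(demo_to_audit(GR_READ) == GR_AUDIT_READ);
	assert(demo_to_audit(GR_WRITE) == GR_AUDIT_WRITE);
	assert(demo_to_audit(GR_READ | GR_WRITE) == (GR_AUDIT_READ | GR_AUDIT_WRITE));
	return 0;
}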
56065 diff -urNp linux-2.6.32.43/include/linux/grinternal.h linux-2.6.32.43/include/linux/grinternal.h
56066 --- linux-2.6.32.43/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
56067 +++ linux-2.6.32.43/include/linux/grinternal.h 2011-07-14 20:35:29.000000000 -0400
56068 @@ -0,0 +1,218 @@
56069 +#ifndef __GRINTERNAL_H
56070 +#define __GRINTERNAL_H
56071 +
56072 +#ifdef CONFIG_GRKERNSEC
56073 +
56074 +#include <linux/fs.h>
56075 +#include <linux/mnt_namespace.h>
56076 +#include <linux/nsproxy.h>
56077 +#include <linux/gracl.h>
56078 +#include <linux/grdefs.h>
56079 +#include <linux/grmsg.h>
56080 +
56081 +void gr_add_learn_entry(const char *fmt, ...)
56082 + __attribute__ ((format (printf, 1, 2)));
56083 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
56084 + const struct vfsmount *mnt);
56085 +__u32 gr_check_create(const struct dentry *new_dentry,
56086 + const struct dentry *parent,
56087 + const struct vfsmount *mnt, const __u32 mode);
56088 +int gr_check_protected_task(const struct task_struct *task);
56089 +__u32 to_gr_audit(const __u32 reqmode);
56090 +int gr_set_acls(const int type);
56091 +int gr_apply_subject_to_task(struct task_struct *task);
56092 +int gr_acl_is_enabled(void);
56093 +char gr_roletype_to_char(void);
56094 +
56095 +void gr_handle_alertkill(struct task_struct *task);
56096 +char *gr_to_filename(const struct dentry *dentry,
56097 + const struct vfsmount *mnt);
56098 +char *gr_to_filename1(const struct dentry *dentry,
56099 + const struct vfsmount *mnt);
56100 +char *gr_to_filename2(const struct dentry *dentry,
56101 + const struct vfsmount *mnt);
56102 +char *gr_to_filename3(const struct dentry *dentry,
56103 + const struct vfsmount *mnt);
56104 +
56105 +extern int grsec_enable_harden_ptrace;
56106 +extern int grsec_enable_link;
56107 +extern int grsec_enable_fifo;
56108 +extern int grsec_enable_execve;
56109 +extern int grsec_enable_shm;
56110 +extern int grsec_enable_execlog;
56111 +extern int grsec_enable_signal;
56112 +extern int grsec_enable_audit_ptrace;
56113 +extern int grsec_enable_forkfail;
56114 +extern int grsec_enable_time;
56115 +extern int grsec_enable_rofs;
56116 +extern int grsec_enable_chroot_shmat;
56117 +extern int grsec_enable_chroot_mount;
56118 +extern int grsec_enable_chroot_double;
56119 +extern int grsec_enable_chroot_pivot;
56120 +extern int grsec_enable_chroot_chdir;
56121 +extern int grsec_enable_chroot_chmod;
56122 +extern int grsec_enable_chroot_mknod;
56123 +extern int grsec_enable_chroot_fchdir;
56124 +extern int grsec_enable_chroot_nice;
56125 +extern int grsec_enable_chroot_execlog;
56126 +extern int grsec_enable_chroot_caps;
56127 +extern int grsec_enable_chroot_sysctl;
56128 +extern int grsec_enable_chroot_unix;
56129 +extern int grsec_enable_tpe;
56130 +extern int grsec_tpe_gid;
56131 +extern int grsec_enable_tpe_all;
56132 +extern int grsec_enable_tpe_invert;
56133 +extern int grsec_enable_socket_all;
56134 +extern int grsec_socket_all_gid;
56135 +extern int grsec_enable_socket_client;
56136 +extern int grsec_socket_client_gid;
56137 +extern int grsec_enable_socket_server;
56138 +extern int grsec_socket_server_gid;
56139 +extern int grsec_audit_gid;
56140 +extern int grsec_enable_group;
56141 +extern int grsec_enable_audit_textrel;
56142 +extern int grsec_enable_log_rwxmaps;
56143 +extern int grsec_enable_mount;
56144 +extern int grsec_enable_chdir;
56145 +extern int grsec_resource_logging;
56146 +extern int grsec_enable_blackhole;
56147 +extern int grsec_lastack_retries;
56148 +extern int grsec_enable_brute;
56149 +extern int grsec_lock;
56150 +
56151 +extern spinlock_t grsec_alert_lock;
56152 +extern unsigned long grsec_alert_wtime;
56153 +extern unsigned long grsec_alert_fyet;
56154 +
56155 +extern spinlock_t grsec_audit_lock;
56156 +
56157 +extern rwlock_t grsec_exec_file_lock;
56158 +
56159 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
56160 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
56161 + (tsk)->exec_file->f_vfsmnt) : "/")
56162 +
56163 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
56164 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
56165 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56166 +
56167 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
56168 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
56169 + (tsk)->exec_file->f_vfsmnt) : "/")
56170 +
56171 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
56172 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
56173 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56174 +
56175 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
56176 +
56177 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
56178 +
56179 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
56180 + (task)->pid, (cred)->uid, \
56181 + (cred)->euid, (cred)->gid, (cred)->egid, \
56182 + gr_parent_task_fullpath(task), \
56183 + (task)->real_parent->comm, (task)->real_parent->pid, \
56184 + (pcred)->uid, (pcred)->euid, \
56185 + (pcred)->gid, (pcred)->egid
56186 +
56187 +#define GR_CHROOT_CAPS {{ \
56188 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
56189 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
56190 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
56191 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
56192 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
56193 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
56194 +
56195 +#define security_learn(normal_msg,args...) \
56196 +({ \
56197 + read_lock(&grsec_exec_file_lock); \
56198 + gr_add_learn_entry(normal_msg "\n", ## args); \
56199 + read_unlock(&grsec_exec_file_lock); \
56200 +})
56201 +
56202 +enum {
56203 + GR_DO_AUDIT,
56204 + GR_DONT_AUDIT,
56205 + GR_DONT_AUDIT_GOOD
56206 +};
56207 +
56208 +enum {
56209 + GR_TTYSNIFF,
56210 + GR_RBAC,
56211 + GR_RBAC_STR,
56212 + GR_STR_RBAC,
56213 + GR_RBAC_MODE2,
56214 + GR_RBAC_MODE3,
56215 + GR_FILENAME,
56216 + GR_SYSCTL_HIDDEN,
56217 + GR_NOARGS,
56218 + GR_ONE_INT,
56219 + GR_ONE_INT_TWO_STR,
56220 + GR_ONE_STR,
56221 + GR_STR_INT,
56222 + GR_TWO_STR_INT,
56223 + GR_TWO_INT,
56224 + GR_TWO_U64,
56225 + GR_THREE_INT,
56226 + GR_FIVE_INT_TWO_STR,
56227 + GR_TWO_STR,
56228 + GR_THREE_STR,
56229 + GR_FOUR_STR,
56230 + GR_STR_FILENAME,
56231 + GR_FILENAME_STR,
56232 + GR_FILENAME_TWO_INT,
56233 + GR_FILENAME_TWO_INT_STR,
56234 + GR_TEXTREL,
56235 + GR_PTRACE,
56236 + GR_RESOURCE,
56237 + GR_CAP,
56238 + GR_SIG,
56239 + GR_SIG2,
56240 + GR_CRASH1,
56241 + GR_CRASH2,
56242 + GR_PSACCT,
56243 + GR_RWXMAP
56244 +};
56245 +
56246 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
56247 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
56248 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
56249 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
56250 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
56251 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
56252 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
56253 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
56254 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
56255 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
56256 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
56257 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
56258 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
56259 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
56260 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
56261 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
56262 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
56263 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
56264 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
56265 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
56266 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
56267 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
56268 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
56269 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
56270 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
56271 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
56272 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
56273 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
56274 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
56275 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
56276 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
56277 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
56278 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
56279 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
56280 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
56281 +
56282 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
56283 +
56284 +#endif
56285 +
56286 +#endif
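The gr_log_* wrappers declared in grinternal.h above are thin veneers over the single variadic gr_log_varargs(): each macro tags its arguments with one of the GR_* argtype enumerators so the logger knows how to unpack them. The self-contained user-space mock below only demonstrates that expansion path; the stub gr_log_varargs() body and the message text are invented for the demo and are not code from the patch.

/* Self-contained mock of the wrapper-macro pattern from grinternal.h.
 * Names mirror the patch; the logger body below exists only so the
 * demo runs and prints something. */
#include <stdarg.h>
#include <stdio.h>

enum { GR_DO_AUDIT, GR_DONT_AUDIT, GR_DONT_AUDIT_GOOD };
enum { GR_ONE_INT, GR_STR_INT };	/* subset of the real argtype enum */

static void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	if (argtypes == GR_STR_INT) {
		const char *str = va_arg(ap, const char *);
		int num = va_arg(ap, int);

		printf("grsec: audit=%d ", audit);
		printf(msg, str, num);
		putchar('\n');
	}
	va_end(ap);
}

#define gr_log_str_int(audit, msg, str, num) \
	gr_log_varargs(audit, msg, GR_STR_INT, str, num)

int main(void)
{
	/* hypothetical message text; the real strings live in grmsg.h */
	gr_log_str_int(GR_DONT_AUDIT, "denied use of %s port %d by ", "tcp", 22);
	return 0;
}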
56287 diff -urNp linux-2.6.32.43/include/linux/grmsg.h linux-2.6.32.43/include/linux/grmsg.h
56288 --- linux-2.6.32.43/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
56289 +++ linux-2.6.32.43/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
56290 @@ -0,0 +1,108 @@
56291 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
56292 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
56293 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
56294 +#define GR_STOPMOD_MSG "denied modification of module state by "
56295 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
56296 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
56297 +#define GR_IOPERM_MSG "denied use of ioperm() by "
56298 +#define GR_IOPL_MSG "denied use of iopl() by "
56299 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
56300 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
56301 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
56302 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
56303 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
56304 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
56305 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
56306 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
56307 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
56308 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
56309 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
56310 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
56311 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
56312 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
56313 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
56314 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
56315 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
56316 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
56317 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
56318 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
56319 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
56320 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
56321 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
56322 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
56323 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
56324 +#define GR_NPROC_MSG "denied overstep of process limit by "
56325 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
56326 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
56327 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
56328 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
56329 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
56330 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
56331 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
56332 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
56333 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
56334 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
56335 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
56336 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
56337 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
56338 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
56339 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
56340 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
56341 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
56342 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
56343 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
56344 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
56345 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
56346 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
56347 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
56348 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
56349 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
56350 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
56351 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
56352 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
56353 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
56354 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
56355 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
56356 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
56357 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
56358 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
56359 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
56360 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
56361 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
56362 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
56363 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
56364 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
56365 +#define GR_NICE_CHROOT_MSG "denied priority change by "
56366 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
56367 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
56368 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
56369 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
56370 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
56371 +#define GR_TIME_MSG "time set by "
56372 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
56373 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
56374 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
56375 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
56376 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
56377 +#define GR_BIND_MSG "denied bind() by "
56378 +#define GR_CONNECT_MSG "denied connect() by "
56379 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
56380 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
56381 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
56382 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
56383 +#define GR_CAP_ACL_MSG "use of %s denied for "
56384 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
56385 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
56386 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
56387 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
56388 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
56389 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
56390 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
56391 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
56392 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
56393 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
56394 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
56395 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
56396 +#define GR_VM86_MSG "denied use of vm86 by "
56397 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
56398 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
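Most of the grmsg.h format strings above deliberately end in "by " (the logger appends the acting task's identity, the DEFAULTSECMSG/DEFAULTSECARGS pattern from grinternal.h) and cap their string arguments with precision specifiers such as %.950s or %.16s so that attacker-controlled paths and comm names cannot blow up a log line. The tiny stand-alone example below only demonstrates the truncation semantics those specifiers rely on; it is ordinary printf behaviour, nothing patch-specific.

/* %.Ns prints at most N bytes of the string argument; grmsg.h uses
 * this to bound untrusted path and comm strings. */
#include <stdio.h>

int main(void)
{
	const char *comm = "a-very-long-process-name";

	/* mirrors the "%.16s" used for task->comm in DEFAULTSECMSG */
	printf("comm truncated to 16 bytes: [%.16s]\n", comm);
	return 0;
}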
56399 diff -urNp linux-2.6.32.43/include/linux/grsecurity.h linux-2.6.32.43/include/linux/grsecurity.h
56400 --- linux-2.6.32.43/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
56401 +++ linux-2.6.32.43/include/linux/grsecurity.h 2011-07-14 20:35:17.000000000 -0400
56402 @@ -0,0 +1,215 @@
56403 +#ifndef GR_SECURITY_H
56404 +#define GR_SECURITY_H
56405 +#include <linux/fs.h>
56406 +#include <linux/fs_struct.h>
56407 +#include <linux/binfmts.h>
56408 +#include <linux/gracl.h>
56409 +#include <linux/compat.h>
56410 +
56411 +/* notify of brain-dead configs */
56412 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
56413 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
56414 +#endif
56415 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56416 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56417 +#endif
56418 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56419 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56420 +#endif
56421 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
56422 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
56423 +#endif
56424 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
56425 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
56426 +#endif
56427 +
56428 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
56429 +void gr_handle_brute_check(void);
56430 +void gr_handle_kernel_exploit(void);
56431 +int gr_process_user_ban(void);
56432 +
56433 +char gr_roletype_to_char(void);
56434 +
56435 +int gr_acl_enable_at_secure(void);
56436 +
56437 +int gr_check_user_change(int real, int effective, int fs);
56438 +int gr_check_group_change(int real, int effective, int fs);
56439 +
56440 +void gr_del_task_from_ip_table(struct task_struct *p);
56441 +
56442 +int gr_pid_is_chrooted(struct task_struct *p);
56443 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
56444 +int gr_handle_chroot_nice(void);
56445 +int gr_handle_chroot_sysctl(const int op);
56446 +int gr_handle_chroot_setpriority(struct task_struct *p,
56447 + const int niceval);
56448 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
56449 +int gr_handle_chroot_chroot(const struct dentry *dentry,
56450 + const struct vfsmount *mnt);
56451 +int gr_handle_chroot_caps(struct path *path);
56452 +void gr_handle_chroot_chdir(struct path *path);
56453 +int gr_handle_chroot_chmod(const struct dentry *dentry,
56454 + const struct vfsmount *mnt, const int mode);
56455 +int gr_handle_chroot_mknod(const struct dentry *dentry,
56456 + const struct vfsmount *mnt, const int mode);
56457 +int gr_handle_chroot_mount(const struct dentry *dentry,
56458 + const struct vfsmount *mnt,
56459 + const char *dev_name);
56460 +int gr_handle_chroot_pivot(void);
56461 +int gr_handle_chroot_unix(const pid_t pid);
56462 +
56463 +int gr_handle_rawio(const struct inode *inode);
56464 +int gr_handle_nproc(void);
56465 +
56466 +void gr_handle_ioperm(void);
56467 +void gr_handle_iopl(void);
56468 +
56469 +int gr_tpe_allow(const struct file *file);
56470 +
56471 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
56472 +void gr_clear_chroot_entries(struct task_struct *task);
56473 +
56474 +void gr_log_forkfail(const int retval);
56475 +void gr_log_timechange(void);
56476 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
56477 +void gr_log_chdir(const struct dentry *dentry,
56478 + const struct vfsmount *mnt);
56479 +void gr_log_chroot_exec(const struct dentry *dentry,
56480 + const struct vfsmount *mnt);
56481 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
56482 +#ifdef CONFIG_COMPAT
56483 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
56484 +#endif
56485 +void gr_log_remount(const char *devname, const int retval);
56486 +void gr_log_unmount(const char *devname, const int retval);
56487 +void gr_log_mount(const char *from, const char *to, const int retval);
56488 +void gr_log_textrel(struct vm_area_struct *vma);
56489 +void gr_log_rwxmmap(struct file *file);
56490 +void gr_log_rwxmprotect(struct file *file);
56491 +
56492 +int gr_handle_follow_link(const struct inode *parent,
56493 + const struct inode *inode,
56494 + const struct dentry *dentry,
56495 + const struct vfsmount *mnt);
56496 +int gr_handle_fifo(const struct dentry *dentry,
56497 + const struct vfsmount *mnt,
56498 + const struct dentry *dir, const int flag,
56499 + const int acc_mode);
56500 +int gr_handle_hardlink(const struct dentry *dentry,
56501 + const struct vfsmount *mnt,
56502 + struct inode *inode,
56503 + const int mode, const char *to);
56504 +
56505 +int gr_is_capable(const int cap);
56506 +int gr_is_capable_nolog(const int cap);
56507 +void gr_learn_resource(const struct task_struct *task, const int limit,
56508 + const unsigned long wanted, const int gt);
56509 +void gr_copy_label(struct task_struct *tsk);
56510 +void gr_handle_crash(struct task_struct *task, const int sig);
56511 +int gr_handle_signal(const struct task_struct *p, const int sig);
56512 +int gr_check_crash_uid(const uid_t uid);
56513 +int gr_check_protected_task(const struct task_struct *task);
56514 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
56515 +int gr_acl_handle_mmap(const struct file *file,
56516 + const unsigned long prot);
56517 +int gr_acl_handle_mprotect(const struct file *file,
56518 + const unsigned long prot);
56519 +int gr_check_hidden_task(const struct task_struct *tsk);
56520 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
56521 + const struct vfsmount *mnt);
56522 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
56523 + const struct vfsmount *mnt);
56524 +__u32 gr_acl_handle_access(const struct dentry *dentry,
56525 + const struct vfsmount *mnt, const int fmode);
56526 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
56527 + const struct vfsmount *mnt, mode_t mode);
56528 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
56529 + const struct vfsmount *mnt, mode_t mode);
56530 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
56531 + const struct vfsmount *mnt);
56532 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
56533 + const struct vfsmount *mnt);
56534 +int gr_handle_ptrace(struct task_struct *task, const long request);
56535 +int gr_handle_proc_ptrace(struct task_struct *task);
56536 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
56537 + const struct vfsmount *mnt);
56538 +int gr_check_crash_exec(const struct file *filp);
56539 +int gr_acl_is_enabled(void);
56540 +void gr_set_kernel_label(struct task_struct *task);
56541 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
56542 + const gid_t gid);
56543 +int gr_set_proc_label(const struct dentry *dentry,
56544 + const struct vfsmount *mnt,
56545 + const int unsafe_share);
56546 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
56547 + const struct vfsmount *mnt);
56548 +__u32 gr_acl_handle_open(const struct dentry *dentry,
56549 + const struct vfsmount *mnt, const int fmode);
56550 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
56551 + const struct dentry *p_dentry,
56552 + const struct vfsmount *p_mnt, const int fmode,
56553 + const int imode);
56554 +void gr_handle_create(const struct dentry *dentry,
56555 + const struct vfsmount *mnt);
56556 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
56557 + const struct dentry *parent_dentry,
56558 + const struct vfsmount *parent_mnt,
56559 + const int mode);
56560 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
56561 + const struct dentry *parent_dentry,
56562 + const struct vfsmount *parent_mnt);
56563 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
56564 + const struct vfsmount *mnt);
56565 +void gr_handle_delete(const ino_t ino, const dev_t dev);
56566 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
56567 + const struct vfsmount *mnt);
56568 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
56569 + const struct dentry *parent_dentry,
56570 + const struct vfsmount *parent_mnt,
56571 + const char *from);
56572 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
56573 + const struct dentry *parent_dentry,
56574 + const struct vfsmount *parent_mnt,
56575 + const struct dentry *old_dentry,
56576 + const struct vfsmount *old_mnt, const char *to);
56577 +int gr_acl_handle_rename(struct dentry *new_dentry,
56578 + struct dentry *parent_dentry,
56579 + const struct vfsmount *parent_mnt,
56580 + struct dentry *old_dentry,
56581 + struct inode *old_parent_inode,
56582 + struct vfsmount *old_mnt, const char *newname);
56583 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56584 + struct dentry *old_dentry,
56585 + struct dentry *new_dentry,
56586 + struct vfsmount *mnt, const __u8 replace);
56587 +__u32 gr_check_link(const struct dentry *new_dentry,
56588 + const struct dentry *parent_dentry,
56589 + const struct vfsmount *parent_mnt,
56590 + const struct dentry *old_dentry,
56591 + const struct vfsmount *old_mnt);
56592 +int gr_acl_handle_filldir(const struct file *file, const char *name,
56593 + const unsigned int namelen, const ino_t ino);
56594 +
56595 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
56596 + const struct vfsmount *mnt);
56597 +void gr_acl_handle_exit(void);
56598 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
56599 +int gr_acl_handle_procpidmem(const struct task_struct *task);
56600 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
56601 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
56602 +void gr_audit_ptrace(struct task_struct *task);
56603 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
56604 +
56605 +#ifdef CONFIG_GRKERNSEC
56606 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
56607 +void gr_handle_vm86(void);
56608 +void gr_handle_mem_readwrite(u64 from, u64 to);
56609 +
56610 +extern int grsec_enable_dmesg;
56611 +extern int grsec_disable_privio;
56612 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56613 +extern int grsec_enable_chroot_findtask;
56614 +#endif
56615 +#endif
56616 +
56617 +#endif
56618 diff -urNp linux-2.6.32.43/include/linux/hdpu_features.h linux-2.6.32.43/include/linux/hdpu_features.h
56619 --- linux-2.6.32.43/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
56620 +++ linux-2.6.32.43/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
56621 @@ -3,7 +3,7 @@
56622 struct cpustate_t {
56623 spinlock_t lock;
56624 int excl;
56625 - int open_count;
56626 + atomic_t open_count;
56627 unsigned char cached_val;
56628 int inited;
56629 unsigned long *set_addr;
56630 diff -urNp linux-2.6.32.43/include/linux/highmem.h linux-2.6.32.43/include/linux/highmem.h
56631 --- linux-2.6.32.43/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
56632 +++ linux-2.6.32.43/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
56633 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
56634 kunmap_atomic(kaddr, KM_USER0);
56635 }
56636
56637 +static inline void sanitize_highpage(struct page *page)
56638 +{
56639 + void *kaddr;
56640 + unsigned long flags;
56641 +
56642 + local_irq_save(flags);
56643 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
56644 + clear_page(kaddr);
56645 + kunmap_atomic(kaddr, KM_CLEARPAGE);
56646 + local_irq_restore(flags);
56647 +}
56648 +
56649 static inline void zero_user_segments(struct page *page,
56650 unsigned start1, unsigned end1,
56651 unsigned start2, unsigned end2)
56652 diff -urNp linux-2.6.32.43/include/linux/i2o.h linux-2.6.32.43/include/linux/i2o.h
56653 --- linux-2.6.32.43/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
56654 +++ linux-2.6.32.43/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
56655 @@ -564,7 +564,7 @@ struct i2o_controller {
56656 struct i2o_device *exec; /* Executive */
56657 #if BITS_PER_LONG == 64
56658 spinlock_t context_list_lock; /* lock for context_list */
56659 - atomic_t context_list_counter; /* needed for unique contexts */
56660 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
56661 struct list_head context_list; /* list of context id's
56662 and pointers */
56663 #endif
56664 diff -urNp linux-2.6.32.43/include/linux/init_task.h linux-2.6.32.43/include/linux/init_task.h
56665 --- linux-2.6.32.43/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
56666 +++ linux-2.6.32.43/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
56667 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
56668 #define INIT_IDS
56669 #endif
56670
56671 +#ifdef CONFIG_X86
56672 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
56673 +#else
56674 +#define INIT_TASK_THREAD_INFO
56675 +#endif
56676 +
56677 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
56678 /*
56679 * Because of the reduced scope of CAP_SETPCAP when filesystem
56680 @@ -156,6 +162,7 @@ extern struct cred init_cred;
56681 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
56682 .comm = "swapper", \
56683 .thread = INIT_THREAD, \
56684 + INIT_TASK_THREAD_INFO \
56685 .fs = &init_fs, \
56686 .files = &init_files, \
56687 .signal = &init_signals, \
56688 diff -urNp linux-2.6.32.43/include/linux/interrupt.h linux-2.6.32.43/include/linux/interrupt.h
56689 --- linux-2.6.32.43/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
56690 +++ linux-2.6.32.43/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
56691 @@ -363,7 +363,7 @@ enum
56692 /* map softirq index to softirq name. update 'softirq_to_name' in
56693 * kernel/softirq.c when adding a new softirq.
56694 */
56695 -extern char *softirq_to_name[NR_SOFTIRQS];
56696 +extern const char * const softirq_to_name[NR_SOFTIRQS];
56697
56698 /* softirq mask and active fields moved to irq_cpustat_t in
56699 * asm/hardirq.h to get better cache usage. KAO
56700 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56701
56702 struct softirq_action
56703 {
56704 - void (*action)(struct softirq_action *);
56705 + void (*action)(void);
56706 };
56707
56708 asmlinkage void do_softirq(void);
56709 asmlinkage void __do_softirq(void);
56710 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56711 +extern void open_softirq(int nr, void (*action)(void));
56712 extern void softirq_init(void);
56713 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
56714 extern void raise_softirq_irqoff(unsigned int nr);
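The interrupt.h hunk above narrows softirq handlers from void (*)(struct softirq_action *) to void (*)(void), so every open_softirq() registration has to change signature along with this patch. A self-contained sketch of what that looks like for a caller follows; demo_tasklet_action, demo_vec, and the stub open_softirq() are invented for the illustration and stand in for the real kernel registration path.

/* Sketch of the post-patch handler signature; the struct layout and
 * registration stub below are demo-only analogues of the kernel's. */
#include <stdio.h>

struct softirq_action { void (*action)(void); };	/* patched layout */

static struct softirq_action demo_vec[1];

static void open_softirq(int nr, void (*action)(void))
{
	demo_vec[nr].action = action;
}

static void demo_tasklet_action(void)	/* was: (struct softirq_action *a) */
{
	printf("softirq handler ran\n");
}

int main(void)
{
	open_softirq(0, demo_tasklet_action);
	demo_vec[0].action();
	return 0;
}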
56715 diff -urNp linux-2.6.32.43/include/linux/irq.h linux-2.6.32.43/include/linux/irq.h
56716 --- linux-2.6.32.43/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
56717 +++ linux-2.6.32.43/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
56718 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
56719 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
56720 bool boot)
56721 {
56722 +#ifdef CONFIG_CPUMASK_OFFSTACK
56723 gfp_t gfp = GFP_ATOMIC;
56724
56725 if (boot)
56726 gfp = GFP_NOWAIT;
56727
56728 -#ifdef CONFIG_CPUMASK_OFFSTACK
56729 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
56730 return false;
56731
56732 diff -urNp linux-2.6.32.43/include/linux/kallsyms.h linux-2.6.32.43/include/linux/kallsyms.h
56733 --- linux-2.6.32.43/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
56734 +++ linux-2.6.32.43/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
56735 @@ -15,7 +15,8 @@
56736
56737 struct module;
56738
56739 -#ifdef CONFIG_KALLSYMS
56740 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56741 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56742 /* Lookup the address for a symbol. Returns 0 if not found. */
56743 unsigned long kallsyms_lookup_name(const char *name);
56744
56745 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
56746 /* Stupid that this does nothing, but I didn't create this mess. */
56747 #define __print_symbol(fmt, addr)
56748 #endif /*CONFIG_KALLSYMS*/
56749 +#else /* when included by kallsyms.c, vsnprintf.c, or
56750 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56751 +extern void __print_symbol(const char *fmt, unsigned long address);
56752 +extern int sprint_symbol(char *buffer, unsigned long address);
56753 +const char *kallsyms_lookup(unsigned long addr,
56754 + unsigned long *symbolsize,
56755 + unsigned long *offset,
56756 + char **modname, char *namebuf);
56757 +#endif
56758
56759 /* This macro allows us to keep printk typechecking */
56760 static void __check_printsym_format(const char *fmt, ...)
56761 diff -urNp linux-2.6.32.43/include/linux/kgdb.h linux-2.6.32.43/include/linux/kgdb.h
56762 --- linux-2.6.32.43/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
56763 +++ linux-2.6.32.43/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
56764 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
56765
56766 extern int kgdb_connected;
56767
56768 -extern atomic_t kgdb_setting_breakpoint;
56769 -extern atomic_t kgdb_cpu_doing_single_step;
56770 +extern atomic_unchecked_t kgdb_setting_breakpoint;
56771 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56772
56773 extern struct task_struct *kgdb_usethread;
56774 extern struct task_struct *kgdb_contthread;
56775 @@ -251,20 +251,20 @@ struct kgdb_arch {
56776 */
56777 struct kgdb_io {
56778 const char *name;
56779 - int (*read_char) (void);
56780 - void (*write_char) (u8);
56781 - void (*flush) (void);
56782 - int (*init) (void);
56783 - void (*pre_exception) (void);
56784 - void (*post_exception) (void);
56785 + int (* const read_char) (void);
56786 + void (* const write_char) (u8);
56787 + void (* const flush) (void);
56788 + int (* const init) (void);
56789 + void (* const pre_exception) (void);
56790 + void (* const post_exception) (void);
56791 };
56792
56793 -extern struct kgdb_arch arch_kgdb_ops;
56794 +extern const struct kgdb_arch arch_kgdb_ops;
56795
56796 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
56797
56798 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
56799 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
56800 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
56801 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
56802
56803 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
56804 extern int kgdb_mem2hex(char *mem, char *buf, int count);
56805 diff -urNp linux-2.6.32.43/include/linux/kmod.h linux-2.6.32.43/include/linux/kmod.h
56806 --- linux-2.6.32.43/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56807 +++ linux-2.6.32.43/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56808 @@ -31,6 +31,8 @@
56809 * usually useless though. */
56810 extern int __request_module(bool wait, const char *name, ...) \
56811 __attribute__((format(printf, 2, 3)));
56812 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56813 + __attribute__((format(printf, 3, 4)));
56814 #define request_module(mod...) __request_module(true, mod)
56815 #define request_module_nowait(mod...) __request_module(false, mod)
56816 #define try_then_request_module(x, mod...) \
56817 diff -urNp linux-2.6.32.43/include/linux/kobject.h linux-2.6.32.43/include/linux/kobject.h
56818 --- linux-2.6.32.43/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56819 +++ linux-2.6.32.43/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56820 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56821
56822 struct kobj_type {
56823 void (*release)(struct kobject *kobj);
56824 - struct sysfs_ops *sysfs_ops;
56825 + const struct sysfs_ops *sysfs_ops;
56826 struct attribute **default_attrs;
56827 };
56828
56829 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
56830 };
56831
56832 struct kset_uevent_ops {
56833 - int (*filter)(struct kset *kset, struct kobject *kobj);
56834 - const char *(*name)(struct kset *kset, struct kobject *kobj);
56835 - int (*uevent)(struct kset *kset, struct kobject *kobj,
56836 + int (* const filter)(struct kset *kset, struct kobject *kobj);
56837 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
56838 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
56839 struct kobj_uevent_env *env);
56840 };
56841
56842 @@ -132,7 +132,7 @@ struct kobj_attribute {
56843 const char *buf, size_t count);
56844 };
56845
56846 -extern struct sysfs_ops kobj_sysfs_ops;
56847 +extern const struct sysfs_ops kobj_sysfs_ops;
56848
56849 /**
56850 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56851 @@ -155,14 +155,14 @@ struct kset {
56852 struct list_head list;
56853 spinlock_t list_lock;
56854 struct kobject kobj;
56855 - struct kset_uevent_ops *uevent_ops;
56856 + const struct kset_uevent_ops *uevent_ops;
56857 };
56858
56859 extern void kset_init(struct kset *kset);
56860 extern int __must_check kset_register(struct kset *kset);
56861 extern void kset_unregister(struct kset *kset);
56862 extern struct kset * __must_check kset_create_and_add(const char *name,
56863 - struct kset_uevent_ops *u,
56864 + const struct kset_uevent_ops *u,
56865 struct kobject *parent_kobj);
56866
56867 static inline struct kset *to_kset(struct kobject *kobj)
56868 diff -urNp linux-2.6.32.43/include/linux/kvm_host.h linux-2.6.32.43/include/linux/kvm_host.h
56869 --- linux-2.6.32.43/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56870 +++ linux-2.6.32.43/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56871 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56872 void vcpu_load(struct kvm_vcpu *vcpu);
56873 void vcpu_put(struct kvm_vcpu *vcpu);
56874
56875 -int kvm_init(void *opaque, unsigned int vcpu_size,
56876 +int kvm_init(const void *opaque, unsigned int vcpu_size,
56877 struct module *module);
56878 void kvm_exit(void);
56879
56880 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56881 struct kvm_guest_debug *dbg);
56882 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56883
56884 -int kvm_arch_init(void *opaque);
56885 +int kvm_arch_init(const void *opaque);
56886 void kvm_arch_exit(void);
56887
56888 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56889 diff -urNp linux-2.6.32.43/include/linux/libata.h linux-2.6.32.43/include/linux/libata.h
56890 --- linux-2.6.32.43/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56891 +++ linux-2.6.32.43/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56892 @@ -525,11 +525,11 @@ struct ata_ioports {
56893
56894 struct ata_host {
56895 spinlock_t lock;
56896 - struct device *dev;
56897 + struct device *dev;
56898 void __iomem * const *iomap;
56899 unsigned int n_ports;
56900 void *private_data;
56901 - struct ata_port_operations *ops;
56902 + const struct ata_port_operations *ops;
56903 unsigned long flags;
56904 #ifdef CONFIG_ATA_ACPI
56905 acpi_handle acpi_handle;
56906 @@ -710,7 +710,7 @@ struct ata_link {
56907
56908 struct ata_port {
56909 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56910 - struct ata_port_operations *ops;
56911 + const struct ata_port_operations *ops;
56912 spinlock_t *lock;
56913 /* Flags owned by the EH context. Only EH should touch these once the
56914 port is active */
56915 @@ -892,7 +892,7 @@ struct ata_port_info {
56916 unsigned long pio_mask;
56917 unsigned long mwdma_mask;
56918 unsigned long udma_mask;
56919 - struct ata_port_operations *port_ops;
56920 + const struct ata_port_operations *port_ops;
56921 void *private_data;
56922 };
56923
56924 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56925 extern const unsigned long sata_deb_timing_hotplug[];
56926 extern const unsigned long sata_deb_timing_long[];
56927
56928 -extern struct ata_port_operations ata_dummy_port_ops;
56929 +extern const struct ata_port_operations ata_dummy_port_ops;
56930 extern const struct ata_port_info ata_dummy_port_info;
56931
56932 static inline const unsigned long *
56933 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56934 struct scsi_host_template *sht);
56935 extern void ata_host_detach(struct ata_host *host);
56936 extern void ata_host_init(struct ata_host *, struct device *,
56937 - unsigned long, struct ata_port_operations *);
56938 + unsigned long, const struct ata_port_operations *);
56939 extern int ata_scsi_detect(struct scsi_host_template *sht);
56940 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56941 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56942 diff -urNp linux-2.6.32.43/include/linux/lockd/bind.h linux-2.6.32.43/include/linux/lockd/bind.h
56943 --- linux-2.6.32.43/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56944 +++ linux-2.6.32.43/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56945 @@ -23,13 +23,13 @@ struct svc_rqst;
56946 * This is the set of functions for lockd->nfsd communication
56947 */
56948 struct nlmsvc_binding {
56949 - __be32 (*fopen)(struct svc_rqst *,
56950 + __be32 (* const fopen)(struct svc_rqst *,
56951 struct nfs_fh *,
56952 struct file **);
56953 - void (*fclose)(struct file *);
56954 + void (* const fclose)(struct file *);
56955 };
56956
56957 -extern struct nlmsvc_binding * nlmsvc_ops;
56958 +extern const struct nlmsvc_binding * nlmsvc_ops;
56959
56960 /*
56961 * Similar to nfs_client_initdata, but without the NFS-specific
56962 diff -urNp linux-2.6.32.43/include/linux/mm.h linux-2.6.32.43/include/linux/mm.h
56963 --- linux-2.6.32.43/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56964 +++ linux-2.6.32.43/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56965 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56966
56967 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56968 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56969 +
56970 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56971 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56972 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56973 +#else
56974 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56975 +#endif
56976 +
56977 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56978 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56979
56980 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56981 int set_page_dirty_lock(struct page *page);
56982 int clear_page_dirty_for_io(struct page *page);
56983
56984 -/* Is the vma a continuation of the stack vma above it? */
56985 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56986 -{
56987 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56988 -}
56989 -
56990 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56991 unsigned long old_addr, struct vm_area_struct *new_vma,
56992 unsigned long new_addr, unsigned long len);
56993 @@ -890,6 +891,8 @@ struct shrinker {
56994 extern void register_shrinker(struct shrinker *);
56995 extern void unregister_shrinker(struct shrinker *);
56996
56997 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56998 +
56999 int vma_wants_writenotify(struct vm_area_struct *vma);
57000
57001 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
57002 @@ -1162,6 +1165,7 @@ out:
57003 }
57004
57005 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
57006 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
57007
57008 extern unsigned long do_brk(unsigned long, unsigned long);
57009
57010 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
57011 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
57012 struct vm_area_struct **pprev);
57013
57014 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
57015 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
57016 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
57017 +
57018 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
57019 NULL if none. Assume start_addr < end_addr. */
57020 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
57021 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
57022 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
57023 }
57024
57025 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
57026 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
57027 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
57028 unsigned long pfn, unsigned long size, pgprot_t);
57029 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
57030 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
57031 extern int sysctl_memory_failure_early_kill;
57032 extern int sysctl_memory_failure_recovery;
57033 -extern atomic_long_t mce_bad_pages;
57034 +extern atomic_long_unchecked_t mce_bad_pages;
57035 +
57036 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
57037 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
57038 +#else
57039 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
57040 +#endif
57041
57042 #endif /* __KERNEL__ */
57043 #endif /* _LINUX_MM_H */
57044 diff -urNp linux-2.6.32.43/include/linux/mm_types.h linux-2.6.32.43/include/linux/mm_types.h
57045 --- linux-2.6.32.43/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
57046 +++ linux-2.6.32.43/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
57047 @@ -186,6 +186,8 @@ struct vm_area_struct {
57048 #ifdef CONFIG_NUMA
57049 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
57050 #endif
57051 +
57052 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
57053 };
57054
57055 struct core_thread {
57056 @@ -287,6 +289,24 @@ struct mm_struct {
57057 #ifdef CONFIG_MMU_NOTIFIER
57058 struct mmu_notifier_mm *mmu_notifier_mm;
57059 #endif
57060 +
57061 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57062 + unsigned long pax_flags;
57063 +#endif
57064 +
57065 +#ifdef CONFIG_PAX_DLRESOLVE
57066 + unsigned long call_dl_resolve;
57067 +#endif
57068 +
57069 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
57070 + unsigned long call_syscall;
57071 +#endif
57072 +
57073 +#ifdef CONFIG_PAX_ASLR
57074 + unsigned long delta_mmap; /* randomized offset */
57075 + unsigned long delta_stack; /* randomized offset */
57076 +#endif
57077 +
57078 };
57079
57080 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
57081 diff -urNp linux-2.6.32.43/include/linux/mmu_notifier.h linux-2.6.32.43/include/linux/mmu_notifier.h
57082 --- linux-2.6.32.43/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
57083 +++ linux-2.6.32.43/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
57084 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
57085 */
57086 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
57087 ({ \
57088 - pte_t __pte; \
57089 + pte_t ___pte; \
57090 struct vm_area_struct *___vma = __vma; \
57091 unsigned long ___address = __address; \
57092 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
57093 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
57094 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
57095 - __pte; \
57096 + ___pte; \
57097 })
57098
57099 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
57100 diff -urNp linux-2.6.32.43/include/linux/mmzone.h linux-2.6.32.43/include/linux/mmzone.h
57101 --- linux-2.6.32.43/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
57102 +++ linux-2.6.32.43/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
57103 @@ -350,7 +350,7 @@ struct zone {
57104 unsigned long flags; /* zone flags, see below */
57105
57106 /* Zone statistics */
57107 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57108 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57109
57110 /*
57111 * prev_priority holds the scanning priority for this zone. It is
57112 diff -urNp linux-2.6.32.43/include/linux/mod_devicetable.h linux-2.6.32.43/include/linux/mod_devicetable.h
57113 --- linux-2.6.32.43/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
57114 +++ linux-2.6.32.43/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
57115 @@ -12,7 +12,7 @@
57116 typedef unsigned long kernel_ulong_t;
57117 #endif
57118
57119 -#define PCI_ANY_ID (~0)
57120 +#define PCI_ANY_ID ((__u16)~0)
57121
57122 struct pci_device_id {
57123 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
57124 @@ -131,7 +131,7 @@ struct usb_device_id {
57125 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
57126 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
57127
57128 -#define HID_ANY_ID (~0)
57129 +#define HID_ANY_ID (~0U)
57130
57131 struct hid_device_id {
57132 __u16 bus;
57133 diff -urNp linux-2.6.32.43/include/linux/module.h linux-2.6.32.43/include/linux/module.h
57134 --- linux-2.6.32.43/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
57135 +++ linux-2.6.32.43/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
57136 @@ -287,16 +287,16 @@ struct module
57137 int (*init)(void);
57138
57139 /* If this is non-NULL, vfree after init() returns */
57140 - void *module_init;
57141 + void *module_init_rx, *module_init_rw;
57142
57143 /* Here is the actual code + data, vfree'd on unload. */
57144 - void *module_core;
57145 + void *module_core_rx, *module_core_rw;
57146
57147 /* Here are the sizes of the init and core sections */
57148 - unsigned int init_size, core_size;
57149 + unsigned int init_size_rw, core_size_rw;
57150
57151 /* The size of the executable code in each section. */
57152 - unsigned int init_text_size, core_text_size;
57153 + unsigned int init_size_rx, core_size_rx;
57154
57155 /* Arch-specific module values */
57156 struct mod_arch_specific arch;
57157 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
57158 bool is_module_address(unsigned long addr);
57159 bool is_module_text_address(unsigned long addr);
57160
57161 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
57162 +{
57163 +
57164 +#ifdef CONFIG_PAX_KERNEXEC
57165 + if (ktla_ktva(addr) >= (unsigned long)start &&
57166 + ktla_ktva(addr) < (unsigned long)start + size)
57167 + return 1;
57168 +#endif
57169 +
57170 + return ((void *)addr >= start && (void *)addr < start + size);
57171 +}
57172 +
57173 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
57174 +{
57175 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
57176 +}
57177 +
57178 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
57179 +{
57180 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
57181 +}
57182 +
57183 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
57184 +{
57185 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
57186 +}
57187 +
57188 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
57189 +{
57190 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
57191 +}
57192 +
57193 static inline int within_module_core(unsigned long addr, struct module *mod)
57194 {
57195 - return (unsigned long)mod->module_core <= addr &&
57196 - addr < (unsigned long)mod->module_core + mod->core_size;
57197 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
57198 }
57199
57200 static inline int within_module_init(unsigned long addr, struct module *mod)
57201 {
57202 - return (unsigned long)mod->module_init <= addr &&
57203 - addr < (unsigned long)mod->module_init + mod->init_size;
57204 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
57205 }
57206
57207 /* Search for module by name: must hold module_mutex. */
57208 diff -urNp linux-2.6.32.43/include/linux/moduleloader.h linux-2.6.32.43/include/linux/moduleloader.h
57209 --- linux-2.6.32.43/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
57210 +++ linux-2.6.32.43/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
57211 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
57212 sections. Returns NULL on failure. */
57213 void *module_alloc(unsigned long size);
57214
57215 +#ifdef CONFIG_PAX_KERNEXEC
57216 +void *module_alloc_exec(unsigned long size);
57217 +#else
57218 +#define module_alloc_exec(x) module_alloc(x)
57219 +#endif
57220 +
57221 /* Free memory returned from module_alloc. */
57222 void module_free(struct module *mod, void *module_region);
57223
57224 +#ifdef CONFIG_PAX_KERNEXEC
57225 +void module_free_exec(struct module *mod, void *module_region);
57226 +#else
57227 +#define module_free_exec(x, y) module_free((x), (y))
57228 +#endif
57229 +
57230 /* Apply the given relocation to the (simplified) ELF. Return -error
57231 or 0. */
57232 int apply_relocate(Elf_Shdr *sechdrs,
57233 diff -urNp linux-2.6.32.43/include/linux/moduleparam.h linux-2.6.32.43/include/linux/moduleparam.h
57234 --- linux-2.6.32.43/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
57235 +++ linux-2.6.32.43/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
57236 @@ -132,7 +132,7 @@ struct kparam_array
57237
57238 /* Actually copy string: maxlen param is usually sizeof(string). */
57239 #define module_param_string(name, string, len, perm) \
57240 - static const struct kparam_string __param_string_##name \
57241 + static const struct kparam_string __param_string_##name __used \
57242 = { len, string }; \
57243 __module_param_call(MODULE_PARAM_PREFIX, name, \
57244 param_set_copystring, param_get_string, \
57245 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
57246
57247 /* Comma-separated array: *nump is set to number they actually specified. */
57248 #define module_param_array_named(name, array, type, nump, perm) \
57249 - static const struct kparam_array __param_arr_##name \
57250 + static const struct kparam_array __param_arr_##name __used \
57251 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
57252 sizeof(array[0]), array }; \
57253 __module_param_call(MODULE_PARAM_PREFIX, name, \
57254 diff -urNp linux-2.6.32.43/include/linux/mutex.h linux-2.6.32.43/include/linux/mutex.h
57255 --- linux-2.6.32.43/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
57256 +++ linux-2.6.32.43/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
57257 @@ -51,7 +51,7 @@ struct mutex {
57258 spinlock_t wait_lock;
57259 struct list_head wait_list;
57260 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
57261 - struct thread_info *owner;
57262 + struct task_struct *owner;
57263 #endif
57264 #ifdef CONFIG_DEBUG_MUTEXES
57265 const char *name;
57266 diff -urNp linux-2.6.32.43/include/linux/namei.h linux-2.6.32.43/include/linux/namei.h
57267 --- linux-2.6.32.43/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
57268 +++ linux-2.6.32.43/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
57269 @@ -22,7 +22,7 @@ struct nameidata {
57270 unsigned int flags;
57271 int last_type;
57272 unsigned depth;
57273 - char *saved_names[MAX_NESTED_LINKS + 1];
57274 + const char *saved_names[MAX_NESTED_LINKS + 1];
57275
57276 /* Intent data */
57277 union {
57278 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
57279 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
57280 extern void unlock_rename(struct dentry *, struct dentry *);
57281
57282 -static inline void nd_set_link(struct nameidata *nd, char *path)
57283 +static inline void nd_set_link(struct nameidata *nd, const char *path)
57284 {
57285 nd->saved_names[nd->depth] = path;
57286 }
57287
57288 -static inline char *nd_get_link(struct nameidata *nd)
57289 +static inline const char *nd_get_link(const struct nameidata *nd)
57290 {
57291 return nd->saved_names[nd->depth];
57292 }
57293 diff -urNp linux-2.6.32.43/include/linux/netfilter/xt_gradm.h linux-2.6.32.43/include/linux/netfilter/xt_gradm.h
57294 --- linux-2.6.32.43/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
57295 +++ linux-2.6.32.43/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
57296 @@ -0,0 +1,9 @@
57297 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
57298 +#define _LINUX_NETFILTER_XT_GRADM_H 1
57299 +
57300 +struct xt_gradm_mtinfo {
57301 + __u16 flags;
57302 + __u16 invflags;
57303 +};
57304 +
57305 +#endif
57306 diff -urNp linux-2.6.32.43/include/linux/nodemask.h linux-2.6.32.43/include/linux/nodemask.h
57307 --- linux-2.6.32.43/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
57308 +++ linux-2.6.32.43/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
57309 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
57310
57311 #define any_online_node(mask) \
57312 ({ \
57313 - int node; \
57314 - for_each_node_mask(node, (mask)) \
57315 - if (node_online(node)) \
57316 + int __node; \
57317 + for_each_node_mask(__node, (mask)) \
57318 + if (node_online(__node)) \
57319 break; \
57320 - node; \
57321 + __node; \
57322 })
57323
57324 #define num_online_nodes() num_node_state(N_ONLINE)
57325 diff -urNp linux-2.6.32.43/include/linux/oprofile.h linux-2.6.32.43/include/linux/oprofile.h
57326 --- linux-2.6.32.43/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
57327 +++ linux-2.6.32.43/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
57328 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
57329 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
57330 char const * name, ulong * val);
57331
57332 -/** Create a file for read-only access to an atomic_t. */
57333 +/** Create a file for read-only access to an atomic_unchecked_t. */
57334 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
57335 - char const * name, atomic_t * val);
57336 + char const * name, atomic_unchecked_t * val);
57337
57338 /** create a directory */
57339 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
57340 diff -urNp linux-2.6.32.43/include/linux/perf_event.h linux-2.6.32.43/include/linux/perf_event.h
57341 --- linux-2.6.32.43/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
57342 +++ linux-2.6.32.43/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
57343 @@ -476,7 +476,7 @@ struct hw_perf_event {
57344 struct hrtimer hrtimer;
57345 };
57346 };
57347 - atomic64_t prev_count;
57348 + atomic64_unchecked_t prev_count;
57349 u64 sample_period;
57350 u64 last_period;
57351 atomic64_t period_left;
57352 @@ -557,7 +557,7 @@ struct perf_event {
57353 const struct pmu *pmu;
57354
57355 enum perf_event_active_state state;
57356 - atomic64_t count;
57357 + atomic64_unchecked_t count;
57358
57359 /*
57360 * These are the total time in nanoseconds that the event
57361 @@ -595,8 +595,8 @@ struct perf_event {
57362 * These accumulate total time (in nanoseconds) that children
57363 * events have been enabled and running, respectively.
57364 */
57365 - atomic64_t child_total_time_enabled;
57366 - atomic64_t child_total_time_running;
57367 + atomic64_unchecked_t child_total_time_enabled;
57368 + atomic64_unchecked_t child_total_time_running;
57369
57370 /*
57371 * Protect attach/detach and child_list:
57372 diff -urNp linux-2.6.32.43/include/linux/pipe_fs_i.h linux-2.6.32.43/include/linux/pipe_fs_i.h
57373 --- linux-2.6.32.43/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
57374 +++ linux-2.6.32.43/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
57375 @@ -46,9 +46,9 @@ struct pipe_inode_info {
57376 wait_queue_head_t wait;
57377 unsigned int nrbufs, curbuf;
57378 struct page *tmp_page;
57379 - unsigned int readers;
57380 - unsigned int writers;
57381 - unsigned int waiting_writers;
57382 + atomic_t readers;
57383 + atomic_t writers;
57384 + atomic_t waiting_writers;
57385 unsigned int r_counter;
57386 unsigned int w_counter;
57387 struct fasync_struct *fasync_readers;
57388 diff -urNp linux-2.6.32.43/include/linux/poison.h linux-2.6.32.43/include/linux/poison.h
57389 --- linux-2.6.32.43/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
57390 +++ linux-2.6.32.43/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
57391 @@ -19,8 +19,8 @@
57392 * under normal circumstances, used to verify that nobody uses
57393 * non-initialized list entries.
57394 */
57395 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
57396 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
57397 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
57398 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
57399
57400 /********** include/linux/timer.h **********/
57401 /*
57402 diff -urNp linux-2.6.32.43/include/linux/proc_fs.h linux-2.6.32.43/include/linux/proc_fs.h
57403 --- linux-2.6.32.43/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
57404 +++ linux-2.6.32.43/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
57405 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
57406 return proc_create_data(name, mode, parent, proc_fops, NULL);
57407 }
57408
57409 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
57410 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
57411 +{
57412 +#ifdef CONFIG_GRKERNSEC_PROC_USER
57413 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
57414 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57415 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
57416 +#else
57417 + return proc_create_data(name, mode, parent, proc_fops, NULL);
57418 +#endif
57419 +}
57420 +
57421 +
57422 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
57423 mode_t mode, struct proc_dir_entry *base,
57424 read_proc_t *read_proc, void * data)
57425 diff -urNp linux-2.6.32.43/include/linux/ptrace.h linux-2.6.32.43/include/linux/ptrace.h
57426 --- linux-2.6.32.43/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
57427 +++ linux-2.6.32.43/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
57428 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
57429 extern void exit_ptrace(struct task_struct *tracer);
57430 #define PTRACE_MODE_READ 1
57431 #define PTRACE_MODE_ATTACH 2
57432 -/* Returns 0 on success, -errno on denial. */
57433 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
57434 /* Returns true on success, false on denial. */
57435 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
57436 +/* Returns true on success, false on denial. */
57437 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
57438
57439 static inline int ptrace_reparented(struct task_struct *child)
57440 {
57441 diff -urNp linux-2.6.32.43/include/linux/random.h linux-2.6.32.43/include/linux/random.h
57442 --- linux-2.6.32.43/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
57443 +++ linux-2.6.32.43/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
57444 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
57445 u32 random32(void);
57446 void srandom32(u32 seed);
57447
57448 +static inline unsigned long pax_get_random_long(void)
57449 +{
57450 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
57451 +}
57452 +
57453 #endif /* __KERNEL___ */
57454
57455 #endif /* _LINUX_RANDOM_H */
57456 diff -urNp linux-2.6.32.43/include/linux/reboot.h linux-2.6.32.43/include/linux/reboot.h
57457 --- linux-2.6.32.43/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
57458 +++ linux-2.6.32.43/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
57459 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
57460 * Architecture-specific implementations of sys_reboot commands.
57461 */
57462
57463 -extern void machine_restart(char *cmd);
57464 -extern void machine_halt(void);
57465 -extern void machine_power_off(void);
57466 +extern void machine_restart(char *cmd) __noreturn;
57467 +extern void machine_halt(void) __noreturn;
57468 +extern void machine_power_off(void) __noreturn;
57469
57470 extern void machine_shutdown(void);
57471 struct pt_regs;
57472 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
57473 */
57474
57475 extern void kernel_restart_prepare(char *cmd);
57476 -extern void kernel_restart(char *cmd);
57477 -extern void kernel_halt(void);
57478 -extern void kernel_power_off(void);
57479 +extern void kernel_restart(char *cmd) __noreturn;
57480 +extern void kernel_halt(void) __noreturn;
57481 +extern void kernel_power_off(void) __noreturn;
57482
57483 void ctrl_alt_del(void);
57484
57485 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
57486 * Emergency restart, callable from an interrupt handler.
57487 */
57488
57489 -extern void emergency_restart(void);
57490 +extern void emergency_restart(void) __noreturn;
57491 #include <asm/emergency-restart.h>
57492
57493 #endif
57494 diff -urNp linux-2.6.32.43/include/linux/reiserfs_fs.h linux-2.6.32.43/include/linux/reiserfs_fs.h
57495 --- linux-2.6.32.43/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
57496 +++ linux-2.6.32.43/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
57497 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
57498 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
57499
57500 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
57501 -#define get_generation(s) atomic_read (&fs_generation(s))
57502 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
57503 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
57504 #define __fs_changed(gen,s) (gen != get_generation (s))
57505 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
57506 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
57507 */
57508
57509 struct item_operations {
57510 - int (*bytes_number) (struct item_head * ih, int block_size);
57511 - void (*decrement_key) (struct cpu_key *);
57512 - int (*is_left_mergeable) (struct reiserfs_key * ih,
57513 + int (* const bytes_number) (struct item_head * ih, int block_size);
57514 + void (* const decrement_key) (struct cpu_key *);
57515 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
57516 unsigned long bsize);
57517 - void (*print_item) (struct item_head *, char *item);
57518 - void (*check_item) (struct item_head *, char *item);
57519 + void (* const print_item) (struct item_head *, char *item);
57520 + void (* const check_item) (struct item_head *, char *item);
57521
57522 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57523 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57524 int is_affected, int insert_size);
57525 - int (*check_left) (struct virtual_item * vi, int free,
57526 + int (* const check_left) (struct virtual_item * vi, int free,
57527 int start_skip, int end_skip);
57528 - int (*check_right) (struct virtual_item * vi, int free);
57529 - int (*part_size) (struct virtual_item * vi, int from, int to);
57530 - int (*unit_num) (struct virtual_item * vi);
57531 - void (*print_vi) (struct virtual_item * vi);
57532 + int (* const check_right) (struct virtual_item * vi, int free);
57533 + int (* const part_size) (struct virtual_item * vi, int from, int to);
57534 + int (* const unit_num) (struct virtual_item * vi);
57535 + void (* const print_vi) (struct virtual_item * vi);
57536 };
57537
57538 -extern struct item_operations *item_ops[TYPE_ANY + 1];
57539 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
57540
57541 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
57542 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
57543 diff -urNp linux-2.6.32.43/include/linux/reiserfs_fs_sb.h linux-2.6.32.43/include/linux/reiserfs_fs_sb.h
57544 --- linux-2.6.32.43/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
57545 +++ linux-2.6.32.43/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
57546 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
57547 /* Comment? -Hans */
57548 wait_queue_head_t s_wait;
57549 /* To be obsoleted soon by per buffer seals.. -Hans */
57550 - atomic_t s_generation_counter; // increased by one every time the
57551 + atomic_unchecked_t s_generation_counter; // increased by one every time the
57552 // tree gets re-balanced
57553 unsigned long s_properties; /* File system properties. Currently holds
57554 on-disk FS format */
57555 diff -urNp linux-2.6.32.43/include/linux/sched.h linux-2.6.32.43/include/linux/sched.h
57556 --- linux-2.6.32.43/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
57557 +++ linux-2.6.32.43/include/linux/sched.h 2011-07-14 19:16:12.000000000 -0400
57558 @@ -101,6 +101,7 @@ struct bio;
57559 struct fs_struct;
57560 struct bts_context;
57561 struct perf_event_context;
57562 +struct linux_binprm;
57563
57564 /*
57565 * List of flags we want to share for kernel threads,
57566 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
57567 extern signed long schedule_timeout_uninterruptible(signed long timeout);
57568 asmlinkage void __schedule(void);
57569 asmlinkage void schedule(void);
57570 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
57571 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
57572
57573 struct nsproxy;
57574 struct user_namespace;
57575 @@ -371,9 +372,12 @@ struct user_namespace;
57576 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
57577
57578 extern int sysctl_max_map_count;
57579 +extern unsigned long sysctl_heap_stack_gap;
57580
57581 #include <linux/aio.h>
57582
57583 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
57584 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57585 extern unsigned long
57586 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
57587 unsigned long, unsigned long);
57588 @@ -666,6 +670,16 @@ struct signal_struct {
57589 struct tty_audit_buf *tty_audit_buf;
57590 #endif
57591
57592 +#ifdef CONFIG_GRKERNSEC
57593 + u32 curr_ip;
57594 + u32 saved_ip;
57595 + u32 gr_saddr;
57596 + u32 gr_daddr;
57597 + u16 gr_sport;
57598 + u16 gr_dport;
57599 + u8 used_accept:1;
57600 +#endif
57601 +
57602 int oom_adj; /* OOM kill score adjustment (bit shift) */
57603 };
57604
57605 @@ -723,6 +737,11 @@ struct user_struct {
57606 struct key *session_keyring; /* UID's default session keyring */
57607 #endif
57608
57609 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57610 + unsigned int banned;
57611 + unsigned long ban_expires;
57612 +#endif
57613 +
57614 /* Hash table maintenance information */
57615 struct hlist_node uidhash_node;
57616 uid_t uid;
57617 @@ -1328,8 +1347,8 @@ struct task_struct {
57618 struct list_head thread_group;
57619
57620 struct completion *vfork_done; /* for vfork() */
57621 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
57622 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57623 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
57624 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57625
57626 cputime_t utime, stime, utimescaled, stimescaled;
57627 cputime_t gtime;
57628 @@ -1343,16 +1362,6 @@ struct task_struct {
57629 struct task_cputime cputime_expires;
57630 struct list_head cpu_timers[3];
57631
57632 -/* process credentials */
57633 - const struct cred *real_cred; /* objective and real subjective task
57634 - * credentials (COW) */
57635 - const struct cred *cred; /* effective (overridable) subjective task
57636 - * credentials (COW) */
57637 - struct mutex cred_guard_mutex; /* guard against foreign influences on
57638 - * credential calculations
57639 - * (notably. ptrace) */
57640 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57641 -
57642 char comm[TASK_COMM_LEN]; /* executable name excluding path
57643 - access with [gs]et_task_comm (which lock
57644 it with task_lock())
57645 @@ -1369,6 +1378,10 @@ struct task_struct {
57646 #endif
57647 /* CPU-specific state of this task */
57648 struct thread_struct thread;
57649 +/* thread_info moved to task_struct */
57650 +#ifdef CONFIG_X86
57651 + struct thread_info tinfo;
57652 +#endif
57653 /* filesystem information */
57654 struct fs_struct *fs;
57655 /* open file information */
57656 @@ -1436,6 +1449,15 @@ struct task_struct {
57657 int hardirq_context;
57658 int softirq_context;
57659 #endif
57660 +
57661 +/* process credentials */
57662 + const struct cred *real_cred; /* objective and real subjective task
57663 + * credentials (COW) */
57664 + struct mutex cred_guard_mutex; /* guard against foreign influences on
57665 + * credential calculations
57666 + * (notably. ptrace) */
57667 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57668 +
57669 #ifdef CONFIG_LOCKDEP
57670 # define MAX_LOCK_DEPTH 48UL
57671 u64 curr_chain_key;
57672 @@ -1456,6 +1478,9 @@ struct task_struct {
57673
57674 struct backing_dev_info *backing_dev_info;
57675
57676 + const struct cred *cred; /* effective (overridable) subjective task
57677 + * credentials (COW) */
57678 +
57679 struct io_context *io_context;
57680
57681 unsigned long ptrace_message;
57682 @@ -1519,6 +1544,21 @@ struct task_struct {
57683 unsigned long default_timer_slack_ns;
57684
57685 struct list_head *scm_work_list;
57686 +
57687 +#ifdef CONFIG_GRKERNSEC
57688 + /* grsecurity */
57689 + struct dentry *gr_chroot_dentry;
57690 + struct acl_subject_label *acl;
57691 + struct acl_role_label *role;
57692 + struct file *exec_file;
57693 + u16 acl_role_id;
57694 + /* is this the task that authenticated to the special role */
57695 + u8 acl_sp_role;
57696 + u8 is_writable;
57697 + u8 brute;
57698 + u8 gr_is_chrooted;
57699 +#endif
57700 +
57701 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57702 /* Index of current stored adress in ret_stack */
57703 int curr_ret_stack;
57704 @@ -1542,6 +1582,57 @@ struct task_struct {
57705 #endif /* CONFIG_TRACING */
57706 };
57707
57708 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57709 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57710 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57711 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57712 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57713 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57714 +
57715 +#ifdef CONFIG_PAX_SOFTMODE
57716 +extern unsigned int pax_softmode;
57717 +#endif
57718 +
57719 +extern int pax_check_flags(unsigned long *);
57720 +
57721 +/* if tsk != current then task_lock must be held on it */
57722 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57723 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
57724 +{
57725 + if (likely(tsk->mm))
57726 + return tsk->mm->pax_flags;
57727 + else
57728 + return 0UL;
57729 +}
57730 +
57731 +/* if tsk != current then task_lock must be held on it */
57732 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57733 +{
57734 + if (likely(tsk->mm)) {
57735 + tsk->mm->pax_flags = flags;
57736 + return 0;
57737 + }
57738 + return -EINVAL;
57739 +}
57740 +#endif
57741 +
57742 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57743 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
57744 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57745 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57746 +#endif
57747 +
57748 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57749 +extern void pax_report_insns(void *pc, void *sp);
57750 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
57751 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
57752 +
57753 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57754 +extern void pax_track_stack(void);
57755 +#else
57756 +static inline void pax_track_stack(void) {}
57757 +#endif
57758 +
57759 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57760 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
57761
57762 @@ -1978,7 +2069,9 @@ void yield(void);
57763 extern struct exec_domain default_exec_domain;
57764
57765 union thread_union {
57766 +#ifndef CONFIG_X86
57767 struct thread_info thread_info;
57768 +#endif
57769 unsigned long stack[THREAD_SIZE/sizeof(long)];
57770 };
57771
57772 @@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
57773 */
57774
57775 extern struct task_struct *find_task_by_vpid(pid_t nr);
57776 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
57777 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
57778 struct pid_namespace *ns);
57779
57780 @@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
57781 extern void exit_itimers(struct signal_struct *);
57782 extern void flush_itimer_signals(void);
57783
57784 -extern NORET_TYPE void do_group_exit(int);
57785 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57786
57787 extern void daemonize(const char *, ...);
57788 extern int allow_signal(int);
57789 @@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
57790
57791 #endif
57792
57793 -static inline int object_is_on_stack(void *obj)
57794 +static inline int object_starts_on_stack(void *obj)
57795 {
57796 - void *stack = task_stack_page(current);
57797 + const void *stack = task_stack_page(current);
57798
57799 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57800 }
57801
57802 +#ifdef CONFIG_PAX_USERCOPY
57803 +extern int object_is_on_stack(const void *obj, unsigned long len);
57804 +#endif
57805 +
57806 extern void thread_info_cache_init(void);
57807
57808 #ifdef CONFIG_DEBUG_STACK_USAGE
57809 diff -urNp linux-2.6.32.43/include/linux/screen_info.h linux-2.6.32.43/include/linux/screen_info.h
57810 --- linux-2.6.32.43/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
57811 +++ linux-2.6.32.43/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
57812 @@ -42,7 +42,8 @@ struct screen_info {
57813 __u16 pages; /* 0x32 */
57814 __u16 vesa_attributes; /* 0x34 */
57815 __u32 capabilities; /* 0x36 */
57816 - __u8 _reserved[6]; /* 0x3a */
57817 + __u16 vesapm_size; /* 0x3a */
57818 + __u8 _reserved[4]; /* 0x3c */
57819 } __attribute__((packed));
57820
57821 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57822 diff -urNp linux-2.6.32.43/include/linux/security.h linux-2.6.32.43/include/linux/security.h
57823 --- linux-2.6.32.43/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57824 +++ linux-2.6.32.43/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57825 @@ -34,6 +34,7 @@
57826 #include <linux/key.h>
57827 #include <linux/xfrm.h>
57828 #include <linux/gfp.h>
57829 +#include <linux/grsecurity.h>
57830 #include <net/flow.h>
57831
57832 /* Maximum number of letters for an LSM name string */
57833 diff -urNp linux-2.6.32.43/include/linux/shm.h linux-2.6.32.43/include/linux/shm.h
57834 --- linux-2.6.32.43/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57835 +++ linux-2.6.32.43/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57836 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57837 pid_t shm_cprid;
57838 pid_t shm_lprid;
57839 struct user_struct *mlock_user;
57840 +#ifdef CONFIG_GRKERNSEC
57841 + time_t shm_createtime;
57842 + pid_t shm_lapid;
57843 +#endif
57844 };
57845
57846 /* shm_mode upper byte flags */
57847 diff -urNp linux-2.6.32.43/include/linux/skbuff.h linux-2.6.32.43/include/linux/skbuff.h
57848 --- linux-2.6.32.43/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57849 +++ linux-2.6.32.43/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
57850 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57851 */
57852 static inline int skb_queue_empty(const struct sk_buff_head *list)
57853 {
57854 - return list->next == (struct sk_buff *)list;
57855 + return list->next == (const struct sk_buff *)list;
57856 }
57857
57858 /**
57859 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57860 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57861 const struct sk_buff *skb)
57862 {
57863 - return (skb->next == (struct sk_buff *) list);
57864 + return (skb->next == (const struct sk_buff *) list);
57865 }
57866
57867 /**
57868 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57869 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57870 const struct sk_buff *skb)
57871 {
57872 - return (skb->prev == (struct sk_buff *) list);
57873 + return (skb->prev == (const struct sk_buff *) list);
57874 }
57875
57876 /**
57877 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57878 * headroom, you should not reduce this.
57879 */
57880 #ifndef NET_SKB_PAD
57881 -#define NET_SKB_PAD 32
57882 +#define NET_SKB_PAD (_AC(32,UL))
57883 #endif
57884
57885 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57886 diff -urNp linux-2.6.32.43/include/linux/slab_def.h linux-2.6.32.43/include/linux/slab_def.h
57887 --- linux-2.6.32.43/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57888 +++ linux-2.6.32.43/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57889 @@ -69,10 +69,10 @@ struct kmem_cache {
57890 unsigned long node_allocs;
57891 unsigned long node_frees;
57892 unsigned long node_overflow;
57893 - atomic_t allochit;
57894 - atomic_t allocmiss;
57895 - atomic_t freehit;
57896 - atomic_t freemiss;
57897 + atomic_unchecked_t allochit;
57898 + atomic_unchecked_t allocmiss;
57899 + atomic_unchecked_t freehit;
57900 + atomic_unchecked_t freemiss;
57901
57902 /*
57903 * If debugging is enabled, then the allocator can add additional
57904 diff -urNp linux-2.6.32.43/include/linux/slab.h linux-2.6.32.43/include/linux/slab.h
57905 --- linux-2.6.32.43/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57906 +++ linux-2.6.32.43/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57907 @@ -11,12 +11,20 @@
57908
57909 #include <linux/gfp.h>
57910 #include <linux/types.h>
57911 +#include <linux/err.h>
57912
57913 /*
57914 * Flags to pass to kmem_cache_create().
57915 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57916 */
57917 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57918 +
57919 +#ifdef CONFIG_PAX_USERCOPY
57920 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57921 +#else
57922 +#define SLAB_USERCOPY 0x00000000UL
57923 +#endif
57924 +
57925 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57926 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57927 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57928 @@ -82,10 +90,13 @@
57929 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57930 * Both make kfree a no-op.
57931 */
57932 -#define ZERO_SIZE_PTR ((void *)16)
57933 +#define ZERO_SIZE_PTR \
57934 +({ \
57935 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57936 + (void *)(-MAX_ERRNO-1L); \
57937 +})
57938
57939 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57940 - (unsigned long)ZERO_SIZE_PTR)
57941 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57942
57943 /*
57944 * struct kmem_cache related prototypes
57945 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57946 void kfree(const void *);
57947 void kzfree(const void *);
57948 size_t ksize(const void *);
57949 +void check_object_size(const void *ptr, unsigned long n, bool to);
57950
57951 /*
57952 * Allocator specific definitions. These are mainly used to establish optimized
57953 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57954
57955 void __init kmem_cache_init_late(void);
57956
57957 +#define kmalloc(x, y) \
57958 +({ \
57959 + void *___retval; \
57960 + intoverflow_t ___x = (intoverflow_t)x; \
57961 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57962 + ___retval = NULL; \
57963 + else \
57964 + ___retval = kmalloc((size_t)___x, (y)); \
57965 + ___retval; \
57966 +})
57967 +
57968 +#define kmalloc_node(x, y, z) \
57969 +({ \
57970 + void *___retval; \
57971 + intoverflow_t ___x = (intoverflow_t)x; \
57972 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57973 + ___retval = NULL; \
57974 + else \
57975 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57976 + ___retval; \
57977 +})
57978 +
57979 +#define kzalloc(x, y) \
57980 +({ \
57981 + void *___retval; \
57982 + intoverflow_t ___x = (intoverflow_t)x; \
57983 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57984 + ___retval = NULL; \
57985 + else \
57986 + ___retval = kzalloc((size_t)___x, (y)); \
57987 + ___retval; \
57988 +})
57989 +
57990 #endif /* _LINUX_SLAB_H */
57991 diff -urNp linux-2.6.32.43/include/linux/slub_def.h linux-2.6.32.43/include/linux/slub_def.h
57992 --- linux-2.6.32.43/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57993 +++ linux-2.6.32.43/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57994 @@ -86,7 +86,7 @@ struct kmem_cache {
57995 struct kmem_cache_order_objects max;
57996 struct kmem_cache_order_objects min;
57997 gfp_t allocflags; /* gfp flags to use on each alloc */
57998 - int refcount; /* Refcount for slab cache destroy */
57999 + atomic_t refcount; /* Refcount for slab cache destroy */
58000 void (*ctor)(void *);
58001 int inuse; /* Offset to metadata */
58002 int align; /* Alignment */
58003 diff -urNp linux-2.6.32.43/include/linux/sonet.h linux-2.6.32.43/include/linux/sonet.h
58004 --- linux-2.6.32.43/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
58005 +++ linux-2.6.32.43/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
58006 @@ -61,7 +61,7 @@ struct sonet_stats {
58007 #include <asm/atomic.h>
58008
58009 struct k_sonet_stats {
58010 -#define __HANDLE_ITEM(i) atomic_t i
58011 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
58012 __SONET_ITEMS
58013 #undef __HANDLE_ITEM
58014 };
58015 diff -urNp linux-2.6.32.43/include/linux/sunrpc/clnt.h linux-2.6.32.43/include/linux/sunrpc/clnt.h
58016 --- linux-2.6.32.43/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
58017 +++ linux-2.6.32.43/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
58018 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
58019 {
58020 switch (sap->sa_family) {
58021 case AF_INET:
58022 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
58023 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
58024 case AF_INET6:
58025 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
58026 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
58027 }
58028 return 0;
58029 }
58030 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
58031 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
58032 const struct sockaddr *src)
58033 {
58034 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
58035 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
58036 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
58037
58038 dsin->sin_family = ssin->sin_family;
58039 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
58040 if (sa->sa_family != AF_INET6)
58041 return 0;
58042
58043 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
58044 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
58045 }
58046
58047 #endif /* __KERNEL__ */
58048 diff -urNp linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h
58049 --- linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
58050 +++ linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
58051 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
58052 extern unsigned int svcrdma_max_requests;
58053 extern unsigned int svcrdma_max_req_size;
58054
58055 -extern atomic_t rdma_stat_recv;
58056 -extern atomic_t rdma_stat_read;
58057 -extern atomic_t rdma_stat_write;
58058 -extern atomic_t rdma_stat_sq_starve;
58059 -extern atomic_t rdma_stat_rq_starve;
58060 -extern atomic_t rdma_stat_rq_poll;
58061 -extern atomic_t rdma_stat_rq_prod;
58062 -extern atomic_t rdma_stat_sq_poll;
58063 -extern atomic_t rdma_stat_sq_prod;
58064 +extern atomic_unchecked_t rdma_stat_recv;
58065 +extern atomic_unchecked_t rdma_stat_read;
58066 +extern atomic_unchecked_t rdma_stat_write;
58067 +extern atomic_unchecked_t rdma_stat_sq_starve;
58068 +extern atomic_unchecked_t rdma_stat_rq_starve;
58069 +extern atomic_unchecked_t rdma_stat_rq_poll;
58070 +extern atomic_unchecked_t rdma_stat_rq_prod;
58071 +extern atomic_unchecked_t rdma_stat_sq_poll;
58072 +extern atomic_unchecked_t rdma_stat_sq_prod;
58073
58074 #define RPCRDMA_VERSION 1
58075
58076 diff -urNp linux-2.6.32.43/include/linux/suspend.h linux-2.6.32.43/include/linux/suspend.h
58077 --- linux-2.6.32.43/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
58078 +++ linux-2.6.32.43/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
58079 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
58080 * which require special recovery actions in that situation.
58081 */
58082 struct platform_suspend_ops {
58083 - int (*valid)(suspend_state_t state);
58084 - int (*begin)(suspend_state_t state);
58085 - int (*prepare)(void);
58086 - int (*prepare_late)(void);
58087 - int (*enter)(suspend_state_t state);
58088 - void (*wake)(void);
58089 - void (*finish)(void);
58090 - void (*end)(void);
58091 - void (*recover)(void);
58092 + int (* const valid)(suspend_state_t state);
58093 + int (* const begin)(suspend_state_t state);
58094 + int (* const prepare)(void);
58095 + int (* const prepare_late)(void);
58096 + int (* const enter)(suspend_state_t state);
58097 + void (* const wake)(void);
58098 + void (* const finish)(void);
58099 + void (* const end)(void);
58100 + void (* const recover)(void);
58101 };
58102
58103 #ifdef CONFIG_SUSPEND
58104 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
58105 * suspend_set_ops - set platform dependent suspend operations
58106 * @ops: The new suspend operations to set.
58107 */
58108 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
58109 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
58110 extern int suspend_valid_only_mem(suspend_state_t state);
58111
58112 /**
58113 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
58114 #else /* !CONFIG_SUSPEND */
58115 #define suspend_valid_only_mem NULL
58116
58117 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
58118 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
58119 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
58120 #endif /* !CONFIG_SUSPEND */
58121
58122 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
58123 * platforms which require special recovery actions in that situation.
58124 */
58125 struct platform_hibernation_ops {
58126 - int (*begin)(void);
58127 - void (*end)(void);
58128 - int (*pre_snapshot)(void);
58129 - void (*finish)(void);
58130 - int (*prepare)(void);
58131 - int (*enter)(void);
58132 - void (*leave)(void);
58133 - int (*pre_restore)(void);
58134 - void (*restore_cleanup)(void);
58135 - void (*recover)(void);
58136 + int (* const begin)(void);
58137 + void (* const end)(void);
58138 + int (* const pre_snapshot)(void);
58139 + void (* const finish)(void);
58140 + int (* const prepare)(void);
58141 + int (* const enter)(void);
58142 + void (* const leave)(void);
58143 + int (* const pre_restore)(void);
58144 + void (* const restore_cleanup)(void);
58145 + void (* const recover)(void);
58146 };
58147
58148 #ifdef CONFIG_HIBERNATION
58149 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
58150 extern void swsusp_unset_page_free(struct page *);
58151 extern unsigned long get_safe_page(gfp_t gfp_mask);
58152
58153 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
58154 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
58155 extern int hibernate(void);
58156 extern bool system_entering_hibernation(void);
58157 #else /* CONFIG_HIBERNATION */
58158 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
58159 static inline void swsusp_set_page_free(struct page *p) {}
58160 static inline void swsusp_unset_page_free(struct page *p) {}
58161
58162 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
58163 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
58164 static inline int hibernate(void) { return -ENOSYS; }
58165 static inline bool system_entering_hibernation(void) { return false; }
58166 #endif /* CONFIG_HIBERNATION */
58167 diff -urNp linux-2.6.32.43/include/linux/sysctl.h linux-2.6.32.43/include/linux/sysctl.h
58168 --- linux-2.6.32.43/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
58169 +++ linux-2.6.32.43/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
58170 @@ -164,7 +164,11 @@ enum
58171 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
58172 };
58173
58174 -
58175 +#ifdef CONFIG_PAX_SOFTMODE
58176 +enum {
58177 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
58178 +};
58179 +#endif
58180
58181 /* CTL_VM names: */
58182 enum
58183 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
58184
58185 extern int proc_dostring(struct ctl_table *, int,
58186 void __user *, size_t *, loff_t *);
58187 +extern int proc_dostring_modpriv(struct ctl_table *, int,
58188 + void __user *, size_t *, loff_t *);
58189 extern int proc_dointvec(struct ctl_table *, int,
58190 void __user *, size_t *, loff_t *);
58191 extern int proc_dointvec_minmax(struct ctl_table *, int,
58192 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
58193
58194 extern ctl_handler sysctl_data;
58195 extern ctl_handler sysctl_string;
58196 +extern ctl_handler sysctl_string_modpriv;
58197 extern ctl_handler sysctl_intvec;
58198 extern ctl_handler sysctl_jiffies;
58199 extern ctl_handler sysctl_ms_jiffies;
58200 diff -urNp linux-2.6.32.43/include/linux/sysfs.h linux-2.6.32.43/include/linux/sysfs.h
58201 --- linux-2.6.32.43/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
58202 +++ linux-2.6.32.43/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
58203 @@ -75,8 +75,8 @@ struct bin_attribute {
58204 };
58205
58206 struct sysfs_ops {
58207 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
58208 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
58209 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
58210 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
58211 };
58212
58213 struct sysfs_dirent;
58214 diff -urNp linux-2.6.32.43/include/linux/thread_info.h linux-2.6.32.43/include/linux/thread_info.h
58215 --- linux-2.6.32.43/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
58216 +++ linux-2.6.32.43/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
58217 @@ -23,7 +23,7 @@ struct restart_block {
58218 };
58219 /* For futex_wait and futex_wait_requeue_pi */
58220 struct {
58221 - u32 *uaddr;
58222 + u32 __user *uaddr;
58223 u32 val;
58224 u32 flags;
58225 u32 bitset;
58226 diff -urNp linux-2.6.32.43/include/linux/tty.h linux-2.6.32.43/include/linux/tty.h
58227 --- linux-2.6.32.43/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
58228 +++ linux-2.6.32.43/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
58229 @@ -13,6 +13,7 @@
58230 #include <linux/tty_driver.h>
58231 #include <linux/tty_ldisc.h>
58232 #include <linux/mutex.h>
58233 +#include <linux/poll.h>
58234
58235 #include <asm/system.h>
58236
58237 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
58238 extern dev_t tty_devnum(struct tty_struct *tty);
58239 extern void proc_clear_tty(struct task_struct *p);
58240 extern struct tty_struct *get_current_tty(void);
58241 -extern void tty_default_fops(struct file_operations *fops);
58242 extern struct tty_struct *alloc_tty_struct(void);
58243 extern void free_tty_struct(struct tty_struct *tty);
58244 extern void initialize_tty_struct(struct tty_struct *tty,
58245 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
58246 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
58247 extern void tty_ldisc_enable(struct tty_struct *tty);
58248
58249 +/* tty_io.c */
58250 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
58251 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
58252 +extern unsigned int tty_poll(struct file *, poll_table *);
58253 +#ifdef CONFIG_COMPAT
58254 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
58255 + unsigned long arg);
58256 +#else
58257 +#define tty_compat_ioctl NULL
58258 +#endif
58259 +extern int tty_release(struct inode *, struct file *);
58260 +extern int tty_fasync(int fd, struct file *filp, int on);
58261
58262 /* n_tty.c */
58263 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
58264 diff -urNp linux-2.6.32.43/include/linux/tty_ldisc.h linux-2.6.32.43/include/linux/tty_ldisc.h
58265 --- linux-2.6.32.43/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
58266 +++ linux-2.6.32.43/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
58267 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
58268
58269 struct module *owner;
58270
58271 - int refcount;
58272 + atomic_t refcount;
58273 };
58274
58275 struct tty_ldisc {
58276 diff -urNp linux-2.6.32.43/include/linux/types.h linux-2.6.32.43/include/linux/types.h
58277 --- linux-2.6.32.43/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
58278 +++ linux-2.6.32.43/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
58279 @@ -191,10 +191,26 @@ typedef struct {
58280 volatile int counter;
58281 } atomic_t;
58282
58283 +#ifdef CONFIG_PAX_REFCOUNT
58284 +typedef struct {
58285 + volatile int counter;
58286 +} atomic_unchecked_t;
58287 +#else
58288 +typedef atomic_t atomic_unchecked_t;
58289 +#endif
58290 +
58291 #ifdef CONFIG_64BIT
58292 typedef struct {
58293 volatile long counter;
58294 } atomic64_t;
58295 +
58296 +#ifdef CONFIG_PAX_REFCOUNT
58297 +typedef struct {
58298 + volatile long counter;
58299 +} atomic64_unchecked_t;
58300 +#else
58301 +typedef atomic64_t atomic64_unchecked_t;
58302 +#endif
58303 #endif
58304
58305 struct ustat {
58306 diff -urNp linux-2.6.32.43/include/linux/uaccess.h linux-2.6.32.43/include/linux/uaccess.h
58307 --- linux-2.6.32.43/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
58308 +++ linux-2.6.32.43/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
58309 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
58310 long ret; \
58311 mm_segment_t old_fs = get_fs(); \
58312 \
58313 - set_fs(KERNEL_DS); \
58314 pagefault_disable(); \
58315 + set_fs(KERNEL_DS); \
58316 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58317 - pagefault_enable(); \
58318 set_fs(old_fs); \
58319 + pagefault_enable(); \
58320 ret; \
58321 })
58322
58323 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
58324 * Safely read from address @src to the buffer at @dst. If a kernel fault
58325 * happens, handle that and return -EFAULT.
58326 */
58327 -extern long probe_kernel_read(void *dst, void *src, size_t size);
58328 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
58329
58330 /*
58331 * probe_kernel_write(): safely attempt to write to a location
58332 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
58333 * Safely write to address @dst from the buffer at @src. If a kernel fault
58334 * happens, handle that and return -EFAULT.
58335 */
58336 -extern long probe_kernel_write(void *dst, void *src, size_t size);
58337 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
58338
58339 #endif /* __LINUX_UACCESS_H__ */
58340 diff -urNp linux-2.6.32.43/include/linux/unaligned/access_ok.h linux-2.6.32.43/include/linux/unaligned/access_ok.h
58341 --- linux-2.6.32.43/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
58342 +++ linux-2.6.32.43/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
58343 @@ -6,32 +6,32 @@
58344
58345 static inline u16 get_unaligned_le16(const void *p)
58346 {
58347 - return le16_to_cpup((__le16 *)p);
58348 + return le16_to_cpup((const __le16 *)p);
58349 }
58350
58351 static inline u32 get_unaligned_le32(const void *p)
58352 {
58353 - return le32_to_cpup((__le32 *)p);
58354 + return le32_to_cpup((const __le32 *)p);
58355 }
58356
58357 static inline u64 get_unaligned_le64(const void *p)
58358 {
58359 - return le64_to_cpup((__le64 *)p);
58360 + return le64_to_cpup((const __le64 *)p);
58361 }
58362
58363 static inline u16 get_unaligned_be16(const void *p)
58364 {
58365 - return be16_to_cpup((__be16 *)p);
58366 + return be16_to_cpup((const __be16 *)p);
58367 }
58368
58369 static inline u32 get_unaligned_be32(const void *p)
58370 {
58371 - return be32_to_cpup((__be32 *)p);
58372 + return be32_to_cpup((const __be32 *)p);
58373 }
58374
58375 static inline u64 get_unaligned_be64(const void *p)
58376 {
58377 - return be64_to_cpup((__be64 *)p);
58378 + return be64_to_cpup((const __be64 *)p);
58379 }
58380
58381 static inline void put_unaligned_le16(u16 val, void *p)
58382 diff -urNp linux-2.6.32.43/include/linux/vmalloc.h linux-2.6.32.43/include/linux/vmalloc.h
58383 --- linux-2.6.32.43/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
58384 +++ linux-2.6.32.43/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
58385 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
58386 #define VM_MAP 0x00000004 /* vmap()ed pages */
58387 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
58388 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
58389 +
58390 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58391 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
58392 +#endif
58393 +
58394 /* bits [20..32] reserved for arch specific ioremap internals */
58395
58396 /*
58397 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
58398
58399 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
58400
58401 +#define vmalloc(x) \
58402 +({ \
58403 + void *___retval; \
58404 + intoverflow_t ___x = (intoverflow_t)x; \
58405 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
58406 + ___retval = NULL; \
58407 + else \
58408 + ___retval = vmalloc((unsigned long)___x); \
58409 + ___retval; \
58410 +})
58411 +
58412 +#define __vmalloc(x, y, z) \
58413 +({ \
58414 + void *___retval; \
58415 + intoverflow_t ___x = (intoverflow_t)x; \
58416 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
58417 + ___retval = NULL; \
58418 + else \
58419 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
58420 + ___retval; \
58421 +})
58422 +
58423 +#define vmalloc_user(x) \
58424 +({ \
58425 + void *___retval; \
58426 + intoverflow_t ___x = (intoverflow_t)x; \
58427 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
58428 + ___retval = NULL; \
58429 + else \
58430 + ___retval = vmalloc_user((unsigned long)___x); \
58431 + ___retval; \
58432 +})
58433 +
58434 +#define vmalloc_exec(x) \
58435 +({ \
58436 + void *___retval; \
58437 + intoverflow_t ___x = (intoverflow_t)x; \
58438 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
58439 + ___retval = NULL; \
58440 + else \
58441 + ___retval = vmalloc_exec((unsigned long)___x); \
58442 + ___retval; \
58443 +})
58444 +
58445 +#define vmalloc_node(x, y) \
58446 +({ \
58447 + void *___retval; \
58448 + intoverflow_t ___x = (intoverflow_t)x; \
58449 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
58450 + ___retval = NULL; \
58451 + else \
58452 + ___retval = vmalloc_node((unsigned long)___x, (y));\
58453 + ___retval; \
58454 +})
58455 +
58456 +#define vmalloc_32(x) \
58457 +({ \
58458 + void *___retval; \
58459 + intoverflow_t ___x = (intoverflow_t)x; \
58460 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
58461 + ___retval = NULL; \
58462 + else \
58463 + ___retval = vmalloc_32((unsigned long)___x); \
58464 + ___retval; \
58465 +})
58466 +
58467 +#define vmalloc_32_user(x) \
58468 +({ \
58469 + void *___retval; \
58470 + intoverflow_t ___x = (intoverflow_t)x; \
58471 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
58472 + ___retval = NULL; \
58473 + else \
58474 + ___retval = vmalloc_32_user((unsigned long)___x);\
58475 + ___retval; \
58476 +})
58477 +
58478 #endif /* _LINUX_VMALLOC_H */
58479 diff -urNp linux-2.6.32.43/include/linux/vmstat.h linux-2.6.32.43/include/linux/vmstat.h
58480 --- linux-2.6.32.43/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
58481 +++ linux-2.6.32.43/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
58482 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
58483 /*
58484 * Zone based page accounting with per cpu differentials.
58485 */
58486 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58487 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58488
58489 static inline void zone_page_state_add(long x, struct zone *zone,
58490 enum zone_stat_item item)
58491 {
58492 - atomic_long_add(x, &zone->vm_stat[item]);
58493 - atomic_long_add(x, &vm_stat[item]);
58494 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
58495 + atomic_long_add_unchecked(x, &vm_stat[item]);
58496 }
58497
58498 static inline unsigned long global_page_state(enum zone_stat_item item)
58499 {
58500 - long x = atomic_long_read(&vm_stat[item]);
58501 + long x = atomic_long_read_unchecked(&vm_stat[item]);
58502 #ifdef CONFIG_SMP
58503 if (x < 0)
58504 x = 0;
58505 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
58506 static inline unsigned long zone_page_state(struct zone *zone,
58507 enum zone_stat_item item)
58508 {
58509 - long x = atomic_long_read(&zone->vm_stat[item]);
58510 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58511 #ifdef CONFIG_SMP
58512 if (x < 0)
58513 x = 0;
58514 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
58515 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
58516 enum zone_stat_item item)
58517 {
58518 - long x = atomic_long_read(&zone->vm_stat[item]);
58519 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58520
58521 #ifdef CONFIG_SMP
58522 int cpu;
58523 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
58524
58525 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
58526 {
58527 - atomic_long_inc(&zone->vm_stat[item]);
58528 - atomic_long_inc(&vm_stat[item]);
58529 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
58530 + atomic_long_inc_unchecked(&vm_stat[item]);
58531 }
58532
58533 static inline void __inc_zone_page_state(struct page *page,
58534 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
58535
58536 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
58537 {
58538 - atomic_long_dec(&zone->vm_stat[item]);
58539 - atomic_long_dec(&vm_stat[item]);
58540 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
58541 + atomic_long_dec_unchecked(&vm_stat[item]);
58542 }
58543
58544 static inline void __dec_zone_page_state(struct page *page,
58545 diff -urNp linux-2.6.32.43/include/media/v4l2-device.h linux-2.6.32.43/include/media/v4l2-device.h
58546 --- linux-2.6.32.43/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
58547 +++ linux-2.6.32.43/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
58548 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
58549 this function returns 0. If the name ends with a digit (e.g. cx18),
58550 then the name will be set to cx18-0 since cx180 looks really odd. */
58551 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
58552 - atomic_t *instance);
58553 + atomic_unchecked_t *instance);
58554
58555 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
58556 Since the parent disappears this ensures that v4l2_dev doesn't have an
58557 diff -urNp linux-2.6.32.43/include/net/flow.h linux-2.6.32.43/include/net/flow.h
58558 --- linux-2.6.32.43/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
58559 +++ linux-2.6.32.43/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
58560 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
58561 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
58562 u8 dir, flow_resolve_t resolver);
58563 extern void flow_cache_flush(void);
58564 -extern atomic_t flow_cache_genid;
58565 +extern atomic_unchecked_t flow_cache_genid;
58566
58567 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
58568 {
58569 diff -urNp linux-2.6.32.43/include/net/inetpeer.h linux-2.6.32.43/include/net/inetpeer.h
58570 --- linux-2.6.32.43/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
58571 +++ linux-2.6.32.43/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
58572 @@ -24,7 +24,7 @@ struct inet_peer
58573 __u32 dtime; /* the time of last use of not
58574 * referenced entries */
58575 atomic_t refcnt;
58576 - atomic_t rid; /* Frag reception counter */
58577 + atomic_unchecked_t rid; /* Frag reception counter */
58578 __u32 tcp_ts;
58579 unsigned long tcp_ts_stamp;
58580 };
58581 diff -urNp linux-2.6.32.43/include/net/ip_vs.h linux-2.6.32.43/include/net/ip_vs.h
58582 --- linux-2.6.32.43/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
58583 +++ linux-2.6.32.43/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
58584 @@ -365,7 +365,7 @@ struct ip_vs_conn {
58585 struct ip_vs_conn *control; /* Master control connection */
58586 atomic_t n_control; /* Number of controlled ones */
58587 struct ip_vs_dest *dest; /* real server */
58588 - atomic_t in_pkts; /* incoming packet counter */
58589 + atomic_unchecked_t in_pkts; /* incoming packet counter */
58590
58591 /* packet transmitter for different forwarding methods. If it
58592 mangles the packet, it must return NF_DROP or better NF_STOLEN,
58593 @@ -466,7 +466,7 @@ struct ip_vs_dest {
58594 union nf_inet_addr addr; /* IP address of the server */
58595 __be16 port; /* port number of the server */
58596 volatile unsigned flags; /* dest status flags */
58597 - atomic_t conn_flags; /* flags to copy to conn */
58598 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
58599 atomic_t weight; /* server weight */
58600
58601 atomic_t refcnt; /* reference counter */
58602 diff -urNp linux-2.6.32.43/include/net/irda/ircomm_tty.h linux-2.6.32.43/include/net/irda/ircomm_tty.h
58603 --- linux-2.6.32.43/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
58604 +++ linux-2.6.32.43/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
58605 @@ -35,6 +35,7 @@
58606 #include <linux/termios.h>
58607 #include <linux/timer.h>
58608 #include <linux/tty.h> /* struct tty_struct */
58609 +#include <asm/local.h>
58610
58611 #include <net/irda/irias_object.h>
58612 #include <net/irda/ircomm_core.h>
58613 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58614 unsigned short close_delay;
58615 unsigned short closing_wait; /* time to wait before closing */
58616
58617 - int open_count;
58618 - int blocked_open; /* # of blocked opens */
58619 + local_t open_count;
58620 + local_t blocked_open; /* # of blocked opens */
58621
58622 /* Protect concurent access to :
58623 * o self->open_count
58624 diff -urNp linux-2.6.32.43/include/net/iucv/af_iucv.h linux-2.6.32.43/include/net/iucv/af_iucv.h
58625 --- linux-2.6.32.43/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
58626 +++ linux-2.6.32.43/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
58627 @@ -87,7 +87,7 @@ struct iucv_sock {
58628 struct iucv_sock_list {
58629 struct hlist_head head;
58630 rwlock_t lock;
58631 - atomic_t autobind_name;
58632 + atomic_unchecked_t autobind_name;
58633 };
58634
58635 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
58636 diff -urNp linux-2.6.32.43/include/net/neighbour.h linux-2.6.32.43/include/net/neighbour.h
58637 --- linux-2.6.32.43/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
58638 +++ linux-2.6.32.43/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
58639 @@ -125,12 +125,12 @@ struct neighbour
58640 struct neigh_ops
58641 {
58642 int family;
58643 - void (*solicit)(struct neighbour *, struct sk_buff*);
58644 - void (*error_report)(struct neighbour *, struct sk_buff*);
58645 - int (*output)(struct sk_buff*);
58646 - int (*connected_output)(struct sk_buff*);
58647 - int (*hh_output)(struct sk_buff*);
58648 - int (*queue_xmit)(struct sk_buff*);
58649 + void (* const solicit)(struct neighbour *, struct sk_buff*);
58650 + void (* const error_report)(struct neighbour *, struct sk_buff*);
58651 + int (* const output)(struct sk_buff*);
58652 + int (* const connected_output)(struct sk_buff*);
58653 + int (* const hh_output)(struct sk_buff*);
58654 + int (* const queue_xmit)(struct sk_buff*);
58655 };
58656
58657 struct pneigh_entry
58658 diff -urNp linux-2.6.32.43/include/net/netlink.h linux-2.6.32.43/include/net/netlink.h
58659 --- linux-2.6.32.43/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
58660 +++ linux-2.6.32.43/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
58661 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
58662 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
58663 {
58664 if (mark)
58665 - skb_trim(skb, (unsigned char *) mark - skb->data);
58666 + skb_trim(skb, (const unsigned char *) mark - skb->data);
58667 }
58668
58669 /**
58670 diff -urNp linux-2.6.32.43/include/net/netns/ipv4.h linux-2.6.32.43/include/net/netns/ipv4.h
58671 --- linux-2.6.32.43/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
58672 +++ linux-2.6.32.43/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
58673 @@ -54,7 +54,7 @@ struct netns_ipv4 {
58674 int current_rt_cache_rebuild_count;
58675
58676 struct timer_list rt_secret_timer;
58677 - atomic_t rt_genid;
58678 + atomic_unchecked_t rt_genid;
58679
58680 #ifdef CONFIG_IP_MROUTE
58681 struct sock *mroute_sk;
58682 diff -urNp linux-2.6.32.43/include/net/sctp/sctp.h linux-2.6.32.43/include/net/sctp/sctp.h
58683 --- linux-2.6.32.43/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
58684 +++ linux-2.6.32.43/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
58685 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
58686
58687 #else /* SCTP_DEBUG */
58688
58689 -#define SCTP_DEBUG_PRINTK(whatever...)
58690 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
58691 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
58692 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
58693 #define SCTP_ENABLE_DEBUG
58694 #define SCTP_DISABLE_DEBUG
58695 #define SCTP_ASSERT(expr, str, func)
58696 diff -urNp linux-2.6.32.43/include/net/sock.h linux-2.6.32.43/include/net/sock.h
58697 --- linux-2.6.32.43/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
58698 +++ linux-2.6.32.43/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
58699 @@ -272,7 +272,7 @@ struct sock {
58700 rwlock_t sk_callback_lock;
58701 int sk_err,
58702 sk_err_soft;
58703 - atomic_t sk_drops;
58704 + atomic_unchecked_t sk_drops;
58705 unsigned short sk_ack_backlog;
58706 unsigned short sk_max_ack_backlog;
58707 __u32 sk_priority;
58708 diff -urNp linux-2.6.32.43/include/net/tcp.h linux-2.6.32.43/include/net/tcp.h
58709 --- linux-2.6.32.43/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
58710 +++ linux-2.6.32.43/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
58711 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
58712 struct tcp_seq_afinfo {
58713 char *name;
58714 sa_family_t family;
58715 + /* cannot be const */
58716 struct file_operations seq_fops;
58717 struct seq_operations seq_ops;
58718 };
58719 diff -urNp linux-2.6.32.43/include/net/udp.h linux-2.6.32.43/include/net/udp.h
58720 --- linux-2.6.32.43/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
58721 +++ linux-2.6.32.43/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
58722 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
58723 char *name;
58724 sa_family_t family;
58725 struct udp_table *udp_table;
58726 + /* cannot be const */
58727 struct file_operations seq_fops;
58728 struct seq_operations seq_ops;
58729 };
58730 diff -urNp linux-2.6.32.43/include/scsi/scsi_device.h linux-2.6.32.43/include/scsi/scsi_device.h
58731 --- linux-2.6.32.43/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
58732 +++ linux-2.6.32.43/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
58733 @@ -156,9 +156,9 @@ struct scsi_device {
58734 unsigned int max_device_blocked; /* what device_blocked counts down from */
58735 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58736
58737 - atomic_t iorequest_cnt;
58738 - atomic_t iodone_cnt;
58739 - atomic_t ioerr_cnt;
58740 + atomic_unchecked_t iorequest_cnt;
58741 + atomic_unchecked_t iodone_cnt;
58742 + atomic_unchecked_t ioerr_cnt;
58743
58744 struct device sdev_gendev,
58745 sdev_dev;
58746 diff -urNp linux-2.6.32.43/include/sound/ac97_codec.h linux-2.6.32.43/include/sound/ac97_codec.h
58747 --- linux-2.6.32.43/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
58748 +++ linux-2.6.32.43/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
58749 @@ -419,15 +419,15 @@
58750 struct snd_ac97;
58751
58752 struct snd_ac97_build_ops {
58753 - int (*build_3d) (struct snd_ac97 *ac97);
58754 - int (*build_specific) (struct snd_ac97 *ac97);
58755 - int (*build_spdif) (struct snd_ac97 *ac97);
58756 - int (*build_post_spdif) (struct snd_ac97 *ac97);
58757 + int (* const build_3d) (struct snd_ac97 *ac97);
58758 + int (* const build_specific) (struct snd_ac97 *ac97);
58759 + int (* const build_spdif) (struct snd_ac97 *ac97);
58760 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
58761 #ifdef CONFIG_PM
58762 - void (*suspend) (struct snd_ac97 *ac97);
58763 - void (*resume) (struct snd_ac97 *ac97);
58764 + void (* const suspend) (struct snd_ac97 *ac97);
58765 + void (* const resume) (struct snd_ac97 *ac97);
58766 #endif
58767 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58768 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58769 };
58770
58771 struct snd_ac97_bus_ops {
58772 @@ -477,7 +477,7 @@ struct snd_ac97_template {
58773
58774 struct snd_ac97 {
58775 /* -- lowlevel (hardware) driver specific -- */
58776 - struct snd_ac97_build_ops * build_ops;
58777 + const struct snd_ac97_build_ops * build_ops;
58778 void *private_data;
58779 void (*private_free) (struct snd_ac97 *ac97);
58780 /* --- */
58781 diff -urNp linux-2.6.32.43/include/sound/ymfpci.h linux-2.6.32.43/include/sound/ymfpci.h
58782 --- linux-2.6.32.43/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
58783 +++ linux-2.6.32.43/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
58784 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58785 spinlock_t reg_lock;
58786 spinlock_t voice_lock;
58787 wait_queue_head_t interrupt_sleep;
58788 - atomic_t interrupt_sleep_count;
58789 + atomic_unchecked_t interrupt_sleep_count;
58790 struct snd_info_entry *proc_entry;
58791 const struct firmware *dsp_microcode;
58792 const struct firmware *controller_microcode;
58793 diff -urNp linux-2.6.32.43/include/trace/events/irq.h linux-2.6.32.43/include/trace/events/irq.h
58794 --- linux-2.6.32.43/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
58795 +++ linux-2.6.32.43/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
58796 @@ -34,7 +34,7 @@
58797 */
58798 TRACE_EVENT(irq_handler_entry,
58799
58800 - TP_PROTO(int irq, struct irqaction *action),
58801 + TP_PROTO(int irq, const struct irqaction *action),
58802
58803 TP_ARGS(irq, action),
58804
58805 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
58806 */
58807 TRACE_EVENT(irq_handler_exit,
58808
58809 - TP_PROTO(int irq, struct irqaction *action, int ret),
58810 + TP_PROTO(int irq, const struct irqaction *action, int ret),
58811
58812 TP_ARGS(irq, action, ret),
58813
58814 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58815 */
58816 TRACE_EVENT(softirq_entry,
58817
58818 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58819 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58820
58821 TP_ARGS(h, vec),
58822
58823 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58824 */
58825 TRACE_EVENT(softirq_exit,
58826
58827 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58828 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58829
58830 TP_ARGS(h, vec),
58831
58832 diff -urNp linux-2.6.32.43/include/video/uvesafb.h linux-2.6.32.43/include/video/uvesafb.h
58833 --- linux-2.6.32.43/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58834 +++ linux-2.6.32.43/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58835 @@ -177,6 +177,7 @@ struct uvesafb_par {
58836 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58837 u8 pmi_setpal; /* PMI for palette changes */
58838 u16 *pmi_base; /* protected mode interface location */
58839 + u8 *pmi_code; /* protected mode code location */
58840 void *pmi_start;
58841 void *pmi_pal;
58842 u8 *vbe_state_orig; /*
58843 diff -urNp linux-2.6.32.43/init/do_mounts.c linux-2.6.32.43/init/do_mounts.c
58844 --- linux-2.6.32.43/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58845 +++ linux-2.6.32.43/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58846 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58847
58848 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58849 {
58850 - int err = sys_mount(name, "/root", fs, flags, data);
58851 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58852 if (err)
58853 return err;
58854
58855 - sys_chdir("/root");
58856 + sys_chdir((__force const char __user *)"/root");
58857 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58858 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58859 current->fs->pwd.mnt->mnt_sb->s_type->name,
58860 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58861 va_start(args, fmt);
58862 vsprintf(buf, fmt, args);
58863 va_end(args);
58864 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58865 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58866 if (fd >= 0) {
58867 sys_ioctl(fd, FDEJECT, 0);
58868 sys_close(fd);
58869 }
58870 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58871 - fd = sys_open("/dev/console", O_RDWR, 0);
58872 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58873 if (fd >= 0) {
58874 sys_ioctl(fd, TCGETS, (long)&termios);
58875 termios.c_lflag &= ~ICANON;
58876 sys_ioctl(fd, TCSETSF, (long)&termios);
58877 - sys_read(fd, &c, 1);
58878 + sys_read(fd, (char __user *)&c, 1);
58879 termios.c_lflag |= ICANON;
58880 sys_ioctl(fd, TCSETSF, (long)&termios);
58881 sys_close(fd);
58882 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58883 mount_root();
58884 out:
58885 devtmpfs_mount("dev");
58886 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58887 - sys_chroot(".");
58888 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58889 + sys_chroot((__force char __user *)".");
58890 }
58891 diff -urNp linux-2.6.32.43/init/do_mounts.h linux-2.6.32.43/init/do_mounts.h
58892 --- linux-2.6.32.43/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58893 +++ linux-2.6.32.43/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58894 @@ -15,15 +15,15 @@ extern int root_mountflags;
58895
58896 static inline int create_dev(char *name, dev_t dev)
58897 {
58898 - sys_unlink(name);
58899 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58900 + sys_unlink((__force char __user *)name);
58901 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58902 }
58903
58904 #if BITS_PER_LONG == 32
58905 static inline u32 bstat(char *name)
58906 {
58907 struct stat64 stat;
58908 - if (sys_stat64(name, &stat) != 0)
58909 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58910 return 0;
58911 if (!S_ISBLK(stat.st_mode))
58912 return 0;
58913 diff -urNp linux-2.6.32.43/init/do_mounts_initrd.c linux-2.6.32.43/init/do_mounts_initrd.c
58914 --- linux-2.6.32.43/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58915 +++ linux-2.6.32.43/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58916 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58917 sys_close(old_fd);sys_close(root_fd);
58918 sys_close(0);sys_close(1);sys_close(2);
58919 sys_setsid();
58920 - (void) sys_open("/dev/console",O_RDWR,0);
58921 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58922 (void) sys_dup(0);
58923 (void) sys_dup(0);
58924 return kernel_execve(shell, argv, envp_init);
58925 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58926 create_dev("/dev/root.old", Root_RAM0);
58927 /* mount initrd on rootfs' /root */
58928 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58929 - sys_mkdir("/old", 0700);
58930 - root_fd = sys_open("/", 0, 0);
58931 - old_fd = sys_open("/old", 0, 0);
58932 + sys_mkdir((__force const char __user *)"/old", 0700);
58933 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58934 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58935 /* move initrd over / and chdir/chroot in initrd root */
58936 - sys_chdir("/root");
58937 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58938 - sys_chroot(".");
58939 + sys_chdir((__force const char __user *)"/root");
58940 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58941 + sys_chroot((__force const char __user *)".");
58942
58943 /*
58944 * In case that a resume from disk is carried out by linuxrc or one of
58945 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58946
58947 /* move initrd to rootfs' /old */
58948 sys_fchdir(old_fd);
58949 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58950 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58951 /* switch root and cwd back to / of rootfs */
58952 sys_fchdir(root_fd);
58953 - sys_chroot(".");
58954 + sys_chroot((__force const char __user *)".");
58955 sys_close(old_fd);
58956 sys_close(root_fd);
58957
58958 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58959 - sys_chdir("/old");
58960 + sys_chdir((__force const char __user *)"/old");
58961 return;
58962 }
58963
58964 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58965 mount_root();
58966
58967 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58968 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58969 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58970 if (!error)
58971 printk("okay\n");
58972 else {
58973 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58974 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58975 if (error == -ENOENT)
58976 printk("/initrd does not exist. Ignored.\n");
58977 else
58978 printk("failed\n");
58979 printk(KERN_NOTICE "Unmounting old root\n");
58980 - sys_umount("/old", MNT_DETACH);
58981 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58982 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58983 if (fd < 0) {
58984 error = fd;
58985 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58986 * mounted in the normal path.
58987 */
58988 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58989 - sys_unlink("/initrd.image");
58990 + sys_unlink((__force const char __user *)"/initrd.image");
58991 handle_initrd();
58992 return 1;
58993 }
58994 }
58995 - sys_unlink("/initrd.image");
58996 + sys_unlink((__force const char __user *)"/initrd.image");
58997 return 0;
58998 }
58999 diff -urNp linux-2.6.32.43/init/do_mounts_md.c linux-2.6.32.43/init/do_mounts_md.c
59000 --- linux-2.6.32.43/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
59001 +++ linux-2.6.32.43/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
59002 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
59003 partitioned ? "_d" : "", minor,
59004 md_setup_args[ent].device_names);
59005
59006 - fd = sys_open(name, 0, 0);
59007 + fd = sys_open((__force char __user *)name, 0, 0);
59008 if (fd < 0) {
59009 printk(KERN_ERR "md: open failed - cannot start "
59010 "array %s\n", name);
59011 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
59012 * array without it
59013 */
59014 sys_close(fd);
59015 - fd = sys_open(name, 0, 0);
59016 + fd = sys_open((__force char __user *)name, 0, 0);
59017 sys_ioctl(fd, BLKRRPART, 0);
59018 }
59019 sys_close(fd);
59020 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
59021
59022 wait_for_device_probe();
59023
59024 - fd = sys_open("/dev/md0", 0, 0);
59025 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
59026 if (fd >= 0) {
59027 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
59028 sys_close(fd);
59029 diff -urNp linux-2.6.32.43/init/initramfs.c linux-2.6.32.43/init/initramfs.c
59030 --- linux-2.6.32.43/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
59031 +++ linux-2.6.32.43/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
59032 @@ -74,7 +74,7 @@ static void __init free_hash(void)
59033 }
59034 }
59035
59036 -static long __init do_utime(char __user *filename, time_t mtime)
59037 +static long __init do_utime(__force char __user *filename, time_t mtime)
59038 {
59039 struct timespec t[2];
59040
59041 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
59042 struct dir_entry *de, *tmp;
59043 list_for_each_entry_safe(de, tmp, &dir_list, list) {
59044 list_del(&de->list);
59045 - do_utime(de->name, de->mtime);
59046 + do_utime((__force char __user *)de->name, de->mtime);
59047 kfree(de->name);
59048 kfree(de);
59049 }
59050 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
59051 if (nlink >= 2) {
59052 char *old = find_link(major, minor, ino, mode, collected);
59053 if (old)
59054 - return (sys_link(old, collected) < 0) ? -1 : 1;
59055 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
59056 }
59057 return 0;
59058 }
59059 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
59060 {
59061 struct stat st;
59062
59063 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
59064 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
59065 if (S_ISDIR(st.st_mode))
59066 - sys_rmdir(path);
59067 + sys_rmdir((__force char __user *)path);
59068 else
59069 - sys_unlink(path);
59070 + sys_unlink((__force char __user *)path);
59071 }
59072 }
59073
59074 @@ -305,7 +305,7 @@ static int __init do_name(void)
59075 int openflags = O_WRONLY|O_CREAT;
59076 if (ml != 1)
59077 openflags |= O_TRUNC;
59078 - wfd = sys_open(collected, openflags, mode);
59079 + wfd = sys_open((__force char __user *)collected, openflags, mode);
59080
59081 if (wfd >= 0) {
59082 sys_fchown(wfd, uid, gid);
59083 @@ -317,17 +317,17 @@ static int __init do_name(void)
59084 }
59085 }
59086 } else if (S_ISDIR(mode)) {
59087 - sys_mkdir(collected, mode);
59088 - sys_chown(collected, uid, gid);
59089 - sys_chmod(collected, mode);
59090 + sys_mkdir((__force char __user *)collected, mode);
59091 + sys_chown((__force char __user *)collected, uid, gid);
59092 + sys_chmod((__force char __user *)collected, mode);
59093 dir_add(collected, mtime);
59094 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
59095 S_ISFIFO(mode) || S_ISSOCK(mode)) {
59096 if (maybe_link() == 0) {
59097 - sys_mknod(collected, mode, rdev);
59098 - sys_chown(collected, uid, gid);
59099 - sys_chmod(collected, mode);
59100 - do_utime(collected, mtime);
59101 + sys_mknod((__force char __user *)collected, mode, rdev);
59102 + sys_chown((__force char __user *)collected, uid, gid);
59103 + sys_chmod((__force char __user *)collected, mode);
59104 + do_utime((__force char __user *)collected, mtime);
59105 }
59106 }
59107 return 0;
59108 @@ -336,15 +336,15 @@ static int __init do_name(void)
59109 static int __init do_copy(void)
59110 {
59111 if (count >= body_len) {
59112 - sys_write(wfd, victim, body_len);
59113 + sys_write(wfd, (__force char __user *)victim, body_len);
59114 sys_close(wfd);
59115 - do_utime(vcollected, mtime);
59116 + do_utime((__force char __user *)vcollected, mtime);
59117 kfree(vcollected);
59118 eat(body_len);
59119 state = SkipIt;
59120 return 0;
59121 } else {
59122 - sys_write(wfd, victim, count);
59123 + sys_write(wfd, (__force char __user *)victim, count);
59124 body_len -= count;
59125 eat(count);
59126 return 1;
59127 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
59128 {
59129 collected[N_ALIGN(name_len) + body_len] = '\0';
59130 clean_path(collected, 0);
59131 - sys_symlink(collected + N_ALIGN(name_len), collected);
59132 - sys_lchown(collected, uid, gid);
59133 - do_utime(collected, mtime);
59134 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
59135 + sys_lchown((__force char __user *)collected, uid, gid);
59136 + do_utime((__force char __user *)collected, mtime);
59137 state = SkipIt;
59138 next_state = Reset;
59139 return 0;
59140 diff -urNp linux-2.6.32.43/init/Kconfig linux-2.6.32.43/init/Kconfig
59141 --- linux-2.6.32.43/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
59142 +++ linux-2.6.32.43/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
59143 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
59144
59145 config COMPAT_BRK
59146 bool "Disable heap randomization"
59147 - default y
59148 + default n
59149 help
59150 Randomizing heap placement makes heap exploits harder, but it
59151 also breaks ancient binaries (including anything libc5 based).
59152 diff -urNp linux-2.6.32.43/init/main.c linux-2.6.32.43/init/main.c
59153 --- linux-2.6.32.43/init/main.c 2011-05-10 22:12:01.000000000 -0400
59154 +++ linux-2.6.32.43/init/main.c 2011-05-22 23:02:06.000000000 -0400
59155 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
59156 #ifdef CONFIG_TC
59157 extern void tc_init(void);
59158 #endif
59159 +extern void grsecurity_init(void);
59160
59161 enum system_states system_state __read_mostly;
59162 EXPORT_SYMBOL(system_state);
59163 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
59164
59165 __setup("reset_devices", set_reset_devices);
59166
59167 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
59168 +extern char pax_enter_kernel_user[];
59169 +extern char pax_exit_kernel_user[];
59170 +extern pgdval_t clone_pgd_mask;
59171 +#endif
59172 +
59173 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
59174 +static int __init setup_pax_nouderef(char *str)
59175 +{
59176 +#ifdef CONFIG_X86_32
59177 + unsigned int cpu;
59178 + struct desc_struct *gdt;
59179 +
59180 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
59181 + gdt = get_cpu_gdt_table(cpu);
59182 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
59183 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
59184 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
59185 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
59186 + }
59187 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
59188 +#else
59189 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
59190 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
59191 + clone_pgd_mask = ~(pgdval_t)0UL;
59192 +#endif
59193 +
59194 + return 0;
59195 +}
59196 +early_param("pax_nouderef", setup_pax_nouderef);
59197 +#endif
59198 +
59199 +#ifdef CONFIG_PAX_SOFTMODE
59200 +unsigned int pax_softmode;
59201 +
59202 +static int __init setup_pax_softmode(char *str)
59203 +{
59204 + get_option(&str, &pax_softmode);
59205 + return 1;
59206 +}
59207 +__setup("pax_softmode=", setup_pax_softmode);
59208 +#endif
59209 +
59210 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
59211 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
59212 static const char *panic_later, *panic_param;
59213 @@ -705,52 +749,53 @@ int initcall_debug;
59214 core_param(initcall_debug, initcall_debug, bool, 0644);
59215
59216 static char msgbuf[64];
59217 -static struct boot_trace_call call;
59218 -static struct boot_trace_ret ret;
59219 +static struct boot_trace_call trace_call;
59220 +static struct boot_trace_ret trace_ret;
59221
59222 int do_one_initcall(initcall_t fn)
59223 {
59224 int count = preempt_count();
59225 ktime_t calltime, delta, rettime;
59226 + const char *msg1 = "", *msg2 = "";
59227
59228 if (initcall_debug) {
59229 - call.caller = task_pid_nr(current);
59230 - printk("calling %pF @ %i\n", fn, call.caller);
59231 + trace_call.caller = task_pid_nr(current);
59232 + printk("calling %pF @ %i\n", fn, trace_call.caller);
59233 calltime = ktime_get();
59234 - trace_boot_call(&call, fn);
59235 + trace_boot_call(&trace_call, fn);
59236 enable_boot_trace();
59237 }
59238
59239 - ret.result = fn();
59240 + trace_ret.result = fn();
59241
59242 if (initcall_debug) {
59243 disable_boot_trace();
59244 rettime = ktime_get();
59245 delta = ktime_sub(rettime, calltime);
59246 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59247 - trace_boot_ret(&ret, fn);
59248 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59249 + trace_boot_ret(&trace_ret, fn);
59250 printk("initcall %pF returned %d after %Ld usecs\n", fn,
59251 - ret.result, ret.duration);
59252 + trace_ret.result, trace_ret.duration);
59253 }
59254
59255 msgbuf[0] = 0;
59256
59257 - if (ret.result && ret.result != -ENODEV && initcall_debug)
59258 - sprintf(msgbuf, "error code %d ", ret.result);
59259 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
59260 + sprintf(msgbuf, "error code %d ", trace_ret.result);
59261
59262 if (preempt_count() != count) {
59263 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
59264 + msg1 = " preemption imbalance";
59265 preempt_count() = count;
59266 }
59267 if (irqs_disabled()) {
59268 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
59269 + msg2 = " disabled interrupts";
59270 local_irq_enable();
59271 }
59272 - if (msgbuf[0]) {
59273 - printk("initcall %pF returned with %s\n", fn, msgbuf);
59274 + if (msgbuf[0] || *msg1 || *msg2) {
59275 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
59276 }
59277
59278 - return ret.result;
59279 + return trace_ret.result;
59280 }
59281
59282
59283 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
59284 if (!ramdisk_execute_command)
59285 ramdisk_execute_command = "/init";
59286
59287 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
59288 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
59289 ramdisk_execute_command = NULL;
59290 prepare_namespace();
59291 }
59292
59293 + grsecurity_init();
59294 +
59295 /*
59296 * Ok, we have completed the initial bootup, and
59297 * we're essentially up and running. Get rid of the
59298 diff -urNp linux-2.6.32.43/init/noinitramfs.c linux-2.6.32.43/init/noinitramfs.c
59299 --- linux-2.6.32.43/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
59300 +++ linux-2.6.32.43/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
59301 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
59302 {
59303 int err;
59304
59305 - err = sys_mkdir("/dev", 0755);
59306 + err = sys_mkdir((const char __user *)"/dev", 0755);
59307 if (err < 0)
59308 goto out;
59309
59310 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
59311 if (err < 0)
59312 goto out;
59313
59314 - err = sys_mkdir("/root", 0700);
59315 + err = sys_mkdir((const char __user *)"/root", 0700);
59316 if (err < 0)
59317 goto out;
59318
59319 diff -urNp linux-2.6.32.43/ipc/mqueue.c linux-2.6.32.43/ipc/mqueue.c
59320 --- linux-2.6.32.43/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
59321 +++ linux-2.6.32.43/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
59322 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
59323 mq_bytes = (mq_msg_tblsz +
59324 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
59325
59326 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
59327 spin_lock(&mq_lock);
59328 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
59329 u->mq_bytes + mq_bytes >
59330 diff -urNp linux-2.6.32.43/ipc/sem.c linux-2.6.32.43/ipc/sem.c
59331 --- linux-2.6.32.43/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
59332 +++ linux-2.6.32.43/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
59333 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
59334 ushort* sem_io = fast_sem_io;
59335 int nsems;
59336
59337 + pax_track_stack();
59338 +
59339 sma = sem_lock_check(ns, semid);
59340 if (IS_ERR(sma))
59341 return PTR_ERR(sma);
59342 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
59343 unsigned long jiffies_left = 0;
59344 struct ipc_namespace *ns;
59345
59346 + pax_track_stack();
59347 +
59348 ns = current->nsproxy->ipc_ns;
59349
59350 if (nsops < 1 || semid < 0)
59351 diff -urNp linux-2.6.32.43/ipc/shm.c linux-2.6.32.43/ipc/shm.c
59352 --- linux-2.6.32.43/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
59353 +++ linux-2.6.32.43/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
59354 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
59355 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
59356 #endif
59357
59358 +#ifdef CONFIG_GRKERNSEC
59359 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59360 + const time_t shm_createtime, const uid_t cuid,
59361 + const int shmid);
59362 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59363 + const time_t shm_createtime);
59364 +#endif
59365 +
59366 void shm_init_ns(struct ipc_namespace *ns)
59367 {
59368 ns->shm_ctlmax = SHMMAX;
59369 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
59370 shp->shm_lprid = 0;
59371 shp->shm_atim = shp->shm_dtim = 0;
59372 shp->shm_ctim = get_seconds();
59373 +#ifdef CONFIG_GRKERNSEC
59374 + {
59375 + struct timespec timeval;
59376 + do_posix_clock_monotonic_gettime(&timeval);
59377 +
59378 + shp->shm_createtime = timeval.tv_sec;
59379 + }
59380 +#endif
59381 shp->shm_segsz = size;
59382 shp->shm_nattch = 0;
59383 shp->shm_file = file;
59384 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
59385 if (err)
59386 goto out_unlock;
59387
59388 +#ifdef CONFIG_GRKERNSEC
59389 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
59390 + shp->shm_perm.cuid, shmid) ||
59391 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
59392 + err = -EACCES;
59393 + goto out_unlock;
59394 + }
59395 +#endif
59396 +
59397 path.dentry = dget(shp->shm_file->f_path.dentry);
59398 path.mnt = shp->shm_file->f_path.mnt;
59399 shp->shm_nattch++;
59400 +#ifdef CONFIG_GRKERNSEC
59401 + shp->shm_lapid = current->pid;
59402 +#endif
59403 size = i_size_read(path.dentry->d_inode);
59404 shm_unlock(shp);
59405
59406 diff -urNp linux-2.6.32.43/kernel/acct.c linux-2.6.32.43/kernel/acct.c
59407 --- linux-2.6.32.43/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
59408 +++ linux-2.6.32.43/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
59409 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
59410 */
59411 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
59412 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
59413 - file->f_op->write(file, (char *)&ac,
59414 + file->f_op->write(file, (__force char __user *)&ac,
59415 sizeof(acct_t), &file->f_pos);
59416 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
59417 set_fs(fs);
59418 diff -urNp linux-2.6.32.43/kernel/audit.c linux-2.6.32.43/kernel/audit.c
59419 --- linux-2.6.32.43/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
59420 +++ linux-2.6.32.43/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
59421 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
59422 3) suppressed due to audit_rate_limit
59423 4) suppressed due to audit_backlog_limit
59424 */
59425 -static atomic_t audit_lost = ATOMIC_INIT(0);
59426 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
59427
59428 /* The netlink socket. */
59429 static struct sock *audit_sock;
59430 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
59431 unsigned long now;
59432 int print;
59433
59434 - atomic_inc(&audit_lost);
59435 + atomic_inc_unchecked(&audit_lost);
59436
59437 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
59438
59439 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
59440 printk(KERN_WARNING
59441 "audit: audit_lost=%d audit_rate_limit=%d "
59442 "audit_backlog_limit=%d\n",
59443 - atomic_read(&audit_lost),
59444 + atomic_read_unchecked(&audit_lost),
59445 audit_rate_limit,
59446 audit_backlog_limit);
59447 audit_panic(message);
59448 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
59449 status_set.pid = audit_pid;
59450 status_set.rate_limit = audit_rate_limit;
59451 status_set.backlog_limit = audit_backlog_limit;
59452 - status_set.lost = atomic_read(&audit_lost);
59453 + status_set.lost = atomic_read_unchecked(&audit_lost);
59454 status_set.backlog = skb_queue_len(&audit_skb_queue);
59455 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
59456 &status_set, sizeof(status_set));
59457 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
59458 spin_unlock_irq(&tsk->sighand->siglock);
59459 }
59460 read_unlock(&tasklist_lock);
59461 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
59462 - &s, sizeof(s));
59463 +
59464 + if (!err)
59465 + audit_send_reply(NETLINK_CB(skb).pid, seq,
59466 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
59467 break;
59468 }
59469 case AUDIT_TTY_SET: {
59470 diff -urNp linux-2.6.32.43/kernel/auditsc.c linux-2.6.32.43/kernel/auditsc.c
59471 --- linux-2.6.32.43/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
59472 +++ linux-2.6.32.43/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
59473 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
59474 }
59475
59476 /* global counter which is incremented every time something logs in */
59477 -static atomic_t session_id = ATOMIC_INIT(0);
59478 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
59479
59480 /**
59481 * audit_set_loginuid - set a task's audit_context loginuid
59482 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
59483 */
59484 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
59485 {
59486 - unsigned int sessionid = atomic_inc_return(&session_id);
59487 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
59488 struct audit_context *context = task->audit_context;
59489
59490 if (context && context->in_syscall) {
59491 diff -urNp linux-2.6.32.43/kernel/capability.c linux-2.6.32.43/kernel/capability.c
59492 --- linux-2.6.32.43/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
59493 +++ linux-2.6.32.43/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
59494 @@ -305,10 +305,26 @@ int capable(int cap)
59495 BUG();
59496 }
59497
59498 - if (security_capable(cap) == 0) {
59499 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
59500 current->flags |= PF_SUPERPRIV;
59501 return 1;
59502 }
59503 return 0;
59504 }
59505 +
59506 +int capable_nolog(int cap)
59507 +{
59508 + if (unlikely(!cap_valid(cap))) {
59509 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
59510 + BUG();
59511 + }
59512 +
59513 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
59514 + current->flags |= PF_SUPERPRIV;
59515 + return 1;
59516 + }
59517 + return 0;
59518 +}
59519 +
59520 EXPORT_SYMBOL(capable);
59521 +EXPORT_SYMBOL(capable_nolog);
59522 diff -urNp linux-2.6.32.43/kernel/cgroup.c linux-2.6.32.43/kernel/cgroup.c
59523 --- linux-2.6.32.43/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
59524 +++ linux-2.6.32.43/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
59525 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
59526 struct hlist_head *hhead;
59527 struct cg_cgroup_link *link;
59528
59529 + pax_track_stack();
59530 +
59531 /* First see if we already have a cgroup group that matches
59532 * the desired set */
59533 read_lock(&css_set_lock);
59534 diff -urNp linux-2.6.32.43/kernel/configs.c linux-2.6.32.43/kernel/configs.c
59535 --- linux-2.6.32.43/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
59536 +++ linux-2.6.32.43/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
59537 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
59538 struct proc_dir_entry *entry;
59539
59540 /* create the current config file */
59541 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59542 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
59543 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
59544 + &ikconfig_file_ops);
59545 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59546 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
59547 + &ikconfig_file_ops);
59548 +#endif
59549 +#else
59550 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
59551 &ikconfig_file_ops);
59552 +#endif
59553 +
59554 if (!entry)
59555 return -ENOMEM;
59556
59557 diff -urNp linux-2.6.32.43/kernel/cpu.c linux-2.6.32.43/kernel/cpu.c
59558 --- linux-2.6.32.43/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
59559 +++ linux-2.6.32.43/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
59560 @@ -19,7 +19,7 @@
59561 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
59562 static DEFINE_MUTEX(cpu_add_remove_lock);
59563
59564 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
59565 +static RAW_NOTIFIER_HEAD(cpu_chain);
59566
59567 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
59568 * Should always be manipulated under cpu_add_remove_lock
59569 diff -urNp linux-2.6.32.43/kernel/cred.c linux-2.6.32.43/kernel/cred.c
59570 --- linux-2.6.32.43/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
59571 +++ linux-2.6.32.43/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
59572 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
59573 */
59574 void __put_cred(struct cred *cred)
59575 {
59576 + pax_track_stack();
59577 +
59578 kdebug("__put_cred(%p{%d,%d})", cred,
59579 atomic_read(&cred->usage),
59580 read_cred_subscribers(cred));
59581 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
59582 {
59583 struct cred *cred;
59584
59585 + pax_track_stack();
59586 +
59587 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
59588 atomic_read(&tsk->cred->usage),
59589 read_cred_subscribers(tsk->cred));
59590 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
59591 {
59592 const struct cred *cred;
59593
59594 + pax_track_stack();
59595 +
59596 rcu_read_lock();
59597
59598 do {
59599 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
59600 {
59601 struct cred *new;
59602
59603 + pax_track_stack();
59604 +
59605 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
59606 if (!new)
59607 return NULL;
59608 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
59609 const struct cred *old;
59610 struct cred *new;
59611
59612 + pax_track_stack();
59613 +
59614 validate_process_creds();
59615
59616 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59617 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
59618 struct thread_group_cred *tgcred = NULL;
59619 struct cred *new;
59620
59621 + pax_track_stack();
59622 +
59623 #ifdef CONFIG_KEYS
59624 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59625 if (!tgcred)
59626 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
59627 struct cred *new;
59628 int ret;
59629
59630 + pax_track_stack();
59631 +
59632 mutex_init(&p->cred_guard_mutex);
59633
59634 if (
59635 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
59636 struct task_struct *task = current;
59637 const struct cred *old = task->real_cred;
59638
59639 + pax_track_stack();
59640 +
59641 kdebug("commit_creds(%p{%d,%d})", new,
59642 atomic_read(&new->usage),
59643 read_cred_subscribers(new));
59644 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
59645
59646 get_cred(new); /* we will require a ref for the subj creds too */
59647
59648 + gr_set_role_label(task, new->uid, new->gid);
59649 +
59650 /* dumpability changes */
59651 if (old->euid != new->euid ||
59652 old->egid != new->egid ||
59653 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
59654 */
59655 void abort_creds(struct cred *new)
59656 {
59657 + pax_track_stack();
59658 +
59659 kdebug("abort_creds(%p{%d,%d})", new,
59660 atomic_read(&new->usage),
59661 read_cred_subscribers(new));
59662 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
59663 {
59664 const struct cred *old = current->cred;
59665
59666 + pax_track_stack();
59667 +
59668 kdebug("override_creds(%p{%d,%d})", new,
59669 atomic_read(&new->usage),
59670 read_cred_subscribers(new));
59671 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
59672 {
59673 const struct cred *override = current->cred;
59674
59675 + pax_track_stack();
59676 +
59677 kdebug("revert_creds(%p{%d,%d})", old,
59678 atomic_read(&old->usage),
59679 read_cred_subscribers(old));
59680 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
59681 const struct cred *old;
59682 struct cred *new;
59683
59684 + pax_track_stack();
59685 +
59686 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59687 if (!new)
59688 return NULL;
59689 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59690 */
59691 int set_security_override(struct cred *new, u32 secid)
59692 {
59693 + pax_track_stack();
59694 +
59695 return security_kernel_act_as(new, secid);
59696 }
59697 EXPORT_SYMBOL(set_security_override);
59698 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
59699 u32 secid;
59700 int ret;
59701
59702 + pax_track_stack();
59703 +
59704 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59705 if (ret < 0)
59706 return ret;
59707 diff -urNp linux-2.6.32.43/kernel/exit.c linux-2.6.32.43/kernel/exit.c
59708 --- linux-2.6.32.43/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
59709 +++ linux-2.6.32.43/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
59710 @@ -55,6 +55,10 @@
59711 #include <asm/pgtable.h>
59712 #include <asm/mmu_context.h>
59713
59714 +#ifdef CONFIG_GRKERNSEC
59715 +extern rwlock_t grsec_exec_file_lock;
59716 +#endif
59717 +
59718 static void exit_mm(struct task_struct * tsk);
59719
59720 static void __unhash_process(struct task_struct *p)
59721 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
59722 struct task_struct *leader;
59723 int zap_leader;
59724 repeat:
59725 + gr_del_task_from_ip_table(p);
59726 +
59727 tracehook_prepare_release_task(p);
59728 /* don't need to get the RCU readlock here - the process is dead and
59729 * can't be modifying its own credentials */
59730 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
59731 {
59732 write_lock_irq(&tasklist_lock);
59733
59734 +#ifdef CONFIG_GRKERNSEC
59735 + write_lock(&grsec_exec_file_lock);
59736 + if (current->exec_file) {
59737 + fput(current->exec_file);
59738 + current->exec_file = NULL;
59739 + }
59740 + write_unlock(&grsec_exec_file_lock);
59741 +#endif
59742 +
59743 ptrace_unlink(current);
59744 /* Reparent to init */
59745 current->real_parent = current->parent = kthreadd_task;
59746 list_move_tail(&current->sibling, &current->real_parent->children);
59747
59748 + gr_set_kernel_label(current);
59749 +
59750 /* Set the exit signal to SIGCHLD so we signal init on exit */
59751 current->exit_signal = SIGCHLD;
59752
59753 @@ -397,7 +414,7 @@ int allow_signal(int sig)
59754 * know it'll be handled, so that they don't get converted to
59755 * SIGKILL or just silently dropped.
59756 */
59757 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59758 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59759 recalc_sigpending();
59760 spin_unlock_irq(&current->sighand->siglock);
59761 return 0;
59762 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
59763 vsnprintf(current->comm, sizeof(current->comm), name, args);
59764 va_end(args);
59765
59766 +#ifdef CONFIG_GRKERNSEC
59767 + write_lock(&grsec_exec_file_lock);
59768 + if (current->exec_file) {
59769 + fput(current->exec_file);
59770 + current->exec_file = NULL;
59771 + }
59772 + write_unlock(&grsec_exec_file_lock);
59773 +#endif
59774 +
59775 + gr_set_kernel_label(current);
59776 +
59777 /*
59778 * If we were started as result of loading a module, close all of the
59779 * user space pages. We don't need them, and if we didn't close them
59780 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
59781 struct task_struct *tsk = current;
59782 int group_dead;
59783
59784 - profile_task_exit(tsk);
59785 -
59786 - WARN_ON(atomic_read(&tsk->fs_excl));
59787 -
59788 + /*
59789 + * Check this first since set_fs() below depends on
59790 + * current_thread_info(), which we better not access when we're in
59791 + * interrupt context. Other than that, we want to do the set_fs()
59792 + * as early as possible.
59793 + */
59794 if (unlikely(in_interrupt()))
59795 panic("Aiee, killing interrupt handler!");
59796 - if (unlikely(!tsk->pid))
59797 - panic("Attempted to kill the idle task!");
59798
59799 /*
59800 - * If do_exit is called because this processes oopsed, it's possible
59801 + * If do_exit is called because this processes Oops'ed, it's possible
59802 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
59803 * continuing. Amongst other possible reasons, this is to prevent
59804 * mm_release()->clear_child_tid() from writing to a user-controlled
59805 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
59806 */
59807 set_fs(USER_DS);
59808
59809 + profile_task_exit(tsk);
59810 +
59811 + WARN_ON(atomic_read(&tsk->fs_excl));
59812 +
59813 + if (unlikely(!tsk->pid))
59814 + panic("Attempted to kill the idle task!");
59815 +
59816 tracehook_report_exit(&code);
59817
59818 validate_creds_for_do_exit(tsk);
59819 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59820 tsk->exit_code = code;
59821 taskstats_exit(tsk, group_dead);
59822
59823 + gr_acl_handle_psacct(tsk, code);
59824 + gr_acl_handle_exit();
59825 +
59826 exit_mm(tsk);
59827
59828 if (group_dead)
59829 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59830
59831 if (unlikely(wo->wo_flags & WNOWAIT)) {
59832 int exit_code = p->exit_code;
59833 - int why, status;
59834 + int why;
59835
59836 get_task_struct(p);
59837 read_unlock(&tasklist_lock);
59838 diff -urNp linux-2.6.32.43/kernel/fork.c linux-2.6.32.43/kernel/fork.c
59839 --- linux-2.6.32.43/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59840 +++ linux-2.6.32.43/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59841 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59842 *stackend = STACK_END_MAGIC; /* for overflow detection */
59843
59844 #ifdef CONFIG_CC_STACKPROTECTOR
59845 - tsk->stack_canary = get_random_int();
59846 + tsk->stack_canary = pax_get_random_long();
59847 #endif
59848
59849 /* One for us, one for whoever does the "release_task()" (usually parent) */
59850 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59851 mm->locked_vm = 0;
59852 mm->mmap = NULL;
59853 mm->mmap_cache = NULL;
59854 - mm->free_area_cache = oldmm->mmap_base;
59855 - mm->cached_hole_size = ~0UL;
59856 + mm->free_area_cache = oldmm->free_area_cache;
59857 + mm->cached_hole_size = oldmm->cached_hole_size;
59858 mm->map_count = 0;
59859 cpumask_clear(mm_cpumask(mm));
59860 mm->mm_rb = RB_ROOT;
59861 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59862 tmp->vm_flags &= ~VM_LOCKED;
59863 tmp->vm_mm = mm;
59864 tmp->vm_next = tmp->vm_prev = NULL;
59865 + tmp->vm_mirror = NULL;
59866 anon_vma_link(tmp);
59867 file = tmp->vm_file;
59868 if (file) {
59869 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59870 if (retval)
59871 goto out;
59872 }
59873 +
59874 +#ifdef CONFIG_PAX_SEGMEXEC
59875 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59876 + struct vm_area_struct *mpnt_m;
59877 +
59878 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59879 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59880 +
59881 + if (!mpnt->vm_mirror)
59882 + continue;
59883 +
59884 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59885 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59886 + mpnt->vm_mirror = mpnt_m;
59887 + } else {
59888 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59889 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59890 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59891 + mpnt->vm_mirror->vm_mirror = mpnt;
59892 + }
59893 + }
59894 + BUG_ON(mpnt_m);
59895 + }
59896 +#endif
59897 +
59898 /* a new mm has just been created */
59899 arch_dup_mmap(oldmm, mm);
59900 retval = 0;
59901 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59902 write_unlock(&fs->lock);
59903 return -EAGAIN;
59904 }
59905 - fs->users++;
59906 + atomic_inc(&fs->users);
59907 write_unlock(&fs->lock);
59908 return 0;
59909 }
59910 tsk->fs = copy_fs_struct(fs);
59911 if (!tsk->fs)
59912 return -ENOMEM;
59913 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59914 return 0;
59915 }
59916
59917 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59918 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59919 #endif
59920 retval = -EAGAIN;
59921 +
59922 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59923 +
59924 if (atomic_read(&p->real_cred->user->processes) >=
59925 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59926 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59927 - p->real_cred->user != INIT_USER)
59928 + if (p->real_cred->user != INIT_USER &&
59929 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59930 goto bad_fork_free;
59931 }
59932
59933 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59934 goto bad_fork_free_pid;
59935 }
59936
59937 + gr_copy_label(p);
59938 +
59939 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59940 /*
59941 * Clear TID on mm_release()?
59942 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59943 bad_fork_free:
59944 free_task(p);
59945 fork_out:
59946 + gr_log_forkfail(retval);
59947 +
59948 return ERR_PTR(retval);
59949 }
59950
59951 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59952 if (clone_flags & CLONE_PARENT_SETTID)
59953 put_user(nr, parent_tidptr);
59954
59955 + gr_handle_brute_check();
59956 +
59957 if (clone_flags & CLONE_VFORK) {
59958 p->vfork_done = &vfork;
59959 init_completion(&vfork);
59960 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59961 return 0;
59962
59963 /* don't need lock here; in the worst case we'll do useless copy */
59964 - if (fs->users == 1)
59965 + if (atomic_read(&fs->users) == 1)
59966 return 0;
59967
59968 *new_fsp = copy_fs_struct(fs);
59969 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59970 fs = current->fs;
59971 write_lock(&fs->lock);
59972 current->fs = new_fs;
59973 - if (--fs->users)
59974 + gr_set_chroot_entries(current, &current->fs->root);
59975 + if (atomic_dec_return(&fs->users))
59976 new_fs = NULL;
59977 else
59978 new_fs = fs;
59979 diff -urNp linux-2.6.32.43/kernel/futex.c linux-2.6.32.43/kernel/futex.c
59980 --- linux-2.6.32.43/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59981 +++ linux-2.6.32.43/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59982 @@ -54,6 +54,7 @@
59983 #include <linux/mount.h>
59984 #include <linux/pagemap.h>
59985 #include <linux/syscalls.h>
59986 +#include <linux/ptrace.h>
59987 #include <linux/signal.h>
59988 #include <linux/module.h>
59989 #include <linux/magic.h>
59990 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59991 struct page *page;
59992 int err;
59993
59994 +#ifdef CONFIG_PAX_SEGMEXEC
59995 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59996 + return -EFAULT;
59997 +#endif
59998 +
59999 /*
60000 * The futex address must be "naturally" aligned.
60001 */
60002 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
60003 struct futex_q q;
60004 int ret;
60005
60006 + pax_track_stack();
60007 +
60008 if (!bitset)
60009 return -EINVAL;
60010
60011 @@ -1841,7 +1849,7 @@ retry:
60012
60013 restart = &current_thread_info()->restart_block;
60014 restart->fn = futex_wait_restart;
60015 - restart->futex.uaddr = (u32 *)uaddr;
60016 + restart->futex.uaddr = uaddr;
60017 restart->futex.val = val;
60018 restart->futex.time = abs_time->tv64;
60019 restart->futex.bitset = bitset;
60020 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
60021 struct futex_q q;
60022 int res, ret;
60023
60024 + pax_track_stack();
60025 +
60026 if (!bitset)
60027 return -EINVAL;
60028
60029 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60030 {
60031 struct robust_list_head __user *head;
60032 unsigned long ret;
60033 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60034 const struct cred *cred = current_cred(), *pcred;
60035 +#endif
60036
60037 if (!futex_cmpxchg_enabled)
60038 return -ENOSYS;
60039 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60040 if (!p)
60041 goto err_unlock;
60042 ret = -EPERM;
60043 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60044 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
60045 + goto err_unlock;
60046 +#else
60047 pcred = __task_cred(p);
60048 if (cred->euid != pcred->euid &&
60049 cred->euid != pcred->uid &&
60050 !capable(CAP_SYS_PTRACE))
60051 goto err_unlock;
60052 +#endif
60053 head = p->robust_list;
60054 rcu_read_unlock();
60055 }
60056 @@ -2459,7 +2476,7 @@ retry:
60057 */
60058 static inline int fetch_robust_entry(struct robust_list __user **entry,
60059 struct robust_list __user * __user *head,
60060 - int *pi)
60061 + unsigned int *pi)
60062 {
60063 unsigned long uentry;
60064
60065 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
60066 {
60067 u32 curval;
60068 int i;
60069 + mm_segment_t oldfs;
60070
60071 /*
60072 * This will fail and we want it. Some arch implementations do
60073 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
60074 * implementation, the non functional ones will return
60075 * -ENOSYS.
60076 */
60077 + oldfs = get_fs();
60078 + set_fs(USER_DS);
60079 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
60080 + set_fs(oldfs);
60081 if (curval == -EFAULT)
60082 futex_cmpxchg_enabled = 1;
60083
60084 diff -urNp linux-2.6.32.43/kernel/futex_compat.c linux-2.6.32.43/kernel/futex_compat.c
60085 --- linux-2.6.32.43/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
60086 +++ linux-2.6.32.43/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
60087 @@ -10,6 +10,7 @@
60088 #include <linux/compat.h>
60089 #include <linux/nsproxy.h>
60090 #include <linux/futex.h>
60091 +#include <linux/ptrace.h>
60092
60093 #include <asm/uaccess.h>
60094
60095 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
60096 {
60097 struct compat_robust_list_head __user *head;
60098 unsigned long ret;
60099 - const struct cred *cred = current_cred(), *pcred;
60100 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60101 + const struct cred *cred = current_cred();
60102 + const struct cred *pcred;
60103 +#endif
60104
60105 if (!futex_cmpxchg_enabled)
60106 return -ENOSYS;
60107 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
60108 if (!p)
60109 goto err_unlock;
60110 ret = -EPERM;
60111 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60112 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
60113 + goto err_unlock;
60114 +#else
60115 pcred = __task_cred(p);
60116 if (cred->euid != pcred->euid &&
60117 cred->euid != pcred->uid &&
60118 !capable(CAP_SYS_PTRACE))
60119 goto err_unlock;
60120 +#endif
60121 head = p->compat_robust_list;
60122 read_unlock(&tasklist_lock);
60123 }
60124 diff -urNp linux-2.6.32.43/kernel/gcov/base.c linux-2.6.32.43/kernel/gcov/base.c
60125 --- linux-2.6.32.43/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
60126 +++ linux-2.6.32.43/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
60127 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
60128 }
60129
60130 #ifdef CONFIG_MODULES
60131 -static inline int within(void *addr, void *start, unsigned long size)
60132 -{
60133 - return ((addr >= start) && (addr < start + size));
60134 -}
60135 -
60136 /* Update list and generate events when modules are unloaded. */
60137 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
60138 void *data)
60139 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
60140 prev = NULL;
60141 /* Remove entries located in module from linked list. */
60142 for (info = gcov_info_head; info; info = info->next) {
60143 - if (within(info, mod->module_core, mod->core_size)) {
60144 + if (within_module_core_rw((unsigned long)info, mod)) {
60145 if (prev)
60146 prev->next = info->next;
60147 else
60148 diff -urNp linux-2.6.32.43/kernel/hrtimer.c linux-2.6.32.43/kernel/hrtimer.c
60149 --- linux-2.6.32.43/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
60150 +++ linux-2.6.32.43/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
60151 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
60152 local_irq_restore(flags);
60153 }
60154
60155 -static void run_hrtimer_softirq(struct softirq_action *h)
60156 +static void run_hrtimer_softirq(void)
60157 {
60158 hrtimer_peek_ahead_timers();
60159 }
60160 diff -urNp linux-2.6.32.43/kernel/kallsyms.c linux-2.6.32.43/kernel/kallsyms.c
60161 --- linux-2.6.32.43/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
60162 +++ linux-2.6.32.43/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
60163 @@ -11,6 +11,9 @@
60164 * Changed the compression method from stem compression to "table lookup"
60165 * compression (see scripts/kallsyms.c for a more complete description)
60166 */
60167 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60168 +#define __INCLUDED_BY_HIDESYM 1
60169 +#endif
60170 #include <linux/kallsyms.h>
60171 #include <linux/module.h>
60172 #include <linux/init.h>
60173 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
60174
60175 static inline int is_kernel_inittext(unsigned long addr)
60176 {
60177 + if (system_state != SYSTEM_BOOTING)
60178 + return 0;
60179 +
60180 if (addr >= (unsigned long)_sinittext
60181 && addr <= (unsigned long)_einittext)
60182 return 1;
60183 return 0;
60184 }
60185
60186 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60187 +#ifdef CONFIG_MODULES
60188 +static inline int is_module_text(unsigned long addr)
60189 +{
60190 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
60191 + return 1;
60192 +
60193 + addr = ktla_ktva(addr);
60194 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
60195 +}
60196 +#else
60197 +static inline int is_module_text(unsigned long addr)
60198 +{
60199 + return 0;
60200 +}
60201 +#endif
60202 +#endif
60203 +
60204 static inline int is_kernel_text(unsigned long addr)
60205 {
60206 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
60207 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
60208
60209 static inline int is_kernel(unsigned long addr)
60210 {
60211 +
60212 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60213 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
60214 + return 1;
60215 +
60216 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
60217 +#else
60218 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
60219 +#endif
60220 +
60221 return 1;
60222 return in_gate_area_no_task(addr);
60223 }
60224
60225 static int is_ksym_addr(unsigned long addr)
60226 {
60227 +
60228 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60229 + if (is_module_text(addr))
60230 + return 0;
60231 +#endif
60232 +
60233 if (all_var)
60234 return is_kernel(addr);
60235
60236 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
60237
60238 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
60239 {
60240 - iter->name[0] = '\0';
60241 iter->nameoff = get_symbol_offset(new_pos);
60242 iter->pos = new_pos;
60243 }
60244 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
60245 {
60246 struct kallsym_iter *iter = m->private;
60247
60248 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60249 + if (current_uid())
60250 + return 0;
60251 +#endif
60252 +
60253 /* Some debugging symbols have no name. Ignore them. */
60254 if (!iter->name[0])
60255 return 0;
60256 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
60257 struct kallsym_iter *iter;
60258 int ret;
60259
60260 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
60261 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
60262 if (!iter)
60263 return -ENOMEM;
60264 reset_iter(iter, 0);
60265 diff -urNp linux-2.6.32.43/kernel/kgdb.c linux-2.6.32.43/kernel/kgdb.c
60266 --- linux-2.6.32.43/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
60267 +++ linux-2.6.32.43/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
60268 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
60269 /* Guard for recursive entry */
60270 static int exception_level;
60271
60272 -static struct kgdb_io *kgdb_io_ops;
60273 +static const struct kgdb_io *kgdb_io_ops;
60274 static DEFINE_SPINLOCK(kgdb_registration_lock);
60275
60276 /* kgdb console driver is loaded */
60277 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
60278 */
60279 static atomic_t passive_cpu_wait[NR_CPUS];
60280 static atomic_t cpu_in_kgdb[NR_CPUS];
60281 -atomic_t kgdb_setting_breakpoint;
60282 +atomic_unchecked_t kgdb_setting_breakpoint;
60283
60284 struct task_struct *kgdb_usethread;
60285 struct task_struct *kgdb_contthread;
60286 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
60287 sizeof(unsigned long)];
60288
60289 /* to keep track of the CPU which is doing the single stepping*/
60290 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60291 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60292
60293 /*
60294 * If you are debugging a problem where roundup (the collection of
60295 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
60296 return 0;
60297 if (kgdb_connected)
60298 return 1;
60299 - if (atomic_read(&kgdb_setting_breakpoint))
60300 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
60301 return 1;
60302 if (print_wait)
60303 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
60304 @@ -1426,8 +1426,8 @@ acquirelock:
60305 * instance of the exception handler wanted to come into the
60306 * debugger on a different CPU via a single step
60307 */
60308 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
60309 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
60310 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
60311 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
60312
60313 atomic_set(&kgdb_active, -1);
60314 touch_softlockup_watchdog();
60315 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
60316 *
60317 * Register it with the KGDB core.
60318 */
60319 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
60320 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
60321 {
60322 int err;
60323
60324 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
60325 *
60326 * Unregister it with the KGDB core.
60327 */
60328 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
60329 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
60330 {
60331 BUG_ON(kgdb_connected);
60332
60333 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
60334 */
60335 void kgdb_breakpoint(void)
60336 {
60337 - atomic_set(&kgdb_setting_breakpoint, 1);
60338 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
60339 wmb(); /* Sync point before breakpoint */
60340 arch_kgdb_breakpoint();
60341 wmb(); /* Sync point after breakpoint */
60342 - atomic_set(&kgdb_setting_breakpoint, 0);
60343 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
60344 }
60345 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
60346
60347 diff -urNp linux-2.6.32.43/kernel/kmod.c linux-2.6.32.43/kernel/kmod.c
60348 --- linux-2.6.32.43/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
60349 +++ linux-2.6.32.43/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
60350 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
60351 * If module auto-loading support is disabled then this function
60352 * becomes a no-operation.
60353 */
60354 -int __request_module(bool wait, const char *fmt, ...)
60355 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
60356 {
60357 - va_list args;
60358 char module_name[MODULE_NAME_LEN];
60359 unsigned int max_modprobes;
60360 int ret;
60361 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
60362 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
60363 static char *envp[] = { "HOME=/",
60364 "TERM=linux",
60365 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
60366 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
60367 if (ret)
60368 return ret;
60369
60370 - va_start(args, fmt);
60371 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
60372 - va_end(args);
60373 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
60374 if (ret >= MODULE_NAME_LEN)
60375 return -ENAMETOOLONG;
60376
60377 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60378 + if (!current_uid()) {
60379 + /* hack to workaround consolekit/udisks stupidity */
60380 + read_lock(&tasklist_lock);
60381 + if (!strcmp(current->comm, "mount") &&
60382 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
60383 + read_unlock(&tasklist_lock);
60384 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
60385 + return -EPERM;
60386 + }
60387 + read_unlock(&tasklist_lock);
60388 + }
60389 +#endif
60390 +
60391 /* If modprobe needs a service that is in a module, we get a recursive
60392 * loop. Limit the number of running kmod threads to max_threads/2 or
60393 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60394 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
60395 atomic_dec(&kmod_concurrent);
60396 return ret;
60397 }
60398 +
60399 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60400 +{
60401 + va_list args;
60402 + int ret;
60403 +
60404 + va_start(args, fmt);
60405 + ret = ____request_module(wait, module_param, fmt, args);
60406 + va_end(args);
60407 +
60408 + return ret;
60409 +}
60410 +
60411 +int __request_module(bool wait, const char *fmt, ...)
60412 +{
60413 + va_list args;
60414 + int ret;
60415 +
60416 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60417 + if (current_uid()) {
60418 + char module_param[MODULE_NAME_LEN];
60419 +
60420 + memset(module_param, 0, sizeof(module_param));
60421 +
60422 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60423 +
60424 + va_start(args, fmt);
60425 + ret = ____request_module(wait, module_param, fmt, args);
60426 + va_end(args);
60427 +
60428 + return ret;
60429 + }
60430 +#endif
60431 +
60432 + va_start(args, fmt);
60433 + ret = ____request_module(wait, NULL, fmt, args);
60434 + va_end(args);
60435 +
60436 + return ret;
60437 +}
60438 +
60439 +
60440 EXPORT_SYMBOL(__request_module);
60441 #endif /* CONFIG_MODULES */
60442
60443 diff -urNp linux-2.6.32.43/kernel/kprobes.c linux-2.6.32.43/kernel/kprobes.c
60444 --- linux-2.6.32.43/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
60445 +++ linux-2.6.32.43/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
60446 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
60447 * kernel image and loaded module images reside. This is required
60448 * so x86_64 can correctly handle the %rip-relative fixups.
60449 */
60450 - kip->insns = module_alloc(PAGE_SIZE);
60451 + kip->insns = module_alloc_exec(PAGE_SIZE);
60452 if (!kip->insns) {
60453 kfree(kip);
60454 return NULL;
60455 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
60456 */
60457 if (!list_is_singular(&kprobe_insn_pages)) {
60458 list_del(&kip->list);
60459 - module_free(NULL, kip->insns);
60460 + module_free_exec(NULL, kip->insns);
60461 kfree(kip);
60462 }
60463 return 1;
60464 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
60465 {
60466 int i, err = 0;
60467 unsigned long offset = 0, size = 0;
60468 - char *modname, namebuf[128];
60469 + char *modname, namebuf[KSYM_NAME_LEN];
60470 const char *symbol_name;
60471 void *addr;
60472 struct kprobe_blackpoint *kb;
60473 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
60474 const char *sym = NULL;
60475 unsigned int i = *(loff_t *) v;
60476 unsigned long offset = 0;
60477 - char *modname, namebuf[128];
60478 + char *modname, namebuf[KSYM_NAME_LEN];
60479
60480 head = &kprobe_table[i];
60481 preempt_disable();
60482 diff -urNp linux-2.6.32.43/kernel/lockdep.c linux-2.6.32.43/kernel/lockdep.c
60483 --- linux-2.6.32.43/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
60484 +++ linux-2.6.32.43/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
60485 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
60486 /*
60487 * Various lockdep statistics:
60488 */
60489 -atomic_t chain_lookup_hits;
60490 -atomic_t chain_lookup_misses;
60491 -atomic_t hardirqs_on_events;
60492 -atomic_t hardirqs_off_events;
60493 -atomic_t redundant_hardirqs_on;
60494 -atomic_t redundant_hardirqs_off;
60495 -atomic_t softirqs_on_events;
60496 -atomic_t softirqs_off_events;
60497 -atomic_t redundant_softirqs_on;
60498 -atomic_t redundant_softirqs_off;
60499 -atomic_t nr_unused_locks;
60500 -atomic_t nr_cyclic_checks;
60501 -atomic_t nr_find_usage_forwards_checks;
60502 -atomic_t nr_find_usage_backwards_checks;
60503 +atomic_unchecked_t chain_lookup_hits;
60504 +atomic_unchecked_t chain_lookup_misses;
60505 +atomic_unchecked_t hardirqs_on_events;
60506 +atomic_unchecked_t hardirqs_off_events;
60507 +atomic_unchecked_t redundant_hardirqs_on;
60508 +atomic_unchecked_t redundant_hardirqs_off;
60509 +atomic_unchecked_t softirqs_on_events;
60510 +atomic_unchecked_t softirqs_off_events;
60511 +atomic_unchecked_t redundant_softirqs_on;
60512 +atomic_unchecked_t redundant_softirqs_off;
60513 +atomic_unchecked_t nr_unused_locks;
60514 +atomic_unchecked_t nr_cyclic_checks;
60515 +atomic_unchecked_t nr_find_usage_forwards_checks;
60516 +atomic_unchecked_t nr_find_usage_backwards_checks;
60517 #endif
60518
60519 /*
60520 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
60521 int i;
60522 #endif
60523
60524 +#ifdef CONFIG_PAX_KERNEXEC
60525 + start = ktla_ktva(start);
60526 +#endif
60527 +
60528 /*
60529 * static variable?
60530 */
60531 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
60532 */
60533 for_each_possible_cpu(i) {
60534 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
60535 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
60536 - + per_cpu_offset(i);
60537 + end = start + PERCPU_ENOUGH_ROOM;
60538
60539 if ((addr >= start) && (addr < end))
60540 return 1;
60541 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
60542 if (!static_obj(lock->key)) {
60543 debug_locks_off();
60544 printk("INFO: trying to register non-static key.\n");
60545 + printk("lock:%pS key:%pS.\n", lock, lock->key);
60546 printk("the code is fine but needs lockdep annotation.\n");
60547 printk("turning off the locking correctness validator.\n");
60548 dump_stack();
60549 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
60550 if (!class)
60551 return 0;
60552 }
60553 - debug_atomic_inc((atomic_t *)&class->ops);
60554 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
60555 if (very_verbose(class)) {
60556 printk("\nacquire class [%p] %s", class->key, class->name);
60557 if (class->name_version > 1)
60558 diff -urNp linux-2.6.32.43/kernel/lockdep_internals.h linux-2.6.32.43/kernel/lockdep_internals.h
60559 --- linux-2.6.32.43/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
60560 +++ linux-2.6.32.43/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
60561 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
60562 /*
60563 * Various lockdep statistics:
60564 */
60565 -extern atomic_t chain_lookup_hits;
60566 -extern atomic_t chain_lookup_misses;
60567 -extern atomic_t hardirqs_on_events;
60568 -extern atomic_t hardirqs_off_events;
60569 -extern atomic_t redundant_hardirqs_on;
60570 -extern atomic_t redundant_hardirqs_off;
60571 -extern atomic_t softirqs_on_events;
60572 -extern atomic_t softirqs_off_events;
60573 -extern atomic_t redundant_softirqs_on;
60574 -extern atomic_t redundant_softirqs_off;
60575 -extern atomic_t nr_unused_locks;
60576 -extern atomic_t nr_cyclic_checks;
60577 -extern atomic_t nr_cyclic_check_recursions;
60578 -extern atomic_t nr_find_usage_forwards_checks;
60579 -extern atomic_t nr_find_usage_forwards_recursions;
60580 -extern atomic_t nr_find_usage_backwards_checks;
60581 -extern atomic_t nr_find_usage_backwards_recursions;
60582 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
60583 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
60584 -# define debug_atomic_read(ptr) atomic_read(ptr)
60585 +extern atomic_unchecked_t chain_lookup_hits;
60586 +extern atomic_unchecked_t chain_lookup_misses;
60587 +extern atomic_unchecked_t hardirqs_on_events;
60588 +extern atomic_unchecked_t hardirqs_off_events;
60589 +extern atomic_unchecked_t redundant_hardirqs_on;
60590 +extern atomic_unchecked_t redundant_hardirqs_off;
60591 +extern atomic_unchecked_t softirqs_on_events;
60592 +extern atomic_unchecked_t softirqs_off_events;
60593 +extern atomic_unchecked_t redundant_softirqs_on;
60594 +extern atomic_unchecked_t redundant_softirqs_off;
60595 +extern atomic_unchecked_t nr_unused_locks;
60596 +extern atomic_unchecked_t nr_cyclic_checks;
60597 +extern atomic_unchecked_t nr_cyclic_check_recursions;
60598 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
60599 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
60600 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
60601 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
60602 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
60603 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
60604 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
60605 #else
60606 # define debug_atomic_inc(ptr) do { } while (0)
60607 # define debug_atomic_dec(ptr) do { } while (0)
60608 diff -urNp linux-2.6.32.43/kernel/lockdep_proc.c linux-2.6.32.43/kernel/lockdep_proc.c
60609 --- linux-2.6.32.43/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
60610 +++ linux-2.6.32.43/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
60611 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60612
60613 static void print_name(struct seq_file *m, struct lock_class *class)
60614 {
60615 - char str[128];
60616 + char str[KSYM_NAME_LEN];
60617 const char *name = class->name;
60618
60619 if (!name) {
60620 diff -urNp linux-2.6.32.43/kernel/module.c linux-2.6.32.43/kernel/module.c
60621 --- linux-2.6.32.43/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
60622 +++ linux-2.6.32.43/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
60623 @@ -55,6 +55,7 @@
60624 #include <linux/async.h>
60625 #include <linux/percpu.h>
60626 #include <linux/kmemleak.h>
60627 +#include <linux/grsecurity.h>
60628
60629 #define CREATE_TRACE_POINTS
60630 #include <trace/events/module.h>
60631 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
60632 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
60633
60634 /* Bounds of module allocation, for speeding __module_address */
60635 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60636 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60637 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60638
60639 int register_module_notifier(struct notifier_block * nb)
60640 {
60641 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
60642 return true;
60643
60644 list_for_each_entry_rcu(mod, &modules, list) {
60645 - struct symsearch arr[] = {
60646 + struct symsearch modarr[] = {
60647 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60648 NOT_GPL_ONLY, false },
60649 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60650 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
60651 #endif
60652 };
60653
60654 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60655 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60656 return true;
60657 }
60658 return false;
60659 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
60660 void *ptr;
60661 int cpu;
60662
60663 - if (align > PAGE_SIZE) {
60664 + if (align-1 >= PAGE_SIZE) {
60665 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60666 name, align, PAGE_SIZE);
60667 align = PAGE_SIZE;
60668 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
60669 * /sys/module/foo/sections stuff
60670 * J. Corbet <corbet@lwn.net>
60671 */
60672 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
60673 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60674
60675 static inline bool sect_empty(const Elf_Shdr *sect)
60676 {
60677 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
60678 destroy_params(mod->kp, mod->num_kp);
60679
60680 /* This may be NULL, but that's OK */
60681 - module_free(mod, mod->module_init);
60682 + module_free(mod, mod->module_init_rw);
60683 + module_free_exec(mod, mod->module_init_rx);
60684 kfree(mod->args);
60685 if (mod->percpu)
60686 percpu_modfree(mod->percpu);
60687 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
60688 percpu_modfree(mod->refptr);
60689 #endif
60690 /* Free lock-classes: */
60691 - lockdep_free_key_range(mod->module_core, mod->core_size);
60692 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60693 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60694
60695 /* Finally, free the core (containing the module structure) */
60696 - module_free(mod, mod->module_core);
60697 + module_free_exec(mod, mod->module_core_rx);
60698 + module_free(mod, mod->module_core_rw);
60699
60700 #ifdef CONFIG_MPU
60701 update_protections(current->mm);
60702 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
60703 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60704 int ret = 0;
60705 const struct kernel_symbol *ksym;
60706 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60707 + int is_fs_load = 0;
60708 + int register_filesystem_found = 0;
60709 + char *p;
60710 +
60711 + p = strstr(mod->args, "grsec_modharden_fs");
60712 +
60713 + if (p) {
60714 + char *endptr = p + strlen("grsec_modharden_fs");
60715 + /* copy \0 as well */
60716 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60717 + is_fs_load = 1;
60718 + }
60719 +#endif
60720 +
60721
60722 for (i = 1; i < n; i++) {
60723 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60724 + const char *name = strtab + sym[i].st_name;
60725 +
60726 + /* it's a real shame this will never get ripped and copied
60727 + upstream! ;(
60728 + */
60729 + if (is_fs_load && !strcmp(name, "register_filesystem"))
60730 + register_filesystem_found = 1;
60731 +#endif
60732 switch (sym[i].st_shndx) {
60733 case SHN_COMMON:
60734 /* We compiled with -fno-common. These are not
60735 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
60736 strtab + sym[i].st_name, mod);
60737 /* Ok if resolved. */
60738 if (ksym) {
60739 + pax_open_kernel();
60740 sym[i].st_value = ksym->value;
60741 + pax_close_kernel();
60742 break;
60743 }
60744
60745 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
60746 secbase = (unsigned long)mod->percpu;
60747 else
60748 secbase = sechdrs[sym[i].st_shndx].sh_addr;
60749 + pax_open_kernel();
60750 sym[i].st_value += secbase;
60751 + pax_close_kernel();
60752 break;
60753 }
60754 }
60755
60756 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60757 + if (is_fs_load && !register_filesystem_found) {
60758 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60759 + ret = -EPERM;
60760 + }
60761 +#endif
60762 +
60763 return ret;
60764 }
60765
60766 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
60767 || s->sh_entsize != ~0UL
60768 || strstarts(secstrings + s->sh_name, ".init"))
60769 continue;
60770 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60771 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60772 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60773 + else
60774 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60775 DEBUGP("\t%s\n", secstrings + s->sh_name);
60776 }
60777 - if (m == 0)
60778 - mod->core_text_size = mod->core_size;
60779 }
60780
60781 DEBUGP("Init section allocation order:\n");
60782 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
60783 || s->sh_entsize != ~0UL
60784 || !strstarts(secstrings + s->sh_name, ".init"))
60785 continue;
60786 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60787 - | INIT_OFFSET_MASK);
60788 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60789 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60790 + else
60791 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60792 + s->sh_entsize |= INIT_OFFSET_MASK;
60793 DEBUGP("\t%s\n", secstrings + s->sh_name);
60794 }
60795 - if (m == 0)
60796 - mod->init_text_size = mod->init_size;
60797 }
60798 }
60799
60800 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
60801
60802 /* As per nm */
60803 static char elf_type(const Elf_Sym *sym,
60804 - Elf_Shdr *sechdrs,
60805 - const char *secstrings,
60806 - struct module *mod)
60807 + const Elf_Shdr *sechdrs,
60808 + const char *secstrings)
60809 {
60810 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
60811 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
60812 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
60813
60814 /* Put symbol section at end of init part of module. */
60815 symsect->sh_flags |= SHF_ALLOC;
60816 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60817 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60818 symindex) | INIT_OFFSET_MASK;
60819 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60820
60821 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60822 }
60823
60824 /* Append room for core symbols at end of core part. */
60825 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60826 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60827 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60828 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60829
60830 /* Put string table section at end of init part of module. */
60831 strsect->sh_flags |= SHF_ALLOC;
60832 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60833 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60834 strindex) | INIT_OFFSET_MASK;
60835 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60836
60837 /* Append room for core symbols' strings at end of core part. */
60838 - *pstroffs = mod->core_size;
60839 + *pstroffs = mod->core_size_rx;
60840 __set_bit(0, strmap);
60841 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60842 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60843
60844 return symoffs;
60845 }
60846 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60847 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60848 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60849
60850 + pax_open_kernel();
60851 +
60852 /* Set types up while we still have access to sections. */
60853 for (i = 0; i < mod->num_symtab; i++)
60854 mod->symtab[i].st_info
60855 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60856 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
60857
60858 - mod->core_symtab = dst = mod->module_core + symoffs;
60859 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
60860 src = mod->symtab;
60861 *dst = *src;
60862 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60863 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60864 }
60865 mod->core_num_syms = ndst;
60866
60867 - mod->core_strtab = s = mod->module_core + stroffs;
60868 + mod->core_strtab = s = mod->module_core_rx + stroffs;
60869 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60870 if (test_bit(i, strmap))
60871 *++s = mod->strtab[i];
60872 +
60873 + pax_close_kernel();
60874 }
60875 #else
60876 static inline unsigned long layout_symtab(struct module *mod,
60877 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60878 #endif
60879 }
60880
60881 -static void *module_alloc_update_bounds(unsigned long size)
60882 +static void *module_alloc_update_bounds_rw(unsigned long size)
60883 {
60884 void *ret = module_alloc(size);
60885
60886 if (ret) {
60887 /* Update module bounds. */
60888 - if ((unsigned long)ret < module_addr_min)
60889 - module_addr_min = (unsigned long)ret;
60890 - if ((unsigned long)ret + size > module_addr_max)
60891 - module_addr_max = (unsigned long)ret + size;
60892 + if ((unsigned long)ret < module_addr_min_rw)
60893 + module_addr_min_rw = (unsigned long)ret;
60894 + if ((unsigned long)ret + size > module_addr_max_rw)
60895 + module_addr_max_rw = (unsigned long)ret + size;
60896 + }
60897 + return ret;
60898 +}
60899 +
60900 +static void *module_alloc_update_bounds_rx(unsigned long size)
60901 +{
60902 + void *ret = module_alloc_exec(size);
60903 +
60904 + if (ret) {
60905 + /* Update module bounds. */
60906 + if ((unsigned long)ret < module_addr_min_rx)
60907 + module_addr_min_rx = (unsigned long)ret;
60908 + if ((unsigned long)ret + size > module_addr_max_rx)
60909 + module_addr_max_rx = (unsigned long)ret + size;
60910 }
60911 return ret;
60912 }
60913 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60914 unsigned int i;
60915
60916 /* only scan the sections containing data */
60917 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60918 - (unsigned long)mod->module_core,
60919 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60920 + (unsigned long)mod->module_core_rw,
60921 sizeof(struct module), GFP_KERNEL);
60922
60923 for (i = 1; i < hdr->e_shnum; i++) {
60924 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60925 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60926 continue;
60927
60928 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60929 - (unsigned long)mod->module_core,
60930 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60931 + (unsigned long)mod->module_core_rw,
60932 sechdrs[i].sh_size, GFP_KERNEL);
60933 }
60934 }
60935 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60936 secstrings, &stroffs, strmap);
60937
60938 /* Do the allocs. */
60939 - ptr = module_alloc_update_bounds(mod->core_size);
60940 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60941 /*
60942 * The pointer to this block is stored in the module structure
60943 * which is inside the block. Just mark it as not being a
60944 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60945 err = -ENOMEM;
60946 goto free_percpu;
60947 }
60948 - memset(ptr, 0, mod->core_size);
60949 - mod->module_core = ptr;
60950 + memset(ptr, 0, mod->core_size_rw);
60951 + mod->module_core_rw = ptr;
60952
60953 - ptr = module_alloc_update_bounds(mod->init_size);
60954 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60955 /*
60956 * The pointer to this block is stored in the module structure
60957 * which is inside the block. This block doesn't need to be
60958 * scanned as it contains data and code that will be freed
60959 * after the module is initialized.
60960 */
60961 - kmemleak_ignore(ptr);
60962 - if (!ptr && mod->init_size) {
60963 + kmemleak_not_leak(ptr);
60964 + if (!ptr && mod->init_size_rw) {
60965 + err = -ENOMEM;
60966 + goto free_core_rw;
60967 + }
60968 + memset(ptr, 0, mod->init_size_rw);
60969 + mod->module_init_rw = ptr;
60970 +
60971 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60972 + kmemleak_not_leak(ptr);
60973 + if (!ptr) {
60974 err = -ENOMEM;
60975 - goto free_core;
60976 + goto free_init_rw;
60977 }
60978 - memset(ptr, 0, mod->init_size);
60979 - mod->module_init = ptr;
60980 +
60981 + pax_open_kernel();
60982 + memset(ptr, 0, mod->core_size_rx);
60983 + pax_close_kernel();
60984 + mod->module_core_rx = ptr;
60985 +
60986 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60987 + kmemleak_not_leak(ptr);
60988 + if (!ptr && mod->init_size_rx) {
60989 + err = -ENOMEM;
60990 + goto free_core_rx;
60991 + }
60992 +
60993 + pax_open_kernel();
60994 + memset(ptr, 0, mod->init_size_rx);
60995 + pax_close_kernel();
60996 + mod->module_init_rx = ptr;
60997
60998 /* Transfer each section which specifies SHF_ALLOC */
60999 DEBUGP("final section addresses:\n");
61000 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
61001 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
61002 continue;
61003
61004 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
61005 - dest = mod->module_init
61006 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61007 - else
61008 - dest = mod->module_core + sechdrs[i].sh_entsize;
61009 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
61010 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
61011 + dest = mod->module_init_rw
61012 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61013 + else
61014 + dest = mod->module_init_rx
61015 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61016 + } else {
61017 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
61018 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
61019 + else
61020 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
61021 + }
61022 +
61023 + if (sechdrs[i].sh_type != SHT_NOBITS) {
61024
61025 - if (sechdrs[i].sh_type != SHT_NOBITS)
61026 - memcpy(dest, (void *)sechdrs[i].sh_addr,
61027 - sechdrs[i].sh_size);
61028 +#ifdef CONFIG_PAX_KERNEXEC
61029 +#ifdef CONFIG_X86_64
61030 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
61031 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
61032 +#endif
61033 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
61034 + pax_open_kernel();
61035 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
61036 + pax_close_kernel();
61037 + } else
61038 +#endif
61039 +
61040 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
61041 + }
61042 /* Update sh_addr to point to copy in image. */
61043 - sechdrs[i].sh_addr = (unsigned long)dest;
61044 +
61045 +#ifdef CONFIG_PAX_KERNEXEC
61046 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
61047 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
61048 + else
61049 +#endif
61050 +
61051 + sechdrs[i].sh_addr = (unsigned long)dest;
61052 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
61053 }
61054 /* Module has been moved. */
61055 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
61056 mod->name);
61057 if (!mod->refptr) {
61058 err = -ENOMEM;
61059 - goto free_init;
61060 + goto free_init_rx;
61061 }
61062 #endif
61063 /* Now we've moved module, initialize linked lists, etc. */
61064 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
61065 /* Set up MODINFO_ATTR fields */
61066 setup_modinfo(mod, sechdrs, infoindex);
61067
61068 + mod->args = args;
61069 +
61070 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
61071 + {
61072 + char *p, *p2;
61073 +
61074 + if (strstr(mod->args, "grsec_modharden_netdev")) {
61075 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
61076 + err = -EPERM;
61077 + goto cleanup;
61078 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
61079 + p += strlen("grsec_modharden_normal");
61080 + p2 = strstr(p, "_");
61081 + if (p2) {
61082 + *p2 = '\0';
61083 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
61084 + *p2 = '_';
61085 + }
61086 + err = -EPERM;
61087 + goto cleanup;
61088 + }
61089 + }
61090 +#endif
61091 +
61092 +
61093 /* Fix up syms, so that st_value is a pointer to location. */
61094 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
61095 mod);
61096 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
61097
61098 /* Now do relocations. */
61099 for (i = 1; i < hdr->e_shnum; i++) {
61100 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
61101 unsigned int info = sechdrs[i].sh_info;
61102 + strtab = (char *)sechdrs[strindex].sh_addr;
61103
61104 /* Not a valid relocation section? */
61105 if (info >= hdr->e_shnum)
61106 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
61107 * Do it before processing of module parameters, so the module
61108 * can provide parameter accessor functions of its own.
61109 */
61110 - if (mod->module_init)
61111 - flush_icache_range((unsigned long)mod->module_init,
61112 - (unsigned long)mod->module_init
61113 - + mod->init_size);
61114 - flush_icache_range((unsigned long)mod->module_core,
61115 - (unsigned long)mod->module_core + mod->core_size);
61116 + if (mod->module_init_rx)
61117 + flush_icache_range((unsigned long)mod->module_init_rx,
61118 + (unsigned long)mod->module_init_rx
61119 + + mod->init_size_rx);
61120 + flush_icache_range((unsigned long)mod->module_core_rx,
61121 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
61122
61123 set_fs(old_fs);
61124
61125 - mod->args = args;
61126 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
61127 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
61128 mod->name);
61129 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
61130 free_unload:
61131 module_unload_free(mod);
61132 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
61133 + free_init_rx:
61134 percpu_modfree(mod->refptr);
61135 - free_init:
61136 #endif
61137 - module_free(mod, mod->module_init);
61138 - free_core:
61139 - module_free(mod, mod->module_core);
61140 + module_free_exec(mod, mod->module_init_rx);
61141 + free_core_rx:
61142 + module_free_exec(mod, mod->module_core_rx);
61143 + free_init_rw:
61144 + module_free(mod, mod->module_init_rw);
61145 + free_core_rw:
61146 + module_free(mod, mod->module_core_rw);
61147 /* mod will be freed with core. Don't access it beyond this line! */
61148 free_percpu:
61149 if (percpu)
61150 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
61151 mod->symtab = mod->core_symtab;
61152 mod->strtab = mod->core_strtab;
61153 #endif
61154 - module_free(mod, mod->module_init);
61155 - mod->module_init = NULL;
61156 - mod->init_size = 0;
61157 - mod->init_text_size = 0;
61158 + module_free(mod, mod->module_init_rw);
61159 + module_free_exec(mod, mod->module_init_rx);
61160 + mod->module_init_rw = NULL;
61161 + mod->module_init_rx = NULL;
61162 + mod->init_size_rw = 0;
61163 + mod->init_size_rx = 0;
61164 mutex_unlock(&module_mutex);
61165
61166 return 0;
61167 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
61168 unsigned long nextval;
61169
61170 /* At worse, next value is at end of module */
61171 - if (within_module_init(addr, mod))
61172 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
61173 + if (within_module_init_rx(addr, mod))
61174 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
61175 + else if (within_module_init_rw(addr, mod))
61176 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
61177 + else if (within_module_core_rx(addr, mod))
61178 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
61179 + else if (within_module_core_rw(addr, mod))
61180 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
61181 else
61182 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
61183 + return NULL;
61184
61185 /* Scan for closest preceeding symbol, and next symbol. (ELF
61186 starts real symbols at 1). */
61187 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
61188 char buf[8];
61189
61190 seq_printf(m, "%s %u",
61191 - mod->name, mod->init_size + mod->core_size);
61192 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
61193 print_unload_info(m, mod);
61194
61195 /* Informative for users. */
61196 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
61197 mod->state == MODULE_STATE_COMING ? "Loading":
61198 "Live");
61199 /* Used by oprofile and other similar tools. */
61200 - seq_printf(m, " 0x%p", mod->module_core);
61201 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
61202
61203 /* Taints info */
61204 if (mod->taints)
61205 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
61206
61207 static int __init proc_modules_init(void)
61208 {
61209 +#ifndef CONFIG_GRKERNSEC_HIDESYM
61210 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61211 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61212 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61213 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
61214 +#else
61215 proc_create("modules", 0, NULL, &proc_modules_operations);
61216 +#endif
61217 +#else
61218 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61219 +#endif
61220 return 0;
61221 }
61222 module_init(proc_modules_init);
61223 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
61224 {
61225 struct module *mod;
61226
61227 - if (addr < module_addr_min || addr > module_addr_max)
61228 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
61229 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
61230 return NULL;
61231
61232 list_for_each_entry_rcu(mod, &modules, list)
61233 - if (within_module_core(addr, mod)
61234 - || within_module_init(addr, mod))
61235 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
61236 return mod;
61237 return NULL;
61238 }
61239 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
61240 */
61241 struct module *__module_text_address(unsigned long addr)
61242 {
61243 - struct module *mod = __module_address(addr);
61244 + struct module *mod;
61245 +
61246 +#ifdef CONFIG_X86_32
61247 + addr = ktla_ktva(addr);
61248 +#endif
61249 +
61250 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
61251 + return NULL;
61252 +
61253 + mod = __module_address(addr);
61254 +
61255 if (mod) {
61256 /* Make sure it's within the text section. */
61257 - if (!within(addr, mod->module_init, mod->init_text_size)
61258 - && !within(addr, mod->module_core, mod->core_text_size))
61259 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
61260 mod = NULL;
61261 }
61262 return mod;
61263 diff -urNp linux-2.6.32.43/kernel/mutex.c linux-2.6.32.43/kernel/mutex.c
61264 --- linux-2.6.32.43/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
61265 +++ linux-2.6.32.43/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
61266 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
61267 */
61268
61269 for (;;) {
61270 - struct thread_info *owner;
61271 + struct task_struct *owner;
61272
61273 /*
61274 * If we own the BKL, then don't spin. The owner of
61275 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
61276 spin_lock_mutex(&lock->wait_lock, flags);
61277
61278 debug_mutex_lock_common(lock, &waiter);
61279 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
61280 + debug_mutex_add_waiter(lock, &waiter, task);
61281
61282 /* add waiting tasks to the end of the waitqueue (FIFO): */
61283 list_add_tail(&waiter.list, &lock->wait_list);
61284 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
61285 * TASK_UNINTERRUPTIBLE case.)
61286 */
61287 if (unlikely(signal_pending_state(state, task))) {
61288 - mutex_remove_waiter(lock, &waiter,
61289 - task_thread_info(task));
61290 + mutex_remove_waiter(lock, &waiter, task);
61291 mutex_release(&lock->dep_map, 1, ip);
61292 spin_unlock_mutex(&lock->wait_lock, flags);
61293
61294 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
61295 done:
61296 lock_acquired(&lock->dep_map, ip);
61297 /* got the lock - rejoice! */
61298 - mutex_remove_waiter(lock, &waiter, current_thread_info());
61299 + mutex_remove_waiter(lock, &waiter, task);
61300 mutex_set_owner(lock);
61301
61302 /* set it to 0 if there are no waiters left: */
61303 diff -urNp linux-2.6.32.43/kernel/mutex-debug.c linux-2.6.32.43/kernel/mutex-debug.c
61304 --- linux-2.6.32.43/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
61305 +++ linux-2.6.32.43/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
61306 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
61307 }
61308
61309 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61310 - struct thread_info *ti)
61311 + struct task_struct *task)
61312 {
61313 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
61314
61315 /* Mark the current thread as blocked on the lock: */
61316 - ti->task->blocked_on = waiter;
61317 + task->blocked_on = waiter;
61318 }
61319
61320 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61321 - struct thread_info *ti)
61322 + struct task_struct *task)
61323 {
61324 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
61325 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
61326 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
61327 - ti->task->blocked_on = NULL;
61328 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
61329 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
61330 + task->blocked_on = NULL;
61331
61332 list_del_init(&waiter->list);
61333 waiter->task = NULL;
61334 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
61335 return;
61336
61337 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
61338 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
61339 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
61340 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
61341 mutex_clear_owner(lock);
61342 }
61343 diff -urNp linux-2.6.32.43/kernel/mutex-debug.h linux-2.6.32.43/kernel/mutex-debug.h
61344 --- linux-2.6.32.43/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
61345 +++ linux-2.6.32.43/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
61346 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
61347 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
61348 extern void debug_mutex_add_waiter(struct mutex *lock,
61349 struct mutex_waiter *waiter,
61350 - struct thread_info *ti);
61351 + struct task_struct *task);
61352 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61353 - struct thread_info *ti);
61354 + struct task_struct *task);
61355 extern void debug_mutex_unlock(struct mutex *lock);
61356 extern void debug_mutex_init(struct mutex *lock, const char *name,
61357 struct lock_class_key *key);
61358
61359 static inline void mutex_set_owner(struct mutex *lock)
61360 {
61361 - lock->owner = current_thread_info();
61362 + lock->owner = current;
61363 }
61364
61365 static inline void mutex_clear_owner(struct mutex *lock)
61366 diff -urNp linux-2.6.32.43/kernel/mutex.h linux-2.6.32.43/kernel/mutex.h
61367 --- linux-2.6.32.43/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
61368 +++ linux-2.6.32.43/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
61369 @@ -19,7 +19,7 @@
61370 #ifdef CONFIG_SMP
61371 static inline void mutex_set_owner(struct mutex *lock)
61372 {
61373 - lock->owner = current_thread_info();
61374 + lock->owner = current;
61375 }
61376
61377 static inline void mutex_clear_owner(struct mutex *lock)
61378 diff -urNp linux-2.6.32.43/kernel/panic.c linux-2.6.32.43/kernel/panic.c
61379 --- linux-2.6.32.43/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
61380 +++ linux-2.6.32.43/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
61381 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
61382 const char *board;
61383
61384 printk(KERN_WARNING "------------[ cut here ]------------\n");
61385 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
61386 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
61387 board = dmi_get_system_info(DMI_PRODUCT_NAME);
61388 if (board)
61389 printk(KERN_WARNING "Hardware name: %s\n", board);
61390 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
61391 */
61392 void __stack_chk_fail(void)
61393 {
61394 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
61395 + dump_stack();
61396 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
61397 __builtin_return_address(0));
61398 }
61399 EXPORT_SYMBOL(__stack_chk_fail);
61400 diff -urNp linux-2.6.32.43/kernel/params.c linux-2.6.32.43/kernel/params.c
61401 --- linux-2.6.32.43/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
61402 +++ linux-2.6.32.43/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
61403 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
61404 return ret;
61405 }
61406
61407 -static struct sysfs_ops module_sysfs_ops = {
61408 +static const struct sysfs_ops module_sysfs_ops = {
61409 .show = module_attr_show,
61410 .store = module_attr_store,
61411 };
61412 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
61413 return 0;
61414 }
61415
61416 -static struct kset_uevent_ops module_uevent_ops = {
61417 +static const struct kset_uevent_ops module_uevent_ops = {
61418 .filter = uevent_filter,
61419 };
61420
61421 diff -urNp linux-2.6.32.43/kernel/perf_event.c linux-2.6.32.43/kernel/perf_event.c
61422 --- linux-2.6.32.43/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
61423 +++ linux-2.6.32.43/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
61424 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
61425 */
61426 int sysctl_perf_event_sample_rate __read_mostly = 100000;
61427
61428 -static atomic64_t perf_event_id;
61429 +static atomic64_unchecked_t perf_event_id;
61430
61431 /*
61432 * Lock for (sysadmin-configurable) event reservations:
61433 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
61434 * In order to keep per-task stats reliable we need to flip the event
61435 * values when we flip the contexts.
61436 */
61437 - value = atomic64_read(&next_event->count);
61438 - value = atomic64_xchg(&event->count, value);
61439 - atomic64_set(&next_event->count, value);
61440 + value = atomic64_read_unchecked(&next_event->count);
61441 + value = atomic64_xchg_unchecked(&event->count, value);
61442 + atomic64_set_unchecked(&next_event->count, value);
61443
61444 swap(event->total_time_enabled, next_event->total_time_enabled);
61445 swap(event->total_time_running, next_event->total_time_running);
61446 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
61447 update_event_times(event);
61448 }
61449
61450 - return atomic64_read(&event->count);
61451 + return atomic64_read_unchecked(&event->count);
61452 }
61453
61454 /*
61455 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
61456 values[n++] = 1 + leader->nr_siblings;
61457 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61458 values[n++] = leader->total_time_enabled +
61459 - atomic64_read(&leader->child_total_time_enabled);
61460 + atomic64_read_unchecked(&leader->child_total_time_enabled);
61461 }
61462 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61463 values[n++] = leader->total_time_running +
61464 - atomic64_read(&leader->child_total_time_running);
61465 + atomic64_read_unchecked(&leader->child_total_time_running);
61466 }
61467
61468 size = n * sizeof(u64);
61469 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
61470 values[n++] = perf_event_read_value(event);
61471 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61472 values[n++] = event->total_time_enabled +
61473 - atomic64_read(&event->child_total_time_enabled);
61474 + atomic64_read_unchecked(&event->child_total_time_enabled);
61475 }
61476 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61477 values[n++] = event->total_time_running +
61478 - atomic64_read(&event->child_total_time_running);
61479 + atomic64_read_unchecked(&event->child_total_time_running);
61480 }
61481 if (read_format & PERF_FORMAT_ID)
61482 values[n++] = primary_event_id(event);
61483 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
61484 static void perf_event_reset(struct perf_event *event)
61485 {
61486 (void)perf_event_read(event);
61487 - atomic64_set(&event->count, 0);
61488 + atomic64_set_unchecked(&event->count, 0);
61489 perf_event_update_userpage(event);
61490 }
61491
61492 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
61493 ++userpg->lock;
61494 barrier();
61495 userpg->index = perf_event_index(event);
61496 - userpg->offset = atomic64_read(&event->count);
61497 + userpg->offset = atomic64_read_unchecked(&event->count);
61498 if (event->state == PERF_EVENT_STATE_ACTIVE)
61499 - userpg->offset -= atomic64_read(&event->hw.prev_count);
61500 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
61501
61502 userpg->time_enabled = event->total_time_enabled +
61503 - atomic64_read(&event->child_total_time_enabled);
61504 + atomic64_read_unchecked(&event->child_total_time_enabled);
61505
61506 userpg->time_running = event->total_time_running +
61507 - atomic64_read(&event->child_total_time_running);
61508 + atomic64_read_unchecked(&event->child_total_time_running);
61509
61510 barrier();
61511 ++userpg->lock;
61512 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
61513 u64 values[4];
61514 int n = 0;
61515
61516 - values[n++] = atomic64_read(&event->count);
61517 + values[n++] = atomic64_read_unchecked(&event->count);
61518 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61519 values[n++] = event->total_time_enabled +
61520 - atomic64_read(&event->child_total_time_enabled);
61521 + atomic64_read_unchecked(&event->child_total_time_enabled);
61522 }
61523 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61524 values[n++] = event->total_time_running +
61525 - atomic64_read(&event->child_total_time_running);
61526 + atomic64_read_unchecked(&event->child_total_time_running);
61527 }
61528 if (read_format & PERF_FORMAT_ID)
61529 values[n++] = primary_event_id(event);
61530 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
61531 if (leader != event)
61532 leader->pmu->read(leader);
61533
61534 - values[n++] = atomic64_read(&leader->count);
61535 + values[n++] = atomic64_read_unchecked(&leader->count);
61536 if (read_format & PERF_FORMAT_ID)
61537 values[n++] = primary_event_id(leader);
61538
61539 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
61540 if (sub != event)
61541 sub->pmu->read(sub);
61542
61543 - values[n++] = atomic64_read(&sub->count);
61544 + values[n++] = atomic64_read_unchecked(&sub->count);
61545 if (read_format & PERF_FORMAT_ID)
61546 values[n++] = primary_event_id(sub);
61547
61548 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
61549 {
61550 struct hw_perf_event *hwc = &event->hw;
61551
61552 - atomic64_add(nr, &event->count);
61553 + atomic64_add_unchecked(nr, &event->count);
61554
61555 if (!hwc->sample_period)
61556 return;
61557 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
61558 u64 now;
61559
61560 now = cpu_clock(cpu);
61561 - prev = atomic64_read(&event->hw.prev_count);
61562 - atomic64_set(&event->hw.prev_count, now);
61563 - atomic64_add(now - prev, &event->count);
61564 + prev = atomic64_read_unchecked(&event->hw.prev_count);
61565 + atomic64_set_unchecked(&event->hw.prev_count, now);
61566 + atomic64_add_unchecked(now - prev, &event->count);
61567 }
61568
61569 static int cpu_clock_perf_event_enable(struct perf_event *event)
61570 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
61571 struct hw_perf_event *hwc = &event->hw;
61572 int cpu = raw_smp_processor_id();
61573
61574 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
61575 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
61576 perf_swevent_start_hrtimer(event);
61577
61578 return 0;
61579 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
61580 u64 prev;
61581 s64 delta;
61582
61583 - prev = atomic64_xchg(&event->hw.prev_count, now);
61584 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
61585 delta = now - prev;
61586 - atomic64_add(delta, &event->count);
61587 + atomic64_add_unchecked(delta, &event->count);
61588 }
61589
61590 static int task_clock_perf_event_enable(struct perf_event *event)
61591 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
61592
61593 now = event->ctx->time;
61594
61595 - atomic64_set(&hwc->prev_count, now);
61596 + atomic64_set_unchecked(&hwc->prev_count, now);
61597
61598 perf_swevent_start_hrtimer(event);
61599
61600 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
61601 event->parent = parent_event;
61602
61603 event->ns = get_pid_ns(current->nsproxy->pid_ns);
61604 - event->id = atomic64_inc_return(&perf_event_id);
61605 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
61606
61607 event->state = PERF_EVENT_STATE_INACTIVE;
61608
61609 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
61610 if (child_event->attr.inherit_stat)
61611 perf_event_read_event(child_event, child);
61612
61613 - child_val = atomic64_read(&child_event->count);
61614 + child_val = atomic64_read_unchecked(&child_event->count);
61615
61616 /*
61617 * Add back the child's count to the parent's count:
61618 */
61619 - atomic64_add(child_val, &parent_event->count);
61620 - atomic64_add(child_event->total_time_enabled,
61621 + atomic64_add_unchecked(child_val, &parent_event->count);
61622 + atomic64_add_unchecked(child_event->total_time_enabled,
61623 &parent_event->child_total_time_enabled);
61624 - atomic64_add(child_event->total_time_running,
61625 + atomic64_add_unchecked(child_event->total_time_running,
61626 &parent_event->child_total_time_running);
61627
61628 /*
61629 diff -urNp linux-2.6.32.43/kernel/pid.c linux-2.6.32.43/kernel/pid.c
61630 --- linux-2.6.32.43/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
61631 +++ linux-2.6.32.43/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
61632 @@ -33,6 +33,7 @@
61633 #include <linux/rculist.h>
61634 #include <linux/bootmem.h>
61635 #include <linux/hash.h>
61636 +#include <linux/security.h>
61637 #include <linux/pid_namespace.h>
61638 #include <linux/init_task.h>
61639 #include <linux/syscalls.h>
61640 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61641
61642 int pid_max = PID_MAX_DEFAULT;
61643
61644 -#define RESERVED_PIDS 300
61645 +#define RESERVED_PIDS 500
61646
61647 int pid_max_min = RESERVED_PIDS + 1;
61648 int pid_max_max = PID_MAX_LIMIT;
61649 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
61650 */
61651 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61652 {
61653 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61654 + struct task_struct *task;
61655 +
61656 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61657 +
61658 + if (gr_pid_is_chrooted(task))
61659 + return NULL;
61660 +
61661 + return task;
61662 }
61663
61664 struct task_struct *find_task_by_vpid(pid_t vnr)
61665 @@ -391,6 +399,13 @@ struct task_struct *find_task_by_vpid(pi
61666 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61667 }
61668
61669 +/* same as find_task_by_vpid() but skips the gr_pid_is_chrooted()
61670 +   check, for callers that must also see tasks outside the chroot */
61671 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61672 +{
61673 +	return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61674 +}
61675 +
61676 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61677 {
61678 struct pid *pid;
61679 diff -urNp linux-2.6.32.43/kernel/posix-cpu-timers.c linux-2.6.32.43/kernel/posix-cpu-timers.c
61680 --- linux-2.6.32.43/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
61681 +++ linux-2.6.32.43/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
61682 @@ -6,6 +6,7 @@
61683 #include <linux/posix-timers.h>
61684 #include <linux/errno.h>
61685 #include <linux/math64.h>
61686 +#include <linux/security.h>
61687 #include <asm/uaccess.h>
61688 #include <linux/kernel_stat.h>
61689 #include <trace/events/timer.h>
61690 diff -urNp linux-2.6.32.43/kernel/posix-timers.c linux-2.6.32.43/kernel/posix-timers.c
61691 --- linux-2.6.32.43/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
61692 +++ linux-2.6.32.43/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
61693 @@ -42,6 +42,7 @@
61694 #include <linux/compiler.h>
61695 #include <linux/idr.h>
61696 #include <linux/posix-timers.h>
61697 +#include <linux/grsecurity.h>
61698 #include <linux/syscalls.h>
61699 #include <linux/wait.h>
61700 #include <linux/workqueue.h>
61701 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
61702 .nsleep = no_nsleep,
61703 };
61704
61705 + pax_track_stack();
61706 +
61707 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
61708 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
61709 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61710 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61711 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61712 return -EFAULT;
61713
61714 +	/* only the CLOCK_REALTIME clock can be set; all other clocks
61715 +	   have their clock_set fptr set to a nosettime dummy function.
61716 +	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
61717 +	   call common_clock_set, which calls do_sys_settimeofday, which
61718 +	   we hook.
61719 +	*/
61720 +
61721 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
61722 }
61723
61724 diff -urNp linux-2.6.32.43/kernel/power/hibernate.c linux-2.6.32.43/kernel/power/hibernate.c
61725 --- linux-2.6.32.43/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
61726 +++ linux-2.6.32.43/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
61727 @@ -48,14 +48,14 @@ enum {
61728
61729 static int hibernation_mode = HIBERNATION_SHUTDOWN;
61730
61731 -static struct platform_hibernation_ops *hibernation_ops;
61732 +static const struct platform_hibernation_ops *hibernation_ops;
61733
61734 /**
61735 * hibernation_set_ops - set the global hibernate operations
61736 * @ops: the hibernation operations to use in subsequent hibernation transitions
61737 */
61738
61739 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
61740 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
61741 {
61742 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
61743 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
61744 diff -urNp linux-2.6.32.43/kernel/power/poweroff.c linux-2.6.32.43/kernel/power/poweroff.c
61745 --- linux-2.6.32.43/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
61746 +++ linux-2.6.32.43/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
61747 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61748 .enable_mask = SYSRQ_ENABLE_BOOT,
61749 };
61750
61751 -static int pm_sysrq_init(void)
61752 +static int __init pm_sysrq_init(void)
61753 {
61754 register_sysrq_key('o', &sysrq_poweroff_op);
61755 return 0;
61756 diff -urNp linux-2.6.32.43/kernel/power/process.c linux-2.6.32.43/kernel/power/process.c
61757 --- linux-2.6.32.43/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
61758 +++ linux-2.6.32.43/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
61759 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
61760 struct timeval start, end;
61761 u64 elapsed_csecs64;
61762 unsigned int elapsed_csecs;
61763 + bool timedout = false;
61764
61765 do_gettimeofday(&start);
61766
61767 end_time = jiffies + TIMEOUT;
61768 do {
61769 todo = 0;
61770 + if (time_after(jiffies, end_time))
61771 + timedout = true;
61772 read_lock(&tasklist_lock);
61773 do_each_thread(g, p) {
61774 if (frozen(p) || !freezeable(p))
61775 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
61776 * It is "frozen enough". If the task does wake
61777 * up, it will immediately call try_to_freeze.
61778 */
61779 - if (!task_is_stopped_or_traced(p) &&
61780 - !freezer_should_skip(p))
61781 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61782 todo++;
61783 + if (timedout) {
61784 + printk(KERN_ERR "Task refusing to freeze:\n");
61785 + sched_show_task(p);
61786 + }
61787 + }
61788 } while_each_thread(g, p);
61789 read_unlock(&tasklist_lock);
61790 yield(); /* Yield is okay here */
61791 - if (time_after(jiffies, end_time))
61792 - break;
61793 - } while (todo);
61794 + } while (todo && !timedout);
61795
61796 do_gettimeofday(&end);
61797 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
61798 diff -urNp linux-2.6.32.43/kernel/power/suspend.c linux-2.6.32.43/kernel/power/suspend.c
61799 --- linux-2.6.32.43/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
61800 +++ linux-2.6.32.43/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
61801 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
61802 [PM_SUSPEND_MEM] = "mem",
61803 };
61804
61805 -static struct platform_suspend_ops *suspend_ops;
61806 +static const struct platform_suspend_ops *suspend_ops;
61807
61808 /**
61809 * suspend_set_ops - Set the global suspend method table.
61810 * @ops: Pointer to ops structure.
61811 */
61812 -void suspend_set_ops(struct platform_suspend_ops *ops)
61813 +void suspend_set_ops(const struct platform_suspend_ops *ops)
61814 {
61815 mutex_lock(&pm_mutex);
61816 suspend_ops = ops;
61817 diff -urNp linux-2.6.32.43/kernel/printk.c linux-2.6.32.43/kernel/printk.c
61818 --- linux-2.6.32.43/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
61819 +++ linux-2.6.32.43/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
61820 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
61821 char c;
61822 int error = 0;
61823
61824 +#ifdef CONFIG_GRKERNSEC_DMESG
61825 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
61826 + return -EPERM;
61827 +#endif
61828 +
61829 error = security_syslog(type);
61830 if (error)
61831 return error;
61832 diff -urNp linux-2.6.32.43/kernel/profile.c linux-2.6.32.43/kernel/profile.c
61833 --- linux-2.6.32.43/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61834 +++ linux-2.6.32.43/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61835 @@ -39,7 +39,7 @@ struct profile_hit {
61836 /* Oprofile timer tick hook */
61837 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61838
61839 -static atomic_t *prof_buffer;
61840 +static atomic_unchecked_t *prof_buffer;
61841 static unsigned long prof_len, prof_shift;
61842
61843 int prof_on __read_mostly;
61844 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61845 hits[i].pc = 0;
61846 continue;
61847 }
61848 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61849 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61850 hits[i].hits = hits[i].pc = 0;
61851 }
61852 }
61853 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61854 * Add the current hit(s) and flush the write-queue out
61855 * to the global buffer:
61856 */
61857 - atomic_add(nr_hits, &prof_buffer[pc]);
61858 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61859 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61860 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61861 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61862 hits[i].pc = hits[i].hits = 0;
61863 }
61864 out:
61865 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61866 if (prof_on != type || !prof_buffer)
61867 return;
61868 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61869 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61870 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61871 }
61872 #endif /* !CONFIG_SMP */
61873 EXPORT_SYMBOL_GPL(profile_hits);
61874 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61875 return -EFAULT;
61876 buf++; p++; count--; read++;
61877 }
61878 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61879 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61880 if (copy_to_user(buf, (void *)pnt, count))
61881 return -EFAULT;
61882 read += count;
61883 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61884 }
61885 #endif
61886 profile_discard_flip_buffers();
61887 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61888 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61889 return count;
61890 }
61891
61892 diff -urNp linux-2.6.32.43/kernel/ptrace.c linux-2.6.32.43/kernel/ptrace.c
61893 --- linux-2.6.32.43/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61894 +++ linux-2.6.32.43/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61895 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61896 return ret;
61897 }
61898
61899 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61900 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61901 + unsigned int log)
61902 {
61903 const struct cred *cred = current_cred(), *tcred;
61904
61905 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61906 cred->gid != tcred->egid ||
61907 cred->gid != tcred->sgid ||
61908 cred->gid != tcred->gid) &&
61909 - !capable(CAP_SYS_PTRACE)) {
61910 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61911 + (log && !capable(CAP_SYS_PTRACE)))
61912 + ) {
61913 rcu_read_unlock();
61914 return -EPERM;
61915 }
61916 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61917 smp_rmb();
61918 if (task->mm)
61919 dumpable = get_dumpable(task->mm);
61920 - if (!dumpable && !capable(CAP_SYS_PTRACE))
61921 + if (!dumpable &&
61922 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61923 + (log && !capable(CAP_SYS_PTRACE))))
61924 return -EPERM;
61925
61926 return security_ptrace_access_check(task, mode);
61927 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61928 {
61929 int err;
61930 task_lock(task);
61931 - err = __ptrace_may_access(task, mode);
61932 + err = __ptrace_may_access(task, mode, 0);
61933 + task_unlock(task);
61934 + return !err;
61935 +}
61936 +
61937 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61938 +{
61939 + int err;
61940 + task_lock(task);
61941 + err = __ptrace_may_access(task, mode, 1);
61942 task_unlock(task);
61943 return !err;
61944 }
61945 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61946 goto out;
61947
61948 task_lock(task);
61949 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61950 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61951 task_unlock(task);
61952 if (retval)
61953 goto unlock_creds;
61954 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61955 goto unlock_tasklist;
61956
61957 task->ptrace = PT_PTRACED;
61958 - if (capable(CAP_SYS_PTRACE))
61959 + if (capable_nolog(CAP_SYS_PTRACE))
61960 task->ptrace |= PT_PTRACE_CAP;
61961
61962 __ptrace_link(task, current);
61963 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61964 {
61965 int copied = 0;
61966
61967 + pax_track_stack();
61968 +
61969 while (len > 0) {
61970 char buf[128];
61971 int this_len, retval;
61972 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61973 {
61974 int copied = 0;
61975
61976 + pax_track_stack();
61977 +
61978 while (len > 0) {
61979 char buf[128];
61980 int this_len, retval;
61981 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61982 int ret = -EIO;
61983 siginfo_t siginfo;
61984
61985 + pax_track_stack();
61986 +
61987 switch (request) {
61988 case PTRACE_PEEKTEXT:
61989 case PTRACE_PEEKDATA:
61990 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61991 ret = ptrace_setoptions(child, data);
61992 break;
61993 case PTRACE_GETEVENTMSG:
61994 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61995 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61996 break;
61997
61998 case PTRACE_GETSIGINFO:
61999 ret = ptrace_getsiginfo(child, &siginfo);
62000 if (!ret)
62001 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
62002 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
62003 &siginfo);
62004 break;
62005
62006 case PTRACE_SETSIGINFO:
62007 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
62008 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
62009 sizeof siginfo))
62010 ret = -EFAULT;
62011 else
62012 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
62013 goto out;
62014 }
62015
62016 + if (gr_handle_ptrace(child, request)) {
62017 + ret = -EPERM;
62018 + goto out_put_task_struct;
62019 + }
62020 +
62021 if (request == PTRACE_ATTACH) {
62022 ret = ptrace_attach(child);
62023 /*
62024 * Some architectures need to do book-keeping after
62025 * a ptrace attach.
62026 */
62027 - if (!ret)
62028 + if (!ret) {
62029 arch_ptrace_attach(child);
62030 + gr_audit_ptrace(child);
62031 + }
62032 goto out_put_task_struct;
62033 }
62034
62035 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
62036 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
62037 if (copied != sizeof(tmp))
62038 return -EIO;
62039 - return put_user(tmp, (unsigned long __user *)data);
62040 + return put_user(tmp, (__force unsigned long __user *)data);
62041 }
62042
62043 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
62044 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
62045 siginfo_t siginfo;
62046 int ret;
62047
62048 + pax_track_stack();
62049 +
62050 switch (request) {
62051 case PTRACE_PEEKTEXT:
62052 case PTRACE_PEEKDATA:
62053 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
62054 goto out;
62055 }
62056
62057 + if (gr_handle_ptrace(child, request)) {
62058 + ret = -EPERM;
62059 + goto out_put_task_struct;
62060 + }
62061 +
62062 if (request == PTRACE_ATTACH) {
62063 ret = ptrace_attach(child);
62064 /*
62065 * Some architectures need to do book-keeping after
62066 * a ptrace attach.
62067 */
62068 - if (!ret)
62069 + if (!ret) {
62070 arch_ptrace_attach(child);
62071 + gr_audit_ptrace(child);
62072 + }
62073 goto out_put_task_struct;
62074 }
62075
62076 diff -urNp linux-2.6.32.43/kernel/rcutorture.c linux-2.6.32.43/kernel/rcutorture.c
62077 --- linux-2.6.32.43/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
62078 +++ linux-2.6.32.43/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
62079 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
62080 { 0 };
62081 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
62082 { 0 };
62083 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62084 -static atomic_t n_rcu_torture_alloc;
62085 -static atomic_t n_rcu_torture_alloc_fail;
62086 -static atomic_t n_rcu_torture_free;
62087 -static atomic_t n_rcu_torture_mberror;
62088 -static atomic_t n_rcu_torture_error;
62089 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62090 +static atomic_unchecked_t n_rcu_torture_alloc;
62091 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
62092 +static atomic_unchecked_t n_rcu_torture_free;
62093 +static atomic_unchecked_t n_rcu_torture_mberror;
62094 +static atomic_unchecked_t n_rcu_torture_error;
62095 static long n_rcu_torture_timers;
62096 static struct list_head rcu_torture_removed;
62097 static cpumask_var_t shuffle_tmp_mask;
62098 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
62099
62100 spin_lock_bh(&rcu_torture_lock);
62101 if (list_empty(&rcu_torture_freelist)) {
62102 - atomic_inc(&n_rcu_torture_alloc_fail);
62103 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
62104 spin_unlock_bh(&rcu_torture_lock);
62105 return NULL;
62106 }
62107 - atomic_inc(&n_rcu_torture_alloc);
62108 + atomic_inc_unchecked(&n_rcu_torture_alloc);
62109 p = rcu_torture_freelist.next;
62110 list_del_init(p);
62111 spin_unlock_bh(&rcu_torture_lock);
62112 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
62113 static void
62114 rcu_torture_free(struct rcu_torture *p)
62115 {
62116 - atomic_inc(&n_rcu_torture_free);
62117 + atomic_inc_unchecked(&n_rcu_torture_free);
62118 spin_lock_bh(&rcu_torture_lock);
62119 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
62120 spin_unlock_bh(&rcu_torture_lock);
62121 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
62122 i = rp->rtort_pipe_count;
62123 if (i > RCU_TORTURE_PIPE_LEN)
62124 i = RCU_TORTURE_PIPE_LEN;
62125 - atomic_inc(&rcu_torture_wcount[i]);
62126 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
62127 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62128 rp->rtort_mbtest = 0;
62129 rcu_torture_free(rp);
62130 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
62131 i = rp->rtort_pipe_count;
62132 if (i > RCU_TORTURE_PIPE_LEN)
62133 i = RCU_TORTURE_PIPE_LEN;
62134 - atomic_inc(&rcu_torture_wcount[i]);
62135 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
62136 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62137 rp->rtort_mbtest = 0;
62138 list_del(&rp->rtort_free);
62139 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
62140 i = old_rp->rtort_pipe_count;
62141 if (i > RCU_TORTURE_PIPE_LEN)
62142 i = RCU_TORTURE_PIPE_LEN;
62143 - atomic_inc(&rcu_torture_wcount[i]);
62144 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
62145 old_rp->rtort_pipe_count++;
62146 cur_ops->deferred_free(old_rp);
62147 }
62148 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
62149 return;
62150 }
62151 if (p->rtort_mbtest == 0)
62152 - atomic_inc(&n_rcu_torture_mberror);
62153 + atomic_inc_unchecked(&n_rcu_torture_mberror);
62154 spin_lock(&rand_lock);
62155 cur_ops->read_delay(&rand);
62156 n_rcu_torture_timers++;
62157 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
62158 continue;
62159 }
62160 if (p->rtort_mbtest == 0)
62161 - atomic_inc(&n_rcu_torture_mberror);
62162 + atomic_inc_unchecked(&n_rcu_torture_mberror);
62163 cur_ops->read_delay(&rand);
62164 preempt_disable();
62165 pipe_count = p->rtort_pipe_count;
62166 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
62167 rcu_torture_current,
62168 rcu_torture_current_version,
62169 list_empty(&rcu_torture_freelist),
62170 - atomic_read(&n_rcu_torture_alloc),
62171 - atomic_read(&n_rcu_torture_alloc_fail),
62172 - atomic_read(&n_rcu_torture_free),
62173 - atomic_read(&n_rcu_torture_mberror),
62174 + atomic_read_unchecked(&n_rcu_torture_alloc),
62175 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
62176 + atomic_read_unchecked(&n_rcu_torture_free),
62177 + atomic_read_unchecked(&n_rcu_torture_mberror),
62178 n_rcu_torture_timers);
62179 - if (atomic_read(&n_rcu_torture_mberror) != 0)
62180 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
62181 cnt += sprintf(&page[cnt], " !!!");
62182 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
62183 if (i > 1) {
62184 cnt += sprintf(&page[cnt], "!!! ");
62185 - atomic_inc(&n_rcu_torture_error);
62186 + atomic_inc_unchecked(&n_rcu_torture_error);
62187 WARN_ON_ONCE(1);
62188 }
62189 cnt += sprintf(&page[cnt], "Reader Pipe: ");
62190 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
62191 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
62192 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62193 cnt += sprintf(&page[cnt], " %d",
62194 - atomic_read(&rcu_torture_wcount[i]));
62195 + atomic_read_unchecked(&rcu_torture_wcount[i]));
62196 }
62197 cnt += sprintf(&page[cnt], "\n");
62198 if (cur_ops->stats)
62199 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
62200
62201 if (cur_ops->cleanup)
62202 cur_ops->cleanup();
62203 - if (atomic_read(&n_rcu_torture_error))
62204 + if (atomic_read_unchecked(&n_rcu_torture_error))
62205 rcu_torture_print_module_parms("End of test: FAILURE");
62206 else
62207 rcu_torture_print_module_parms("End of test: SUCCESS");
62208 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
62209
62210 rcu_torture_current = NULL;
62211 rcu_torture_current_version = 0;
62212 - atomic_set(&n_rcu_torture_alloc, 0);
62213 - atomic_set(&n_rcu_torture_alloc_fail, 0);
62214 - atomic_set(&n_rcu_torture_free, 0);
62215 - atomic_set(&n_rcu_torture_mberror, 0);
62216 - atomic_set(&n_rcu_torture_error, 0);
62217 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
62218 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
62219 + atomic_set_unchecked(&n_rcu_torture_free, 0);
62220 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
62221 + atomic_set_unchecked(&n_rcu_torture_error, 0);
62222 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
62223 - atomic_set(&rcu_torture_wcount[i], 0);
62224 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
62225 for_each_possible_cpu(cpu) {
62226 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62227 per_cpu(rcu_torture_count, cpu)[i] = 0;
62228 diff -urNp linux-2.6.32.43/kernel/rcutree.c linux-2.6.32.43/kernel/rcutree.c
62229 --- linux-2.6.32.43/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
62230 +++ linux-2.6.32.43/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
62231 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
62232 /*
62233 * Do softirq processing for the current CPU.
62234 */
62235 -static void rcu_process_callbacks(struct softirq_action *unused)
62236 +static void rcu_process_callbacks(void)
62237 {
62238 /*
62239 * Memory references from any prior RCU read-side critical sections
62240 diff -urNp linux-2.6.32.43/kernel/rcutree_plugin.h linux-2.6.32.43/kernel/rcutree_plugin.h
62241 --- linux-2.6.32.43/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
62242 +++ linux-2.6.32.43/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
62243 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
62244 */
62245 void __rcu_read_lock(void)
62246 {
62247 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
62248 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
62249 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
62250 }
62251 EXPORT_SYMBOL_GPL(__rcu_read_lock);
62252 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
62253 struct task_struct *t = current;
62254
62255 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
62256 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
62257 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
62258 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
62259 rcu_read_unlock_special(t);
62260 }
62261 diff -urNp linux-2.6.32.43/kernel/relay.c linux-2.6.32.43/kernel/relay.c
62262 --- linux-2.6.32.43/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
62263 +++ linux-2.6.32.43/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
62264 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
62265 unsigned int flags,
62266 int *nonpad_ret)
62267 {
62268 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
62269 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
62270 struct rchan_buf *rbuf = in->private_data;
62271 unsigned int subbuf_size = rbuf->chan->subbuf_size;
62272 uint64_t pos = (uint64_t) *ppos;
62273 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
62274 .ops = &relay_pipe_buf_ops,
62275 .spd_release = relay_page_release,
62276 };
62277 + ssize_t ret;
62278 +
62279 + pax_track_stack();
62280
62281 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
62282 return 0;
62283 diff -urNp linux-2.6.32.43/kernel/resource.c linux-2.6.32.43/kernel/resource.c
62284 --- linux-2.6.32.43/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
62285 +++ linux-2.6.32.43/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
62286 @@ -132,8 +132,18 @@ static const struct file_operations proc
62287
62288 static int __init ioresources_init(void)
62289 {
62290 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62291 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62292 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
62293 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
62294 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62295 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
62296 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
62297 +#endif
62298 +#else
62299 proc_create("ioports", 0, NULL, &proc_ioports_operations);
62300 proc_create("iomem", 0, NULL, &proc_iomem_operations);
62301 +#endif
62302 return 0;
62303 }
62304 __initcall(ioresources_init);
62305 diff -urNp linux-2.6.32.43/kernel/rtmutex.c linux-2.6.32.43/kernel/rtmutex.c
62306 --- linux-2.6.32.43/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
62307 +++ linux-2.6.32.43/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
62308 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
62309 */
62310 spin_lock_irqsave(&pendowner->pi_lock, flags);
62311
62312 - WARN_ON(!pendowner->pi_blocked_on);
62313 + BUG_ON(!pendowner->pi_blocked_on);
62314 WARN_ON(pendowner->pi_blocked_on != waiter);
62315 WARN_ON(pendowner->pi_blocked_on->lock != lock);
62316
62317 diff -urNp linux-2.6.32.43/kernel/rtmutex-tester.c linux-2.6.32.43/kernel/rtmutex-tester.c
62318 --- linux-2.6.32.43/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
62319 +++ linux-2.6.32.43/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
62320 @@ -21,7 +21,7 @@
62321 #define MAX_RT_TEST_MUTEXES 8
62322
62323 static spinlock_t rttest_lock;
62324 -static atomic_t rttest_event;
62325 +static atomic_unchecked_t rttest_event;
62326
62327 struct test_thread_data {
62328 int opcode;
62329 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
62330
62331 case RTTEST_LOCKCONT:
62332 td->mutexes[td->opdata] = 1;
62333 - td->event = atomic_add_return(1, &rttest_event);
62334 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62335 return 0;
62336
62337 case RTTEST_RESET:
62338 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
62339 return 0;
62340
62341 case RTTEST_RESETEVENT:
62342 - atomic_set(&rttest_event, 0);
62343 + atomic_set_unchecked(&rttest_event, 0);
62344 return 0;
62345
62346 default:
62347 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
62348 return ret;
62349
62350 td->mutexes[id] = 1;
62351 - td->event = atomic_add_return(1, &rttest_event);
62352 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62353 rt_mutex_lock(&mutexes[id]);
62354 - td->event = atomic_add_return(1, &rttest_event);
62355 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62356 td->mutexes[id] = 4;
62357 return 0;
62358
62359 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
62360 return ret;
62361
62362 td->mutexes[id] = 1;
62363 - td->event = atomic_add_return(1, &rttest_event);
62364 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62365 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
62366 - td->event = atomic_add_return(1, &rttest_event);
62367 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62368 td->mutexes[id] = ret ? 0 : 4;
62369 return ret ? -EINTR : 0;
62370
62371 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
62372 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
62373 return ret;
62374
62375 - td->event = atomic_add_return(1, &rttest_event);
62376 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62377 rt_mutex_unlock(&mutexes[id]);
62378 - td->event = atomic_add_return(1, &rttest_event);
62379 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62380 td->mutexes[id] = 0;
62381 return 0;
62382
62383 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
62384 break;
62385
62386 td->mutexes[dat] = 2;
62387 - td->event = atomic_add_return(1, &rttest_event);
62388 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62389 break;
62390
62391 case RTTEST_LOCKBKL:
62392 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
62393 return;
62394
62395 td->mutexes[dat] = 3;
62396 - td->event = atomic_add_return(1, &rttest_event);
62397 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62398 break;
62399
62400 case RTTEST_LOCKNOWAIT:
62401 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
62402 return;
62403
62404 td->mutexes[dat] = 1;
62405 - td->event = atomic_add_return(1, &rttest_event);
62406 + td->event = atomic_add_return_unchecked(1, &rttest_event);
62407 return;
62408
62409 case RTTEST_LOCKBKL:
62410 diff -urNp linux-2.6.32.43/kernel/sched.c linux-2.6.32.43/kernel/sched.c
62411 --- linux-2.6.32.43/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
62412 +++ linux-2.6.32.43/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
62413 @@ -5043,7 +5043,7 @@ out:
62414 * In CONFIG_NO_HZ case, the idle load balance owner will do the
62415 * rebalancing for all the cpus for whom scheduler ticks are stopped.
62416 */
62417 -static void run_rebalance_domains(struct softirq_action *h)
62418 +static void run_rebalance_domains(void)
62419 {
62420 int this_cpu = smp_processor_id();
62421 struct rq *this_rq = cpu_rq(this_cpu);
62422 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
62423 struct rq *rq;
62424 int cpu;
62425
62426 + pax_track_stack();
62427 +
62428 need_resched:
62429 preempt_disable();
62430 cpu = smp_processor_id();
62431 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
62432 * Look out! "owner" is an entirely speculative pointer
62433 * access and not reliable.
62434 */
62435 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
62436 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
62437 {
62438 unsigned int cpu;
62439 struct rq *rq;
62440 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
62441 * DEBUG_PAGEALLOC could have unmapped it if
62442 * the mutex owner just released it and exited.
62443 */
62444 - if (probe_kernel_address(&owner->cpu, cpu))
62445 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
62446 return 0;
62447 #else
62448 - cpu = owner->cpu;
62449 + cpu = task_thread_info(owner)->cpu;
62450 #endif
62451
62452 /*
62453 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
62454 /*
62455 * Is that owner really running on that cpu?
62456 */
62457 - if (task_thread_info(rq->curr) != owner || need_resched())
62458 + if (rq->curr != owner || need_resched())
62459 return 0;
62460
62461 cpu_relax();
62462 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
62463 /* convert nice value [19,-20] to rlimit style value [1,40] */
62464 int nice_rlim = 20 - nice;
62465
62466 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
62467 +
62468 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
62469 capable(CAP_SYS_NICE));
62470 }
62471 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
62472 if (nice > 19)
62473 nice = 19;
62474
62475 - if (increment < 0 && !can_nice(current, nice))
62476 + if (increment < 0 && (!can_nice(current, nice) ||
62477 + gr_handle_chroot_nice()))
62478 return -EPERM;
62479
62480 retval = security_task_setnice(current, nice);
62481 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
62482 long power;
62483 int weight;
62484
62485 - WARN_ON(!sd || !sd->groups);
62486 + BUG_ON(!sd || !sd->groups);
62487
62488 if (cpu != group_first_cpu(sd->groups))
62489 return;
62490 diff -urNp linux-2.6.32.43/kernel/signal.c linux-2.6.32.43/kernel/signal.c
62491 --- linux-2.6.32.43/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
62492 +++ linux-2.6.32.43/kernel/signal.c 2011-07-14 20:33:33.000000000 -0400
62493 @@ -41,12 +41,12 @@
62494
62495 static struct kmem_cache *sigqueue_cachep;
62496
62497 -static void __user *sig_handler(struct task_struct *t, int sig)
62498 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
62499 {
62500 return t->sighand->action[sig - 1].sa.sa_handler;
62501 }
62502
62503 -static int sig_handler_ignored(void __user *handler, int sig)
62504 +static int sig_handler_ignored(__sighandler_t handler, int sig)
62505 {
62506 /* Is it explicitly or implicitly ignored? */
62507 return handler == SIG_IGN ||
62508 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
62509 static int sig_task_ignored(struct task_struct *t, int sig,
62510 int from_ancestor_ns)
62511 {
62512 - void __user *handler;
62513 + __sighandler_t handler;
62514
62515 handler = sig_handler(t, sig);
62516
62517 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
62518 */
62519 user = get_uid(__task_cred(t)->user);
62520 atomic_inc(&user->sigpending);
62521 +
62522 + if (!override_rlimit)
62523 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
62524 if (override_rlimit ||
62525 atomic_read(&user->sigpending) <=
62526 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
62527 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
62528
62529 int unhandled_signal(struct task_struct *tsk, int sig)
62530 {
62531 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
62532 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
62533 if (is_global_init(tsk))
62534 return 1;
62535 if (handler != SIG_IGN && handler != SIG_DFL)
62536 @@ -627,6 +630,12 @@ static int check_kill_permission(int sig
62537 }
62538 }
62539
62540 + /* allow glibc communication via tgkill to other threads in our
62541 + thread group */
62542 + if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
62543 + task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
62544 + return -EPERM;
62545 +
62546 return security_task_kill(t, info, sig, 0);
62547 }
62548
62549 @@ -968,7 +977,7 @@ __group_send_sig_info(int sig, struct si
62550 return send_signal(sig, info, p, 1);
62551 }
62552
62553 -static int
62554 +int
62555 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
62556 {
62557 return send_signal(sig, info, t, 0);
62558 @@ -1005,6 +1014,7 @@ force_sig_info(int sig, struct siginfo *
62559 unsigned long int flags;
62560 int ret, blocked, ignored;
62561 struct k_sigaction *action;
62562 + int is_unhandled = 0;
62563
62564 spin_lock_irqsave(&t->sighand->siglock, flags);
62565 action = &t->sighand->action[sig-1];
62566 @@ -1019,9 +1029,18 @@ force_sig_info(int sig, struct siginfo *
62567 }
62568 if (action->sa.sa_handler == SIG_DFL)
62569 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62570 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62571 + is_unhandled = 1;
62572 ret = specific_send_sig_info(sig, info, t);
62573 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62574
62575 +	/* only deal with unhandled signals; java etc. trigger SIGSEGV
62576 +	   during normal operation */
62577 + if (is_unhandled) {
62578 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62579 + gr_handle_crash(t, sig);
62580 + }
62581 +
62582 return ret;
62583 }
62584
62585 @@ -1081,8 +1100,11 @@ int group_send_sig_info(int sig, struct
62586 {
62587 int ret = check_kill_permission(sig, info, p);
62588
62589 - if (!ret && sig)
62590 + if (!ret && sig) {
62591 ret = do_send_sig_info(sig, info, p, true);
62592 + if (!ret)
62593 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62594 + }
62595
62596 return ret;
62597 }
62598 @@ -1644,6 +1666,8 @@ void ptrace_notify(int exit_code)
62599 {
62600 siginfo_t info;
62601
62602 + pax_track_stack();
62603 +
62604 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62605
62606 memset(&info, 0, sizeof info);
62607 @@ -2275,7 +2299,15 @@ do_send_specific(pid_t tgid, pid_t pid,
62608 int error = -ESRCH;
62609
62610 rcu_read_lock();
62611 - p = find_task_by_vpid(pid);
62612 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62613 + /* allow glibc communication via tgkill to other threads in our
62614 + thread group */
62615 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62616 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
62617 + p = find_task_by_vpid_unrestricted(pid);
62618 + else
62619 +#endif
62620 + p = find_task_by_vpid(pid);
62621 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62622 error = check_kill_permission(sig, info, p);
62623 /*
62624 diff -urNp linux-2.6.32.43/kernel/smp.c linux-2.6.32.43/kernel/smp.c
62625 --- linux-2.6.32.43/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
62626 +++ linux-2.6.32.43/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
62627 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
62628 }
62629 EXPORT_SYMBOL(smp_call_function);
62630
62631 -void ipi_call_lock(void)
62632 +void ipi_call_lock(void) __acquires(call_function.lock)
62633 {
62634 spin_lock(&call_function.lock);
62635 }
62636
62637 -void ipi_call_unlock(void)
62638 +void ipi_call_unlock(void) __releases(call_function.lock)
62639 {
62640 spin_unlock(&call_function.lock);
62641 }
62642
62643 -void ipi_call_lock_irq(void)
62644 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
62645 {
62646 spin_lock_irq(&call_function.lock);
62647 }
62648
62649 -void ipi_call_unlock_irq(void)
62650 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
62651 {
62652 spin_unlock_irq(&call_function.lock);
62653 }
62654 diff -urNp linux-2.6.32.43/kernel/softirq.c linux-2.6.32.43/kernel/softirq.c
62655 --- linux-2.6.32.43/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
62656 +++ linux-2.6.32.43/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
62657 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62658
62659 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62660
62661 -char *softirq_to_name[NR_SOFTIRQS] = {
62662 +const char * const softirq_to_name[NR_SOFTIRQS] = {
62663 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62664 "TASKLET", "SCHED", "HRTIMER", "RCU"
62665 };
62666 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
62667
62668 asmlinkage void __do_softirq(void)
62669 {
62670 - struct softirq_action *h;
62671 + const struct softirq_action *h;
62672 __u32 pending;
62673 int max_restart = MAX_SOFTIRQ_RESTART;
62674 int cpu;
62675 @@ -233,7 +233,7 @@ restart:
62676 kstat_incr_softirqs_this_cpu(h - softirq_vec);
62677
62678 trace_softirq_entry(h, softirq_vec);
62679 - h->action(h);
62680 + h->action();
62681 trace_softirq_exit(h, softirq_vec);
62682 if (unlikely(prev_count != preempt_count())) {
62683 printk(KERN_ERR "huh, entered softirq %td %s %p"
62684 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
62685 local_irq_restore(flags);
62686 }
62687
62688 -void open_softirq(int nr, void (*action)(struct softirq_action *))
62689 +void open_softirq(int nr, void (*action)(void))
62690 {
62691 softirq_vec[nr].action = action;
62692 }
62693 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
62694
62695 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62696
62697 -static void tasklet_action(struct softirq_action *a)
62698 +static void tasklet_action(void)
62699 {
62700 struct tasklet_struct *list;
62701
62702 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
62703 }
62704 }
62705
62706 -static void tasklet_hi_action(struct softirq_action *a)
62707 +static void tasklet_hi_action(void)
62708 {
62709 struct tasklet_struct *list;
62710
62711 diff -urNp linux-2.6.32.43/kernel/sys.c linux-2.6.32.43/kernel/sys.c
62712 --- linux-2.6.32.43/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
62713 +++ linux-2.6.32.43/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
62714 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
62715 error = -EACCES;
62716 goto out;
62717 }
62718 +
62719 + if (gr_handle_chroot_setpriority(p, niceval)) {
62720 + error = -EACCES;
62721 + goto out;
62722 + }
62723 +
62724 no_nice = security_task_setnice(p, niceval);
62725 if (no_nice) {
62726 error = no_nice;
62727 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
62728 !(user = find_user(who)))
62729 goto out_unlock; /* No processes for this user */
62730
62731 - do_each_thread(g, p)
62732 + do_each_thread(g, p) {
62733 if (__task_cred(p)->uid == who)
62734 error = set_one_prio(p, niceval, error);
62735 - while_each_thread(g, p);
62736 + } while_each_thread(g, p);
62737 if (who != cred->uid)
62738 free_uid(user); /* For find_user() */
62739 break;
62740 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
62741 !(user = find_user(who)))
62742 goto out_unlock; /* No processes for this user */
62743
62744 - do_each_thread(g, p)
62745 + do_each_thread(g, p) {
62746 if (__task_cred(p)->uid == who) {
62747 niceval = 20 - task_nice(p);
62748 if (niceval > retval)
62749 retval = niceval;
62750 }
62751 - while_each_thread(g, p);
62752 + } while_each_thread(g, p);
62753 if (who != cred->uid)
62754 free_uid(user); /* for find_user() */
62755 break;
62756 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62757 goto error;
62758 }
62759
62760 + if (gr_check_group_change(new->gid, new->egid, -1))
62761 + goto error;
62762 +
62763 if (rgid != (gid_t) -1 ||
62764 (egid != (gid_t) -1 && egid != old->gid))
62765 new->sgid = new->egid;
62766 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62767 goto error;
62768
62769 retval = -EPERM;
62770 +
62771 + if (gr_check_group_change(gid, gid, gid))
62772 + goto error;
62773 +
62774 if (capable(CAP_SETGID))
62775 new->gid = new->egid = new->sgid = new->fsgid = gid;
62776 else if (gid == old->gid || gid == old->sgid)
62777 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62778 goto error;
62779 }
62780
62781 + if (gr_check_user_change(new->uid, new->euid, -1))
62782 + goto error;
62783 +
62784 if (new->uid != old->uid) {
62785 retval = set_user(new);
62786 if (retval < 0)
62787 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62788 goto error;
62789
62790 retval = -EPERM;
62791 +
62792 + if (gr_check_crash_uid(uid))
62793 + goto error;
62794 + if (gr_check_user_change(uid, uid, uid))
62795 + goto error;
62796 +
62797 if (capable(CAP_SETUID)) {
62798 new->suid = new->uid = uid;
62799 if (uid != old->uid) {
62800 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62801 goto error;
62802 }
62803
62804 + if (gr_check_user_change(ruid, euid, -1))
62805 + goto error;
62806 +
62807 if (ruid != (uid_t) -1) {
62808 new->uid = ruid;
62809 if (ruid != old->uid) {
62810 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62811 goto error;
62812 }
62813
62814 + if (gr_check_group_change(rgid, egid, -1))
62815 + goto error;
62816 +
62817 if (rgid != (gid_t) -1)
62818 new->gid = rgid;
62819 if (egid != (gid_t) -1)
62820 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62821 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
62822 goto error;
62823
62824 + if (gr_check_user_change(-1, -1, uid))
62825 + goto error;
62826 +
62827 if (uid == old->uid || uid == old->euid ||
62828 uid == old->suid || uid == old->fsuid ||
62829 capable(CAP_SETUID)) {
62830 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62831 if (gid == old->gid || gid == old->egid ||
62832 gid == old->sgid || gid == old->fsgid ||
62833 capable(CAP_SETGID)) {
62834 + if (gr_check_group_change(-1, -1, gid))
62835 + goto error;
62836 +
62837 if (gid != old_fsgid) {
62838 new->fsgid = gid;
62839 goto change_okay;
62840 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62841 error = get_dumpable(me->mm);
62842 break;
62843 case PR_SET_DUMPABLE:
62844 - if (arg2 < 0 || arg2 > 1) {
62845 + if (arg2 > 1) {
62846 error = -EINVAL;
62847 break;
62848 }
62849 diff -urNp linux-2.6.32.43/kernel/sysctl.c linux-2.6.32.43/kernel/sysctl.c
62850 --- linux-2.6.32.43/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62851 +++ linux-2.6.32.43/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62852 @@ -63,6 +63,13 @@
62853 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62854
62855 #if defined(CONFIG_SYSCTL)
62856 +#include <linux/grsecurity.h>
62857 +#include <linux/grinternal.h>
62858 +
62859 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62860 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62861 + const int op);
62862 +extern int gr_handle_chroot_sysctl(const int op);
62863
62864 /* External variables not in a header file. */
62865 extern int C_A_D;
62866 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62867 static int proc_taint(struct ctl_table *table, int write,
62868 void __user *buffer, size_t *lenp, loff_t *ppos);
62869 #endif
62870 +extern ctl_table grsecurity_table[];
62871
62872 static struct ctl_table root_table[];
62873 static struct ctl_table_root sysctl_table_root;
62874 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62875 int sysctl_legacy_va_layout;
62876 #endif
62877
62878 +#ifdef CONFIG_PAX_SOFTMODE
62879 +static ctl_table pax_table[] = {
62880 + {
62881 + .ctl_name = CTL_UNNUMBERED,
62882 + .procname = "softmode",
62883 + .data = &pax_softmode,
62884 + .maxlen = sizeof(unsigned int),
62885 + .mode = 0600,
62886 + .proc_handler = &proc_dointvec,
62887 + },
62888 +
62889 + { .ctl_name = 0 }
62890 +};
62891 +#endif
62892 +
62893 extern int prove_locking;
62894 extern int lock_stat;
62895
62896 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62897 #endif
62898
62899 static struct ctl_table kern_table[] = {
62900 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62901 + {
62902 + .ctl_name = CTL_UNNUMBERED,
62903 + .procname = "grsecurity",
62904 + .mode = 0500,
62905 + .child = grsecurity_table,
62906 + },
62907 +#endif
62908 +
62909 +#ifdef CONFIG_PAX_SOFTMODE
62910 + {
62911 + .ctl_name = CTL_UNNUMBERED,
62912 + .procname = "pax",
62913 + .mode = 0500,
62914 + .child = pax_table,
62915 + },
62916 +#endif
62917 +
62918 {
62919 .ctl_name = CTL_UNNUMBERED,
62920 .procname = "sched_child_runs_first",
62921 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62922 .data = &modprobe_path,
62923 .maxlen = KMOD_PATH_LEN,
62924 .mode = 0644,
62925 - .proc_handler = &proc_dostring,
62926 - .strategy = &sysctl_string,
62927 + .proc_handler = &proc_dostring_modpriv,
62928 + .strategy = &sysctl_string_modpriv,
62929 },
62930 {
62931 .ctl_name = CTL_UNNUMBERED,
62932 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62933 .mode = 0644,
62934 .proc_handler = &proc_dointvec
62935 },
62936 + {
62937 + .procname = "heap_stack_gap",
62938 + .data = &sysctl_heap_stack_gap,
62939 + .maxlen = sizeof(sysctl_heap_stack_gap),
62940 + .mode = 0644,
62941 + .proc_handler = proc_doulongvec_minmax,
62942 + },
62943 #else
62944 {
62945 .ctl_name = CTL_UNNUMBERED,
62946 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62947 return 0;
62948 }
62949
62950 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62951 +
62952 static int parse_table(int __user *name, int nlen,
62953 void __user *oldval, size_t __user *oldlenp,
62954 void __user *newval, size_t newlen,
62955 @@ -1821,7 +1871,7 @@ repeat:
62956 if (n == table->ctl_name) {
62957 int error;
62958 if (table->child) {
62959 - if (sysctl_perm(root, table, MAY_EXEC))
62960 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62961 return -EPERM;
62962 name++;
62963 nlen--;
62964 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62965 int error;
62966 int mode;
62967
62968 + if (table->parent != NULL && table->parent->procname != NULL &&
62969 + table->procname != NULL &&
62970 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62971 + return -EACCES;
62972 + if (gr_handle_chroot_sysctl(op))
62973 + return -EACCES;
62974 + error = gr_handle_sysctl(table, op);
62975 + if (error)
62976 + return error;
62977 +
62978 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62979 + if (error)
62980 + return error;
62981 +
62982 + if (root->permissions)
62983 + mode = root->permissions(root, current->nsproxy, table);
62984 + else
62985 + mode = table->mode;
62986 +
62987 + return test_perm(mode, op);
62988 +}
62989 +
62990 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62991 +{
62992 + int error;
62993 + int mode;
62994 +
62995 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62996 if (error)
62997 return error;
62998 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62999 buffer, lenp, ppos);
63000 }
63001
63002 +int proc_dostring_modpriv(struct ctl_table *table, int write,
63003 + void __user *buffer, size_t *lenp, loff_t *ppos)
63004 +{
63005 + if (write && !capable(CAP_SYS_MODULE))
63006 + return -EPERM;
63007 +
63008 + return _proc_do_string(table->data, table->maxlen, write,
63009 + buffer, lenp, ppos);
63010 +}
63011 +
63012
63013 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
63014 int *valp,
63015 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
63016 vleft = table->maxlen / sizeof(unsigned long);
63017 left = *lenp;
63018
63019 - for (; left && vleft--; i++, min++, max++, first=0) {
63020 + for (; left && vleft--; i++, first=0) {
63021 if (write) {
63022 while (left) {
63023 char c;
63024 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
63025 return -ENOSYS;
63026 }
63027
63028 +int proc_dostring_modpriv(struct ctl_table *table, int write,
63029 + void __user *buffer, size_t *lenp, loff_t *ppos)
63030 +{
63031 + return -ENOSYS;
63032 +}
63033 +
63034 int proc_dointvec(struct ctl_table *table, int write,
63035 void __user *buffer, size_t *lenp, loff_t *ppos)
63036 {
63037 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
63038 return 1;
63039 }
63040
63041 +int sysctl_string_modpriv(struct ctl_table *table,
63042 + void __user *oldval, size_t __user *oldlenp,
63043 + void __user *newval, size_t newlen)
63044 +{
63045 + if (newval && newlen && !capable(CAP_SYS_MODULE))
63046 + return -EPERM;
63047 +
63048 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
63049 +}
63050 +
63051 /*
63052 * This function makes sure that all of the integers in the vector
63053 * are between the minimum and maximum values given in the arrays
63054 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
63055 return -ENOSYS;
63056 }
63057
63058 +int sysctl_string_modpriv(struct ctl_table *table,
63059 + void __user *oldval, size_t __user *oldlenp,
63060 + void __user *newval, size_t newlen)
63061 +{
63062 + return -ENOSYS;
63063 +}
63064 +
63065 int sysctl_intvec(struct ctl_table *table,
63066 void __user *oldval, size_t __user *oldlenp,
63067 void __user *newval, size_t newlen)
63068 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
63069 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
63070 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
63071 EXPORT_SYMBOL(proc_dostring);
63072 +EXPORT_SYMBOL(proc_dostring_modpriv);
63073 EXPORT_SYMBOL(proc_doulongvec_minmax);
63074 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
63075 EXPORT_SYMBOL(register_sysctl_table);
63076 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
63077 EXPORT_SYMBOL(sysctl_jiffies);
63078 EXPORT_SYMBOL(sysctl_ms_jiffies);
63079 EXPORT_SYMBOL(sysctl_string);
63080 +EXPORT_SYMBOL(sysctl_string_modpriv);
63081 EXPORT_SYMBOL(sysctl_data);
63082 EXPORT_SYMBOL(unregister_sysctl_table);
63083 diff -urNp linux-2.6.32.43/kernel/sysctl_check.c linux-2.6.32.43/kernel/sysctl_check.c
63084 --- linux-2.6.32.43/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
63085 +++ linux-2.6.32.43/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
63086 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
63087 } else {
63088 if ((table->strategy == sysctl_data) ||
63089 (table->strategy == sysctl_string) ||
63090 + (table->strategy == sysctl_string_modpriv) ||
63091 (table->strategy == sysctl_intvec) ||
63092 (table->strategy == sysctl_jiffies) ||
63093 (table->strategy == sysctl_ms_jiffies) ||
63094 (table->proc_handler == proc_dostring) ||
63095 + (table->proc_handler == proc_dostring_modpriv) ||
63096 (table->proc_handler == proc_dointvec) ||
63097 (table->proc_handler == proc_dointvec_minmax) ||
63098 (table->proc_handler == proc_dointvec_jiffies) ||
63099 diff -urNp linux-2.6.32.43/kernel/taskstats.c linux-2.6.32.43/kernel/taskstats.c
63100 --- linux-2.6.32.43/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
63101 +++ linux-2.6.32.43/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
63102 @@ -26,9 +26,12 @@
63103 #include <linux/cgroup.h>
63104 #include <linux/fs.h>
63105 #include <linux/file.h>
63106 +#include <linux/grsecurity.h>
63107 #include <net/genetlink.h>
63108 #include <asm/atomic.h>
63109
63110 +extern int gr_is_taskstats_denied(int pid);
63111 +
63112 /*
63113 * Maximum length of a cpumask that can be specified in
63114 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
63115 @@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
63116 size_t size;
63117 cpumask_var_t mask;
63118
63119 + if (gr_is_taskstats_denied(current->pid))
63120 + return -EACCES;
63121 +
63122 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
63123 return -ENOMEM;
63124
63125 diff -urNp linux-2.6.32.43/kernel/time/tick-broadcast.c linux-2.6.32.43/kernel/time/tick-broadcast.c
63126 --- linux-2.6.32.43/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
63127 +++ linux-2.6.32.43/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
63128 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
63129 * then clear the broadcast bit.
63130 */
63131 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
63132 - int cpu = smp_processor_id();
63133 + cpu = smp_processor_id();
63134
63135 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
63136 tick_broadcast_clear_oneshot(cpu);
63137 diff -urNp linux-2.6.32.43/kernel/time/timekeeping.c linux-2.6.32.43/kernel/time/timekeeping.c
63138 --- linux-2.6.32.43/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
63139 +++ linux-2.6.32.43/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
63140 @@ -14,6 +14,7 @@
63141 #include <linux/init.h>
63142 #include <linux/mm.h>
63143 #include <linux/sched.h>
63144 +#include <linux/grsecurity.h>
63145 #include <linux/sysdev.h>
63146 #include <linux/clocksource.h>
63147 #include <linux/jiffies.h>
63148 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
63149 */
63150 struct timespec ts = xtime;
63151 timespec_add_ns(&ts, nsec);
63152 - ACCESS_ONCE(xtime_cache) = ts;
63153 + ACCESS_ONCE_RW(xtime_cache) = ts;
63154 }
63155
63156 /* must hold xtime_lock */
63157 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
63158 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
63159 return -EINVAL;
63160
63161 + gr_log_timechange();
63162 +
63163 write_seqlock_irqsave(&xtime_lock, flags);
63164
63165 timekeeping_forward_now();
63166 diff -urNp linux-2.6.32.43/kernel/time/timer_list.c linux-2.6.32.43/kernel/time/timer_list.c
63167 --- linux-2.6.32.43/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
63168 +++ linux-2.6.32.43/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
63169 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
63170
63171 static void print_name_offset(struct seq_file *m, void *sym)
63172 {
63173 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63174 + SEQ_printf(m, "<%p>", NULL);
63175 +#else
63176 char symname[KSYM_NAME_LEN];
63177
63178 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
63179 SEQ_printf(m, "<%p>", sym);
63180 else
63181 SEQ_printf(m, "%s", symname);
63182 +#endif
63183 }
63184
63185 static void
63186 @@ -112,7 +116,11 @@ next_one:
63187 static void
63188 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
63189 {
63190 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63191 + SEQ_printf(m, " .base: %p\n", NULL);
63192 +#else
63193 SEQ_printf(m, " .base: %p\n", base);
63194 +#endif
63195 SEQ_printf(m, " .index: %d\n",
63196 base->index);
63197 SEQ_printf(m, " .resolution: %Lu nsecs\n",
63198 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
63199 {
63200 struct proc_dir_entry *pe;
63201
63202 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63203 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
63204 +#else
63205 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
63206 +#endif
63207 if (!pe)
63208 return -ENOMEM;
63209 return 0;
63210 diff -urNp linux-2.6.32.43/kernel/time/timer_stats.c linux-2.6.32.43/kernel/time/timer_stats.c
63211 --- linux-2.6.32.43/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
63212 +++ linux-2.6.32.43/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
63213 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
63214 static unsigned long nr_entries;
63215 static struct entry entries[MAX_ENTRIES];
63216
63217 -static atomic_t overflow_count;
63218 +static atomic_unchecked_t overflow_count;
63219
63220 /*
63221 * The entries are in a hash-table, for fast lookup:
63222 @@ -140,7 +140,7 @@ static void reset_entries(void)
63223 nr_entries = 0;
63224 memset(entries, 0, sizeof(entries));
63225 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
63226 - atomic_set(&overflow_count, 0);
63227 + atomic_set_unchecked(&overflow_count, 0);
63228 }
63229
63230 static struct entry *alloc_entry(void)
63231 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
63232 if (likely(entry))
63233 entry->count++;
63234 else
63235 - atomic_inc(&overflow_count);
63236 + atomic_inc_unchecked(&overflow_count);
63237
63238 out_unlock:
63239 spin_unlock_irqrestore(lock, flags);
63240 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
63241
63242 static void print_name_offset(struct seq_file *m, unsigned long addr)
63243 {
63244 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63245 + seq_printf(m, "<%p>", NULL);
63246 +#else
63247 char symname[KSYM_NAME_LEN];
63248
63249 if (lookup_symbol_name(addr, symname) < 0)
63250 seq_printf(m, "<%p>", (void *)addr);
63251 else
63252 seq_printf(m, "%s", symname);
63253 +#endif
63254 }
63255
63256 static int tstats_show(struct seq_file *m, void *v)
63257 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
63258
63259 seq_puts(m, "Timer Stats Version: v0.2\n");
63260 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
63261 - if (atomic_read(&overflow_count))
63262 + if (atomic_read_unchecked(&overflow_count))
63263 seq_printf(m, "Overflow: %d entries\n",
63264 - atomic_read(&overflow_count));
63265 + atomic_read_unchecked(&overflow_count));
63266
63267 for (i = 0; i < nr_entries; i++) {
63268 entry = entries + i;
63269 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
63270 {
63271 struct proc_dir_entry *pe;
63272
63273 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63274 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
63275 +#else
63276 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
63277 +#endif
63278 if (!pe)
63279 return -ENOMEM;
63280 return 0;
63281 diff -urNp linux-2.6.32.43/kernel/time.c linux-2.6.32.43/kernel/time.c
63282 --- linux-2.6.32.43/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
63283 +++ linux-2.6.32.43/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
63284 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
63285 return error;
63286
63287 if (tz) {
63288 + /* we log in do_settimeofday called below, so don't log twice
63289 + */
63290 + if (!tv)
63291 + gr_log_timechange();
63292 +
63293 /* SMP safe, global irq locking makes it work. */
63294 sys_tz = *tz;
63295 update_vsyscall_tz();
63296 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
63297 * Avoid unnecessary multiplications/divisions in the
63298 * two most common HZ cases:
63299 */
63300 -unsigned int inline jiffies_to_msecs(const unsigned long j)
63301 +inline unsigned int jiffies_to_msecs(const unsigned long j)
63302 {
63303 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
63304 return (MSEC_PER_SEC / HZ) * j;
63305 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
63306 }
63307 EXPORT_SYMBOL(jiffies_to_msecs);
63308
63309 -unsigned int inline jiffies_to_usecs(const unsigned long j)
63310 +inline unsigned int jiffies_to_usecs(const unsigned long j)
63311 {
63312 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
63313 return (USEC_PER_SEC / HZ) * j;
63314 diff -urNp linux-2.6.32.43/kernel/timer.c linux-2.6.32.43/kernel/timer.c
63315 --- linux-2.6.32.43/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
63316 +++ linux-2.6.32.43/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
63317 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
63318 /*
63319 * This function runs timers and the timer-tq in bottom half context.
63320 */
63321 -static void run_timer_softirq(struct softirq_action *h)
63322 +static void run_timer_softirq(void)
63323 {
63324 struct tvec_base *base = __get_cpu_var(tvec_bases);
63325
63326 diff -urNp linux-2.6.32.43/kernel/trace/blktrace.c linux-2.6.32.43/kernel/trace/blktrace.c
63327 --- linux-2.6.32.43/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
63328 +++ linux-2.6.32.43/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
63329 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
63330 struct blk_trace *bt = filp->private_data;
63331 char buf[16];
63332
63333 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
63334 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
63335
63336 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
63337 }
63338 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
63339 return 1;
63340
63341 bt = buf->chan->private_data;
63342 - atomic_inc(&bt->dropped);
63343 + atomic_inc_unchecked(&bt->dropped);
63344 return 0;
63345 }
63346
63347 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
63348
63349 bt->dir = dir;
63350 bt->dev = dev;
63351 - atomic_set(&bt->dropped, 0);
63352 + atomic_set_unchecked(&bt->dropped, 0);
63353
63354 ret = -EIO;
63355 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
63356 diff -urNp linux-2.6.32.43/kernel/trace/ftrace.c linux-2.6.32.43/kernel/trace/ftrace.c
63357 --- linux-2.6.32.43/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
63358 +++ linux-2.6.32.43/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
63359 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
63360
63361 ip = rec->ip;
63362
63363 + ret = ftrace_arch_code_modify_prepare();
63364 + FTRACE_WARN_ON(ret);
63365 + if (ret)
63366 + return 0;
63367 +
63368 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
63369 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
63370 if (ret) {
63371 ftrace_bug(ret, ip);
63372 rec->flags |= FTRACE_FL_FAILED;
63373 - return 0;
63374 }
63375 - return 1;
63376 + return ret ? 0 : 1;
63377 }
63378
63379 /*
63380 diff -urNp linux-2.6.32.43/kernel/trace/ring_buffer.c linux-2.6.32.43/kernel/trace/ring_buffer.c
63381 --- linux-2.6.32.43/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
63382 +++ linux-2.6.32.43/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
63383 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
63384 * the reader page). But if the next page is a header page,
63385 * its flags will be non zero.
63386 */
63387 -static int inline
63388 +static inline int
63389 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
63390 struct buffer_page *page, struct list_head *list)
63391 {
63392 diff -urNp linux-2.6.32.43/kernel/trace/trace.c linux-2.6.32.43/kernel/trace/trace.c
63393 --- linux-2.6.32.43/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
63394 +++ linux-2.6.32.43/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
63395 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
63396 size_t rem;
63397 unsigned int i;
63398
63399 + pax_track_stack();
63400 +
63401 /* copy the tracer to avoid using a global lock all around */
63402 mutex_lock(&trace_types_lock);
63403 if (unlikely(old_tracer != current_trace && current_trace)) {
63404 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
63405 int entries, size, i;
63406 size_t ret;
63407
63408 + pax_track_stack();
63409 +
63410 if (*ppos & (PAGE_SIZE - 1)) {
63411 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
63412 return -EINVAL;
63413 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
63414 };
63415 #endif
63416
63417 -static struct dentry *d_tracer;
63418 -
63419 struct dentry *tracing_init_dentry(void)
63420 {
63421 + static struct dentry *d_tracer;
63422 static int once;
63423
63424 if (d_tracer)
63425 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
63426 return d_tracer;
63427 }
63428
63429 -static struct dentry *d_percpu;
63430 -
63431 struct dentry *tracing_dentry_percpu(void)
63432 {
63433 + static struct dentry *d_percpu;
63434 static int once;
63435 struct dentry *d_tracer;
63436
63437 diff -urNp linux-2.6.32.43/kernel/trace/trace_events.c linux-2.6.32.43/kernel/trace/trace_events.c
63438 --- linux-2.6.32.43/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
63439 +++ linux-2.6.32.43/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
63440 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
63441 * Modules must own their file_operations to keep up with
63442 * reference counting.
63443 */
63444 +
63445 +/* cannot be const */
63446 struct ftrace_module_file_ops {
63447 struct list_head list;
63448 struct module *mod;
63449 diff -urNp linux-2.6.32.43/kernel/trace/trace_mmiotrace.c linux-2.6.32.43/kernel/trace/trace_mmiotrace.c
63450 --- linux-2.6.32.43/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
63451 +++ linux-2.6.32.43/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
63452 @@ -23,7 +23,7 @@ struct header_iter {
63453 static struct trace_array *mmio_trace_array;
63454 static bool overrun_detected;
63455 static unsigned long prev_overruns;
63456 -static atomic_t dropped_count;
63457 +static atomic_unchecked_t dropped_count;
63458
63459 static void mmio_reset_data(struct trace_array *tr)
63460 {
63461 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
63462
63463 static unsigned long count_overruns(struct trace_iterator *iter)
63464 {
63465 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
63466 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
63467 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
63468
63469 if (over > prev_overruns)
63470 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
63471 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
63472 sizeof(*entry), 0, pc);
63473 if (!event) {
63474 - atomic_inc(&dropped_count);
63475 + atomic_inc_unchecked(&dropped_count);
63476 return;
63477 }
63478 entry = ring_buffer_event_data(event);
63479 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
63480 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
63481 sizeof(*entry), 0, pc);
63482 if (!event) {
63483 - atomic_inc(&dropped_count);
63484 + atomic_inc_unchecked(&dropped_count);
63485 return;
63486 }
63487 entry = ring_buffer_event_data(event);
63488 diff -urNp linux-2.6.32.43/kernel/trace/trace_output.c linux-2.6.32.43/kernel/trace/trace_output.c
63489 --- linux-2.6.32.43/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
63490 +++ linux-2.6.32.43/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
63491 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
63492 return 0;
63493 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
63494 if (!IS_ERR(p)) {
63495 - p = mangle_path(s->buffer + s->len, p, "\n");
63496 + p = mangle_path(s->buffer + s->len, p, "\n\\");
63497 if (p) {
63498 s->len = p - s->buffer;
63499 return 1;
63500 diff -urNp linux-2.6.32.43/kernel/trace/trace_stack.c linux-2.6.32.43/kernel/trace/trace_stack.c
63501 --- linux-2.6.32.43/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
63502 +++ linux-2.6.32.43/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
63503 @@ -50,7 +50,7 @@ static inline void check_stack(void)
63504 return;
63505
63506 /* we do not handle interrupt stacks yet */
63507 - if (!object_is_on_stack(&this_size))
63508 + if (!object_starts_on_stack(&this_size))
63509 return;
63510
63511 local_irq_save(flags);
63512 diff -urNp linux-2.6.32.43/kernel/trace/trace_workqueue.c linux-2.6.32.43/kernel/trace/trace_workqueue.c
63513 --- linux-2.6.32.43/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
63514 +++ linux-2.6.32.43/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
63515 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
63516 int cpu;
63517 pid_t pid;
63518 /* Can be inserted from interrupt or user context, need to be atomic */
63519 - atomic_t inserted;
63520 + atomic_unchecked_t inserted;
63521 /*
63522 * Don't need to be atomic, works are serialized in a single workqueue thread
63523 * on a single CPU.
63524 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
63525 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
63526 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
63527 if (node->pid == wq_thread->pid) {
63528 - atomic_inc(&node->inserted);
63529 + atomic_inc_unchecked(&node->inserted);
63530 goto found;
63531 }
63532 }
63533 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
63534 tsk = get_pid_task(pid, PIDTYPE_PID);
63535 if (tsk) {
63536 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
63537 - atomic_read(&cws->inserted), cws->executed,
63538 + atomic_read_unchecked(&cws->inserted), cws->executed,
63539 tsk->comm);
63540 put_task_struct(tsk);
63541 }
63542 diff -urNp linux-2.6.32.43/kernel/user.c linux-2.6.32.43/kernel/user.c
63543 --- linux-2.6.32.43/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
63544 +++ linux-2.6.32.43/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
63545 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
63546 spin_lock_irq(&uidhash_lock);
63547 up = uid_hash_find(uid, hashent);
63548 if (up) {
63549 + put_user_ns(ns);
63550 key_put(new->uid_keyring);
63551 key_put(new->session_keyring);
63552 kmem_cache_free(uid_cachep, new);
63553 diff -urNp linux-2.6.32.43/lib/bug.c linux-2.6.32.43/lib/bug.c
63554 --- linux-2.6.32.43/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
63555 +++ linux-2.6.32.43/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
63556 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
63557 return BUG_TRAP_TYPE_NONE;
63558
63559 bug = find_bug(bugaddr);
63560 + if (!bug)
63561 + return BUG_TRAP_TYPE_NONE;
63562
63563 printk(KERN_EMERG "------------[ cut here ]------------\n");
63564
63565 diff -urNp linux-2.6.32.43/lib/debugobjects.c linux-2.6.32.43/lib/debugobjects.c
63566 --- linux-2.6.32.43/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
63567 +++ linux-2.6.32.43/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
63568 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
63569 if (limit > 4)
63570 return;
63571
63572 - is_on_stack = object_is_on_stack(addr);
63573 + is_on_stack = object_starts_on_stack(addr);
63574 if (is_on_stack == onstack)
63575 return;
63576
63577 diff -urNp linux-2.6.32.43/lib/dma-debug.c linux-2.6.32.43/lib/dma-debug.c
63578 --- linux-2.6.32.43/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
63579 +++ linux-2.6.32.43/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
63580 @@ -861,7 +861,7 @@ out:
63581
63582 static void check_for_stack(struct device *dev, void *addr)
63583 {
63584 - if (object_is_on_stack(addr))
63585 + if (object_starts_on_stack(addr))
63586 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
63587 "stack [addr=%p]\n", addr);
63588 }
63589 diff -urNp linux-2.6.32.43/lib/idr.c linux-2.6.32.43/lib/idr.c
63590 --- linux-2.6.32.43/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
63591 +++ linux-2.6.32.43/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
63592 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
63593 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
63594
63595 /* if already at the top layer, we need to grow */
63596 - if (id >= 1 << (idp->layers * IDR_BITS)) {
63597 + if (id >= (1 << (idp->layers * IDR_BITS))) {
63598 *starting_id = id;
63599 return IDR_NEED_TO_GROW;
63600 }
63601 diff -urNp linux-2.6.32.43/lib/inflate.c linux-2.6.32.43/lib/inflate.c
63602 --- linux-2.6.32.43/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
63603 +++ linux-2.6.32.43/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
63604 @@ -266,7 +266,7 @@ static void free(void *where)
63605 malloc_ptr = free_mem_ptr;
63606 }
63607 #else
63608 -#define malloc(a) kmalloc(a, GFP_KERNEL)
63609 +#define malloc(a) kmalloc((a), GFP_KERNEL)
63610 #define free(a) kfree(a)
63611 #endif
63612
63613 diff -urNp linux-2.6.32.43/lib/Kconfig.debug linux-2.6.32.43/lib/Kconfig.debug
63614 --- linux-2.6.32.43/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
63615 +++ linux-2.6.32.43/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
63616 @@ -905,7 +905,7 @@ config LATENCYTOP
63617 select STACKTRACE
63618 select SCHEDSTATS
63619 select SCHED_DEBUG
63620 - depends on HAVE_LATENCYTOP_SUPPORT
63621 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
63622 help
63623 Enable this option if you want to use the LatencyTOP tool
63624 to find out which userspace is blocking on what kernel operations.
63625 diff -urNp linux-2.6.32.43/lib/kobject.c linux-2.6.32.43/lib/kobject.c
63626 --- linux-2.6.32.43/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
63627 +++ linux-2.6.32.43/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
63628 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
63629 return ret;
63630 }
63631
63632 -struct sysfs_ops kobj_sysfs_ops = {
63633 +const struct sysfs_ops kobj_sysfs_ops = {
63634 .show = kobj_attr_show,
63635 .store = kobj_attr_store,
63636 };
63637 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
63638 * If the kset was not able to be created, NULL will be returned.
63639 */
63640 static struct kset *kset_create(const char *name,
63641 - struct kset_uevent_ops *uevent_ops,
63642 + const struct kset_uevent_ops *uevent_ops,
63643 struct kobject *parent_kobj)
63644 {
63645 struct kset *kset;
63646 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
63647 * If the kset was not able to be created, NULL will be returned.
63648 */
63649 struct kset *kset_create_and_add(const char *name,
63650 - struct kset_uevent_ops *uevent_ops,
63651 + const struct kset_uevent_ops *uevent_ops,
63652 struct kobject *parent_kobj)
63653 {
63654 struct kset *kset;
63655 diff -urNp linux-2.6.32.43/lib/kobject_uevent.c linux-2.6.32.43/lib/kobject_uevent.c
63656 --- linux-2.6.32.43/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
63657 +++ linux-2.6.32.43/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
63658 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
63659 const char *subsystem;
63660 struct kobject *top_kobj;
63661 struct kset *kset;
63662 - struct kset_uevent_ops *uevent_ops;
63663 + const struct kset_uevent_ops *uevent_ops;
63664 u64 seq;
63665 int i = 0;
63666 int retval = 0;
63667 diff -urNp linux-2.6.32.43/lib/kref.c linux-2.6.32.43/lib/kref.c
63668 --- linux-2.6.32.43/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
63669 +++ linux-2.6.32.43/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
63670 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
63671 */
63672 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63673 {
63674 - WARN_ON(release == NULL);
63675 + BUG_ON(release == NULL);
63676 WARN_ON(release == (void (*)(struct kref *))kfree);
63677
63678 if (atomic_dec_and_test(&kref->refcount)) {
63679 diff -urNp linux-2.6.32.43/lib/parser.c linux-2.6.32.43/lib/parser.c
63680 --- linux-2.6.32.43/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
63681 +++ linux-2.6.32.43/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
63682 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
63683 char *buf;
63684 int ret;
63685
63686 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
63687 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
63688 if (!buf)
63689 return -ENOMEM;
63690 memcpy(buf, s->from, s->to - s->from);
63691 diff -urNp linux-2.6.32.43/lib/radix-tree.c linux-2.6.32.43/lib/radix-tree.c
63692 --- linux-2.6.32.43/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
63693 +++ linux-2.6.32.43/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
63694 @@ -81,7 +81,7 @@ struct radix_tree_preload {
63695 int nr;
63696 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63697 };
63698 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63699 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63700
63701 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
63702 {
63703 diff -urNp linux-2.6.32.43/lib/random32.c linux-2.6.32.43/lib/random32.c
63704 --- linux-2.6.32.43/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
63705 +++ linux-2.6.32.43/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
63706 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
63707 */
63708 static inline u32 __seed(u32 x, u32 m)
63709 {
63710 - return (x < m) ? x + m : x;
63711 + return (x <= m) ? x + m + 1 : x;
63712 }
63713
63714 /**
63715 diff -urNp linux-2.6.32.43/lib/vsprintf.c linux-2.6.32.43/lib/vsprintf.c
63716 --- linux-2.6.32.43/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
63717 +++ linux-2.6.32.43/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
63718 @@ -16,6 +16,9 @@
63719 * - scnprintf and vscnprintf
63720 */
63721
63722 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63723 +#define __INCLUDED_BY_HIDESYM 1
63724 +#endif
63725 #include <stdarg.h>
63726 #include <linux/module.h>
63727 #include <linux/types.h>
63728 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
63729 return buf;
63730 }
63731
63732 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
63733 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
63734 {
63735 int len, i;
63736
63737 if ((unsigned long)s < PAGE_SIZE)
63738 - s = "<NULL>";
63739 + s = "(null)";
63740
63741 len = strnlen(s, spec.precision);
63742
63743 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
63744 unsigned long value = (unsigned long) ptr;
63745 #ifdef CONFIG_KALLSYMS
63746 char sym[KSYM_SYMBOL_LEN];
63747 - if (ext != 'f' && ext != 's')
63748 + if (ext != 'f' && ext != 's' && ext != 'a')
63749 sprint_symbol(sym, value);
63750 else
63751 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63752 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
63753 * - 'f' For simple symbolic function names without offset
63754 * - 'S' For symbolic direct pointers with offset
63755 * - 's' For symbolic direct pointers without offset
63756 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63757 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63758 * - 'R' For a struct resource pointer, it prints the range of
63759 * addresses (not the name nor the flags)
63760 * - 'M' For a 6-byte MAC address, it prints the address in the
63761 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
63762 struct printf_spec spec)
63763 {
63764 if (!ptr)
63765 - return string(buf, end, "(null)", spec);
63766 + return string(buf, end, "(nil)", spec);
63767
63768 switch (*fmt) {
63769 case 'F':
63770 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
63771 case 's':
63772 /* Fallthrough */
63773 case 'S':
63774 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63775 + break;
63776 +#else
63777 + return symbol_string(buf, end, ptr, spec, *fmt);
63778 +#endif
63779 + case 'a':
63780 + /* Fallthrough */
63781 + case 'A':
63782 return symbol_string(buf, end, ptr, spec, *fmt);
63783 case 'R':
63784 return resource_string(buf, end, ptr, spec);
63785 @@ -1445,7 +1458,7 @@ do { \
63786 size_t len;
63787 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
63788 || (unsigned long)save_str < PAGE_SIZE)
63789 - save_str = "<NULL>";
63790 + save_str = "(null)";
63791 len = strlen(save_str);
63792 if (str + len + 1 < end)
63793 memcpy(str, save_str, len + 1);
63794 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
63795 typeof(type) value; \
63796 if (sizeof(type) == 8) { \
63797 args = PTR_ALIGN(args, sizeof(u32)); \
63798 - *(u32 *)&value = *(u32 *)args; \
63799 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63800 + *(u32 *)&value = *(const u32 *)args; \
63801 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63802 } else { \
63803 args = PTR_ALIGN(args, sizeof(type)); \
63804 - value = *(typeof(type) *)args; \
63805 + value = *(const typeof(type) *)args; \
63806 } \
63807 args += sizeof(type); \
63808 value; \
63809 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
63810 const char *str_arg = args;
63811 size_t len = strlen(str_arg);
63812 args += len + 1;
63813 - str = string(str, end, (char *)str_arg, spec);
63814 + str = string(str, end, str_arg, spec);
63815 break;
63816 }
63817
63818 diff -urNp linux-2.6.32.43/localversion-grsec linux-2.6.32.43/localversion-grsec
63819 --- linux-2.6.32.43/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63820 +++ linux-2.6.32.43/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
63821 @@ -0,0 +1 @@
63822 +-grsec
63823 diff -urNp linux-2.6.32.43/Makefile linux-2.6.32.43/Makefile
63824 --- linux-2.6.32.43/Makefile 2011-07-13 17:23:04.000000000 -0400
63825 +++ linux-2.6.32.43/Makefile 2011-07-13 17:23:18.000000000 -0400
63826 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63827
63828 HOSTCC = gcc
63829 HOSTCXX = g++
63830 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63831 -HOSTCXXFLAGS = -O2
63832 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63833 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63834 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63835
63836 # Decide whether to build built-in, modular, or both.
63837 # Normally, just do built-in.
63838 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
63839 KBUILD_CPPFLAGS := -D__KERNEL__
63840
63841 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63842 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
63843 -fno-strict-aliasing -fno-common \
63844 -Werror-implicit-function-declaration \
63845 -Wno-format-security \
63846 -fno-delete-null-pointer-checks
63847 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63848 KBUILD_AFLAGS := -D__ASSEMBLY__
63849
63850 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63851 @@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
63852 # Rules shared between *config targets and build targets
63853
63854 # Basic helpers built in scripts/
63855 -PHONY += scripts_basic
63856 -scripts_basic:
63857 +PHONY += scripts_basic pax-plugin
63858 +scripts_basic: pax-plugin
63859 $(Q)$(MAKE) $(build)=scripts/basic
63860
63861 # To avoid any implicit rule to kick in, define an empty command.
63862 @@ -403,7 +406,7 @@ endif
63863 # of make so .config is not included in this case either (for *config).
63864
63865 no-dot-config-targets := clean mrproper distclean \
63866 - cscope TAGS tags help %docs check% \
63867 + cscope gtags TAGS tags help %docs check% \
63868 include/linux/version.h headers_% \
63869 kernelrelease kernelversion
63870
63871 @@ -528,6 +531,18 @@ endif
63872
63873 include $(srctree)/arch/$(SRCARCH)/Makefile
63874
63875 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63876 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63877 +endif
63878 +pax-plugin:
63879 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63880 + $(Q)$(MAKE) $(build)=tools/gcc
63881 +else
63882 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63883 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63884 +endif
63885 +endif
63886 +
63887 ifneq ($(CONFIG_FRAME_WARN),0)
63888 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
63889 endif
63890 @@ -644,7 +659,7 @@ export mod_strip_cmd
63891
63892
63893 ifeq ($(KBUILD_EXTMOD),)
63894 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63895 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63896
63897 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63898 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63899 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
63900 endif
63901
63902 # prepare2 creates a makefile if using a separate output directory
63903 -prepare2: prepare3 outputmakefile
63904 +prepare2: prepare3 outputmakefile pax-plugin
63905
63906 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63907 include/asm include/config/auto.conf
63908 @@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63909 include/linux/autoconf.h include/linux/version.h \
63910 include/linux/utsrelease.h \
63911 include/linux/bounds.h include/asm*/asm-offsets.h \
63912 - Module.symvers Module.markers tags TAGS cscope*
63913 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63914
63915 # clean - Delete most, but leave enough to build external modules
63916 #
63917 @@ -1289,6 +1304,7 @@ help:
63918 @echo ' modules_prepare - Set up for building external modules'
63919 @echo ' tags/TAGS - Generate tags file for editors'
63920 @echo ' cscope - Generate cscope index'
63921 + @echo ' gtags - Generate GNU GLOBAL index'
63922 @echo ' kernelrelease - Output the release version string'
63923 @echo ' kernelversion - Output the version stored in Makefile'
63924 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63925 @@ -1421,7 +1437,7 @@ clean: $(clean-dirs)
63926 $(call cmd,rmdirs)
63927 $(call cmd,rmfiles)
63928 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
63929 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
63930 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
63931 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
63932 -o -name '*.gcno' \) -type f -print | xargs rm -f
63933
63934 @@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63935 quiet_cmd_tags = GEN $@
63936 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63937
63938 -tags TAGS cscope: FORCE
63939 +tags TAGS cscope gtags: FORCE
63940 $(call cmd,tags)
63941
63942 # Scripts to check various things for consistency
63943 diff -urNp linux-2.6.32.43/mm/backing-dev.c linux-2.6.32.43/mm/backing-dev.c
63944 --- linux-2.6.32.43/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63945 +++ linux-2.6.32.43/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63946 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63947 * Add the default flusher task that gets created for any bdi
63948 * that has dirty data pending writeout
63949 */
63950 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63951 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63952 {
63953 if (!bdi_cap_writeback_dirty(bdi))
63954 return;
63955 diff -urNp linux-2.6.32.43/mm/filemap.c linux-2.6.32.43/mm/filemap.c
63956 --- linux-2.6.32.43/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63957 +++ linux-2.6.32.43/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63958 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63959 struct address_space *mapping = file->f_mapping;
63960
63961 if (!mapping->a_ops->readpage)
63962 - return -ENOEXEC;
63963 + return -ENODEV;
63964 file_accessed(file);
63965 vma->vm_ops = &generic_file_vm_ops;
63966 vma->vm_flags |= VM_CAN_NONLINEAR;
63967 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63968 *pos = i_size_read(inode);
63969
63970 if (limit != RLIM_INFINITY) {
63971 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63972 if (*pos >= limit) {
63973 send_sig(SIGXFSZ, current, 0);
63974 return -EFBIG;
63975 diff -urNp linux-2.6.32.43/mm/fremap.c linux-2.6.32.43/mm/fremap.c
63976 --- linux-2.6.32.43/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63977 +++ linux-2.6.32.43/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63978 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63979 retry:
63980 vma = find_vma(mm, start);
63981
63982 +#ifdef CONFIG_PAX_SEGMEXEC
63983 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63984 + goto out;
63985 +#endif
63986 +
63987 /*
63988 * Make sure the vma is shared, that it supports prefaulting,
63989 * and that the remapped range is valid and fully within
63990 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63991 /*
63992 * drop PG_Mlocked flag for over-mapped range
63993 */
63994 - unsigned int saved_flags = vma->vm_flags;
63995 + unsigned long saved_flags = vma->vm_flags;
63996 munlock_vma_pages_range(vma, start, start + size);
63997 vma->vm_flags = saved_flags;
63998 }
63999 diff -urNp linux-2.6.32.43/mm/highmem.c linux-2.6.32.43/mm/highmem.c
64000 --- linux-2.6.32.43/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
64001 +++ linux-2.6.32.43/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
64002 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
64003 * So no dangers, even with speculative execution.
64004 */
64005 page = pte_page(pkmap_page_table[i]);
64006 + pax_open_kernel();
64007 pte_clear(&init_mm, (unsigned long)page_address(page),
64008 &pkmap_page_table[i]);
64009 -
64010 + pax_close_kernel();
64011 set_page_address(page, NULL);
64012 need_flush = 1;
64013 }
64014 @@ -177,9 +178,11 @@ start:
64015 }
64016 }
64017 vaddr = PKMAP_ADDR(last_pkmap_nr);
64018 +
64019 + pax_open_kernel();
64020 set_pte_at(&init_mm, vaddr,
64021 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
64022 -
64023 + pax_close_kernel();
64024 pkmap_count[last_pkmap_nr] = 1;
64025 set_page_address(page, (void *)vaddr);
64026
64027 diff -urNp linux-2.6.32.43/mm/hugetlb.c linux-2.6.32.43/mm/hugetlb.c
64028 --- linux-2.6.32.43/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
64029 +++ linux-2.6.32.43/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
64030 @@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
64031 return 1;
64032 }
64033
64034 +#ifdef CONFIG_PAX_SEGMEXEC
64035 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
64036 +{
64037 + struct mm_struct *mm = vma->vm_mm;
64038 + struct vm_area_struct *vma_m;
64039 + unsigned long address_m;
64040 + pte_t *ptep_m;
64041 +
64042 + vma_m = pax_find_mirror_vma(vma);
64043 + if (!vma_m)
64044 + return;
64045 +
64046 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64047 + address_m = address + SEGMEXEC_TASK_SIZE;
64048 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
64049 + get_page(page_m);
64050 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
64051 +}
64052 +#endif
64053 +
64054 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
64055 unsigned long address, pte_t *ptep, pte_t pte,
64056 struct page *pagecache_page)
64057 @@ -2004,6 +2024,11 @@ retry_avoidcopy:
64058 huge_ptep_clear_flush(vma, address, ptep);
64059 set_huge_pte_at(mm, address, ptep,
64060 make_huge_pte(vma, new_page, 1));
64061 +
64062 +#ifdef CONFIG_PAX_SEGMEXEC
64063 + pax_mirror_huge_pte(vma, address, new_page);
64064 +#endif
64065 +
64066 /* Make the old page be freed below */
64067 new_page = old_page;
64068 }
64069 @@ -2135,6 +2160,10 @@ retry:
64070 && (vma->vm_flags & VM_SHARED)));
64071 set_huge_pte_at(mm, address, ptep, new_pte);
64072
64073 +#ifdef CONFIG_PAX_SEGMEXEC
64074 + pax_mirror_huge_pte(vma, address, page);
64075 +#endif
64076 +
64077 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
64078 /* Optimization, do the COW without a second fault */
64079 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
64080 @@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
64081 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
64082 struct hstate *h = hstate_vma(vma);
64083
64084 +#ifdef CONFIG_PAX_SEGMEXEC
64085 + struct vm_area_struct *vma_m;
64086 +
64087 + vma_m = pax_find_mirror_vma(vma);
64088 + if (vma_m) {
64089 + unsigned long address_m;
64090 +
64091 + if (vma->vm_start > vma_m->vm_start) {
64092 + address_m = address;
64093 + address -= SEGMEXEC_TASK_SIZE;
64094 + vma = vma_m;
64095 + h = hstate_vma(vma);
64096 + } else
64097 + address_m = address + SEGMEXEC_TASK_SIZE;
64098 +
64099 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
64100 + return VM_FAULT_OOM;
64101 + address_m &= HPAGE_MASK;
64102 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
64103 + }
64104 +#endif
64105 +
64106 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
64107 if (!ptep)
64108 return VM_FAULT_OOM;
64109 diff -urNp linux-2.6.32.43/mm/internal.h linux-2.6.32.43/mm/internal.h
64110 --- linux-2.6.32.43/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
64111 +++ linux-2.6.32.43/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
64112 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
64113 * in mm/page_alloc.c
64114 */
64115 extern void __free_pages_bootmem(struct page *page, unsigned int order);
64116 +extern void free_compound_page(struct page *page);
64117 extern void prep_compound_page(struct page *page, unsigned long order);
64118
64119
64120 diff -urNp linux-2.6.32.43/mm/Kconfig linux-2.6.32.43/mm/Kconfig
64121 --- linux-2.6.32.43/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
64122 +++ linux-2.6.32.43/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
64123 @@ -228,7 +228,7 @@ config KSM
64124 config DEFAULT_MMAP_MIN_ADDR
64125 int "Low address space to protect from user allocation"
64126 depends on MMU
64127 - default 4096
64128 + default 65536
64129 help
64130 This is the portion of low virtual memory which should be protected
64131 from userspace allocation. Keeping a user from writing to low pages
64132 diff -urNp linux-2.6.32.43/mm/kmemleak.c linux-2.6.32.43/mm/kmemleak.c
64133 --- linux-2.6.32.43/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
64134 +++ linux-2.6.32.43/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
64135 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
64136
64137 for (i = 0; i < object->trace_len; i++) {
64138 void *ptr = (void *)object->trace[i];
64139 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
64140 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
64141 }
64142 }
64143
64144 diff -urNp linux-2.6.32.43/mm/maccess.c linux-2.6.32.43/mm/maccess.c
64145 --- linux-2.6.32.43/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
64146 +++ linux-2.6.32.43/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
64147 @@ -14,7 +14,7 @@
64148 * Safely read from address @src to the buffer at @dst. If a kernel fault
64149 * happens, handle that and return -EFAULT.
64150 */
64151 -long probe_kernel_read(void *dst, void *src, size_t size)
64152 +long probe_kernel_read(void *dst, const void *src, size_t size)
64153 {
64154 long ret;
64155 mm_segment_t old_fs = get_fs();
64156 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
64157 * Safely write to address @dst from the buffer at @src. If a kernel fault
64158 * happens, handle that and return -EFAULT.
64159 */
64160 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
64161 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
64162 {
64163 long ret;
64164 mm_segment_t old_fs = get_fs();
64165 diff -urNp linux-2.6.32.43/mm/madvise.c linux-2.6.32.43/mm/madvise.c
64166 --- linux-2.6.32.43/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
64167 +++ linux-2.6.32.43/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
64168 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
64169 pgoff_t pgoff;
64170 unsigned long new_flags = vma->vm_flags;
64171
64172 +#ifdef CONFIG_PAX_SEGMEXEC
64173 + struct vm_area_struct *vma_m;
64174 +#endif
64175 +
64176 switch (behavior) {
64177 case MADV_NORMAL:
64178 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
64179 @@ -103,6 +107,13 @@ success:
64180 /*
64181 * vm_flags is protected by the mmap_sem held in write mode.
64182 */
64183 +
64184 +#ifdef CONFIG_PAX_SEGMEXEC
64185 + vma_m = pax_find_mirror_vma(vma);
64186 + if (vma_m)
64187 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
64188 +#endif
64189 +
64190 vma->vm_flags = new_flags;
64191
64192 out:
64193 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
64194 struct vm_area_struct ** prev,
64195 unsigned long start, unsigned long end)
64196 {
64197 +
64198 +#ifdef CONFIG_PAX_SEGMEXEC
64199 + struct vm_area_struct *vma_m;
64200 +#endif
64201 +
64202 *prev = vma;
64203 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
64204 return -EINVAL;
64205 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
64206 zap_page_range(vma, start, end - start, &details);
64207 } else
64208 zap_page_range(vma, start, end - start, NULL);
64209 +
64210 +#ifdef CONFIG_PAX_SEGMEXEC
64211 + vma_m = pax_find_mirror_vma(vma);
64212 + if (vma_m) {
64213 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
64214 + struct zap_details details = {
64215 + .nonlinear_vma = vma_m,
64216 + .last_index = ULONG_MAX,
64217 + };
64218 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
64219 + } else
64220 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
64221 + }
64222 +#endif
64223 +
64224 return 0;
64225 }
64226
64227 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
64228 if (end < start)
64229 goto out;
64230
64231 +#ifdef CONFIG_PAX_SEGMEXEC
64232 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
64233 + if (end > SEGMEXEC_TASK_SIZE)
64234 + goto out;
64235 + } else
64236 +#endif
64237 +
64238 + if (end > TASK_SIZE)
64239 + goto out;
64240 +
64241 error = 0;
64242 if (end == start)
64243 goto out;
64244 diff -urNp linux-2.6.32.43/mm/memory.c linux-2.6.32.43/mm/memory.c
64245 --- linux-2.6.32.43/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
64246 +++ linux-2.6.32.43/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
64247 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
64248 return;
64249
64250 pmd = pmd_offset(pud, start);
64251 +
64252 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
64253 pud_clear(pud);
64254 pmd_free_tlb(tlb, pmd, start);
64255 +#endif
64256 +
64257 }
64258
64259 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
64260 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
64261 if (end - 1 > ceiling - 1)
64262 return;
64263
64264 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
64265 pud = pud_offset(pgd, start);
64266 pgd_clear(pgd);
64267 pud_free_tlb(tlb, pud, start);
64268 +#endif
64269 +
64270 }
64271
64272 /*
64273 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
64274 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
64275 i = 0;
64276
64277 - do {
64278 + while (nr_pages) {
64279 struct vm_area_struct *vma;
64280
64281 - vma = find_extend_vma(mm, start);
64282 + vma = find_vma(mm, start);
64283 if (!vma && in_gate_area(tsk, start)) {
64284 unsigned long pg = start & PAGE_MASK;
64285 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
64286 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
64287 continue;
64288 }
64289
64290 - if (!vma ||
64291 + if (!vma || start < vma->vm_start ||
64292 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
64293 !(vm_flags & vma->vm_flags))
64294 return i ? : -EFAULT;
64295 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
64296 start += PAGE_SIZE;
64297 nr_pages--;
64298 } while (nr_pages && start < vma->vm_end);
64299 - } while (nr_pages);
64300 + }
64301 return i;
64302 }
64303
64304 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
64305 page_add_file_rmap(page);
64306 set_pte_at(mm, addr, pte, mk_pte(page, prot));
64307
64308 +#ifdef CONFIG_PAX_SEGMEXEC
64309 + pax_mirror_file_pte(vma, addr, page, ptl);
64310 +#endif
64311 +
64312 retval = 0;
64313 pte_unmap_unlock(pte, ptl);
64314 return retval;
64315 @@ -1560,10 +1571,22 @@ out:
64316 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
64317 struct page *page)
64318 {
64319 +
64320 +#ifdef CONFIG_PAX_SEGMEXEC
64321 + struct vm_area_struct *vma_m;
64322 +#endif
64323 +
64324 if (addr < vma->vm_start || addr >= vma->vm_end)
64325 return -EFAULT;
64326 if (!page_count(page))
64327 return -EINVAL;
64328 +
64329 +#ifdef CONFIG_PAX_SEGMEXEC
64330 + vma_m = pax_find_mirror_vma(vma);
64331 + if (vma_m)
64332 + vma_m->vm_flags |= VM_INSERTPAGE;
64333 +#endif
64334 +
64335 vma->vm_flags |= VM_INSERTPAGE;
64336 return insert_page(vma, addr, page, vma->vm_page_prot);
64337 }
64338 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
64339 unsigned long pfn)
64340 {
64341 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
64342 + BUG_ON(vma->vm_mirror);
64343
64344 if (addr < vma->vm_start || addr >= vma->vm_end)
64345 return -EFAULT;
64346 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
64347 copy_user_highpage(dst, src, va, vma);
64348 }
64349
64350 +#ifdef CONFIG_PAX_SEGMEXEC
64351 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
64352 +{
64353 + struct mm_struct *mm = vma->vm_mm;
64354 + spinlock_t *ptl;
64355 + pte_t *pte, entry;
64356 +
64357 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
64358 + entry = *pte;
64359 + if (!pte_present(entry)) {
64360 + if (!pte_none(entry)) {
64361 + BUG_ON(pte_file(entry));
64362 + free_swap_and_cache(pte_to_swp_entry(entry));
64363 + pte_clear_not_present_full(mm, address, pte, 0);
64364 + }
64365 + } else {
64366 + struct page *page;
64367 +
64368 + flush_cache_page(vma, address, pte_pfn(entry));
64369 + entry = ptep_clear_flush(vma, address, pte);
64370 + BUG_ON(pte_dirty(entry));
64371 + page = vm_normal_page(vma, address, entry);
64372 + if (page) {
64373 + update_hiwater_rss(mm);
64374 + if (PageAnon(page))
64375 + dec_mm_counter(mm, anon_rss);
64376 + else
64377 + dec_mm_counter(mm, file_rss);
64378 + page_remove_rmap(page);
64379 + page_cache_release(page);
64380 + }
64381 + }
64382 + pte_unmap_unlock(pte, ptl);
64383 +}
64384 +
64385 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
64386 + *
64387 + * the ptl of the lower mapped page is held on entry and is not released on exit
64388 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
64389 + */
64390 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64391 +{
64392 + struct mm_struct *mm = vma->vm_mm;
64393 + unsigned long address_m;
64394 + spinlock_t *ptl_m;
64395 + struct vm_area_struct *vma_m;
64396 + pmd_t *pmd_m;
64397 + pte_t *pte_m, entry_m;
64398 +
64399 + BUG_ON(!page_m || !PageAnon(page_m));
64400 +
64401 + vma_m = pax_find_mirror_vma(vma);
64402 + if (!vma_m)
64403 + return;
64404 +
64405 + BUG_ON(!PageLocked(page_m));
64406 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64407 + address_m = address + SEGMEXEC_TASK_SIZE;
64408 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64409 + pte_m = pte_offset_map_nested(pmd_m, address_m);
64410 + ptl_m = pte_lockptr(mm, pmd_m);
64411 + if (ptl != ptl_m) {
64412 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64413 + if (!pte_none(*pte_m))
64414 + goto out;
64415 + }
64416 +
64417 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64418 + page_cache_get(page_m);
64419 + page_add_anon_rmap(page_m, vma_m, address_m);
64420 + inc_mm_counter(mm, anon_rss);
64421 + set_pte_at(mm, address_m, pte_m, entry_m);
64422 + update_mmu_cache(vma_m, address_m, entry_m);
64423 +out:
64424 + if (ptl != ptl_m)
64425 + spin_unlock(ptl_m);
64426 + pte_unmap_nested(pte_m);
64427 + unlock_page(page_m);
64428 +}
64429 +
64430 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64431 +{
64432 + struct mm_struct *mm = vma->vm_mm;
64433 + unsigned long address_m;
64434 + spinlock_t *ptl_m;
64435 + struct vm_area_struct *vma_m;
64436 + pmd_t *pmd_m;
64437 + pte_t *pte_m, entry_m;
64438 +
64439 + BUG_ON(!page_m || PageAnon(page_m));
64440 +
64441 + vma_m = pax_find_mirror_vma(vma);
64442 + if (!vma_m)
64443 + return;
64444 +
64445 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64446 + address_m = address + SEGMEXEC_TASK_SIZE;
64447 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64448 + pte_m = pte_offset_map_nested(pmd_m, address_m);
64449 + ptl_m = pte_lockptr(mm, pmd_m);
64450 + if (ptl != ptl_m) {
64451 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64452 + if (!pte_none(*pte_m))
64453 + goto out;
64454 + }
64455 +
64456 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64457 + page_cache_get(page_m);
64458 + page_add_file_rmap(page_m);
64459 + inc_mm_counter(mm, file_rss);
64460 + set_pte_at(mm, address_m, pte_m, entry_m);
64461 + update_mmu_cache(vma_m, address_m, entry_m);
64462 +out:
64463 + if (ptl != ptl_m)
64464 + spin_unlock(ptl_m);
64465 + pte_unmap_nested(pte_m);
64466 +}
64467 +
64468 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
64469 +{
64470 + struct mm_struct *mm = vma->vm_mm;
64471 + unsigned long address_m;
64472 + spinlock_t *ptl_m;
64473 + struct vm_area_struct *vma_m;
64474 + pmd_t *pmd_m;
64475 + pte_t *pte_m, entry_m;
64476 +
64477 + vma_m = pax_find_mirror_vma(vma);
64478 + if (!vma_m)
64479 + return;
64480 +
64481 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64482 + address_m = address + SEGMEXEC_TASK_SIZE;
64483 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64484 + pte_m = pte_offset_map_nested(pmd_m, address_m);
64485 + ptl_m = pte_lockptr(mm, pmd_m);
64486 + if (ptl != ptl_m) {
64487 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64488 + if (!pte_none(*pte_m))
64489 + goto out;
64490 + }
64491 +
64492 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
64493 + set_pte_at(mm, address_m, pte_m, entry_m);
64494 +out:
64495 + if (ptl != ptl_m)
64496 + spin_unlock(ptl_m);
64497 + pte_unmap_nested(pte_m);
64498 +}
64499 +
64500 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
64501 +{
64502 + struct page *page_m;
64503 + pte_t entry;
64504 +
64505 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
64506 + goto out;
64507 +
64508 + entry = *pte;
64509 + page_m = vm_normal_page(vma, address, entry);
64510 + if (!page_m)
64511 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
64512 + else if (PageAnon(page_m)) {
64513 + if (pax_find_mirror_vma(vma)) {
64514 + pte_unmap_unlock(pte, ptl);
64515 + lock_page(page_m);
64516 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
64517 + if (pte_same(entry, *pte))
64518 + pax_mirror_anon_pte(vma, address, page_m, ptl);
64519 + else
64520 + unlock_page(page_m);
64521 + }
64522 + } else
64523 + pax_mirror_file_pte(vma, address, page_m, ptl);
64524 +
64525 +out:
64526 + pte_unmap_unlock(pte, ptl);
64527 +}
64528 +#endif
64529 +
64530 /*
64531 * This routine handles present pages, when users try to write
64532 * to a shared page. It is done by copying the page to a new address
64533 @@ -2156,6 +2360,12 @@ gotten:
64534 */
64535 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64536 if (likely(pte_same(*page_table, orig_pte))) {
64537 +
64538 +#ifdef CONFIG_PAX_SEGMEXEC
64539 + if (pax_find_mirror_vma(vma))
64540 + BUG_ON(!trylock_page(new_page));
64541 +#endif
64542 +
64543 if (old_page) {
64544 if (!PageAnon(old_page)) {
64545 dec_mm_counter(mm, file_rss);
64546 @@ -2207,6 +2417,10 @@ gotten:
64547 page_remove_rmap(old_page);
64548 }
64549
64550 +#ifdef CONFIG_PAX_SEGMEXEC
64551 + pax_mirror_anon_pte(vma, address, new_page, ptl);
64552 +#endif
64553 +
64554 /* Free the old page.. */
64555 new_page = old_page;
64556 ret |= VM_FAULT_WRITE;
64557 @@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
64558 swap_free(entry);
64559 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
64560 try_to_free_swap(page);
64561 +
64562 +#ifdef CONFIG_PAX_SEGMEXEC
64563 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
64564 +#endif
64565 +
64566 unlock_page(page);
64567
64568 if (flags & FAULT_FLAG_WRITE) {
64569 @@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
64570
64571 /* No need to invalidate - it was non-present before */
64572 update_mmu_cache(vma, address, pte);
64573 +
64574 +#ifdef CONFIG_PAX_SEGMEXEC
64575 + pax_mirror_anon_pte(vma, address, page, ptl);
64576 +#endif
64577 +
64578 unlock:
64579 pte_unmap_unlock(page_table, ptl);
64580 out:
64581 @@ -2632,40 +2856,6 @@ out_release:
64582 }
64583
64584 /*
64585 - * This is like a special single-page "expand_{down|up}wards()",
64586 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
64587 - * doesn't hit another vma.
64588 - */
64589 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
64590 -{
64591 - address &= PAGE_MASK;
64592 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
64593 - struct vm_area_struct *prev = vma->vm_prev;
64594 -
64595 - /*
64596 - * Is there a mapping abutting this one below?
64597 - *
64598 - * That's only ok if it's the same stack mapping
64599 - * that has gotten split..
64600 - */
64601 - if (prev && prev->vm_end == address)
64602 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64603 -
64604 - expand_stack(vma, address - PAGE_SIZE);
64605 - }
64606 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64607 - struct vm_area_struct *next = vma->vm_next;
64608 -
64609 - /* As VM_GROWSDOWN but s/below/above/ */
64610 - if (next && next->vm_start == address + PAGE_SIZE)
64611 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64612 -
64613 - expand_upwards(vma, address + PAGE_SIZE);
64614 - }
64615 - return 0;
64616 -}
64617 -
64618 -/*
64619 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64620 * but allow concurrent faults), and pte mapped but not yet locked.
64621 * We return with mmap_sem still held, but pte unmapped and unlocked.
64622 @@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
64623 unsigned long address, pte_t *page_table, pmd_t *pmd,
64624 unsigned int flags)
64625 {
64626 - struct page *page;
64627 + struct page *page = NULL;
64628 spinlock_t *ptl;
64629 pte_t entry;
64630
64631 - pte_unmap(page_table);
64632 -
64633 - /* Check if we need to add a guard page to the stack */
64634 - if (check_stack_guard_page(vma, address) < 0)
64635 - return VM_FAULT_SIGBUS;
64636 -
64637 - /* Use the zero-page for reads */
64638 if (!(flags & FAULT_FLAG_WRITE)) {
64639 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64640 vma->vm_page_prot));
64641 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64642 + ptl = pte_lockptr(mm, pmd);
64643 + spin_lock(ptl);
64644 if (!pte_none(*page_table))
64645 goto unlock;
64646 goto setpte;
64647 }
64648
64649 /* Allocate our own private page. */
64650 + pte_unmap(page_table);
64651 +
64652 if (unlikely(anon_vma_prepare(vma)))
64653 goto oom;
64654 page = alloc_zeroed_user_highpage_movable(vma, address);
64655 @@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
64656 if (!pte_none(*page_table))
64657 goto release;
64658
64659 +#ifdef CONFIG_PAX_SEGMEXEC
64660 + if (pax_find_mirror_vma(vma))
64661 + BUG_ON(!trylock_page(page));
64662 +#endif
64663 +
64664 inc_mm_counter(mm, anon_rss);
64665 page_add_new_anon_rmap(page, vma, address);
64666 setpte:
64667 @@ -2720,6 +2911,12 @@ setpte:
64668
64669 /* No need to invalidate - it was non-present before */
64670 update_mmu_cache(vma, address, entry);
64671 +
64672 +#ifdef CONFIG_PAX_SEGMEXEC
64673 + if (page)
64674 + pax_mirror_anon_pte(vma, address, page, ptl);
64675 +#endif
64676 +
64677 unlock:
64678 pte_unmap_unlock(page_table, ptl);
64679 return 0;
64680 @@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
64681 */
64682 /* Only go through if we didn't race with anybody else... */
64683 if (likely(pte_same(*page_table, orig_pte))) {
64684 +
64685 +#ifdef CONFIG_PAX_SEGMEXEC
64686 + if (anon && pax_find_mirror_vma(vma))
64687 + BUG_ON(!trylock_page(page));
64688 +#endif
64689 +
64690 flush_icache_page(vma, page);
64691 entry = mk_pte(page, vma->vm_page_prot);
64692 if (flags & FAULT_FLAG_WRITE)
64693 @@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
64694
64695 /* no need to invalidate: a not-present page won't be cached */
64696 update_mmu_cache(vma, address, entry);
64697 +
64698 +#ifdef CONFIG_PAX_SEGMEXEC
64699 + if (anon)
64700 + pax_mirror_anon_pte(vma, address, page, ptl);
64701 + else
64702 + pax_mirror_file_pte(vma, address, page, ptl);
64703 +#endif
64704 +
64705 } else {
64706 if (charged)
64707 mem_cgroup_uncharge_page(page);
64708 @@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
64709 if (flags & FAULT_FLAG_WRITE)
64710 flush_tlb_page(vma, address);
64711 }
64712 +
64713 +#ifdef CONFIG_PAX_SEGMEXEC
64714 + pax_mirror_pte(vma, address, pte, pmd, ptl);
64715 + return 0;
64716 +#endif
64717 +
64718 unlock:
64719 pte_unmap_unlock(pte, ptl);
64720 return 0;
64721 @@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
64722 pmd_t *pmd;
64723 pte_t *pte;
64724
64725 +#ifdef CONFIG_PAX_SEGMEXEC
64726 + struct vm_area_struct *vma_m;
64727 +#endif
64728 +
64729 __set_current_state(TASK_RUNNING);
64730
64731 count_vm_event(PGFAULT);
64732 @@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
64733 if (unlikely(is_vm_hugetlb_page(vma)))
64734 return hugetlb_fault(mm, vma, address, flags);
64735
64736 +#ifdef CONFIG_PAX_SEGMEXEC
64737 + vma_m = pax_find_mirror_vma(vma);
64738 + if (vma_m) {
64739 + unsigned long address_m;
64740 + pgd_t *pgd_m;
64741 + pud_t *pud_m;
64742 + pmd_t *pmd_m;
64743 +
64744 + if (vma->vm_start > vma_m->vm_start) {
64745 + address_m = address;
64746 + address -= SEGMEXEC_TASK_SIZE;
64747 + vma = vma_m;
64748 + } else
64749 + address_m = address + SEGMEXEC_TASK_SIZE;
64750 +
64751 + pgd_m = pgd_offset(mm, address_m);
64752 + pud_m = pud_alloc(mm, pgd_m, address_m);
64753 + if (!pud_m)
64754 + return VM_FAULT_OOM;
64755 + pmd_m = pmd_alloc(mm, pud_m, address_m);
64756 + if (!pmd_m)
64757 + return VM_FAULT_OOM;
64758 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
64759 + return VM_FAULT_OOM;
64760 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64761 + }
64762 +#endif
64763 +
64764 pgd = pgd_offset(mm, address);
64765 pud = pud_alloc(mm, pgd, address);
64766 if (!pud)
64767 @@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
64768 gate_vma.vm_start = FIXADDR_USER_START;
64769 gate_vma.vm_end = FIXADDR_USER_END;
64770 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64771 - gate_vma.vm_page_prot = __P101;
64772 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64773 /*
64774 * Make sure the vDSO gets into every core dump.
64775 * Dumping its contents makes post-mortem fully interpretable later
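The mm/memory.c hunks above implement the PaX SEGMEXEC page-table mirroring: a fault handled in the lower (data) half of the address space is replayed into a mirror vma offset by SEGMEXEC_TASK_SIZE, so the executable view and the data view stay in sync. The standalone C sketch below only illustrates the address arithmetic behind pax_mirror_*_pte() and the handle_mm_fault() hunk; it is not part of the patch, and the SEGMEXEC_TASK_SIZE value (1.5 GiB, assuming the i386 3G/1G split) and the sample fault address are assumptions.

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed: 1.5 GiB, i386 3G/1G split */

/* Given a fault address in either half of a SEGMEXEC address space,
 * return the corresponding address in the other half, mirroring the
 * vma/vma_m arithmetic in the handle_mm_fault() hunk above. */
static unsigned long mirror_of(unsigned long addr)
{
	if (addr < SEGMEXEC_TASK_SIZE)
		return addr + SEGMEXEC_TASK_SIZE;	/* data half -> code half */
	return addr - SEGMEXEC_TASK_SIZE;		/* code half -> data half */
}

int main(void)
{
	unsigned long fault = 0x08048000UL;	/* typical i386 ELF text address */

	printf("fault at %#lx, mirror PTE lives at %#lx\n",
	       fault, mirror_of(fault));
	return 0;
}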
64776 diff -urNp linux-2.6.32.43/mm/memory-failure.c linux-2.6.32.43/mm/memory-failure.c
64777 --- linux-2.6.32.43/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
64778 +++ linux-2.6.32.43/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
64779 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
64780
64781 int sysctl_memory_failure_recovery __read_mostly = 1;
64782
64783 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64784 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64785
64786 /*
64787 * Send all the processes who have the page mapped an ``action optional''
64788 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
64789 return 0;
64790 }
64791
64792 - atomic_long_add(1, &mce_bad_pages);
64793 + atomic_long_add_unchecked(1, &mce_bad_pages);
64794
64795 /*
64796 * We need/can do nothing about count=0 pages.
64797 diff -urNp linux-2.6.32.43/mm/mempolicy.c linux-2.6.32.43/mm/mempolicy.c
64798 --- linux-2.6.32.43/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
64799 +++ linux-2.6.32.43/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
64800 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
64801 struct vm_area_struct *next;
64802 int err;
64803
64804 +#ifdef CONFIG_PAX_SEGMEXEC
64805 + struct vm_area_struct *vma_m;
64806 +#endif
64807 +
64808 err = 0;
64809 for (; vma && vma->vm_start < end; vma = next) {
64810 next = vma->vm_next;
64811 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
64812 err = policy_vma(vma, new);
64813 if (err)
64814 break;
64815 +
64816 +#ifdef CONFIG_PAX_SEGMEXEC
64817 + vma_m = pax_find_mirror_vma(vma);
64818 + if (vma_m) {
64819 + err = policy_vma(vma_m, new);
64820 + if (err)
64821 + break;
64822 + }
64823 +#endif
64824 +
64825 }
64826 return err;
64827 }
64828 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
64829
64830 if (end < start)
64831 return -EINVAL;
64832 +
64833 +#ifdef CONFIG_PAX_SEGMEXEC
64834 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64835 + if (end > SEGMEXEC_TASK_SIZE)
64836 + return -EINVAL;
64837 + } else
64838 +#endif
64839 +
64840 + if (end > TASK_SIZE)
64841 + return -EINVAL;
64842 +
64843 if (end == start)
64844 return 0;
64845
64846 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64847 if (!mm)
64848 return -EINVAL;
64849
64850 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64851 + if (mm != current->mm &&
64852 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64853 + err = -EPERM;
64854 + goto out;
64855 + }
64856 +#endif
64857 +
64858 /*
64859 * Check if this process has the right to modify the specified
64860 * process. The right exists if the process has administrative
64861 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64862 rcu_read_lock();
64863 tcred = __task_cred(task);
64864 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64865 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64866 - !capable(CAP_SYS_NICE)) {
64867 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64868 rcu_read_unlock();
64869 err = -EPERM;
64870 goto out;
64871 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
64872
64873 if (file) {
64874 seq_printf(m, " file=");
64875 - seq_path(m, &file->f_path, "\n\t= ");
64876 + seq_path(m, &file->f_path, "\n\t\\= ");
64877 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64878 seq_printf(m, " heap");
64879 } else if (vma->vm_start <= mm->start_stack &&
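The mm/mempolicy.c changes above make do_mbind() reject ranges that extend past the usable address space, which under SEGMEXEC ends at SEGMEXEC_TASK_SIZE rather than TASK_SIZE, and they propagate policy updates to the mirror vma as well. The following userspace model of the bound check is illustrative only; the TASK_SIZE and SEGMEXEC_TASK_SIZE constants are assumed i386 values.

#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE		0xc0000000UL	/* assumed: 3 GiB i386 user space */
#define SEGMEXEC_TASK_SIZE	0x60000000UL	/* assumed: 1.5 GiB lower half */

/* Model of the bound check do_mbind() gains above: the highest valid
 * end address depends on whether the mm runs under SEGMEXEC. */
static bool mbind_range_ok(unsigned long start, unsigned long end, bool segmexec)
{
	unsigned long limit = segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;

	if (end < start)
		return false;
	return end <= limit;
}

int main(void)
{
	printf("%d\n", mbind_range_ok(0x08048000UL, 0x70000000UL, true));	/* 0: past 1.5 GiB */
	printf("%d\n", mbind_range_ok(0x08048000UL, 0x70000000UL, false));	/* 1: still below 3 GiB */
	return 0;
}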
64880 diff -urNp linux-2.6.32.43/mm/migrate.c linux-2.6.32.43/mm/migrate.c
64881 --- linux-2.6.32.43/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
64882 +++ linux-2.6.32.43/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
64883 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64884 unsigned long chunk_start;
64885 int err;
64886
64887 + pax_track_stack();
64888 +
64889 task_nodes = cpuset_mems_allowed(task);
64890
64891 err = -ENOMEM;
64892 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64893 if (!mm)
64894 return -EINVAL;
64895
64896 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64897 + if (mm != current->mm &&
64898 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64899 + err = -EPERM;
64900 + goto out;
64901 + }
64902 +#endif
64903 +
64904 /*
64905 * Check if this process has the right to modify the specified
64906 * process. The right exists if the process has administrative
64907 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64908 rcu_read_lock();
64909 tcred = __task_cred(task);
64910 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64911 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64912 - !capable(CAP_SYS_NICE)) {
64913 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64914 rcu_read_unlock();
64915 err = -EPERM;
64916 goto out;
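The mm/migrate.c hunk above tightens the permission test for sys_move_pages() (the matching mempolicy.c hunk does the same for sys_migrate_pages()): with the `cred->uid != tcred->uid` clause dropped, a bare real-uid match with the target no longer grants access. The sketch below models the resulting predicate in userspace; struct cred_model and the stubbed capability flag are illustrative, not kernel types.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

struct cred_model { uid_t uid, euid, suid; };

/* Patched rule: allow if the caller's euid matches the target's uid or
 * suid, or the caller's real uid matches the target's suid, or the
 * caller has CAP_SYS_NICE; a bare real-uid match is no longer enough. */
static bool may_move_pages(const struct cred_model *caller,
			   const struct cred_model *target,
			   bool has_cap_sys_nice)
{
	if (caller->euid == target->suid || caller->euid == target->uid)
		return true;
	if (caller->uid == target->suid)
		return true;
	return has_cap_sys_nice;
}

int main(void)
{
	struct cred_model caller = { .uid = 1000, .euid = 1001, .suid = 1001 };
	struct cred_model target = { .uid = 1000, .euid = 1002, .suid = 1002 };

	/* same real uid as the target, but no euid/suid match: denied under
	 * the patched rule (the old rule would have allowed it) */
	printf("%d\n", may_move_pages(&caller, &target, false));
	return 0;
}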
64917 diff -urNp linux-2.6.32.43/mm/mlock.c linux-2.6.32.43/mm/mlock.c
64918 --- linux-2.6.32.43/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64919 +++ linux-2.6.32.43/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64920 @@ -13,6 +13,7 @@
64921 #include <linux/pagemap.h>
64922 #include <linux/mempolicy.h>
64923 #include <linux/syscalls.h>
64924 +#include <linux/security.h>
64925 #include <linux/sched.h>
64926 #include <linux/module.h>
64927 #include <linux/rmap.h>
64928 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64929 }
64930 }
64931
64932 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64933 -{
64934 - return (vma->vm_flags & VM_GROWSDOWN) &&
64935 - (vma->vm_start == addr) &&
64936 - !vma_stack_continue(vma->vm_prev, addr);
64937 -}
64938 -
64939 /**
64940 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64941 * @vma: target vma
64942 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64943 if (vma->vm_flags & VM_WRITE)
64944 gup_flags |= FOLL_WRITE;
64945
64946 - /* We don't try to access the guard page of a stack vma */
64947 - if (stack_guard_page(vma, start)) {
64948 - addr += PAGE_SIZE;
64949 - nr_pages--;
64950 - }
64951 -
64952 while (nr_pages > 0) {
64953 int i;
64954
64955 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64956 {
64957 unsigned long nstart, end, tmp;
64958 struct vm_area_struct * vma, * prev;
64959 - int error;
64960 + int error = -EINVAL;
64961
64962 len = PAGE_ALIGN(len);
64963 end = start + len;
64964 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64965 return -EINVAL;
64966 if (end == start)
64967 return 0;
64968 + if (end > TASK_SIZE)
64969 + return -EINVAL;
64970 +
64971 vma = find_vma_prev(current->mm, start, &prev);
64972 if (!vma || vma->vm_start > start)
64973 return -ENOMEM;
64974 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64975 for (nstart = start ; ; ) {
64976 unsigned int newflags;
64977
64978 +#ifdef CONFIG_PAX_SEGMEXEC
64979 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64980 + break;
64981 +#endif
64982 +
64983 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64984
64985 newflags = vma->vm_flags | VM_LOCKED;
64986 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64987 lock_limit >>= PAGE_SHIFT;
64988
64989 /* check against resource limits */
64990 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64991 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64992 error = do_mlock(start, len, 1);
64993 up_write(&current->mm->mmap_sem);
64994 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64995 static int do_mlockall(int flags)
64996 {
64997 struct vm_area_struct * vma, * prev = NULL;
64998 - unsigned int def_flags = 0;
64999
65000 if (flags & MCL_FUTURE)
65001 - def_flags = VM_LOCKED;
65002 - current->mm->def_flags = def_flags;
65003 + current->mm->def_flags |= VM_LOCKED;
65004 + else
65005 + current->mm->def_flags &= ~VM_LOCKED;
65006 if (flags == MCL_FUTURE)
65007 goto out;
65008
65009 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
65010 - unsigned int newflags;
65011 + unsigned long newflags;
65012 +
65013 +#ifdef CONFIG_PAX_SEGMEXEC
65014 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
65015 + break;
65016 +#endif
65017
65018 + BUG_ON(vma->vm_end > TASK_SIZE);
65019 newflags = vma->vm_flags | VM_LOCKED;
65020 if (!(flags & MCL_CURRENT))
65021 newflags &= ~VM_LOCKED;
65022 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
65023 lock_limit >>= PAGE_SHIFT;
65024
65025 ret = -ENOMEM;
65026 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
65027 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
65028 capable(CAP_IPC_LOCK))
65029 ret = do_mlockall(flags);
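In the mm/mlock.c hunks above, do_mlockall() now toggles only the VM_LOCKED bit of mm->def_flags instead of overwriting the whole word, so unrelated default flags survive an mlockall()/munlockall() cycle. A small userspace sketch of that idiom follows; the flag values are assumed 2.6.32-era constants used purely for illustration.

#include <stdio.h>

#define VM_LOCKED	0x00002000UL	/* assumed 2.6.32 value */
#define MCL_CURRENT	1
#define MCL_FUTURE	2

/* Only the VM_LOCKED bit of def_flags is toggled; other default flags
 * are left alone, unlike the old code which overwrote the whole word. */
static unsigned long apply_mlockall(unsigned long def_flags, int flags)
{
	if (flags & MCL_FUTURE)
		def_flags |= VM_LOCKED;
	else
		def_flags &= ~VM_LOCKED;
	return def_flags;
}

int main(void)
{
	unsigned long unrelated = 0x00000100UL;	/* stands in for some other default flag */
	unsigned long def = unrelated;

	def = apply_mlockall(def, MCL_FUTURE);
	printf("%#lx\n", def);	/* unrelated bit preserved, VM_LOCKED added */

	def = apply_mlockall(def, MCL_CURRENT);
	printf("%#lx\n", def);	/* VM_LOCKED cleared again, unrelated bit kept */
	return 0;
}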
65030 diff -urNp linux-2.6.32.43/mm/mmap.c linux-2.6.32.43/mm/mmap.c
65031 --- linux-2.6.32.43/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
65032 +++ linux-2.6.32.43/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
65033 @@ -45,6 +45,16 @@
65034 #define arch_rebalance_pgtables(addr, len) (addr)
65035 #endif
65036
65037 +static inline void verify_mm_writelocked(struct mm_struct *mm)
65038 +{
65039 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
65040 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65041 + up_read(&mm->mmap_sem);
65042 + BUG();
65043 + }
65044 +#endif
65045 +}
65046 +
65047 static void unmap_region(struct mm_struct *mm,
65048 struct vm_area_struct *vma, struct vm_area_struct *prev,
65049 unsigned long start, unsigned long end);
65050 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
65051 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
65052 *
65053 */
65054 -pgprot_t protection_map[16] = {
65055 +pgprot_t protection_map[16] __read_only = {
65056 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
65057 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
65058 };
65059
65060 pgprot_t vm_get_page_prot(unsigned long vm_flags)
65061 {
65062 - return __pgprot(pgprot_val(protection_map[vm_flags &
65063 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
65064 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
65065 pgprot_val(arch_vm_get_page_prot(vm_flags)));
65066 +
65067 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65068 + if (!nx_enabled &&
65069 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
65070 + (vm_flags & (VM_READ | VM_WRITE)))
65071 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
65072 +#endif
65073 +
65074 + return prot;
65075 }
65076 EXPORT_SYMBOL(vm_get_page_prot);
65077
65078 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
65079 int sysctl_overcommit_ratio = 50; /* default is 50% */
65080 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
65081 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
65082 struct percpu_counter vm_committed_as;
65083
65084 /*
65085 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
65086 struct vm_area_struct *next = vma->vm_next;
65087
65088 might_sleep();
65089 + BUG_ON(vma->vm_mirror);
65090 if (vma->vm_ops && vma->vm_ops->close)
65091 vma->vm_ops->close(vma);
65092 if (vma->vm_file) {
65093 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
65094 * not page aligned -Ram Gupta
65095 */
65096 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
65097 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
65098 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
65099 (mm->end_data - mm->start_data) > rlim)
65100 goto out;
65101 @@ -704,6 +726,12 @@ static int
65102 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
65103 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65104 {
65105 +
65106 +#ifdef CONFIG_PAX_SEGMEXEC
65107 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
65108 + return 0;
65109 +#endif
65110 +
65111 if (is_mergeable_vma(vma, file, vm_flags) &&
65112 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
65113 if (vma->vm_pgoff == vm_pgoff)
65114 @@ -723,6 +751,12 @@ static int
65115 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
65116 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65117 {
65118 +
65119 +#ifdef CONFIG_PAX_SEGMEXEC
65120 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
65121 + return 0;
65122 +#endif
65123 +
65124 if (is_mergeable_vma(vma, file, vm_flags) &&
65125 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
65126 pgoff_t vm_pglen;
65127 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
65128 struct vm_area_struct *vma_merge(struct mm_struct *mm,
65129 struct vm_area_struct *prev, unsigned long addr,
65130 unsigned long end, unsigned long vm_flags,
65131 - struct anon_vma *anon_vma, struct file *file,
65132 + struct anon_vma *anon_vma, struct file *file,
65133 pgoff_t pgoff, struct mempolicy *policy)
65134 {
65135 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
65136 struct vm_area_struct *area, *next;
65137
65138 +#ifdef CONFIG_PAX_SEGMEXEC
65139 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
65140 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
65141 +
65142 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
65143 +#endif
65144 +
65145 /*
65146 * We later require that vma->vm_flags == vm_flags,
65147 * so this tests vma->vm_flags & VM_SPECIAL, too.
65148 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
65149 if (next && next->vm_end == end) /* cases 6, 7, 8 */
65150 next = next->vm_next;
65151
65152 +#ifdef CONFIG_PAX_SEGMEXEC
65153 + if (prev)
65154 + prev_m = pax_find_mirror_vma(prev);
65155 + if (area)
65156 + area_m = pax_find_mirror_vma(area);
65157 + if (next)
65158 + next_m = pax_find_mirror_vma(next);
65159 +#endif
65160 +
65161 /*
65162 * Can it merge with the predecessor?
65163 */
65164 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
65165 /* cases 1, 6 */
65166 vma_adjust(prev, prev->vm_start,
65167 next->vm_end, prev->vm_pgoff, NULL);
65168 - } else /* cases 2, 5, 7 */
65169 +
65170 +#ifdef CONFIG_PAX_SEGMEXEC
65171 + if (prev_m)
65172 + vma_adjust(prev_m, prev_m->vm_start,
65173 + next_m->vm_end, prev_m->vm_pgoff, NULL);
65174 +#endif
65175 +
65176 + } else { /* cases 2, 5, 7 */
65177 vma_adjust(prev, prev->vm_start,
65178 end, prev->vm_pgoff, NULL);
65179 +
65180 +#ifdef CONFIG_PAX_SEGMEXEC
65181 + if (prev_m)
65182 + vma_adjust(prev_m, prev_m->vm_start,
65183 + end_m, prev_m->vm_pgoff, NULL);
65184 +#endif
65185 +
65186 + }
65187 return prev;
65188 }
65189
65190 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
65191 mpol_equal(policy, vma_policy(next)) &&
65192 can_vma_merge_before(next, vm_flags,
65193 anon_vma, file, pgoff+pglen)) {
65194 - if (prev && addr < prev->vm_end) /* case 4 */
65195 + if (prev && addr < prev->vm_end) { /* case 4 */
65196 vma_adjust(prev, prev->vm_start,
65197 addr, prev->vm_pgoff, NULL);
65198 - else /* cases 3, 8 */
65199 +
65200 +#ifdef CONFIG_PAX_SEGMEXEC
65201 + if (prev_m)
65202 + vma_adjust(prev_m, prev_m->vm_start,
65203 + addr_m, prev_m->vm_pgoff, NULL);
65204 +#endif
65205 +
65206 + } else { /* cases 3, 8 */
65207 vma_adjust(area, addr, next->vm_end,
65208 next->vm_pgoff - pglen, NULL);
65209 +
65210 +#ifdef CONFIG_PAX_SEGMEXEC
65211 + if (area_m)
65212 + vma_adjust(area_m, addr_m, next_m->vm_end,
65213 + next_m->vm_pgoff - pglen, NULL);
65214 +#endif
65215 +
65216 + }
65217 return area;
65218 }
65219
65220 @@ -898,14 +978,11 @@ none:
65221 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
65222 struct file *file, long pages)
65223 {
65224 - const unsigned long stack_flags
65225 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
65226 -
65227 if (file) {
65228 mm->shared_vm += pages;
65229 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
65230 mm->exec_vm += pages;
65231 - } else if (flags & stack_flags)
65232 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
65233 mm->stack_vm += pages;
65234 if (flags & (VM_RESERVED|VM_IO))
65235 mm->reserved_vm += pages;
65236 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
65237 * (the exception is when the underlying filesystem is noexec
65238 * mounted, in which case we dont add PROT_EXEC.)
65239 */
65240 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65241 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65242 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
65243 prot |= PROT_EXEC;
65244
65245 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
65246 /* Obtain the address to map to. we verify (or select) it and ensure
65247 * that it represents a valid section of the address space.
65248 */
65249 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
65250 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
65251 if (addr & ~PAGE_MASK)
65252 return addr;
65253
65254 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
65255 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
65256 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
65257
65258 +#ifdef CONFIG_PAX_MPROTECT
65259 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65260 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65261 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
65262 + gr_log_rwxmmap(file);
65263 +
65264 +#ifdef CONFIG_PAX_EMUPLT
65265 + vm_flags &= ~VM_EXEC;
65266 +#else
65267 + return -EPERM;
65268 +#endif
65269 +
65270 + }
65271 +
65272 + if (!(vm_flags & VM_EXEC))
65273 + vm_flags &= ~VM_MAYEXEC;
65274 +#else
65275 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65276 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65277 +#endif
65278 + else
65279 + vm_flags &= ~VM_MAYWRITE;
65280 + }
65281 +#endif
65282 +
65283 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65284 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
65285 + vm_flags &= ~VM_PAGEEXEC;
65286 +#endif
65287 +
65288 if (flags & MAP_LOCKED)
65289 if (!can_do_mlock())
65290 return -EPERM;
65291 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
65292 locked += mm->locked_vm;
65293 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65294 lock_limit >>= PAGE_SHIFT;
65295 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65296 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
65297 return -EAGAIN;
65298 }
65299 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
65300 if (error)
65301 return error;
65302
65303 + if (!gr_acl_handle_mmap(file, prot))
65304 + return -EACCES;
65305 +
65306 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
65307 }
65308 EXPORT_SYMBOL(do_mmap_pgoff);
65309 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
65310 */
65311 int vma_wants_writenotify(struct vm_area_struct *vma)
65312 {
65313 - unsigned int vm_flags = vma->vm_flags;
65314 + unsigned long vm_flags = vma->vm_flags;
65315
65316 /* If it was private or non-writable, the write bit is already clear */
65317 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
65318 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
65319 return 0;
65320
65321 /* The backer wishes to know when pages are first written to? */
65322 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
65323 unsigned long charged = 0;
65324 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
65325
65326 +#ifdef CONFIG_PAX_SEGMEXEC
65327 + struct vm_area_struct *vma_m = NULL;
65328 +#endif
65329 +
65330 + /*
65331 + * mm->mmap_sem is required to protect against another thread
65332 + * changing the mappings in case we sleep.
65333 + */
65334 + verify_mm_writelocked(mm);
65335 +
65336 /* Clear old maps */
65337 error = -ENOMEM;
65338 -munmap_back:
65339 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65340 if (vma && vma->vm_start < addr + len) {
65341 if (do_munmap(mm, addr, len))
65342 return -ENOMEM;
65343 - goto munmap_back;
65344 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65345 + BUG_ON(vma && vma->vm_start < addr + len);
65346 }
65347
65348 /* Check against address space limit. */
65349 @@ -1173,6 +1294,16 @@ munmap_back:
65350 goto unacct_error;
65351 }
65352
65353 +#ifdef CONFIG_PAX_SEGMEXEC
65354 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
65355 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65356 + if (!vma_m) {
65357 + error = -ENOMEM;
65358 + goto free_vma;
65359 + }
65360 + }
65361 +#endif
65362 +
65363 vma->vm_mm = mm;
65364 vma->vm_start = addr;
65365 vma->vm_end = addr + len;
65366 @@ -1195,6 +1326,19 @@ munmap_back:
65367 error = file->f_op->mmap(file, vma);
65368 if (error)
65369 goto unmap_and_free_vma;
65370 +
65371 +#ifdef CONFIG_PAX_SEGMEXEC
65372 + if (vma_m && (vm_flags & VM_EXECUTABLE))
65373 + added_exe_file_vma(mm);
65374 +#endif
65375 +
65376 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65377 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
65378 + vma->vm_flags |= VM_PAGEEXEC;
65379 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65380 + }
65381 +#endif
65382 +
65383 if (vm_flags & VM_EXECUTABLE)
65384 added_exe_file_vma(mm);
65385
65386 @@ -1218,6 +1362,11 @@ munmap_back:
65387 vma_link(mm, vma, prev, rb_link, rb_parent);
65388 file = vma->vm_file;
65389
65390 +#ifdef CONFIG_PAX_SEGMEXEC
65391 + if (vma_m)
65392 + pax_mirror_vma(vma_m, vma);
65393 +#endif
65394 +
65395 /* Once vma denies write, undo our temporary denial count */
65396 if (correct_wcount)
65397 atomic_inc(&inode->i_writecount);
65398 @@ -1226,6 +1375,7 @@ out:
65399
65400 mm->total_vm += len >> PAGE_SHIFT;
65401 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
65402 + track_exec_limit(mm, addr, addr + len, vm_flags);
65403 if (vm_flags & VM_LOCKED) {
65404 /*
65405 * makes pages present; downgrades, drops, reacquires mmap_sem
65406 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
65407 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
65408 charged = 0;
65409 free_vma:
65410 +
65411 +#ifdef CONFIG_PAX_SEGMEXEC
65412 + if (vma_m)
65413 + kmem_cache_free(vm_area_cachep, vma_m);
65414 +#endif
65415 +
65416 kmem_cache_free(vm_area_cachep, vma);
65417 unacct_error:
65418 if (charged)
65419 @@ -1255,6 +1411,44 @@ unacct_error:
65420 return error;
65421 }
65422
65423 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
65424 +{
65425 + if (!vma) {
65426 +#ifdef CONFIG_STACK_GROWSUP
65427 + if (addr > sysctl_heap_stack_gap)
65428 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
65429 + else
65430 + vma = find_vma(current->mm, 0);
65431 + if (vma && (vma->vm_flags & VM_GROWSUP))
65432 + return false;
65433 +#endif
65434 + return true;
65435 + }
65436 +
65437 + if (addr + len > vma->vm_start)
65438 + return false;
65439 +
65440 + if (vma->vm_flags & VM_GROWSDOWN)
65441 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
65442 +#ifdef CONFIG_STACK_GROWSUP
65443 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
65444 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
65445 +#endif
65446 +
65447 + return true;
65448 +}
65449 +
65450 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
65451 +{
65452 + if (vma->vm_start < len)
65453 + return -ENOMEM;
65454 + if (!(vma->vm_flags & VM_GROWSDOWN))
65455 + return vma->vm_start - len;
65456 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
65457 + return vma->vm_start - len - sysctl_heap_stack_gap;
65458 + return -ENOMEM;
65459 +}
65460 +
65461 /* Get an address range which is currently unmapped.
65462 * For shmat() with addr=0.
65463 *
65464 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
65465 if (flags & MAP_FIXED)
65466 return addr;
65467
65468 +#ifdef CONFIG_PAX_RANDMMAP
65469 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65470 +#endif
65471 +
65472 if (addr) {
65473 addr = PAGE_ALIGN(addr);
65474 - vma = find_vma(mm, addr);
65475 - if (TASK_SIZE - len >= addr &&
65476 - (!vma || addr + len <= vma->vm_start))
65477 - return addr;
65478 + if (TASK_SIZE - len >= addr) {
65479 + vma = find_vma(mm, addr);
65480 + if (check_heap_stack_gap(vma, addr, len))
65481 + return addr;
65482 + }
65483 }
65484 if (len > mm->cached_hole_size) {
65485 - start_addr = addr = mm->free_area_cache;
65486 + start_addr = addr = mm->free_area_cache;
65487 } else {
65488 - start_addr = addr = TASK_UNMAPPED_BASE;
65489 - mm->cached_hole_size = 0;
65490 + start_addr = addr = mm->mmap_base;
65491 + mm->cached_hole_size = 0;
65492 }
65493
65494 full_search:
65495 @@ -1303,34 +1502,40 @@ full_search:
65496 * Start a new search - just in case we missed
65497 * some holes.
65498 */
65499 - if (start_addr != TASK_UNMAPPED_BASE) {
65500 - addr = TASK_UNMAPPED_BASE;
65501 - start_addr = addr;
65502 + if (start_addr != mm->mmap_base) {
65503 + start_addr = addr = mm->mmap_base;
65504 mm->cached_hole_size = 0;
65505 goto full_search;
65506 }
65507 return -ENOMEM;
65508 }
65509 - if (!vma || addr + len <= vma->vm_start) {
65510 - /*
65511 - * Remember the place where we stopped the search:
65512 - */
65513 - mm->free_area_cache = addr + len;
65514 - return addr;
65515 - }
65516 + if (check_heap_stack_gap(vma, addr, len))
65517 + break;
65518 if (addr + mm->cached_hole_size < vma->vm_start)
65519 mm->cached_hole_size = vma->vm_start - addr;
65520 addr = vma->vm_end;
65521 }
65522 +
65523 + /*
65524 + * Remember the place where we stopped the search:
65525 + */
65526 + mm->free_area_cache = addr + len;
65527 + return addr;
65528 }
65529 #endif
65530
65531 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
65532 {
65533 +
65534 +#ifdef CONFIG_PAX_SEGMEXEC
65535 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65536 + return;
65537 +#endif
65538 +
65539 /*
65540 * Is this a new hole at the lowest possible address?
65541 */
65542 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
65543 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
65544 mm->free_area_cache = addr;
65545 mm->cached_hole_size = ~0UL;
65546 }
65547 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
65548 {
65549 struct vm_area_struct *vma;
65550 struct mm_struct *mm = current->mm;
65551 - unsigned long addr = addr0;
65552 + unsigned long base = mm->mmap_base, addr = addr0;
65553
65554 /* requested length too big for entire address space */
65555 if (len > TASK_SIZE)
65556 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
65557 if (flags & MAP_FIXED)
65558 return addr;
65559
65560 +#ifdef CONFIG_PAX_RANDMMAP
65561 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65562 +#endif
65563 +
65564 /* requesting a specific address */
65565 if (addr) {
65566 addr = PAGE_ALIGN(addr);
65567 - vma = find_vma(mm, addr);
65568 - if (TASK_SIZE - len >= addr &&
65569 - (!vma || addr + len <= vma->vm_start))
65570 - return addr;
65571 + if (TASK_SIZE - len >= addr) {
65572 + vma = find_vma(mm, addr);
65573 + if (check_heap_stack_gap(vma, addr, len))
65574 + return addr;
65575 + }
65576 }
65577
65578 /* check if free_area_cache is useful for us */
65579 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
65580 /* make sure it can fit in the remaining address space */
65581 if (addr > len) {
65582 vma = find_vma(mm, addr-len);
65583 - if (!vma || addr <= vma->vm_start)
65584 + if (check_heap_stack_gap(vma, addr - len, len))
65585 /* remember the address as a hint for next time */
65586 return (mm->free_area_cache = addr-len);
65587 }
65588 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
65589 * return with success:
65590 */
65591 vma = find_vma(mm, addr);
65592 - if (!vma || addr+len <= vma->vm_start)
65593 + if (check_heap_stack_gap(vma, addr, len))
65594 /* remember the address as a hint for next time */
65595 return (mm->free_area_cache = addr);
65596
65597 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
65598 mm->cached_hole_size = vma->vm_start - addr;
65599
65600 /* try just below the current vma->vm_start */
65601 - addr = vma->vm_start-len;
65602 - } while (len < vma->vm_start);
65603 + addr = skip_heap_stack_gap(vma, len);
65604 + } while (!IS_ERR_VALUE(addr));
65605
65606 bottomup:
65607 /*
65608 @@ -1414,13 +1624,21 @@ bottomup:
65609 * can happen with large stack limits and large mmap()
65610 * allocations.
65611 */
65612 + mm->mmap_base = TASK_UNMAPPED_BASE;
65613 +
65614 +#ifdef CONFIG_PAX_RANDMMAP
65615 + if (mm->pax_flags & MF_PAX_RANDMMAP)
65616 + mm->mmap_base += mm->delta_mmap;
65617 +#endif
65618 +
65619 + mm->free_area_cache = mm->mmap_base;
65620 mm->cached_hole_size = ~0UL;
65621 - mm->free_area_cache = TASK_UNMAPPED_BASE;
65622 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65623 /*
65624 * Restore the topdown base:
65625 */
65626 - mm->free_area_cache = mm->mmap_base;
65627 + mm->mmap_base = base;
65628 + mm->free_area_cache = base;
65629 mm->cached_hole_size = ~0UL;
65630
65631 return addr;
65632 @@ -1429,6 +1647,12 @@ bottomup:
65633
65634 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65635 {
65636 +
65637 +#ifdef CONFIG_PAX_SEGMEXEC
65638 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65639 + return;
65640 +#endif
65641 +
65642 /*
65643 * Is this a new hole at the highest possible address?
65644 */
65645 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
65646 mm->free_area_cache = addr;
65647
65648 /* dont allow allocations above current base */
65649 - if (mm->free_area_cache > mm->mmap_base)
65650 + if (mm->free_area_cache > mm->mmap_base) {
65651 mm->free_area_cache = mm->mmap_base;
65652 + mm->cached_hole_size = ~0UL;
65653 + }
65654 }
65655
65656 unsigned long
65657 @@ -1545,6 +1771,27 @@ out:
65658 return prev ? prev->vm_next : vma;
65659 }
65660
65661 +#ifdef CONFIG_PAX_SEGMEXEC
65662 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65663 +{
65664 + struct vm_area_struct *vma_m;
65665 +
65666 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65667 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65668 + BUG_ON(vma->vm_mirror);
65669 + return NULL;
65670 + }
65671 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65672 + vma_m = vma->vm_mirror;
65673 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65674 + BUG_ON(vma->vm_file != vma_m->vm_file);
65675 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65676 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
65677 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65678 + return vma_m;
65679 +}
65680 +#endif
65681 +
65682 /*
65683 * Verify that the stack growth is acceptable and
65684 * update accounting. This is shared with both the
65685 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
65686 return -ENOMEM;
65687
65688 /* Stack limit test */
65689 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
65690 if (size > rlim[RLIMIT_STACK].rlim_cur)
65691 return -ENOMEM;
65692
65693 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
65694 unsigned long limit;
65695 locked = mm->locked_vm + grow;
65696 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
65697 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65698 if (locked > limit && !capable(CAP_IPC_LOCK))
65699 return -ENOMEM;
65700 }
65701 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
65702 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65703 * vma is the last one with address > vma->vm_end. Have to extend vma.
65704 */
65705 +#ifndef CONFIG_IA64
65706 +static
65707 +#endif
65708 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65709 {
65710 int error;
65711 + bool locknext;
65712
65713 if (!(vma->vm_flags & VM_GROWSUP))
65714 return -EFAULT;
65715
65716 + /* Also guard against wrapping around to address 0. */
65717 + if (address < PAGE_ALIGN(address+1))
65718 + address = PAGE_ALIGN(address+1);
65719 + else
65720 + return -ENOMEM;
65721 +
65722 /*
65723 * We must make sure the anon_vma is allocated
65724 * so that the anon_vma locking is not a noop.
65725 */
65726 if (unlikely(anon_vma_prepare(vma)))
65727 return -ENOMEM;
65728 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65729 + if (locknext && anon_vma_prepare(vma->vm_next))
65730 + return -ENOMEM;
65731 anon_vma_lock(vma);
65732 + if (locknext)
65733 + anon_vma_lock(vma->vm_next);
65734
65735 /*
65736 * vma->vm_start/vm_end cannot change under us because the caller
65737 * is required to hold the mmap_sem in read mode. We need the
65738 - * anon_vma lock to serialize against concurrent expand_stacks.
65739 - * Also guard against wrapping around to address 0.
65740 + * anon_vma locks to serialize against concurrent expand_stacks
65741 + * and expand_upwards.
65742 */
65743 - if (address < PAGE_ALIGN(address+4))
65744 - address = PAGE_ALIGN(address+4);
65745 - else {
65746 - anon_vma_unlock(vma);
65747 - return -ENOMEM;
65748 - }
65749 error = 0;
65750
65751 /* Somebody else might have raced and expanded it already */
65752 - if (address > vma->vm_end) {
65753 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65754 + error = -ENOMEM;
65755 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65756 unsigned long size, grow;
65757
65758 size = address - vma->vm_start;
65759 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
65760 if (!error)
65761 vma->vm_end = address;
65762 }
65763 + if (locknext)
65764 + anon_vma_unlock(vma->vm_next);
65765 anon_vma_unlock(vma);
65766 return error;
65767 }
65768 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
65769 unsigned long address)
65770 {
65771 int error;
65772 + bool lockprev = false;
65773 + struct vm_area_struct *prev;
65774
65775 /*
65776 * We must make sure the anon_vma is allocated
65777 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
65778 if (error)
65779 return error;
65780
65781 + prev = vma->vm_prev;
65782 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65783 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65784 +#endif
65785 + if (lockprev && anon_vma_prepare(prev))
65786 + return -ENOMEM;
65787 + if (lockprev)
65788 + anon_vma_lock(prev);
65789 +
65790 anon_vma_lock(vma);
65791
65792 /*
65793 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
65794 */
65795
65796 /* Somebody else might have raced and expanded it already */
65797 - if (address < vma->vm_start) {
65798 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65799 + error = -ENOMEM;
65800 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65801 unsigned long size, grow;
65802
65803 +#ifdef CONFIG_PAX_SEGMEXEC
65804 + struct vm_area_struct *vma_m;
65805 +
65806 + vma_m = pax_find_mirror_vma(vma);
65807 +#endif
65808 +
65809 size = vma->vm_end - address;
65810 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65811
65812 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
65813 if (!error) {
65814 vma->vm_start = address;
65815 vma->vm_pgoff -= grow;
65816 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65817 +
65818 +#ifdef CONFIG_PAX_SEGMEXEC
65819 + if (vma_m) {
65820 + vma_m->vm_start -= grow << PAGE_SHIFT;
65821 + vma_m->vm_pgoff -= grow;
65822 + }
65823 +#endif
65824 +
65825 }
65826 }
65827 anon_vma_unlock(vma);
65828 + if (lockprev)
65829 + anon_vma_unlock(prev);
65830 return error;
65831 }
65832
65833 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
65834 do {
65835 long nrpages = vma_pages(vma);
65836
65837 +#ifdef CONFIG_PAX_SEGMEXEC
65838 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65839 + vma = remove_vma(vma);
65840 + continue;
65841 + }
65842 +#endif
65843 +
65844 mm->total_vm -= nrpages;
65845 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65846 vma = remove_vma(vma);
65847 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65848 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65849 vma->vm_prev = NULL;
65850 do {
65851 +
65852 +#ifdef CONFIG_PAX_SEGMEXEC
65853 + if (vma->vm_mirror) {
65854 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65855 + vma->vm_mirror->vm_mirror = NULL;
65856 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
65857 + vma->vm_mirror = NULL;
65858 + }
65859 +#endif
65860 +
65861 rb_erase(&vma->vm_rb, &mm->mm_rb);
65862 mm->map_count--;
65863 tail_vma = vma;
65864 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
65865 struct mempolicy *pol;
65866 struct vm_area_struct *new;
65867
65868 +#ifdef CONFIG_PAX_SEGMEXEC
65869 + struct vm_area_struct *vma_m, *new_m = NULL;
65870 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65871 +#endif
65872 +
65873 if (is_vm_hugetlb_page(vma) && (addr &
65874 ~(huge_page_mask(hstate_vma(vma)))))
65875 return -EINVAL;
65876
65877 +#ifdef CONFIG_PAX_SEGMEXEC
65878 + vma_m = pax_find_mirror_vma(vma);
65879 +
65880 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65881 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65882 + if (mm->map_count >= sysctl_max_map_count-1)
65883 + return -ENOMEM;
65884 + } else
65885 +#endif
65886 +
65887 if (mm->map_count >= sysctl_max_map_count)
65888 return -ENOMEM;
65889
65890 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65891 if (!new)
65892 return -ENOMEM;
65893
65894 +#ifdef CONFIG_PAX_SEGMEXEC
65895 + if (vma_m) {
65896 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65897 + if (!new_m) {
65898 + kmem_cache_free(vm_area_cachep, new);
65899 + return -ENOMEM;
65900 + }
65901 + }
65902 +#endif
65903 +
65904 /* most fields are the same, copy all, and then fixup */
65905 *new = *vma;
65906
65907 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65908 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65909 }
65910
65911 +#ifdef CONFIG_PAX_SEGMEXEC
65912 + if (vma_m) {
65913 + *new_m = *vma_m;
65914 + new_m->vm_mirror = new;
65915 + new->vm_mirror = new_m;
65916 +
65917 + if (new_below)
65918 + new_m->vm_end = addr_m;
65919 + else {
65920 + new_m->vm_start = addr_m;
65921 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65922 + }
65923 + }
65924 +#endif
65925 +
65926 pol = mpol_dup(vma_policy(vma));
65927 if (IS_ERR(pol)) {
65928 +
65929 +#ifdef CONFIG_PAX_SEGMEXEC
65930 + if (new_m)
65931 + kmem_cache_free(vm_area_cachep, new_m);
65932 +#endif
65933 +
65934 kmem_cache_free(vm_area_cachep, new);
65935 return PTR_ERR(pol);
65936 }
65937 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65938 else
65939 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65940
65941 +#ifdef CONFIG_PAX_SEGMEXEC
65942 + if (vma_m) {
65943 + mpol_get(pol);
65944 + vma_set_policy(new_m, pol);
65945 +
65946 + if (new_m->vm_file) {
65947 + get_file(new_m->vm_file);
65948 + if (vma_m->vm_flags & VM_EXECUTABLE)
65949 + added_exe_file_vma(mm);
65950 + }
65951 +
65952 + if (new_m->vm_ops && new_m->vm_ops->open)
65953 + new_m->vm_ops->open(new_m);
65954 +
65955 + if (new_below)
65956 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65957 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65958 + else
65959 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65960 + }
65961 +#endif
65962 +
65963 return 0;
65964 }
65965
65966 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65967 * work. This now handles partial unmappings.
65968 * Jeremy Fitzhardinge <jeremy@goop.org>
65969 */
65970 +#ifdef CONFIG_PAX_SEGMEXEC
65971 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65972 +{
65973 + int ret = __do_munmap(mm, start, len);
65974 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65975 + return ret;
65976 +
65977 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65978 +}
65979 +
65980 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65981 +#else
65982 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65983 +#endif
65984 {
65985 unsigned long end;
65986 struct vm_area_struct *vma, *prev, *last;
65987
65988 + /*
65989 + * mm->mmap_sem is required to protect against another thread
65990 + * changing the mappings in case we sleep.
65991 + */
65992 + verify_mm_writelocked(mm);
65993 +
65994 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65995 return -EINVAL;
65996
65997 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65998 /* Fix up all other VM information */
65999 remove_vma_list(mm, vma);
66000
66001 + track_exec_limit(mm, start, end, 0UL);
66002 +
66003 return 0;
66004 }
66005
66006 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
66007
66008 profile_munmap(addr);
66009
66010 +#ifdef CONFIG_PAX_SEGMEXEC
66011 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
66012 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
66013 + return -EINVAL;
66014 +#endif
66015 +
66016 down_write(&mm->mmap_sem);
66017 ret = do_munmap(mm, addr, len);
66018 up_write(&mm->mmap_sem);
66019 return ret;
66020 }
66021
66022 -static inline void verify_mm_writelocked(struct mm_struct *mm)
66023 -{
66024 -#ifdef CONFIG_DEBUG_VM
66025 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
66026 - WARN_ON(1);
66027 - up_read(&mm->mmap_sem);
66028 - }
66029 -#endif
66030 -}
66031 -
66032 /*
66033 * this is really a simplified "do_mmap". it only handles
66034 * anonymous maps. eventually we may be able to do some
66035 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
66036 struct rb_node ** rb_link, * rb_parent;
66037 pgoff_t pgoff = addr >> PAGE_SHIFT;
66038 int error;
66039 + unsigned long charged;
66040
66041 len = PAGE_ALIGN(len);
66042 if (!len)
66043 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
66044
66045 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
66046
66047 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66048 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66049 + flags &= ~VM_EXEC;
66050 +
66051 +#ifdef CONFIG_PAX_MPROTECT
66052 + if (mm->pax_flags & MF_PAX_MPROTECT)
66053 + flags &= ~VM_MAYEXEC;
66054 +#endif
66055 +
66056 + }
66057 +#endif
66058 +
66059 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
66060 if (error & ~PAGE_MASK)
66061 return error;
66062
66063 + charged = len >> PAGE_SHIFT;
66064 +
66065 /*
66066 * mlock MCL_FUTURE?
66067 */
66068 if (mm->def_flags & VM_LOCKED) {
66069 unsigned long locked, lock_limit;
66070 - locked = len >> PAGE_SHIFT;
66071 + locked = charged;
66072 locked += mm->locked_vm;
66073 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
66074 lock_limit >>= PAGE_SHIFT;
66075 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
66076 /*
66077 * Clear old maps. this also does some error checking for us
66078 */
66079 - munmap_back:
66080 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66081 if (vma && vma->vm_start < addr + len) {
66082 if (do_munmap(mm, addr, len))
66083 return -ENOMEM;
66084 - goto munmap_back;
66085 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66086 + BUG_ON(vma && vma->vm_start < addr + len);
66087 }
66088
66089 /* Check against address space limits *after* clearing old maps... */
66090 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
66091 + if (!may_expand_vm(mm, charged))
66092 return -ENOMEM;
66093
66094 if (mm->map_count > sysctl_max_map_count)
66095 return -ENOMEM;
66096
66097 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
66098 + if (security_vm_enough_memory(charged))
66099 return -ENOMEM;
66100
66101 /* Can we just expand an old private anonymous mapping? */
66102 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
66103 */
66104 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66105 if (!vma) {
66106 - vm_unacct_memory(len >> PAGE_SHIFT);
66107 + vm_unacct_memory(charged);
66108 return -ENOMEM;
66109 }
66110
66111 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
66112 vma->vm_page_prot = vm_get_page_prot(flags);
66113 vma_link(mm, vma, prev, rb_link, rb_parent);
66114 out:
66115 - mm->total_vm += len >> PAGE_SHIFT;
66116 + mm->total_vm += charged;
66117 if (flags & VM_LOCKED) {
66118 if (!mlock_vma_pages_range(vma, addr, addr + len))
66119 - mm->locked_vm += (len >> PAGE_SHIFT);
66120 + mm->locked_vm += charged;
66121 }
66122 + track_exec_limit(mm, addr, addr + len, flags);
66123 return addr;
66124 }
66125
66126 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
66127 * Walk the list again, actually closing and freeing it,
66128 * with preemption enabled, without holding any MM locks.
66129 */
66130 - while (vma)
66131 + while (vma) {
66132 + vma->vm_mirror = NULL;
66133 vma = remove_vma(vma);
66134 + }
66135
66136 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
66137 }
66138 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
66139 struct vm_area_struct * __vma, * prev;
66140 struct rb_node ** rb_link, * rb_parent;
66141
66142 +#ifdef CONFIG_PAX_SEGMEXEC
66143 + struct vm_area_struct *vma_m = NULL;
66144 +#endif
66145 +
66146 /*
66147 * The vm_pgoff of a purely anonymous vma should be irrelevant
66148 * until its first write fault, when page's anon_vma and index
66149 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
66150 if ((vma->vm_flags & VM_ACCOUNT) &&
66151 security_vm_enough_memory_mm(mm, vma_pages(vma)))
66152 return -ENOMEM;
66153 +
66154 +#ifdef CONFIG_PAX_SEGMEXEC
66155 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
66156 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66157 + if (!vma_m)
66158 + return -ENOMEM;
66159 + }
66160 +#endif
66161 +
66162 vma_link(mm, vma, prev, rb_link, rb_parent);
66163 +
66164 +#ifdef CONFIG_PAX_SEGMEXEC
66165 + if (vma_m)
66166 + pax_mirror_vma(vma_m, vma);
66167 +#endif
66168 +
66169 return 0;
66170 }
66171
66172 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
66173 struct rb_node **rb_link, *rb_parent;
66174 struct mempolicy *pol;
66175
66176 + BUG_ON(vma->vm_mirror);
66177 +
66178 /*
66179 * If anonymous vma has not yet been faulted, update new pgoff
66180 * to match new location, to increase its chance of merging.
66181 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
66182 return new_vma;
66183 }
66184
66185 +#ifdef CONFIG_PAX_SEGMEXEC
66186 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
66187 +{
66188 + struct vm_area_struct *prev_m;
66189 + struct rb_node **rb_link_m, *rb_parent_m;
66190 + struct mempolicy *pol_m;
66191 +
66192 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
66193 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
66194 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
66195 + *vma_m = *vma;
66196 + pol_m = vma_policy(vma_m);
66197 + mpol_get(pol_m);
66198 + vma_set_policy(vma_m, pol_m);
66199 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
66200 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
66201 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
66202 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
66203 + if (vma_m->vm_file)
66204 + get_file(vma_m->vm_file);
66205 + if (vma_m->vm_ops && vma_m->vm_ops->open)
66206 + vma_m->vm_ops->open(vma_m);
66207 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
66208 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
66209 + vma_m->vm_mirror = vma;
66210 + vma->vm_mirror = vma_m;
66211 +}
66212 +#endif
66213 +
66214 /*
66215 * Return true if the calling process may expand its vm space by the passed
66216 * number of pages
66217 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
66218 unsigned long lim;
66219
66220 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
66221 -
66222 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
66223 if (cur + npages > lim)
66224 return 0;
66225 return 1;
66226 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
66227 vma->vm_start = addr;
66228 vma->vm_end = addr + len;
66229
66230 +#ifdef CONFIG_PAX_MPROTECT
66231 + if (mm->pax_flags & MF_PAX_MPROTECT) {
66232 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
66233 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
66234 + return -EPERM;
66235 + if (!(vm_flags & VM_EXEC))
66236 + vm_flags &= ~VM_MAYEXEC;
66237 +#else
66238 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66239 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66240 +#endif
66241 + else
66242 + vm_flags &= ~VM_MAYWRITE;
66243 + }
66244 +#endif
66245 +
66246 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
66247 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66248
66249 diff -urNp linux-2.6.32.43/mm/mprotect.c linux-2.6.32.43/mm/mprotect.c
66250 --- linux-2.6.32.43/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
66251 +++ linux-2.6.32.43/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
66252 @@ -24,10 +24,16 @@
66253 #include <linux/mmu_notifier.h>
66254 #include <linux/migrate.h>
66255 #include <linux/perf_event.h>
66256 +
66257 +#ifdef CONFIG_PAX_MPROTECT
66258 +#include <linux/elf.h>
66259 +#endif
66260 +
66261 #include <asm/uaccess.h>
66262 #include <asm/pgtable.h>
66263 #include <asm/cacheflush.h>
66264 #include <asm/tlbflush.h>
66265 +#include <asm/mmu_context.h>
66266
66267 #ifndef pgprot_modify
66268 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
66269 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
66270 flush_tlb_range(vma, start, end);
66271 }
66272
66273 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66274 +/* called while holding the mmap semaphore for writing, except during stack expansion */
66275 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
66276 +{
66277 + unsigned long oldlimit, newlimit = 0UL;
66278 +
66279 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
66280 + return;
66281 +
66282 + spin_lock(&mm->page_table_lock);
66283 + oldlimit = mm->context.user_cs_limit;
66284 + if ((prot & VM_EXEC) && oldlimit < end)
66285 + /* USER_CS limit moved up */
66286 + newlimit = end;
66287 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
66288 + /* USER_CS limit moved down */
66289 + newlimit = start;
66290 +
66291 + if (newlimit) {
66292 + mm->context.user_cs_limit = newlimit;
66293 +
66294 +#ifdef CONFIG_SMP
66295 + wmb();
66296 + cpus_clear(mm->context.cpu_user_cs_mask);
66297 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
66298 +#endif
66299 +
66300 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
66301 + }
66302 + spin_unlock(&mm->page_table_lock);
66303 + if (newlimit == end) {
66304 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
66305 +
66306 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
66307 + if (is_vm_hugetlb_page(vma))
66308 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
66309 + else
66310 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
66311 + }
66312 +}
66313 +#endif
66314 +
66315 int
66316 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
66317 unsigned long start, unsigned long end, unsigned long newflags)
66318 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
66319 int error;
66320 int dirty_accountable = 0;
66321
66322 +#ifdef CONFIG_PAX_SEGMEXEC
66323 + struct vm_area_struct *vma_m = NULL;
66324 + unsigned long start_m, end_m;
66325 +
66326 + start_m = start + SEGMEXEC_TASK_SIZE;
66327 + end_m = end + SEGMEXEC_TASK_SIZE;
66328 +#endif
66329 +
66330 if (newflags == oldflags) {
66331 *pprev = vma;
66332 return 0;
66333 }
66334
66335 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
66336 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
66337 +
66338 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
66339 + return -ENOMEM;
66340 +
66341 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
66342 + return -ENOMEM;
66343 + }
66344 +
66345 /*
66346 * If we make a private mapping writable we increase our commit;
66347 * but (without finer accounting) cannot reduce our commit if we
66348 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
66349 }
66350 }
66351
66352 +#ifdef CONFIG_PAX_SEGMEXEC
66353 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
66354 + if (start != vma->vm_start) {
66355 + error = split_vma(mm, vma, start, 1);
66356 + if (error)
66357 + goto fail;
66358 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
66359 + *pprev = (*pprev)->vm_next;
66360 + }
66361 +
66362 + if (end != vma->vm_end) {
66363 + error = split_vma(mm, vma, end, 0);
66364 + if (error)
66365 + goto fail;
66366 + }
66367 +
66368 + if (pax_find_mirror_vma(vma)) {
66369 + error = __do_munmap(mm, start_m, end_m - start_m);
66370 + if (error)
66371 + goto fail;
66372 + } else {
66373 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66374 + if (!vma_m) {
66375 + error = -ENOMEM;
66376 + goto fail;
66377 + }
66378 + vma->vm_flags = newflags;
66379 + pax_mirror_vma(vma_m, vma);
66380 + }
66381 + }
66382 +#endif
66383 +
66384 /*
66385 * First try to merge with previous and/or next vma.
66386 */
66387 @@ -195,9 +293,21 @@ success:
66388 * vm_flags and vm_page_prot are protected by the mmap_sem
66389 * held in write mode.
66390 */
66391 +
66392 +#ifdef CONFIG_PAX_SEGMEXEC
66393 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
66394 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
66395 +#endif
66396 +
66397 vma->vm_flags = newflags;
66398 +
66399 +#ifdef CONFIG_PAX_MPROTECT
66400 + if (mm->binfmt && mm->binfmt->handle_mprotect)
66401 + mm->binfmt->handle_mprotect(vma, newflags);
66402 +#endif
66403 +
66404 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
66405 - vm_get_page_prot(newflags));
66406 + vm_get_page_prot(vma->vm_flags));
66407
66408 if (vma_wants_writenotify(vma)) {
66409 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
66410 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66411 end = start + len;
66412 if (end <= start)
66413 return -ENOMEM;
66414 +
66415 +#ifdef CONFIG_PAX_SEGMEXEC
66416 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66417 + if (end > SEGMEXEC_TASK_SIZE)
66418 + return -EINVAL;
66419 + } else
66420 +#endif
66421 +
66422 + if (end > TASK_SIZE)
66423 + return -EINVAL;
66424 +
66425 if (!arch_validate_prot(prot))
66426 return -EINVAL;
66427
66428 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66429 /*
66430 * Does the application expect PROT_READ to imply PROT_EXEC:
66431 */
66432 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66433 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66434 prot |= PROT_EXEC;
66435
66436 vm_flags = calc_vm_prot_bits(prot);
66437 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66438 if (start > vma->vm_start)
66439 prev = vma;
66440
66441 +#ifdef CONFIG_PAX_MPROTECT
66442 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
66443 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
66444 +#endif
66445 +
66446 for (nstart = start ; ; ) {
66447 unsigned long newflags;
66448
66449 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66450
66451 /* newflags >> 4 shift VM_MAY% in place of VM_% */
66452 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
66453 + if (prot & (PROT_WRITE | PROT_EXEC))
66454 + gr_log_rwxmprotect(vma->vm_file);
66455 +
66456 + error = -EACCES;
66457 + goto out;
66458 + }
66459 +
66460 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
66461 error = -EACCES;
66462 goto out;
66463 }
66464 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66465 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
66466 if (error)
66467 goto out;
66468 +
66469 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
66470 +
66471 nstart = tmp;
66472
66473 if (nstart < prev->vm_end)
66474 diff -urNp linux-2.6.32.43/mm/mremap.c linux-2.6.32.43/mm/mremap.c
66475 --- linux-2.6.32.43/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
66476 +++ linux-2.6.32.43/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
66477 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
66478 continue;
66479 pte = ptep_clear_flush(vma, old_addr, old_pte);
66480 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
66481 +
66482 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66483 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
66484 + pte = pte_exprotect(pte);
66485 +#endif
66486 +
66487 set_pte_at(mm, new_addr, new_pte, pte);
66488 }
66489
66490 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
66491 if (is_vm_hugetlb_page(vma))
66492 goto Einval;
66493
66494 +#ifdef CONFIG_PAX_SEGMEXEC
66495 + if (pax_find_mirror_vma(vma))
66496 + goto Einval;
66497 +#endif
66498 +
66499 /* We can't remap across vm area boundaries */
66500 if (old_len > vma->vm_end - addr)
66501 goto Efault;
66502 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
66503 unsigned long ret = -EINVAL;
66504 unsigned long charged = 0;
66505 unsigned long map_flags;
66506 + unsigned long pax_task_size = TASK_SIZE;
66507
66508 if (new_addr & ~PAGE_MASK)
66509 goto out;
66510
66511 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
66512 +#ifdef CONFIG_PAX_SEGMEXEC
66513 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
66514 + pax_task_size = SEGMEXEC_TASK_SIZE;
66515 +#endif
66516 +
66517 + pax_task_size -= PAGE_SIZE;
66518 +
66519 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
66520 goto out;
66521
66522 /* Check if the location we're moving into overlaps the
66523 * old location at all, and fail if it does.
66524 */
66525 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
66526 - goto out;
66527 -
66528 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
66529 + if (addr + old_len > new_addr && new_addr + new_len > addr)
66530 goto out;
66531
66532 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66533 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
66534 struct vm_area_struct *vma;
66535 unsigned long ret = -EINVAL;
66536 unsigned long charged = 0;
66537 + unsigned long pax_task_size = TASK_SIZE;
66538
66539 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
66540 goto out;
66541 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
66542 if (!new_len)
66543 goto out;
66544
66545 +#ifdef CONFIG_PAX_SEGMEXEC
66546 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
66547 + pax_task_size = SEGMEXEC_TASK_SIZE;
66548 +#endif
66549 +
66550 + pax_task_size -= PAGE_SIZE;
66551 +
66552 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
66553 + old_len > pax_task_size || addr > pax_task_size-old_len)
66554 + goto out;
66555 +
66556 if (flags & MREMAP_FIXED) {
66557 if (flags & MREMAP_MAYMOVE)
66558 ret = mremap_to(addr, old_len, new_addr, new_len);
66559 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
66560 addr + new_len);
66561 }
66562 ret = addr;
66563 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
66564 goto out;
66565 }
66566 }
66567 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
66568 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66569 if (ret)
66570 goto out;
66571 +
66572 + map_flags = vma->vm_flags;
66573 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66574 + if (!(ret & ~PAGE_MASK)) {
66575 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66576 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66577 + }
66578 }
66579 out:
66580 if (ret & ~PAGE_MASK)
66581 diff -urNp linux-2.6.32.43/mm/nommu.c linux-2.6.32.43/mm/nommu.c
66582 --- linux-2.6.32.43/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
66583 +++ linux-2.6.32.43/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
66584 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66585 int sysctl_overcommit_ratio = 50; /* default is 50% */
66586 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66587 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66588 -int heap_stack_gap = 0;
66589
66590 atomic_long_t mmap_pages_allocated;
66591
66592 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
66593 EXPORT_SYMBOL(find_vma);
66594
66595 /*
66596 - * find a VMA
66597 - * - we don't extend stack VMAs under NOMMU conditions
66598 - */
66599 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66600 -{
66601 - return find_vma(mm, addr);
66602 -}
66603 -
66604 -/*
66605 * expand a stack to a given address
66606 * - not supported under NOMMU conditions
66607 */
66608 diff -urNp linux-2.6.32.43/mm/page_alloc.c linux-2.6.32.43/mm/page_alloc.c
66609 --- linux-2.6.32.43/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
66610 +++ linux-2.6.32.43/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
66611 @@ -289,7 +289,7 @@ out:
66612 * This usage means that zero-order pages may not be compound.
66613 */
66614
66615 -static void free_compound_page(struct page *page)
66616 +void free_compound_page(struct page *page)
66617 {
66618 __free_pages_ok(page, compound_order(page));
66619 }
66620 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
66621 int bad = 0;
66622 int wasMlocked = __TestClearPageMlocked(page);
66623
66624 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66625 + unsigned long index = 1UL << order;
66626 +#endif
66627 +
66628 kmemcheck_free_shadow(page, order);
66629
66630 for (i = 0 ; i < (1 << order) ; ++i)
66631 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
66632 debug_check_no_obj_freed(page_address(page),
66633 PAGE_SIZE << order);
66634 }
66635 +
66636 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66637 + for (; index; --index)
66638 + sanitize_highpage(page + index - 1);
66639 +#endif
66640 +
66641 arch_free_page(page, order);
66642 kernel_map_pages(page, 1 << order, 0);
66643
66644 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
66645 arch_alloc_page(page, order);
66646 kernel_map_pages(page, 1 << order, 1);
66647
66648 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
66649 if (gfp_flags & __GFP_ZERO)
66650 prep_zero_page(page, order, gfp_flags);
66651 +#endif
66652
66653 if (order && (gfp_flags & __GFP_COMP))
66654 prep_compound_page(page, order);
66655 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
66656 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
66657 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
66658 }
66659 +
66660 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
66661 + sanitize_highpage(page);
66662 +#endif
66663 +
66664 arch_free_page(page, 0);
66665 kernel_map_pages(page, 1, 0);
66666
66667 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
66668 int cpu;
66669 struct zone *zone;
66670
66671 + pax_track_stack();
66672 +
66673 for_each_populated_zone(zone) {
66674 show_node(zone);
66675 printk("%s per-cpu:\n", zone->name);
66676 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
66677 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
66678 }
66679 #else
66680 -static void inline setup_usemap(struct pglist_data *pgdat,
66681 +static inline void setup_usemap(struct pglist_data *pgdat,
66682 struct zone *zone, unsigned long zonesize) {}
66683 #endif /* CONFIG_SPARSEMEM */
66684
66685 diff -urNp linux-2.6.32.43/mm/percpu.c linux-2.6.32.43/mm/percpu.c
66686 --- linux-2.6.32.43/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
66687 +++ linux-2.6.32.43/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
66688 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
66689 static unsigned int pcpu_last_unit_cpu __read_mostly;
66690
66691 /* the address of the first chunk which starts with the kernel static area */
66692 -void *pcpu_base_addr __read_mostly;
66693 +void *pcpu_base_addr __read_only;
66694 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66695
66696 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66697 diff -urNp linux-2.6.32.43/mm/rmap.c linux-2.6.32.43/mm/rmap.c
66698 --- linux-2.6.32.43/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
66699 +++ linux-2.6.32.43/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
66700 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
66701 /* page_table_lock to protect against threads */
66702 spin_lock(&mm->page_table_lock);
66703 if (likely(!vma->anon_vma)) {
66704 +
66705 +#ifdef CONFIG_PAX_SEGMEXEC
66706 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66707 +
66708 + if (vma_m) {
66709 + BUG_ON(vma_m->anon_vma);
66710 + vma_m->anon_vma = anon_vma;
66711 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
66712 + }
66713 +#endif
66714 +
66715 vma->anon_vma = anon_vma;
66716 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
66717 allocated = NULL;
66718 diff -urNp linux-2.6.32.43/mm/shmem.c linux-2.6.32.43/mm/shmem.c
66719 --- linux-2.6.32.43/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
66720 +++ linux-2.6.32.43/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
66721 @@ -31,7 +31,7 @@
66722 #include <linux/swap.h>
66723 #include <linux/ima.h>
66724
66725 -static struct vfsmount *shm_mnt;
66726 +struct vfsmount *shm_mnt;
66727
66728 #ifdef CONFIG_SHMEM
66729 /*
66730 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
66731 goto unlock;
66732 }
66733 entry = shmem_swp_entry(info, index, NULL);
66734 + if (!entry)
66735 + goto unlock;
66736 if (entry->val) {
66737 /*
66738 * The more uptodate page coming down from a stacked
66739 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
66740 struct vm_area_struct pvma;
66741 struct page *page;
66742
66743 + pax_track_stack();
66744 +
66745 spol = mpol_cond_copy(&mpol,
66746 mpol_shared_policy_lookup(&info->policy, idx));
66747
66748 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
66749
66750 info = SHMEM_I(inode);
66751 inode->i_size = len-1;
66752 - if (len <= (char *)inode - (char *)info) {
66753 + if (len <= (char *)inode - (char *)info && len <= 64) {
66754 /* do it inline */
66755 memcpy(info, symname, len);
66756 inode->i_op = &shmem_symlink_inline_operations;
66757 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
66758 int err = -ENOMEM;
66759
66760 /* Round up to L1_CACHE_BYTES to resist false sharing */
66761 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66762 - L1_CACHE_BYTES), GFP_KERNEL);
66763 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66764 if (!sbinfo)
66765 return -ENOMEM;
66766
66767 diff -urNp linux-2.6.32.43/mm/slab.c linux-2.6.32.43/mm/slab.c
66768 --- linux-2.6.32.43/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
66769 +++ linux-2.6.32.43/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
66770 @@ -174,7 +174,7 @@
66771
66772 /* Legal flag mask for kmem_cache_create(). */
66773 #if DEBUG
66774 -# define CREATE_MASK (SLAB_RED_ZONE | \
66775 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66776 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66777 SLAB_CACHE_DMA | \
66778 SLAB_STORE_USER | \
66779 @@ -182,7 +182,7 @@
66780 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66781 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66782 #else
66783 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66784 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66785 SLAB_CACHE_DMA | \
66786 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66787 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66788 @@ -308,7 +308,7 @@ struct kmem_list3 {
66789 * Need this for bootstrapping a per node allocator.
66790 */
66791 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66792 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66793 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66794 #define CACHE_CACHE 0
66795 #define SIZE_AC MAX_NUMNODES
66796 #define SIZE_L3 (2 * MAX_NUMNODES)
66797 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
66798 if ((x)->max_freeable < i) \
66799 (x)->max_freeable = i; \
66800 } while (0)
66801 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66802 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66803 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66804 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66805 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66806 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66807 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66808 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66809 #else
66810 #define STATS_INC_ACTIVE(x) do { } while (0)
66811 #define STATS_DEC_ACTIVE(x) do { } while (0)
66812 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
66813 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66814 */
66815 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66816 - const struct slab *slab, void *obj)
66817 + const struct slab *slab, const void *obj)
66818 {
66819 u32 offset = (obj - slab->s_mem);
66820 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66821 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
66822 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66823 sizes[INDEX_AC].cs_size,
66824 ARCH_KMALLOC_MINALIGN,
66825 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66826 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66827 NULL);
66828
66829 if (INDEX_AC != INDEX_L3) {
66830 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
66831 kmem_cache_create(names[INDEX_L3].name,
66832 sizes[INDEX_L3].cs_size,
66833 ARCH_KMALLOC_MINALIGN,
66834 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66835 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66836 NULL);
66837 }
66838
66839 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
66840 sizes->cs_cachep = kmem_cache_create(names->name,
66841 sizes->cs_size,
66842 ARCH_KMALLOC_MINALIGN,
66843 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66844 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66845 NULL);
66846 }
66847 #ifdef CONFIG_ZONE_DMA
66848 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
66849 }
66850 /* cpu stats */
66851 {
66852 - unsigned long allochit = atomic_read(&cachep->allochit);
66853 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66854 - unsigned long freehit = atomic_read(&cachep->freehit);
66855 - unsigned long freemiss = atomic_read(&cachep->freemiss);
66856 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66857 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66858 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66859 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66860
66861 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66862 allochit, allocmiss, freehit, freemiss);
66863 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
66864
66865 static int __init slab_proc_init(void)
66866 {
66867 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66868 + mode_t gr_mode = S_IRUGO;
66869 +
66870 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66871 + gr_mode = S_IRUSR;
66872 +#endif
66873 +
66874 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66875 #ifdef CONFIG_DEBUG_SLAB_LEAK
66876 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66877 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66878 #endif
66879 return 0;
66880 }
66881 module_init(slab_proc_init);
66882 #endif
66883
66884 +void check_object_size(const void *ptr, unsigned long n, bool to)
66885 +{
66886 +
66887 +#ifdef CONFIG_PAX_USERCOPY
66888 + struct page *page;
66889 + struct kmem_cache *cachep = NULL;
66890 + struct slab *slabp;
66891 + unsigned int objnr;
66892 + unsigned long offset;
66893 +
66894 + if (!n)
66895 + return;
66896 +
66897 + if (ZERO_OR_NULL_PTR(ptr))
66898 + goto report;
66899 +
66900 + if (!virt_addr_valid(ptr))
66901 + return;
66902 +
66903 + page = virt_to_head_page(ptr);
66904 +
66905 + if (!PageSlab(page)) {
66906 + if (object_is_on_stack(ptr, n) == -1)
66907 + goto report;
66908 + return;
66909 + }
66910 +
66911 + cachep = page_get_cache(page);
66912 + if (!(cachep->flags & SLAB_USERCOPY))
66913 + goto report;
66914 +
66915 + slabp = page_get_slab(page);
66916 + objnr = obj_to_index(cachep, slabp, ptr);
66917 + BUG_ON(objnr >= cachep->num);
66918 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66919 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66920 + return;
66921 +
66922 +report:
66923 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66924 +#endif
66925 +
66926 +}
66927 +EXPORT_SYMBOL(check_object_size);
66928 +
66929 /**
66930 * ksize - get the actual amount of memory allocated for a given object
66931 * @objp: Pointer to the object
66932 diff -urNp linux-2.6.32.43/mm/slob.c linux-2.6.32.43/mm/slob.c
66933 --- linux-2.6.32.43/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66934 +++ linux-2.6.32.43/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
66935 @@ -29,7 +29,7 @@
66936 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66937 * alloc_pages() directly, allocating compound pages so the page order
66938 * does not have to be separately tracked, and also stores the exact
66939 - * allocation size in page->private so that it can be used to accurately
66940 + * allocation size in slob_page->size so that it can be used to accurately
66941 * provide ksize(). These objects are detected in kfree() because slob_page()
66942 * is false for them.
66943 *
66944 @@ -58,6 +58,7 @@
66945 */
66946
66947 #include <linux/kernel.h>
66948 +#include <linux/sched.h>
66949 #include <linux/slab.h>
66950 #include <linux/mm.h>
66951 #include <linux/swap.h> /* struct reclaim_state */
66952 @@ -100,7 +101,8 @@ struct slob_page {
66953 unsigned long flags; /* mandatory */
66954 atomic_t _count; /* mandatory */
66955 slobidx_t units; /* free units left in page */
66956 - unsigned long pad[2];
66957 + unsigned long pad[1];
66958 + unsigned long size; /* size when >=PAGE_SIZE */
66959 slob_t *free; /* first free slob_t in page */
66960 struct list_head list; /* linked list of free pages */
66961 };
66962 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66963 */
66964 static inline int is_slob_page(struct slob_page *sp)
66965 {
66966 - return PageSlab((struct page *)sp);
66967 + return PageSlab((struct page *)sp) && !sp->size;
66968 }
66969
66970 static inline void set_slob_page(struct slob_page *sp)
66971 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66972
66973 static inline struct slob_page *slob_page(const void *addr)
66974 {
66975 - return (struct slob_page *)virt_to_page(addr);
66976 + return (struct slob_page *)virt_to_head_page(addr);
66977 }
66978
66979 /*
66980 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66981 /*
66982 * Return the size of a slob block.
66983 */
66984 -static slobidx_t slob_units(slob_t *s)
66985 +static slobidx_t slob_units(const slob_t *s)
66986 {
66987 if (s->units > 0)
66988 return s->units;
66989 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66990 /*
66991 * Return the next free slob block pointer after this one.
66992 */
66993 -static slob_t *slob_next(slob_t *s)
66994 +static slob_t *slob_next(const slob_t *s)
66995 {
66996 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66997 slobidx_t next;
66998 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66999 /*
67000 * Returns true if s is the last free block in its page.
67001 */
67002 -static int slob_last(slob_t *s)
67003 +static int slob_last(const slob_t *s)
67004 {
67005 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
67006 }
67007 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
67008 if (!page)
67009 return NULL;
67010
67011 + set_slob_page(page);
67012 return page_address(page);
67013 }
67014
67015 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
67016 if (!b)
67017 return NULL;
67018 sp = slob_page(b);
67019 - set_slob_page(sp);
67020
67021 spin_lock_irqsave(&slob_lock, flags);
67022 sp->units = SLOB_UNITS(PAGE_SIZE);
67023 sp->free = b;
67024 + sp->size = 0;
67025 INIT_LIST_HEAD(&sp->list);
67026 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
67027 set_slob_page_free(sp, slob_list);
67028 @@ -475,10 +478,9 @@ out:
67029 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
67030 #endif
67031
67032 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67033 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
67034 {
67035 - unsigned int *m;
67036 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67037 + slob_t *m;
67038 void *ret;
67039
67040 lockdep_trace_alloc(gfp);
67041 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
67042
67043 if (!m)
67044 return NULL;
67045 - *m = size;
67046 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
67047 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
67048 + m[0].units = size;
67049 + m[1].units = align;
67050 ret = (void *)m + align;
67051
67052 trace_kmalloc_node(_RET_IP_, ret,
67053 @@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
67054
67055 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
67056 if (ret) {
67057 - struct page *page;
67058 - page = virt_to_page(ret);
67059 - page->private = size;
67060 + struct slob_page *sp;
67061 + sp = slob_page(ret);
67062 + sp->size = size;
67063 }
67064
67065 trace_kmalloc_node(_RET_IP_, ret,
67066 size, PAGE_SIZE << order, gfp, node);
67067 }
67068
67069 - kmemleak_alloc(ret, size, 1, gfp);
67070 + return ret;
67071 +}
67072 +
67073 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67074 +{
67075 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67076 + void *ret = __kmalloc_node_align(size, gfp, node, align);
67077 +
67078 + if (!ZERO_OR_NULL_PTR(ret))
67079 + kmemleak_alloc(ret, size, 1, gfp);
67080 return ret;
67081 }
67082 EXPORT_SYMBOL(__kmalloc_node);
67083 @@ -528,13 +542,88 @@ void kfree(const void *block)
67084 sp = slob_page(block);
67085 if (is_slob_page(sp)) {
67086 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67087 - unsigned int *m = (unsigned int *)(block - align);
67088 - slob_free(m, *m + align);
67089 - } else
67090 + slob_t *m = (slob_t *)(block - align);
67091 + slob_free(m, m[0].units + align);
67092 + } else {
67093 + clear_slob_page(sp);
67094 + free_slob_page(sp);
67095 + sp->size = 0;
67096 put_page(&sp->page);
67097 + }
67098 }
67099 EXPORT_SYMBOL(kfree);
67100
67101 +void check_object_size(const void *ptr, unsigned long n, bool to)
67102 +{
67103 +
67104 +#ifdef CONFIG_PAX_USERCOPY
67105 + struct slob_page *sp;
67106 + const slob_t *free;
67107 + const void *base;
67108 + unsigned long flags;
67109 +
67110 + if (!n)
67111 + return;
67112 +
67113 + if (ZERO_OR_NULL_PTR(ptr))
67114 + goto report;
67115 +
67116 + if (!virt_addr_valid(ptr))
67117 + return;
67118 +
67119 + sp = slob_page(ptr);
67120 + if (!PageSlab((struct page*)sp)) {
67121 + if (object_is_on_stack(ptr, n) == -1)
67122 + goto report;
67123 + return;
67124 + }
67125 +
67126 + if (sp->size) {
67127 + base = page_address(&sp->page);
67128 + if (base <= ptr && n <= sp->size - (ptr - base))
67129 + return;
67130 + goto report;
67131 + }
67132 +
67133 + /* some tricky double walking to find the chunk */
67134 + spin_lock_irqsave(&slob_lock, flags);
67135 + base = (void *)((unsigned long)ptr & PAGE_MASK);
67136 + free = sp->free;
67137 +
67138 + while (!slob_last(free) && (void *)free <= ptr) {
67139 + base = free + slob_units(free);
67140 + free = slob_next(free);
67141 + }
67142 +
67143 + while (base < (void *)free) {
67144 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
67145 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
67146 + int offset;
67147 +
67148 + if (ptr < base + align)
67149 + break;
67150 +
67151 + offset = ptr - base - align;
67152 + if (offset >= m) {
67153 + base += size;
67154 + continue;
67155 + }
67156 +
67157 + if (n > m - offset)
67158 + break;
67159 +
67160 + spin_unlock_irqrestore(&slob_lock, flags);
67161 + return;
67162 + }
67163 +
67164 + spin_unlock_irqrestore(&slob_lock, flags);
67165 +report:
67166 + pax_report_usercopy(ptr, n, to, NULL);
67167 +#endif
67168 +
67169 +}
67170 +EXPORT_SYMBOL(check_object_size);
67171 +
67172 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
67173 size_t ksize(const void *block)
67174 {
67175 @@ -547,10 +636,10 @@ size_t ksize(const void *block)
67176 sp = slob_page(block);
67177 if (is_slob_page(sp)) {
67178 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67179 - unsigned int *m = (unsigned int *)(block - align);
67180 - return SLOB_UNITS(*m) * SLOB_UNIT;
67181 + slob_t *m = (slob_t *)(block - align);
67182 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
67183 } else
67184 - return sp->page.private;
67185 + return sp->size;
67186 }
67187 EXPORT_SYMBOL(ksize);
67188
67189 @@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
67190 {
67191 struct kmem_cache *c;
67192
67193 +#ifdef CONFIG_PAX_USERCOPY
67194 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
67195 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
67196 +#else
67197 c = slob_alloc(sizeof(struct kmem_cache),
67198 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
67199 +#endif
67200
67201 if (c) {
67202 c->name = name;
67203 @@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
67204 {
67205 void *b;
67206
67207 +#ifdef CONFIG_PAX_USERCOPY
67208 + b = __kmalloc_node_align(c->size, flags, node, c->align);
67209 +#else
67210 if (c->size < PAGE_SIZE) {
67211 b = slob_alloc(c->size, flags, c->align, node);
67212 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67213 SLOB_UNITS(c->size) * SLOB_UNIT,
67214 flags, node);
67215 } else {
67216 + struct slob_page *sp;
67217 +
67218 b = slob_new_pages(flags, get_order(c->size), node);
67219 + sp = slob_page(b);
67220 + sp->size = c->size;
67221 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67222 PAGE_SIZE << get_order(c->size),
67223 flags, node);
67224 }
67225 +#endif
67226
67227 if (c->ctor)
67228 c->ctor(b);
67229 @@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
67230
67231 static void __kmem_cache_free(void *b, int size)
67232 {
67233 - if (size < PAGE_SIZE)
67234 + struct slob_page *sp = slob_page(b);
67235 +
67236 + if (is_slob_page(sp))
67237 slob_free(b, size);
67238 - else
67239 + else {
67240 + clear_slob_page(sp);
67241 + free_slob_page(sp);
67242 + sp->size = 0;
67243 slob_free_pages(b, get_order(size));
67244 + }
67245 }
67246
67247 static void kmem_rcu_free(struct rcu_head *head)
67248 @@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
67249
67250 void kmem_cache_free(struct kmem_cache *c, void *b)
67251 {
67252 + int size = c->size;
67253 +
67254 +#ifdef CONFIG_PAX_USERCOPY
67255 + if (size + c->align < PAGE_SIZE) {
67256 + size += c->align;
67257 + b -= c->align;
67258 + }
67259 +#endif
67260 +
67261 kmemleak_free_recursive(b, c->flags);
67262 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
67263 struct slob_rcu *slob_rcu;
67264 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
67265 + slob_rcu = b + (size - sizeof(struct slob_rcu));
67266 INIT_RCU_HEAD(&slob_rcu->head);
67267 - slob_rcu->size = c->size;
67268 + slob_rcu->size = size;
67269 call_rcu(&slob_rcu->head, kmem_rcu_free);
67270 } else {
67271 - __kmem_cache_free(b, c->size);
67272 + __kmem_cache_free(b, size);
67273 }
67274
67275 +#ifdef CONFIG_PAX_USERCOPY
67276 + trace_kfree(_RET_IP_, b);
67277 +#else
67278 trace_kmem_cache_free(_RET_IP_, b);
67279 +#endif
67280 +
67281 }
67282 EXPORT_SYMBOL(kmem_cache_free);
67283
67284 diff -urNp linux-2.6.32.43/mm/slub.c linux-2.6.32.43/mm/slub.c
67285 --- linux-2.6.32.43/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
67286 +++ linux-2.6.32.43/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
67287 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
67288 if (!t->addr)
67289 return;
67290
67291 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
67292 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
67293 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
67294 }
67295
67296 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
67297
67298 page = virt_to_head_page(x);
67299
67300 + BUG_ON(!PageSlab(page));
67301 +
67302 slab_free(s, page, x, _RET_IP_);
67303
67304 trace_kmem_cache_free(_RET_IP_, x);
67305 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
67306 * Merge control. If this is set then no merging of slab caches will occur.
67307 * (Could be removed. This was introduced to pacify the merge skeptics.)
67308 */
67309 -static int slub_nomerge;
67310 +static int slub_nomerge = 1;
67311
67312 /*
67313 * Calculate the order of allocation given an slab object size.
67314 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
67315 * list to avoid pounding the page allocator excessively.
67316 */
67317 set_min_partial(s, ilog2(s->size));
67318 - s->refcount = 1;
67319 + atomic_set(&s->refcount, 1);
67320 #ifdef CONFIG_NUMA
67321 s->remote_node_defrag_ratio = 1000;
67322 #endif
67323 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
67324 void kmem_cache_destroy(struct kmem_cache *s)
67325 {
67326 down_write(&slub_lock);
67327 - s->refcount--;
67328 - if (!s->refcount) {
67329 + if (atomic_dec_and_test(&s->refcount)) {
67330 list_del(&s->list);
67331 up_write(&slub_lock);
67332 if (kmem_cache_close(s)) {
67333 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
67334 __setup("slub_nomerge", setup_slub_nomerge);
67335
67336 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
67337 - const char *name, int size, gfp_t gfp_flags)
67338 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
67339 {
67340 - unsigned int flags = 0;
67341 -
67342 if (gfp_flags & SLUB_DMA)
67343 - flags = SLAB_CACHE_DMA;
67344 + flags |= SLAB_CACHE_DMA;
67345
67346 /*
67347 * This function is called with IRQs disabled during early-boot on
67348 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
67349 EXPORT_SYMBOL(__kmalloc_node);
67350 #endif
67351
67352 +void check_object_size(const void *ptr, unsigned long n, bool to)
67353 +{
67354 +
67355 +#ifdef CONFIG_PAX_USERCOPY
67356 + struct page *page;
67357 + struct kmem_cache *s = NULL;
67358 + unsigned long offset;
67359 +
67360 + if (!n)
67361 + return;
67362 +
67363 + if (ZERO_OR_NULL_PTR(ptr))
67364 + goto report;
67365 +
67366 + if (!virt_addr_valid(ptr))
67367 + return;
67368 +
67369 + page = get_object_page(ptr);
67370 +
67371 + if (!page) {
67372 + if (object_is_on_stack(ptr, n) == -1)
67373 + goto report;
67374 + return;
67375 + }
67376 +
67377 + s = page->slab;
67378 + if (!(s->flags & SLAB_USERCOPY))
67379 + goto report;
67380 +
67381 + offset = (ptr - page_address(page)) % s->size;
67382 + if (offset <= s->objsize && n <= s->objsize - offset)
67383 + return;
67384 +
67385 +report:
67386 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
67387 +#endif
67388 +
67389 +}
67390 +EXPORT_SYMBOL(check_object_size);
67391 +
67392 size_t ksize(const void *object)
67393 {
67394 struct page *page;
67395 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
67396 * kmem_cache_open for slab_state == DOWN.
67397 */
67398 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
67399 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
67400 - kmalloc_caches[0].refcount = -1;
67401 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
67402 + atomic_set(&kmalloc_caches[0].refcount, -1);
67403 caches++;
67404
67405 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
67406 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
67407 /* Caches that are not of the two-to-the-power-of size */
67408 if (KMALLOC_MIN_SIZE <= 32) {
67409 create_kmalloc_cache(&kmalloc_caches[1],
67410 - "kmalloc-96", 96, GFP_NOWAIT);
67411 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
67412 caches++;
67413 }
67414 if (KMALLOC_MIN_SIZE <= 64) {
67415 create_kmalloc_cache(&kmalloc_caches[2],
67416 - "kmalloc-192", 192, GFP_NOWAIT);
67417 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
67418 caches++;
67419 }
67420
67421 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
67422 create_kmalloc_cache(&kmalloc_caches[i],
67423 - "kmalloc", 1 << i, GFP_NOWAIT);
67424 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
67425 caches++;
67426 }
67427
67428 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
67429 /*
67430 * We may have set a slab to be unmergeable during bootstrap.
67431 */
67432 - if (s->refcount < 0)
67433 + if (atomic_read(&s->refcount) < 0)
67434 return 1;
67435
67436 return 0;
67437 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
67438 if (s) {
67439 int cpu;
67440
67441 - s->refcount++;
67442 + atomic_inc(&s->refcount);
67443 /*
67444 * Adjust the object sizes so that we clear
67445 * the complete object on kzalloc.
67446 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
67447
67448 if (sysfs_slab_alias(s, name)) {
67449 down_write(&slub_lock);
67450 - s->refcount--;
67451 + atomic_dec(&s->refcount);
67452 up_write(&slub_lock);
67453 goto err;
67454 }
67455 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
67456
67457 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
67458 {
67459 - return sprintf(buf, "%d\n", s->refcount - 1);
67460 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
67461 }
67462 SLAB_ATTR_RO(aliases);
67463
67464 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
67465 kfree(s);
67466 }
67467
67468 -static struct sysfs_ops slab_sysfs_ops = {
67469 +static const struct sysfs_ops slab_sysfs_ops = {
67470 .show = slab_attr_show,
67471 .store = slab_attr_store,
67472 };
67473 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
67474 return 0;
67475 }
67476
67477 -static struct kset_uevent_ops slab_uevent_ops = {
67478 +static const struct kset_uevent_ops slab_uevent_ops = {
67479 .filter = uevent_filter,
67480 };
67481
67482 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
67483
67484 static int __init slab_proc_init(void)
67485 {
67486 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
67487 + mode_t gr_mode = S_IRUGO;
67488 +
67489 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67490 + gr_mode = S_IRUSR;
67491 +#endif
67492 +
67493 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
67494 return 0;
67495 }
67496 module_init(slab_proc_init);
67497 diff -urNp linux-2.6.32.43/mm/swap.c linux-2.6.32.43/mm/swap.c
67498 --- linux-2.6.32.43/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
67499 +++ linux-2.6.32.43/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
67500 @@ -30,6 +30,7 @@
67501 #include <linux/notifier.h>
67502 #include <linux/backing-dev.h>
67503 #include <linux/memcontrol.h>
67504 +#include <linux/hugetlb.h>
67505
67506 #include "internal.h"
67507
67508 @@ -65,6 +66,8 @@ static void put_compound_page(struct pag
67509 compound_page_dtor *dtor;
67510
67511 dtor = get_compound_page_dtor(page);
67512 + if (!PageHuge(page))
67513 + BUG_ON(dtor != free_compound_page);
67514 (*dtor)(page);
67515 }
67516 }
67517 diff -urNp linux-2.6.32.43/mm/util.c linux-2.6.32.43/mm/util.c
67518 --- linux-2.6.32.43/mm/util.c 2011-03-27 14:31:47.000000000 -0400
67519 +++ linux-2.6.32.43/mm/util.c 2011-04-17 15:56:46.000000000 -0400
67520 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
67521 void arch_pick_mmap_layout(struct mm_struct *mm)
67522 {
67523 mm->mmap_base = TASK_UNMAPPED_BASE;
67524 +
67525 +#ifdef CONFIG_PAX_RANDMMAP
67526 + if (mm->pax_flags & MF_PAX_RANDMMAP)
67527 + mm->mmap_base += mm->delta_mmap;
67528 +#endif
67529 +
67530 mm->get_unmapped_area = arch_get_unmapped_area;
67531 mm->unmap_area = arch_unmap_area;
67532 }
67533 diff -urNp linux-2.6.32.43/mm/vmalloc.c linux-2.6.32.43/mm/vmalloc.c
67534 --- linux-2.6.32.43/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
67535 +++ linux-2.6.32.43/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
67536 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67537
67538 pte = pte_offset_kernel(pmd, addr);
67539 do {
67540 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67541 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67542 +
67543 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67544 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67545 + BUG_ON(!pte_exec(*pte));
67546 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67547 + continue;
67548 + }
67549 +#endif
67550 +
67551 + {
67552 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67553 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67554 + }
67555 } while (pte++, addr += PAGE_SIZE, addr != end);
67556 }
67557
67558 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67559 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67560 {
67561 pte_t *pte;
67562 + int ret = -ENOMEM;
67563
67564 /*
67565 * nr is a running index into the array which helps higher level
67566 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
67567 pte = pte_alloc_kernel(pmd, addr);
67568 if (!pte)
67569 return -ENOMEM;
67570 +
67571 + pax_open_kernel();
67572 do {
67573 struct page *page = pages[*nr];
67574
67575 - if (WARN_ON(!pte_none(*pte)))
67576 - return -EBUSY;
67577 - if (WARN_ON(!page))
67578 - return -ENOMEM;
67579 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67580 + if (!(pgprot_val(prot) & _PAGE_NX))
67581 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
67582 + else
67583 +#endif
67584 +
67585 + if (WARN_ON(!pte_none(*pte))) {
67586 + ret = -EBUSY;
67587 + goto out;
67588 + }
67589 + if (WARN_ON(!page)) {
67590 + ret = -ENOMEM;
67591 + goto out;
67592 + }
67593 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67594 (*nr)++;
67595 } while (pte++, addr += PAGE_SIZE, addr != end);
67596 - return 0;
67597 + ret = 0;
67598 +out:
67599 + pax_close_kernel();
67600 + return ret;
67601 }
67602
67603 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67604 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
67605 * and fall back on vmalloc() if that fails. Others
67606 * just put it in the vmalloc space.
67607 */
67608 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67609 +#ifdef CONFIG_MODULES
67610 +#ifdef MODULES_VADDR
67611 unsigned long addr = (unsigned long)x;
67612 if (addr >= MODULES_VADDR && addr < MODULES_END)
67613 return 1;
67614 #endif
67615 +
67616 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67617 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67618 + return 1;
67619 +#endif
67620 +
67621 +#endif
67622 +
67623 return is_vmalloc_addr(x);
67624 }
67625
67626 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
67627
67628 if (!pgd_none(*pgd)) {
67629 pud_t *pud = pud_offset(pgd, addr);
67630 +#ifdef CONFIG_X86
67631 + if (!pud_large(*pud))
67632 +#endif
67633 if (!pud_none(*pud)) {
67634 pmd_t *pmd = pmd_offset(pud, addr);
67635 +#ifdef CONFIG_X86
67636 + if (!pmd_large(*pmd))
67637 +#endif
67638 if (!pmd_none(*pmd)) {
67639 pte_t *ptep, pte;
67640
67641 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
67642 struct rb_node *tmp;
67643
67644 while (*p) {
67645 - struct vmap_area *tmp;
67646 + struct vmap_area *varea;
67647
67648 parent = *p;
67649 - tmp = rb_entry(parent, struct vmap_area, rb_node);
67650 - if (va->va_start < tmp->va_end)
67651 + varea = rb_entry(parent, struct vmap_area, rb_node);
67652 + if (va->va_start < varea->va_end)
67653 p = &(*p)->rb_left;
67654 - else if (va->va_end > tmp->va_start)
67655 + else if (va->va_end > varea->va_start)
67656 p = &(*p)->rb_right;
67657 else
67658 BUG();
67659 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
67660 struct vm_struct *area;
67661
67662 BUG_ON(in_interrupt());
67663 +
67664 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67665 + if (flags & VM_KERNEXEC) {
67666 + if (start != VMALLOC_START || end != VMALLOC_END)
67667 + return NULL;
67668 + start = (unsigned long)MODULES_EXEC_VADDR;
67669 + end = (unsigned long)MODULES_EXEC_END;
67670 + }
67671 +#endif
67672 +
67673 if (flags & VM_IOREMAP) {
67674 int bit = fls(size);
67675
67676 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
67677 if (count > totalram_pages)
67678 return NULL;
67679
67680 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67681 + if (!(pgprot_val(prot) & _PAGE_NX))
67682 + flags |= VM_KERNEXEC;
67683 +#endif
67684 +
67685 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67686 __builtin_return_address(0));
67687 if (!area)
67688 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
67689 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67690 return NULL;
67691
67692 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67693 + if (!(pgprot_val(prot) & _PAGE_NX))
67694 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67695 + node, gfp_mask, caller);
67696 + else
67697 +#endif
67698 +
67699 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
67700 VMALLOC_END, node, gfp_mask, caller);
67701
67702 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
67703 return addr;
67704 }
67705
67706 +#undef __vmalloc
67707 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67708 {
67709 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67710 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
67711 * For tight control over page level allocator and protection flags
67712 * use __vmalloc() instead.
67713 */
67714 +#undef vmalloc
67715 void *vmalloc(unsigned long size)
67716 {
67717 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67718 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
67719 * The resulting memory area is zeroed so it can be mapped to userspace
67720 * without leaking data.
67721 */
67722 +#undef vmalloc_user
67723 void *vmalloc_user(unsigned long size)
67724 {
67725 struct vm_struct *area;
67726 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
67727 * For tight control over page level allocator and protection flags
67728 * use __vmalloc() instead.
67729 */
67730 +#undef vmalloc_node
67731 void *vmalloc_node(unsigned long size, int node)
67732 {
67733 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67734 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
67735 * For tight control over page level allocator and protection flags
67736 * use __vmalloc() instead.
67737 */
67738 -
67739 +#undef vmalloc_exec
67740 void *vmalloc_exec(unsigned long size)
67741 {
67742 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67743 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67744 -1, __builtin_return_address(0));
67745 }
67746
67747 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
67748 * Allocate enough 32bit PA addressable pages to cover @size from the
67749 * page level allocator and map them into contiguous kernel virtual space.
67750 */
67751 +#undef vmalloc_32
67752 void *vmalloc_32(unsigned long size)
67753 {
67754 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67755 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
67756 * The resulting memory area is 32bit addressable and zeroed so it can be
67757 * mapped to userspace without leaking data.
67758 */
67759 +#undef vmalloc_32_user
67760 void *vmalloc_32_user(unsigned long size)
67761 {
67762 struct vm_struct *area;
67763 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
67764 unsigned long uaddr = vma->vm_start;
67765 unsigned long usize = vma->vm_end - vma->vm_start;
67766
67767 + BUG_ON(vma->vm_mirror);
67768 +
67769 if ((PAGE_SIZE-1) & (unsigned long)addr)
67770 return -EINVAL;
67771
67772 diff -urNp linux-2.6.32.43/mm/vmstat.c linux-2.6.32.43/mm/vmstat.c
67773 --- linux-2.6.32.43/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
67774 +++ linux-2.6.32.43/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
67775 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
67776 *
67777 * vm_stat contains the global counters
67778 */
67779 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67780 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67781 EXPORT_SYMBOL(vm_stat);
67782
67783 #ifdef CONFIG_SMP
67784 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
67785 v = p->vm_stat_diff[i];
67786 p->vm_stat_diff[i] = 0;
67787 local_irq_restore(flags);
67788 - atomic_long_add(v, &zone->vm_stat[i]);
67789 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67790 global_diff[i] += v;
67791 #ifdef CONFIG_NUMA
67792 /* 3 seconds idle till flush */
67793 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
67794
67795 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67796 if (global_diff[i])
67797 - atomic_long_add(global_diff[i], &vm_stat[i]);
67798 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67799 }
67800
67801 #endif
67802 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
67803 start_cpu_timer(cpu);
67804 #endif
67805 #ifdef CONFIG_PROC_FS
67806 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67807 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67808 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67809 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67810 + {
67811 + mode_t gr_mode = S_IRUGO;
67812 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67813 + gr_mode = S_IRUSR;
67814 +#endif
67815 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67816 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67817 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67818 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67819 +#else
67820 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67821 +#endif
67822 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67823 + }
67824 #endif
67825 return 0;
67826 }
67827 diff -urNp linux-2.6.32.43/net/8021q/vlan.c linux-2.6.32.43/net/8021q/vlan.c
67828 --- linux-2.6.32.43/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
67829 +++ linux-2.6.32.43/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
67830 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
67831 err = -EPERM;
67832 if (!capable(CAP_NET_ADMIN))
67833 break;
67834 - if ((args.u.name_type >= 0) &&
67835 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67836 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67837 struct vlan_net *vn;
67838
67839 vn = net_generic(net, vlan_net_id);
67840 diff -urNp linux-2.6.32.43/net/atm/atm_misc.c linux-2.6.32.43/net/atm/atm_misc.c
67841 --- linux-2.6.32.43/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
67842 +++ linux-2.6.32.43/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
67843 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
67844 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67845 return 1;
67846 atm_return(vcc,truesize);
67847 - atomic_inc(&vcc->stats->rx_drop);
67848 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67849 return 0;
67850 }
67851
67852 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
67853 }
67854 }
67855 atm_return(vcc,guess);
67856 - atomic_inc(&vcc->stats->rx_drop);
67857 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67858 return NULL;
67859 }
67860
67861 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
67862
67863 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67864 {
67865 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67866 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67867 __SONET_ITEMS
67868 #undef __HANDLE_ITEM
67869 }
67870 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
67871
67872 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67873 {
67874 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
67875 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67876 __SONET_ITEMS
67877 #undef __HANDLE_ITEM
67878 }
67879 diff -urNp linux-2.6.32.43/net/atm/mpoa_caches.c linux-2.6.32.43/net/atm/mpoa_caches.c
67880 --- linux-2.6.32.43/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
67881 +++ linux-2.6.32.43/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
67882 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
67883 struct timeval now;
67884 struct k_message msg;
67885
67886 + pax_track_stack();
67887 +
67888 do_gettimeofday(&now);
67889
67890 write_lock_irq(&client->egress_lock);
67891 diff -urNp linux-2.6.32.43/net/atm/proc.c linux-2.6.32.43/net/atm/proc.c
67892 --- linux-2.6.32.43/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
67893 +++ linux-2.6.32.43/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
67894 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
67895 const struct k_atm_aal_stats *stats)
67896 {
67897 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67898 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
67899 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
67900 - atomic_read(&stats->rx_drop));
67901 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67902 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67903 + atomic_read_unchecked(&stats->rx_drop));
67904 }
67905
67906 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67907 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
67908 {
67909 struct sock *sk = sk_atm(vcc);
67910
67911 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67912 + seq_printf(seq, "%p ", NULL);
67913 +#else
67914 seq_printf(seq, "%p ", vcc);
67915 +#endif
67916 +
67917 if (!vcc->dev)
67918 seq_printf(seq, "Unassigned ");
67919 else
67920 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
67921 {
67922 if (!vcc->dev)
67923 seq_printf(seq, sizeof(void *) == 4 ?
67924 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67925 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
67926 +#else
67927 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
67928 +#endif
67929 else
67930 seq_printf(seq, "%3d %3d %5d ",
67931 vcc->dev->number, vcc->vpi, vcc->vci);
67932 diff -urNp linux-2.6.32.43/net/atm/resources.c linux-2.6.32.43/net/atm/resources.c
67933 --- linux-2.6.32.43/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
67934 +++ linux-2.6.32.43/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
67935 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
67936 static void copy_aal_stats(struct k_atm_aal_stats *from,
67937 struct atm_aal_stats *to)
67938 {
67939 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67940 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67941 __AAL_STAT_ITEMS
67942 #undef __HANDLE_ITEM
67943 }
67944 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67945 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67946 struct atm_aal_stats *to)
67947 {
67948 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67949 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67950 __AAL_STAT_ITEMS
67951 #undef __HANDLE_ITEM
67952 }
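The net/atm hunks above, and many like them below, move pure statistics counters (tx/rx/drop tallies that are allowed to wrap) from atomic_t to atomic_unchecked_t, so the overflow trap that PAX_REFCOUNT adds to the checked atomics does not fire on them. A rough, simplified sketch of what the unchecked type and two of its accessors look like in the x86 headers patched elsewhere in this series, not a verbatim copy:

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return v->counter;
	}

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* same as atomic_inc(), minus the REFCOUNT overflow check */
		asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
	}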
67953 diff -urNp linux-2.6.32.43/net/bluetooth/l2cap.c linux-2.6.32.43/net/bluetooth/l2cap.c
67954 --- linux-2.6.32.43/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67955 +++ linux-2.6.32.43/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
67956 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67957 err = -ENOTCONN;
67958 break;
67959 }
67960 -
67961 + memset(&cinfo, 0, sizeof(cinfo));
67962 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
67963 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
67964
67965 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
67966
67967 /* Reject if config buffer is too small. */
67968 len = cmd_len - sizeof(*req);
67969 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67970 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67971 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
67972 l2cap_build_conf_rsp(sk, rsp,
67973 L2CAP_CONF_REJECT, flags), rsp);
67974 diff -urNp linux-2.6.32.43/net/bluetooth/rfcomm/sock.c linux-2.6.32.43/net/bluetooth/rfcomm/sock.c
67975 --- linux-2.6.32.43/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
67976 +++ linux-2.6.32.43/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
67977 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
67978
67979 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
67980
67981 + memset(&cinfo, 0, sizeof(cinfo));
67982 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
67983 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
67984
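The two Bluetooth hunks above close kernel-stack information leaks: the connection-info structures are built on the stack and copied out to user space, so any padding or field the code never wrote would carry stale stack bytes to the caller; memset()ing the whole struct first prevents that. (The l2cap_config_req() hunk additionally rejects a negative computed length before the buffer-size comparison.) A small user-space illustration of the padding problem, with a made-up conninfo struct:

	#include <stdio.h>
	#include <string.h>

	struct conninfo {
		unsigned short handle;	/* 2 bytes, then 2 bytes of padding on most ABIs */
		unsigned int mtu;	/* 4 bytes */
	};

	static void fill(struct conninfo *ci, int scrub)
	{
		if (scrub)
			memset(ci, 0, sizeof(*ci));	/* the fix: clears the padding too */
		ci->handle = 42;
		ci->mtu = 672;
	}

	int main(void)
	{
		unsigned char noise[sizeof(struct conninfo)];
		struct conninfo ci;

		memset(noise, 0xAA, sizeof(noise));	/* stand-in for stale stack data */
		memcpy(&ci, noise, sizeof(ci));
		fill(&ci, 0);
		printf("padding byte without memset: %02x\n", ((unsigned char *)&ci)[2]);
		fill(&ci, 1);
		printf("padding byte with memset:    %02x\n", ((unsigned char *)&ci)[2]);
		return 0;
	}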
67985 diff -urNp linux-2.6.32.43/net/bridge/br_private.h linux-2.6.32.43/net/bridge/br_private.h
67986 --- linux-2.6.32.43/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
67987 +++ linux-2.6.32.43/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
67988 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
67989
67990 #ifdef CONFIG_SYSFS
67991 /* br_sysfs_if.c */
67992 -extern struct sysfs_ops brport_sysfs_ops;
67993 +extern const struct sysfs_ops brport_sysfs_ops;
67994 extern int br_sysfs_addif(struct net_bridge_port *p);
67995
67996 /* br_sysfs_br.c */
67997 diff -urNp linux-2.6.32.43/net/bridge/br_stp_if.c linux-2.6.32.43/net/bridge/br_stp_if.c
67998 --- linux-2.6.32.43/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67999 +++ linux-2.6.32.43/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
68000 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
68001 char *envp[] = { NULL };
68002
68003 if (br->stp_enabled == BR_USER_STP) {
68004 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
68005 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
68006 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
68007 br->dev->name, r);
68008
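The br_stp_if.c change swaps the bare constant 1 for UMH_WAIT_PROC in the call_usermodehelper() invocation, spelling out that the bridge code waits for the user-mode STP helper to exit and reads its exit status, rather than depending on a magic number that merely coincides with the enum value. A short sketch of the call pattern; the helper path and arguments are placeholders:

	#include <linux/kmod.h>

	static int run_helper_example(void)
	{
		char *argv[] = { "/sbin/example-helper", "start", NULL };	/* placeholder */
		char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

		/* UMH_WAIT_PROC: block until the helper process has exited,
		   so the return value reflects its exit status */
		return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	}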
68009 diff -urNp linux-2.6.32.43/net/bridge/br_sysfs_if.c linux-2.6.32.43/net/bridge/br_sysfs_if.c
68010 --- linux-2.6.32.43/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
68011 +++ linux-2.6.32.43/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
68012 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
68013 return ret;
68014 }
68015
68016 -struct sysfs_ops brport_sysfs_ops = {
68017 +const struct sysfs_ops brport_sysfs_ops = {
68018 .show = brport_show,
68019 .store = brport_store,
68020 };
68021 diff -urNp linux-2.6.32.43/net/bridge/netfilter/ebtables.c linux-2.6.32.43/net/bridge/netfilter/ebtables.c
68022 --- linux-2.6.32.43/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
68023 +++ linux-2.6.32.43/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
68024 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
68025 unsigned int entries_size, nentries;
68026 char *entries;
68027
68028 + pax_track_stack();
68029 +
68030 if (cmd == EBT_SO_GET_ENTRIES) {
68031 entries_size = t->private->entries_size;
68032 nentries = t->private->nentries;
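The pax_track_stack() call added to copy_everything_to_user() above is one of many sprinkled through this patch (clear_expired(), skb_splice_bits(), the TCP/IP sockopt paths, and so on). They mark functions with unusually large stack frames for PaX/grsecurity's stack sanitizing, which records how deep the kernel stack grew so it can be cleared on return to user space. A schematic sketch only; handle_big_request() is hypothetical, and pax_track_stack() itself is declared by the PaX half of the patch:

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	static int handle_big_request(void __user *arg)
	{
		char scratch[2048];	/* large frame, hence worth tracking */

		pax_track_stack();

		if (copy_from_user(scratch, arg, sizeof(scratch)))
			return -EFAULT;
		/* ... work on scratch ... */
		return 0;
	}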
68033 diff -urNp linux-2.6.32.43/net/can/bcm.c linux-2.6.32.43/net/can/bcm.c
68034 --- linux-2.6.32.43/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
68035 +++ linux-2.6.32.43/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
68036 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
68037 struct bcm_sock *bo = bcm_sk(sk);
68038 struct bcm_op *op;
68039
68040 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68041 + seq_printf(m, ">>> socket %p", NULL);
68042 + seq_printf(m, " / sk %p", NULL);
68043 + seq_printf(m, " / bo %p", NULL);
68044 +#else
68045 seq_printf(m, ">>> socket %p", sk->sk_socket);
68046 seq_printf(m, " / sk %p", sk);
68047 seq_printf(m, " / bo %p", bo);
68048 +#endif
68049 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
68050 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
68051 seq_printf(m, " <<<\n");
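The bcm_proc_show() hunk shows the CONFIG_GRKERNSEC_HIDESYM treatment that recurs all through this patch (atm/proc.c earlier, inet_diag, the tcp/udp/raw seq_file handlers, af_key below): wherever a kernel object's address would land in a world-readable /proc file, the HIDESYM build prints NULL instead, so those files cannot be harvested for kernel pointers. The idiom is always the same conditional wrapped around the existing seq_printf(); obj here stands for whichever pointer the original code exposed (sk, tw, req, ...):

	#ifdef CONFIG_GRKERNSEC_HIDESYM
		seq_printf(m, "%p ", NULL);	/* hide the kernel address */
	#else
		seq_printf(m, "%p ", obj);	/* original behaviour */
	#endif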
68052 diff -urNp linux-2.6.32.43/net/core/dev.c linux-2.6.32.43/net/core/dev.c
68053 --- linux-2.6.32.43/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
68054 +++ linux-2.6.32.43/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
68055 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
68056 if (no_module && capable(CAP_NET_ADMIN))
68057 no_module = request_module("netdev-%s", name);
68058 if (no_module && capable(CAP_SYS_MODULE)) {
68059 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
68060 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
68061 +#else
68062 if (!request_module("%s", name))
68063 pr_err("Loading kernel module for a network device "
68064 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
68065 "instead\n", name);
68066 +#endif
68067 }
68068 }
68069 EXPORT_SYMBOL(dev_load);
68070 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
68071 }
68072 EXPORT_SYMBOL(netif_rx_ni);
68073
68074 -static void net_tx_action(struct softirq_action *h)
68075 +static void net_tx_action(void)
68076 {
68077 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68078
68079 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
68080 EXPORT_SYMBOL(netif_napi_del);
68081
68082
68083 -static void net_rx_action(struct softirq_action *h)
68084 +static void net_rx_action(void)
68085 {
68086 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
68087 unsigned long time_limit = jiffies + 2;
68088 diff -urNp linux-2.6.32.43/net/core/flow.c linux-2.6.32.43/net/core/flow.c
68089 --- linux-2.6.32.43/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
68090 +++ linux-2.6.32.43/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
68091 @@ -35,11 +35,11 @@ struct flow_cache_entry {
68092 atomic_t *object_ref;
68093 };
68094
68095 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
68096 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
68097
68098 static u32 flow_hash_shift;
68099 #define flow_hash_size (1 << flow_hash_shift)
68100 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
68101 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
68102
68103 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
68104
68105 @@ -52,7 +52,7 @@ struct flow_percpu_info {
68106 u32 hash_rnd;
68107 int count;
68108 };
68109 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
68110 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
68111
68112 #define flow_hash_rnd_recalc(cpu) \
68113 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
68114 @@ -69,7 +69,7 @@ struct flow_flush_info {
68115 atomic_t cpuleft;
68116 struct completion completion;
68117 };
68118 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
68119 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
68120
68121 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
68122
68123 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
68124 if (fle->family == family &&
68125 fle->dir == dir &&
68126 flow_key_compare(key, &fle->key) == 0) {
68127 - if (fle->genid == atomic_read(&flow_cache_genid)) {
68128 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
68129 void *ret = fle->object;
68130
68131 if (ret)
68132 @@ -228,7 +228,7 @@ nocache:
68133 err = resolver(net, key, family, dir, &obj, &obj_ref);
68134
68135 if (fle && !err) {
68136 - fle->genid = atomic_read(&flow_cache_genid);
68137 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
68138
68139 if (fle->object)
68140 atomic_dec(fle->object_ref);
68141 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
68142
68143 fle = flow_table(cpu)[i];
68144 for (; fle; fle = fle->next) {
68145 - unsigned genid = atomic_read(&flow_cache_genid);
68146 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
68147
68148 if (!fle->object || fle->genid == genid)
68149 continue;
68150 diff -urNp linux-2.6.32.43/net/core/skbuff.c linux-2.6.32.43/net/core/skbuff.c
68151 --- linux-2.6.32.43/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
68152 +++ linux-2.6.32.43/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
68153 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
68154 struct sk_buff *frag_iter;
68155 struct sock *sk = skb->sk;
68156
68157 + pax_track_stack();
68158 +
68159 /*
68160 * __skb_splice_bits() only fails if the output has no room left,
68161 * so no point in going over the frag_list for the error case.
68162 diff -urNp linux-2.6.32.43/net/core/sock.c linux-2.6.32.43/net/core/sock.c
68163 --- linux-2.6.32.43/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
68164 +++ linux-2.6.32.43/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
68165 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
68166 break;
68167
68168 case SO_PEERCRED:
68169 + {
68170 + struct ucred peercred;
68171 if (len > sizeof(sk->sk_peercred))
68172 len = sizeof(sk->sk_peercred);
68173 - if (copy_to_user(optval, &sk->sk_peercred, len))
68174 + peercred = sk->sk_peercred;
68175 + if (copy_to_user(optval, &peercred, len))
68176 return -EFAULT;
68177 goto lenout;
68178 + }
68179
68180 case SO_PEERNAME:
68181 {
68182 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
68183 */
68184 smp_wmb();
68185 atomic_set(&sk->sk_refcnt, 1);
68186 - atomic_set(&sk->sk_drops, 0);
68187 + atomic_set_unchecked(&sk->sk_drops, 0);
68188 }
68189 EXPORT_SYMBOL(sock_init_data);
68190
68191 diff -urNp linux-2.6.32.43/net/decnet/sysctl_net_decnet.c linux-2.6.32.43/net/decnet/sysctl_net_decnet.c
68192 --- linux-2.6.32.43/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
68193 +++ linux-2.6.32.43/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
68194 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
68195
68196 if (len > *lenp) len = *lenp;
68197
68198 - if (copy_to_user(buffer, addr, len))
68199 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
68200 return -EFAULT;
68201
68202 *lenp = len;
68203 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
68204
68205 if (len > *lenp) len = *lenp;
68206
68207 - if (copy_to_user(buffer, devname, len))
68208 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
68209 return -EFAULT;
68210
68211 *lenp = len;
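Both decnet sysctl handlers above copy out of a fixed-size on-stack buffer (addr[] and devname[]) with a length that was previously clamped only against the caller's *lenp; the added len > sizeof checks make the handler fail rather than ever read past its own buffer. The general shape, reduced to a stand-alone helper with placeholder names:

	#include <string.h>

	/* Copy at most len bytes, refusing outright if that would overread src. */
	static int copy_bounded(char *dst, size_t dst_len,
				const char *src, size_t src_size, size_t len)
	{
		if (len > dst_len)
			len = dst_len;		/* honour the caller's buffer */
		if (len > src_size)
			return -1;		/* the added guard: never overread src */
		memcpy(dst, src, len);
		return 0;
	}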
68212 diff -urNp linux-2.6.32.43/net/econet/Kconfig linux-2.6.32.43/net/econet/Kconfig
68213 --- linux-2.6.32.43/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
68214 +++ linux-2.6.32.43/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
68215 @@ -4,7 +4,7 @@
68216
68217 config ECONET
68218 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68219 - depends on EXPERIMENTAL && INET
68220 + depends on EXPERIMENTAL && INET && BROKEN
68221 ---help---
68222 Econet is a fairly old and slow networking protocol mainly used by
68223 Acorn computers to access file and print servers. It uses native
68224 diff -urNp linux-2.6.32.43/net/ieee802154/dgram.c linux-2.6.32.43/net/ieee802154/dgram.c
68225 --- linux-2.6.32.43/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
68226 +++ linux-2.6.32.43/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
68227 @@ -318,7 +318,7 @@ out:
68228 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
68229 {
68230 if (sock_queue_rcv_skb(sk, skb) < 0) {
68231 - atomic_inc(&sk->sk_drops);
68232 + atomic_inc_unchecked(&sk->sk_drops);
68233 kfree_skb(skb);
68234 return NET_RX_DROP;
68235 }
68236 diff -urNp linux-2.6.32.43/net/ieee802154/raw.c linux-2.6.32.43/net/ieee802154/raw.c
68237 --- linux-2.6.32.43/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
68238 +++ linux-2.6.32.43/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
68239 @@ -206,7 +206,7 @@ out:
68240 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
68241 {
68242 if (sock_queue_rcv_skb(sk, skb) < 0) {
68243 - atomic_inc(&sk->sk_drops);
68244 + atomic_inc_unchecked(&sk->sk_drops);
68245 kfree_skb(skb);
68246 return NET_RX_DROP;
68247 }
68248 diff -urNp linux-2.6.32.43/net/ipv4/inet_diag.c linux-2.6.32.43/net/ipv4/inet_diag.c
68249 --- linux-2.6.32.43/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
68250 +++ linux-2.6.32.43/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
68251 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
68252 r->idiag_retrans = 0;
68253
68254 r->id.idiag_if = sk->sk_bound_dev_if;
68255 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68256 + r->id.idiag_cookie[0] = 0;
68257 + r->id.idiag_cookie[1] = 0;
68258 +#else
68259 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68260 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68261 +#endif
68262
68263 r->id.idiag_sport = inet->sport;
68264 r->id.idiag_dport = inet->dport;
68265 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
68266 r->idiag_family = tw->tw_family;
68267 r->idiag_retrans = 0;
68268 r->id.idiag_if = tw->tw_bound_dev_if;
68269 +
68270 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68271 + r->id.idiag_cookie[0] = 0;
68272 + r->id.idiag_cookie[1] = 0;
68273 +#else
68274 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68275 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68276 +#endif
68277 +
68278 r->id.idiag_sport = tw->tw_sport;
68279 r->id.idiag_dport = tw->tw_dport;
68280 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68281 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
68282 if (sk == NULL)
68283 goto unlock;
68284
68285 +#ifndef CONFIG_GRKERNSEC_HIDESYM
68286 err = -ESTALE;
68287 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68288 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68289 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68290 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68291 goto out;
68292 +#endif
68293
68294 err = -ENOMEM;
68295 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68296 @@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
68297 r->idiag_retrans = req->retrans;
68298
68299 r->id.idiag_if = sk->sk_bound_dev_if;
68300 +
68301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68302 + r->id.idiag_cookie[0] = 0;
68303 + r->id.idiag_cookie[1] = 0;
68304 +#else
68305 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68306 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68307 +#endif
68308
68309 tmo = req->expires - jiffies;
68310 if (tmo < 0)
68311 diff -urNp linux-2.6.32.43/net/ipv4/inet_hashtables.c linux-2.6.32.43/net/ipv4/inet_hashtables.c
68312 --- linux-2.6.32.43/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68313 +++ linux-2.6.32.43/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
68314 @@ -18,11 +18,14 @@
68315 #include <linux/sched.h>
68316 #include <linux/slab.h>
68317 #include <linux/wait.h>
68318 +#include <linux/security.h>
68319
68320 #include <net/inet_connection_sock.h>
68321 #include <net/inet_hashtables.h>
68322 #include <net/ip.h>
68323
68324 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68325 +
68326 /*
68327 * Allocate and initialize a new local port bind bucket.
68328 * The bindhash mutex for snum's hash chain must be held here.
68329 @@ -490,6 +493,8 @@ ok:
68330 }
68331 spin_unlock(&head->lock);
68332
68333 + gr_update_task_in_ip_table(current, inet_sk(sk));
68334 +
68335 if (tw) {
68336 inet_twsk_deschedule(tw, death_row);
68337 inet_twsk_put(tw);
68338 diff -urNp linux-2.6.32.43/net/ipv4/inetpeer.c linux-2.6.32.43/net/ipv4/inetpeer.c
68339 --- linux-2.6.32.43/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
68340 +++ linux-2.6.32.43/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
68341 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
68342 struct inet_peer *p, *n;
68343 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
68344
68345 + pax_track_stack();
68346 +
68347 /* Look up for the address quickly. */
68348 read_lock_bh(&peer_pool_lock);
68349 p = lookup(daddr, NULL);
68350 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
68351 return NULL;
68352 n->v4daddr = daddr;
68353 atomic_set(&n->refcnt, 1);
68354 - atomic_set(&n->rid, 0);
68355 + atomic_set_unchecked(&n->rid, 0);
68356 n->ip_id_count = secure_ip_id(daddr);
68357 n->tcp_ts_stamp = 0;
68358
68359 diff -urNp linux-2.6.32.43/net/ipv4/ip_fragment.c linux-2.6.32.43/net/ipv4/ip_fragment.c
68360 --- linux-2.6.32.43/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
68361 +++ linux-2.6.32.43/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
68362 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
68363 return 0;
68364
68365 start = qp->rid;
68366 - end = atomic_inc_return(&peer->rid);
68367 + end = atomic_inc_return_unchecked(&peer->rid);
68368 qp->rid = end;
68369
68370 rc = qp->q.fragments && (end - start) > max;
68371 diff -urNp linux-2.6.32.43/net/ipv4/ip_sockglue.c linux-2.6.32.43/net/ipv4/ip_sockglue.c
68372 --- linux-2.6.32.43/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68373 +++ linux-2.6.32.43/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68374 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
68375 int val;
68376 int len;
68377
68378 + pax_track_stack();
68379 +
68380 if (level != SOL_IP)
68381 return -EOPNOTSUPP;
68382
68383 diff -urNp linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c
68384 --- linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
68385 +++ linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
68386 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
68387 private = &tmp;
68388 }
68389 #endif
68390 + memset(&info, 0, sizeof(info));
68391 info.valid_hooks = t->valid_hooks;
68392 memcpy(info.hook_entry, private->hook_entry,
68393 sizeof(info.hook_entry));
68394 diff -urNp linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c
68395 --- linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
68396 +++ linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
68397 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
68398 private = &tmp;
68399 }
68400 #endif
68401 + memset(&info, 0, sizeof(info));
68402 info.valid_hooks = t->valid_hooks;
68403 memcpy(info.hook_entry, private->hook_entry,
68404 sizeof(info.hook_entry));
68405 diff -urNp linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c
68406 --- linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
68407 +++ linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
68408 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
68409
68410 *len = 0;
68411
68412 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68413 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68414 if (*octets == NULL) {
68415 if (net_ratelimit())
68416 printk("OOM in bsalg (%d)\n", __LINE__);
68417 diff -urNp linux-2.6.32.43/net/ipv4/raw.c linux-2.6.32.43/net/ipv4/raw.c
68418 --- linux-2.6.32.43/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
68419 +++ linux-2.6.32.43/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
68420 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
68421 /* Charge it to the socket. */
68422
68423 if (sock_queue_rcv_skb(sk, skb) < 0) {
68424 - atomic_inc(&sk->sk_drops);
68425 + atomic_inc_unchecked(&sk->sk_drops);
68426 kfree_skb(skb);
68427 return NET_RX_DROP;
68428 }
68429 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
68430 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68431 {
68432 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68433 - atomic_inc(&sk->sk_drops);
68434 + atomic_inc_unchecked(&sk->sk_drops);
68435 kfree_skb(skb);
68436 return NET_RX_DROP;
68437 }
68438 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
68439
68440 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68441 {
68442 + struct icmp_filter filter;
68443 +
68444 + if (optlen < 0)
68445 + return -EINVAL;
68446 if (optlen > sizeof(struct icmp_filter))
68447 optlen = sizeof(struct icmp_filter);
68448 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68449 + if (copy_from_user(&filter, optval, optlen))
68450 return -EFAULT;
68451 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
68452 +
68453 return 0;
68454 }
68455
68456 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68457 {
68458 + struct icmp_filter filter;
68459 int len, ret = -EFAULT;
68460
68461 if (get_user(len, optlen))
68462 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
68463 if (len > sizeof(struct icmp_filter))
68464 len = sizeof(struct icmp_filter);
68465 ret = -EFAULT;
68466 + memcpy(&filter, &raw_sk(sk)->filter, len);
68467 if (put_user(len, optlen) ||
68468 - copy_to_user(optval, &raw_sk(sk)->filter, len))
68469 + copy_to_user(optval, &filter, len))
68470 goto out;
68471 ret = 0;
68472 out: return ret;
68473 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
68474 sk_wmem_alloc_get(sp),
68475 sk_rmem_alloc_get(sp),
68476 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68477 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68478 + atomic_read(&sp->sk_refcnt),
68479 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68480 + NULL,
68481 +#else
68482 + sp,
68483 +#endif
68484 + atomic_read_unchecked(&sp->sk_drops));
68485 }
68486
68487 static int raw_seq_show(struct seq_file *seq, void *v)
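The raw_seticmpfilter()/raw_geticmpfilter() changes above do two things: they reject a negative optlen up front instead of relying on the signed/unsigned promotion in the size comparison, and they stage the ICMP filter in an on-stack copy so copy_from_user()/copy_to_user() never operate directly on the filter embedded in the socket (a faulting copy cannot leave it half-written, and the copy size is checked against a plain local object). The same bounce-buffer shape as a sketch; example_filter and example_set_filter() are hypothetical:

	#include <linux/uaccess.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	struct example_filter { unsigned int data[8]; };	/* placeholder type */

	static int example_set_filter(struct example_filter *dst,
				      const char __user *optval, int optlen)
	{
		struct example_filter tmp;

		if (optlen < 0)
			return -EINVAL;			/* reject before the size clamp */
		if (optlen > sizeof(tmp))
			optlen = sizeof(tmp);
		if (copy_from_user(&tmp, optval, optlen))
			return -EFAULT;			/* a partial copy never touches *dst */
		memcpy(dst, &tmp, optlen);
		return 0;
	}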
68488 diff -urNp linux-2.6.32.43/net/ipv4/route.c linux-2.6.32.43/net/ipv4/route.c
68489 --- linux-2.6.32.43/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
68490 +++ linux-2.6.32.43/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
68491 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
68492
68493 static inline int rt_genid(struct net *net)
68494 {
68495 - return atomic_read(&net->ipv4.rt_genid);
68496 + return atomic_read_unchecked(&net->ipv4.rt_genid);
68497 }
68498
68499 #ifdef CONFIG_PROC_FS
68500 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
68501 unsigned char shuffle;
68502
68503 get_random_bytes(&shuffle, sizeof(shuffle));
68504 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68505 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68506 }
68507
68508 /*
68509 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
68510
68511 static __net_init int rt_secret_timer_init(struct net *net)
68512 {
68513 - atomic_set(&net->ipv4.rt_genid,
68514 + atomic_set_unchecked(&net->ipv4.rt_genid,
68515 (int) ((num_physpages ^ (num_physpages>>8)) ^
68516 (jiffies ^ (jiffies >> 7))));
68517
68518 diff -urNp linux-2.6.32.43/net/ipv4/tcp.c linux-2.6.32.43/net/ipv4/tcp.c
68519 --- linux-2.6.32.43/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
68520 +++ linux-2.6.32.43/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
68521 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
68522 int val;
68523 int err = 0;
68524
68525 + pax_track_stack();
68526 +
68527 /* This is a string value all the others are int's */
68528 if (optname == TCP_CONGESTION) {
68529 char name[TCP_CA_NAME_MAX];
68530 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
68531 struct tcp_sock *tp = tcp_sk(sk);
68532 int val, len;
68533
68534 + pax_track_stack();
68535 +
68536 if (get_user(len, optlen))
68537 return -EFAULT;
68538
68539 diff -urNp linux-2.6.32.43/net/ipv4/tcp_ipv4.c linux-2.6.32.43/net/ipv4/tcp_ipv4.c
68540 --- linux-2.6.32.43/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
68541 +++ linux-2.6.32.43/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
68542 @@ -84,6 +84,9 @@
68543 int sysctl_tcp_tw_reuse __read_mostly;
68544 int sysctl_tcp_low_latency __read_mostly;
68545
68546 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68547 +extern int grsec_enable_blackhole;
68548 +#endif
68549
68550 #ifdef CONFIG_TCP_MD5SIG
68551 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68552 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68553 return 0;
68554
68555 reset:
68556 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68557 + if (!grsec_enable_blackhole)
68558 +#endif
68559 tcp_v4_send_reset(rsk, skb);
68560 discard:
68561 kfree_skb(skb);
68562 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
68563 TCP_SKB_CB(skb)->sacked = 0;
68564
68565 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68566 - if (!sk)
68567 + if (!sk) {
68568 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68569 + ret = 1;
68570 +#endif
68571 goto no_tcp_socket;
68572 + }
68573
68574 process:
68575 - if (sk->sk_state == TCP_TIME_WAIT)
68576 + if (sk->sk_state == TCP_TIME_WAIT) {
68577 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68578 + ret = 2;
68579 +#endif
68580 goto do_time_wait;
68581 + }
68582
68583 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
68584 goto discard_and_relse;
68585 @@ -1650,6 +1664,10 @@ no_tcp_socket:
68586 bad_packet:
68587 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68588 } else {
68589 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68590 + if (!grsec_enable_blackhole || (ret == 1 &&
68591 + (skb->dev->flags & IFF_LOOPBACK)))
68592 +#endif
68593 tcp_v4_send_reset(NULL, skb);
68594 }
68595
68596 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
68597 0, /* non standard timer */
68598 0, /* open_requests have no inode */
68599 atomic_read(&sk->sk_refcnt),
68600 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68601 + NULL,
68602 +#else
68603 req,
68604 +#endif
68605 len);
68606 }
68607
68608 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
68609 sock_i_uid(sk),
68610 icsk->icsk_probes_out,
68611 sock_i_ino(sk),
68612 - atomic_read(&sk->sk_refcnt), sk,
68613 + atomic_read(&sk->sk_refcnt),
68614 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68615 + NULL,
68616 +#else
68617 + sk,
68618 +#endif
68619 jiffies_to_clock_t(icsk->icsk_rto),
68620 jiffies_to_clock_t(icsk->icsk_ack.ato),
68621 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68622 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
68623 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
68624 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68625 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68626 - atomic_read(&tw->tw_refcnt), tw, len);
68627 + atomic_read(&tw->tw_refcnt),
68628 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68629 + NULL,
68630 +#else
68631 + tw,
68632 +#endif
68633 + len);
68634 }
68635
68636 #define TMPSZ 150
68637 diff -urNp linux-2.6.32.43/net/ipv4/tcp_minisocks.c linux-2.6.32.43/net/ipv4/tcp_minisocks.c
68638 --- linux-2.6.32.43/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
68639 +++ linux-2.6.32.43/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
68640 @@ -26,6 +26,10 @@
68641 #include <net/inet_common.h>
68642 #include <net/xfrm.h>
68643
68644 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68645 +extern int grsec_enable_blackhole;
68646 +#endif
68647 +
68648 #ifdef CONFIG_SYSCTL
68649 #define SYNC_INIT 0 /* let the user enable it */
68650 #else
68651 @@ -672,6 +676,10 @@ listen_overflow:
68652
68653 embryonic_reset:
68654 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68655 +
68656 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68657 + if (!grsec_enable_blackhole)
68658 +#endif
68659 if (!(flg & TCP_FLAG_RST))
68660 req->rsk_ops->send_reset(sk, skb);
68661
68662 diff -urNp linux-2.6.32.43/net/ipv4/tcp_output.c linux-2.6.32.43/net/ipv4/tcp_output.c
68663 --- linux-2.6.32.43/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
68664 +++ linux-2.6.32.43/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
68665 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
68666 __u8 *md5_hash_location;
68667 int mss;
68668
68669 + pax_track_stack();
68670 +
68671 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
68672 if (skb == NULL)
68673 return NULL;
68674 diff -urNp linux-2.6.32.43/net/ipv4/tcp_probe.c linux-2.6.32.43/net/ipv4/tcp_probe.c
68675 --- linux-2.6.32.43/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
68676 +++ linux-2.6.32.43/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
68677 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
68678 if (cnt + width >= len)
68679 break;
68680
68681 - if (copy_to_user(buf + cnt, tbuf, width))
68682 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68683 return -EFAULT;
68684 cnt += width;
68685 }
68686 diff -urNp linux-2.6.32.43/net/ipv4/tcp_timer.c linux-2.6.32.43/net/ipv4/tcp_timer.c
68687 --- linux-2.6.32.43/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
68688 +++ linux-2.6.32.43/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
68689 @@ -21,6 +21,10 @@
68690 #include <linux/module.h>
68691 #include <net/tcp.h>
68692
68693 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68694 +extern int grsec_lastack_retries;
68695 +#endif
68696 +
68697 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68698 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68699 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68700 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
68701 }
68702 }
68703
68704 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68705 + if ((sk->sk_state == TCP_LAST_ACK) &&
68706 + (grsec_lastack_retries > 0) &&
68707 + (grsec_lastack_retries < retry_until))
68708 + retry_until = grsec_lastack_retries;
68709 +#endif
68710 +
68711 if (retransmits_timed_out(sk, retry_until)) {
68712 /* Has it gone just too far? */
68713 tcp_write_err(sk);
68714 diff -urNp linux-2.6.32.43/net/ipv4/udp.c linux-2.6.32.43/net/ipv4/udp.c
68715 --- linux-2.6.32.43/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
68716 +++ linux-2.6.32.43/net/ipv4/udp.c 2011-07-13 17:23:27.000000000 -0400
68717 @@ -86,6 +86,7 @@
68718 #include <linux/types.h>
68719 #include <linux/fcntl.h>
68720 #include <linux/module.h>
68721 +#include <linux/security.h>
68722 #include <linux/socket.h>
68723 #include <linux/sockios.h>
68724 #include <linux/igmp.h>
68725 @@ -106,6 +107,10 @@
68726 #include <net/xfrm.h>
68727 #include "udp_impl.h"
68728
68729 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68730 +extern int grsec_enable_blackhole;
68731 +#endif
68732 +
68733 struct udp_table udp_table;
68734 EXPORT_SYMBOL(udp_table);
68735
68736 @@ -371,6 +376,9 @@ found:
68737 return s;
68738 }
68739
68740 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68741 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68742 +
68743 /*
68744 * This routine is called by the ICMP module when it gets some
68745 * sort of error condition. If err < 0 then the socket should
68746 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68747 dport = usin->sin_port;
68748 if (dport == 0)
68749 return -EINVAL;
68750 +
68751 + err = gr_search_udp_sendmsg(sk, usin);
68752 + if (err)
68753 + return err;
68754 } else {
68755 if (sk->sk_state != TCP_ESTABLISHED)
68756 return -EDESTADDRREQ;
68757 +
68758 + err = gr_search_udp_sendmsg(sk, NULL);
68759 + if (err)
68760 + return err;
68761 +
68762 daddr = inet->daddr;
68763 dport = inet->dport;
68764 /* Open fast path for connected socket.
68765 @@ -945,6 +962,10 @@ try_again:
68766 if (!skb)
68767 goto out;
68768
68769 + err = gr_search_udp_recvmsg(sk, skb);
68770 + if (err)
68771 + goto out_free;
68772 +
68773 ulen = skb->len - sizeof(struct udphdr);
68774 copied = len;
68775 if (copied > ulen)
68776 @@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
68777 if (rc == -ENOMEM) {
68778 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68779 is_udplite);
68780 - atomic_inc(&sk->sk_drops);
68781 + atomic_inc_unchecked(&sk->sk_drops);
68782 }
68783 goto drop;
68784 }
68785 @@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68786 goto csum_error;
68787
68788 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68789 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68790 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68791 +#endif
68792 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68793
68794 /*
68795 @@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
68796 sk_wmem_alloc_get(sp),
68797 sk_rmem_alloc_get(sp),
68798 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68799 - atomic_read(&sp->sk_refcnt), sp,
68800 - atomic_read(&sp->sk_drops), len);
68801 + atomic_read(&sp->sk_refcnt),
68802 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68803 + NULL,
68804 +#else
68805 + sp,
68806 +#endif
68807 + atomic_read_unchecked(&sp->sk_drops), len);
68808 }
68809
68810 int udp4_seq_show(struct seq_file *seq, void *v)
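The CONFIG_GRKERNSEC_BLACKHOLE hunks in tcp_ipv4.c, tcp_minisocks.c, tcp_timer.c and udp.c above (mirrored for IPv6 below) implement the grsecurity TCP/UDP blackhole: with the grsec_enable_blackhole flag set (exported by the grsecurity core elsewhere in the patch), the stack silently drops segments and datagrams it would normally answer with a RST or an ICMP port-unreachable, so closed ports give a scanner no feedback. The ret bookkeeping in tcp_v4_rcv() separates "no socket found" (ret == 1), which stays answerable on loopback, from "TIME_WAIT" (ret == 2), which is suppressed in this error path whenever the blackhole is on. The guard itself is always the same pattern, here in its UDP form:

	#ifdef CONFIG_GRKERNSEC_BLACKHOLE
		if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
	#endif
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);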
68811 diff -urNp linux-2.6.32.43/net/ipv6/inet6_connection_sock.c linux-2.6.32.43/net/ipv6/inet6_connection_sock.c
68812 --- linux-2.6.32.43/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
68813 +++ linux-2.6.32.43/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
68814 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
68815 #ifdef CONFIG_XFRM
68816 {
68817 struct rt6_info *rt = (struct rt6_info *)dst;
68818 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68819 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68820 }
68821 #endif
68822 }
68823 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
68824 #ifdef CONFIG_XFRM
68825 if (dst) {
68826 struct rt6_info *rt = (struct rt6_info *)dst;
68827 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68828 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68829 sk->sk_dst_cache = NULL;
68830 dst_release(dst);
68831 dst = NULL;
68832 diff -urNp linux-2.6.32.43/net/ipv6/inet6_hashtables.c linux-2.6.32.43/net/ipv6/inet6_hashtables.c
68833 --- linux-2.6.32.43/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68834 +++ linux-2.6.32.43/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
68835 @@ -118,7 +118,7 @@ out:
68836 }
68837 EXPORT_SYMBOL(__inet6_lookup_established);
68838
68839 -static int inline compute_score(struct sock *sk, struct net *net,
68840 +static inline int compute_score(struct sock *sk, struct net *net,
68841 const unsigned short hnum,
68842 const struct in6_addr *daddr,
68843 const int dif)
68844 diff -urNp linux-2.6.32.43/net/ipv6/ipv6_sockglue.c linux-2.6.32.43/net/ipv6/ipv6_sockglue.c
68845 --- linux-2.6.32.43/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68846 +++ linux-2.6.32.43/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68847 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
68848 int val, valbool;
68849 int retv = -ENOPROTOOPT;
68850
68851 + pax_track_stack();
68852 +
68853 if (optval == NULL)
68854 val=0;
68855 else {
68856 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
68857 int len;
68858 int val;
68859
68860 + pax_track_stack();
68861 +
68862 if (ip6_mroute_opt(optname))
68863 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68864
68865 diff -urNp linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c
68866 --- linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
68867 +++ linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
68868 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
68869 private = &tmp;
68870 }
68871 #endif
68872 + memset(&info, 0, sizeof(info));
68873 info.valid_hooks = t->valid_hooks;
68874 memcpy(info.hook_entry, private->hook_entry,
68875 sizeof(info.hook_entry));
68876 diff -urNp linux-2.6.32.43/net/ipv6/raw.c linux-2.6.32.43/net/ipv6/raw.c
68877 --- linux-2.6.32.43/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
68878 +++ linux-2.6.32.43/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
68879 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
68880 {
68881 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
68882 skb_checksum_complete(skb)) {
68883 - atomic_inc(&sk->sk_drops);
68884 + atomic_inc_unchecked(&sk->sk_drops);
68885 kfree_skb(skb);
68886 return NET_RX_DROP;
68887 }
68888
68889 /* Charge it to the socket. */
68890 if (sock_queue_rcv_skb(sk,skb)<0) {
68891 - atomic_inc(&sk->sk_drops);
68892 + atomic_inc_unchecked(&sk->sk_drops);
68893 kfree_skb(skb);
68894 return NET_RX_DROP;
68895 }
68896 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68897 struct raw6_sock *rp = raw6_sk(sk);
68898
68899 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68900 - atomic_inc(&sk->sk_drops);
68901 + atomic_inc_unchecked(&sk->sk_drops);
68902 kfree_skb(skb);
68903 return NET_RX_DROP;
68904 }
68905 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68906
68907 if (inet->hdrincl) {
68908 if (skb_checksum_complete(skb)) {
68909 - atomic_inc(&sk->sk_drops);
68910 + atomic_inc_unchecked(&sk->sk_drops);
68911 kfree_skb(skb);
68912 return NET_RX_DROP;
68913 }
68914 @@ -518,7 +518,7 @@ csum_copy_err:
68915 as some normal condition.
68916 */
68917 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
68918 - atomic_inc(&sk->sk_drops);
68919 + atomic_inc_unchecked(&sk->sk_drops);
68920 goto out;
68921 }
68922
68923 @@ -600,7 +600,7 @@ out:
68924 return err;
68925 }
68926
68927 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68928 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68929 struct flowi *fl, struct rt6_info *rt,
68930 unsigned int flags)
68931 {
68932 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
68933 u16 proto;
68934 int err;
68935
68936 + pax_track_stack();
68937 +
68938 /* Rough check on arithmetic overflow,
68939 better check is made in ip6_append_data().
68940 */
68941 @@ -916,12 +918,17 @@ do_confirm:
68942 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68943 char __user *optval, int optlen)
68944 {
68945 + struct icmp6_filter filter;
68946 +
68947 switch (optname) {
68948 case ICMPV6_FILTER:
68949 + if (optlen < 0)
68950 + return -EINVAL;
68951 if (optlen > sizeof(struct icmp6_filter))
68952 optlen = sizeof(struct icmp6_filter);
68953 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68954 + if (copy_from_user(&filter, optval, optlen))
68955 return -EFAULT;
68956 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68957 return 0;
68958 default:
68959 return -ENOPROTOOPT;
68960 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
68961 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
68962 char __user *optval, int __user *optlen)
68963 {
68964 + struct icmp6_filter filter;
68965 int len;
68966
68967 switch (optname) {
68968 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
68969 len = sizeof(struct icmp6_filter);
68970 if (put_user(len, optlen))
68971 return -EFAULT;
68972 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68973 + memcpy(&filter, &raw6_sk(sk)->filter, len);
68974 + if (copy_to_user(optval, &filter, len))
68975 return -EFAULT;
68976 return 0;
68977 default:
68978 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
68979 0, 0L, 0,
68980 sock_i_uid(sp), 0,
68981 sock_i_ino(sp),
68982 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68983 + atomic_read(&sp->sk_refcnt),
68984 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68985 + NULL,
68986 +#else
68987 + sp,
68988 +#endif
68989 + atomic_read_unchecked(&sp->sk_drops));
68990 }
68991
68992 static int raw6_seq_show(struct seq_file *seq, void *v)
68993 diff -urNp linux-2.6.32.43/net/ipv6/tcp_ipv6.c linux-2.6.32.43/net/ipv6/tcp_ipv6.c
68994 --- linux-2.6.32.43/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
68995 +++ linux-2.6.32.43/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
68996 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68997 }
68998 #endif
68999
69000 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69001 +extern int grsec_enable_blackhole;
69002 +#endif
69003 +
69004 static void tcp_v6_hash(struct sock *sk)
69005 {
69006 if (sk->sk_state != TCP_CLOSE) {
69007 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
69008 return 0;
69009
69010 reset:
69011 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69012 + if (!grsec_enable_blackhole)
69013 +#endif
69014 tcp_v6_send_reset(sk, skb);
69015 discard:
69016 if (opt_skb)
69017 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
69018 TCP_SKB_CB(skb)->sacked = 0;
69019
69020 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
69021 - if (!sk)
69022 + if (!sk) {
69023 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69024 + ret = 1;
69025 +#endif
69026 goto no_tcp_socket;
69027 + }
69028
69029 process:
69030 - if (sk->sk_state == TCP_TIME_WAIT)
69031 + if (sk->sk_state == TCP_TIME_WAIT) {
69032 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69033 + ret = 2;
69034 +#endif
69035 goto do_time_wait;
69036 + }
69037
69038 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
69039 goto discard_and_relse;
69040 @@ -1700,6 +1715,10 @@ no_tcp_socket:
69041 bad_packet:
69042 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69043 } else {
69044 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69045 + if (!grsec_enable_blackhole || (ret == 1 &&
69046 + (skb->dev->flags & IFF_LOOPBACK)))
69047 +#endif
69048 tcp_v6_send_reset(NULL, skb);
69049 }
69050
69051 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
69052 uid,
69053 0, /* non standard timer */
69054 0, /* open_requests have no inode */
69055 - 0, req);
69056 + 0,
69057 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69058 + NULL
69059 +#else
69060 + req
69061 +#endif
69062 + );
69063 }
69064
69065 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
69066 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
69067 sock_i_uid(sp),
69068 icsk->icsk_probes_out,
69069 sock_i_ino(sp),
69070 - atomic_read(&sp->sk_refcnt), sp,
69071 + atomic_read(&sp->sk_refcnt),
69072 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69073 + NULL,
69074 +#else
69075 + sp,
69076 +#endif
69077 jiffies_to_clock_t(icsk->icsk_rto),
69078 jiffies_to_clock_t(icsk->icsk_ack.ato),
69079 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
69080 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
69081 dest->s6_addr32[2], dest->s6_addr32[3], destp,
69082 tw->tw_substate, 0, 0,
69083 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69084 - atomic_read(&tw->tw_refcnt), tw);
69085 + atomic_read(&tw->tw_refcnt),
69086 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69087 + NULL
69088 +#else
69089 + tw
69090 +#endif
69091 + );
69092 }
69093
69094 static int tcp6_seq_show(struct seq_file *seq, void *v)
69095 diff -urNp linux-2.6.32.43/net/ipv6/udp.c linux-2.6.32.43/net/ipv6/udp.c
69096 --- linux-2.6.32.43/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
69097 +++ linux-2.6.32.43/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
69098 @@ -49,6 +49,10 @@
69099 #include <linux/seq_file.h>
69100 #include "udp_impl.h"
69101
69102 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69103 +extern int grsec_enable_blackhole;
69104 +#endif
69105 +
69106 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
69107 {
69108 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
69109 @@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
69110 if (rc == -ENOMEM) {
69111 UDP6_INC_STATS_BH(sock_net(sk),
69112 UDP_MIB_RCVBUFERRORS, is_udplite);
69113 - atomic_inc(&sk->sk_drops);
69114 + atomic_inc_unchecked(&sk->sk_drops);
69115 }
69116 goto drop;
69117 }
69118 @@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69119 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
69120 proto == IPPROTO_UDPLITE);
69121
69122 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69123 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69124 +#endif
69125 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
69126
69127 kfree_skb(skb);
69128 @@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
69129 0, 0L, 0,
69130 sock_i_uid(sp), 0,
69131 sock_i_ino(sp),
69132 - atomic_read(&sp->sk_refcnt), sp,
69133 - atomic_read(&sp->sk_drops));
69134 + atomic_read(&sp->sk_refcnt),
69135 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69136 + NULL,
69137 +#else
69138 + sp,
69139 +#endif
69140 + atomic_read_unchecked(&sp->sk_drops));
69141 }
69142
69143 int udp6_seq_show(struct seq_file *seq, void *v)
69144 diff -urNp linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c
69145 --- linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
69146 +++ linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
69147 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
69148 add_wait_queue(&self->open_wait, &wait);
69149
69150 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
69151 - __FILE__,__LINE__, tty->driver->name, self->open_count );
69152 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69153
69154 /* As far as I can see, we protect open_count - Jean II */
69155 spin_lock_irqsave(&self->spinlock, flags);
69156 if (!tty_hung_up_p(filp)) {
69157 extra_count = 1;
69158 - self->open_count--;
69159 + local_dec(&self->open_count);
69160 }
69161 spin_unlock_irqrestore(&self->spinlock, flags);
69162 - self->blocked_open++;
69163 + local_inc(&self->blocked_open);
69164
69165 while (1) {
69166 if (tty->termios->c_cflag & CBAUD) {
69167 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
69168 }
69169
69170 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
69171 - __FILE__,__LINE__, tty->driver->name, self->open_count );
69172 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69173
69174 schedule();
69175 }
69176 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
69177 if (extra_count) {
69178 /* ++ is not atomic, so this should be protected - Jean II */
69179 spin_lock_irqsave(&self->spinlock, flags);
69180 - self->open_count++;
69181 + local_inc(&self->open_count);
69182 spin_unlock_irqrestore(&self->spinlock, flags);
69183 }
69184 - self->blocked_open--;
69185 + local_dec(&self->blocked_open);
69186
69187 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
69188 - __FILE__,__LINE__, tty->driver->name, self->open_count);
69189 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
69190
69191 if (!retval)
69192 self->flags |= ASYNC_NORMAL_ACTIVE;
69193 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
69194 }
69195 /* ++ is not atomic, so this should be protected - Jean II */
69196 spin_lock_irqsave(&self->spinlock, flags);
69197 - self->open_count++;
69198 + local_inc(&self->open_count);
69199
69200 tty->driver_data = self;
69201 self->tty = tty;
69202 spin_unlock_irqrestore(&self->spinlock, flags);
69203
69204 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
69205 - self->line, self->open_count);
69206 + self->line, local_read(&self->open_count));
69207
69208 /* Not really used by us, but lets do it anyway */
69209 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69210 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
69211 return;
69212 }
69213
69214 - if ((tty->count == 1) && (self->open_count != 1)) {
69215 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69216 /*
69217 * Uh, oh. tty->count is 1, which means that the tty
69218 * structure will be freed. state->count should always
69219 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
69220 */
69221 IRDA_DEBUG(0, "%s(), bad serial port count; "
69222 "tty->count is 1, state->count is %d\n", __func__ ,
69223 - self->open_count);
69224 - self->open_count = 1;
69225 + local_read(&self->open_count));
69226 + local_set(&self->open_count, 1);
69227 }
69228
69229 - if (--self->open_count < 0) {
69230 + if (local_dec_return(&self->open_count) < 0) {
69231 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69232 - __func__, self->line, self->open_count);
69233 - self->open_count = 0;
69234 + __func__, self->line, local_read(&self->open_count));
69235 + local_set(&self->open_count, 0);
69236 }
69237 - if (self->open_count) {
69238 + if (local_read(&self->open_count)) {
69239 spin_unlock_irqrestore(&self->spinlock, flags);
69240
69241 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69242 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
69243 tty->closing = 0;
69244 self->tty = NULL;
69245
69246 - if (self->blocked_open) {
69247 + if (local_read(&self->blocked_open)) {
69248 if (self->close_delay)
69249 schedule_timeout_interruptible(self->close_delay);
69250 wake_up_interruptible(&self->open_wait);
69251 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
69252 spin_lock_irqsave(&self->spinlock, flags);
69253 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69254 self->tty = NULL;
69255 - self->open_count = 0;
69256 + local_set(&self->open_count, 0);
69257 spin_unlock_irqrestore(&self->spinlock, flags);
69258
69259 wake_up_interruptible(&self->open_wait);
69260 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
69261 seq_putc(m, '\n');
69262
69263 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69264 - seq_printf(m, "Open count: %d\n", self->open_count);
69265 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69266 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69267 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69268
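The ircomm_tty.c conversion above turns open_count and blocked_open from plain ints into local_t and reroutes every access through local_read()/local_set()/local_inc()/local_dec_return() from <asm/local.h> (the mac80211 ieee80211_local.open_count below gets the same treatment), keeping the counters cheap while taking them out of raw integer arithmetic. For reference, the <asm/local.h> operations the converted code relies on, in a throwaway example:

	#include <asm/local.h>

	static void local_counter_example(void)
	{
		local_t count = LOCAL_INIT(0);

		local_set(&count, 1);			/* count = 1 */
		local_inc(&count);			/* count++ */
		if (local_dec_return(&count) < 0)	/* --count < 0 */
			local_set(&count, 0);
		(void)local_read(&count);
	}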
69269 diff -urNp linux-2.6.32.43/net/iucv/af_iucv.c linux-2.6.32.43/net/iucv/af_iucv.c
69270 --- linux-2.6.32.43/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
69271 +++ linux-2.6.32.43/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
69272 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
69273
69274 write_lock_bh(&iucv_sk_list.lock);
69275
69276 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69277 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69278 while (__iucv_get_sock_by_name(name)) {
69279 sprintf(name, "%08x",
69280 - atomic_inc_return(&iucv_sk_list.autobind_name));
69281 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69282 }
69283
69284 write_unlock_bh(&iucv_sk_list.lock);
69285 diff -urNp linux-2.6.32.43/net/key/af_key.c linux-2.6.32.43/net/key/af_key.c
69286 --- linux-2.6.32.43/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
69287 +++ linux-2.6.32.43/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
69288 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
69289 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69290 struct xfrm_kmaddress k;
69291
69292 + pax_track_stack();
69293 +
69294 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69295 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69296 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69297 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
69298 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
69299 else
69300 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
69301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69302 + NULL,
69303 +#else
69304 s,
69305 +#endif
69306 atomic_read(&s->sk_refcnt),
69307 sk_rmem_alloc_get(s),
69308 sk_wmem_alloc_get(s),
69309 diff -urNp linux-2.6.32.43/net/mac80211/cfg.c linux-2.6.32.43/net/mac80211/cfg.c
69310 --- linux-2.6.32.43/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
69311 +++ linux-2.6.32.43/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
69312 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
69313 return err;
69314 }
69315
69316 -struct cfg80211_ops mac80211_config_ops = {
69317 +const struct cfg80211_ops mac80211_config_ops = {
69318 .add_virtual_intf = ieee80211_add_iface,
69319 .del_virtual_intf = ieee80211_del_iface,
69320 .change_virtual_intf = ieee80211_change_iface,
69321 diff -urNp linux-2.6.32.43/net/mac80211/cfg.h linux-2.6.32.43/net/mac80211/cfg.h
69322 --- linux-2.6.32.43/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
69323 +++ linux-2.6.32.43/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
69324 @@ -4,6 +4,6 @@
69325 #ifndef __CFG_H
69326 #define __CFG_H
69327
69328 -extern struct cfg80211_ops mac80211_config_ops;
69329 +extern const struct cfg80211_ops mac80211_config_ops;
69330
69331 #endif /* __CFG_H */
69332 diff -urNp linux-2.6.32.43/net/mac80211/debugfs_key.c linux-2.6.32.43/net/mac80211/debugfs_key.c
69333 --- linux-2.6.32.43/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
69334 +++ linux-2.6.32.43/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
69335 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
69336 size_t count, loff_t *ppos)
69337 {
69338 struct ieee80211_key *key = file->private_data;
69339 - int i, res, bufsize = 2 * key->conf.keylen + 2;
69340 + int i, bufsize = 2 * key->conf.keylen + 2;
69341 char *buf = kmalloc(bufsize, GFP_KERNEL);
69342 char *p = buf;
69343 + ssize_t res;
69344 +
69345 + if (buf == NULL)
69346 + return -ENOMEM;
69347
69348 for (i = 0; i < key->conf.keylen; i++)
69349 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
69350 diff -urNp linux-2.6.32.43/net/mac80211/debugfs_sta.c linux-2.6.32.43/net/mac80211/debugfs_sta.c
69351 --- linux-2.6.32.43/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
69352 +++ linux-2.6.32.43/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
69353 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
69354 int i;
69355 struct sta_info *sta = file->private_data;
69356
69357 + pax_track_stack();
69358 +
69359 spin_lock_bh(&sta->lock);
69360 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
69361 sta->ampdu_mlme.dialog_token_allocator + 1);
69362 diff -urNp linux-2.6.32.43/net/mac80211/ieee80211_i.h linux-2.6.32.43/net/mac80211/ieee80211_i.h
69363 --- linux-2.6.32.43/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
69364 +++ linux-2.6.32.43/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
69365 @@ -25,6 +25,7 @@
69366 #include <linux/etherdevice.h>
69367 #include <net/cfg80211.h>
69368 #include <net/mac80211.h>
69369 +#include <asm/local.h>
69370 #include "key.h"
69371 #include "sta_info.h"
69372
69373 @@ -635,7 +636,7 @@ struct ieee80211_local {
69374 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69375 spinlock_t queue_stop_reason_lock;
69376
69377 - int open_count;
69378 + local_t open_count;
69379 int monitors, cooked_mntrs;
69380 /* number of interfaces with corresponding FIF_ flags */
69381 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
69382 diff -urNp linux-2.6.32.43/net/mac80211/iface.c linux-2.6.32.43/net/mac80211/iface.c
69383 --- linux-2.6.32.43/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
69384 +++ linux-2.6.32.43/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
69385 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
69386 break;
69387 }
69388
69389 - if (local->open_count == 0) {
69390 + if (local_read(&local->open_count) == 0) {
69391 res = drv_start(local);
69392 if (res)
69393 goto err_del_bss;
69394 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
69395 * Validate the MAC address for this device.
69396 */
69397 if (!is_valid_ether_addr(dev->dev_addr)) {
69398 - if (!local->open_count)
69399 + if (!local_read(&local->open_count))
69400 drv_stop(local);
69401 return -EADDRNOTAVAIL;
69402 }
69403 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
69404
69405 hw_reconf_flags |= __ieee80211_recalc_idle(local);
69406
69407 - local->open_count++;
69408 + local_inc(&local->open_count);
69409 if (hw_reconf_flags) {
69410 ieee80211_hw_config(local, hw_reconf_flags);
69411 /*
69412 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
69413 err_del_interface:
69414 drv_remove_interface(local, &conf);
69415 err_stop:
69416 - if (!local->open_count)
69417 + if (!local_read(&local->open_count))
69418 drv_stop(local);
69419 err_del_bss:
69420 sdata->bss = NULL;
69421 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
69422 WARN_ON(!list_empty(&sdata->u.ap.vlans));
69423 }
69424
69425 - local->open_count--;
69426 + local_dec(&local->open_count);
69427
69428 switch (sdata->vif.type) {
69429 case NL80211_IFTYPE_AP_VLAN:
69430 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
69431
69432 ieee80211_recalc_ps(local, -1);
69433
69434 - if (local->open_count == 0) {
69435 + if (local_read(&local->open_count) == 0) {
69436 ieee80211_clear_tx_pending(local);
69437 ieee80211_stop_device(local);
69438
69439 diff -urNp linux-2.6.32.43/net/mac80211/main.c linux-2.6.32.43/net/mac80211/main.c
69440 --- linux-2.6.32.43/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
69441 +++ linux-2.6.32.43/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
69442 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
69443 local->hw.conf.power_level = power;
69444 }
69445
69446 - if (changed && local->open_count) {
69447 + if (changed && local_read(&local->open_count)) {
69448 ret = drv_config(local, changed);
69449 /*
69450 * Goal:
69451 diff -urNp linux-2.6.32.43/net/mac80211/mlme.c linux-2.6.32.43/net/mac80211/mlme.c
69452 --- linux-2.6.32.43/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
69453 +++ linux-2.6.32.43/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
69454 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
69455 bool have_higher_than_11mbit = false, newsta = false;
69456 u16 ap_ht_cap_flags;
69457
69458 + pax_track_stack();
69459 +
69460 /*
69461 * AssocResp and ReassocResp have identical structure, so process both
69462 * of them in this function.
69463 diff -urNp linux-2.6.32.43/net/mac80211/pm.c linux-2.6.32.43/net/mac80211/pm.c
69464 --- linux-2.6.32.43/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
69465 +++ linux-2.6.32.43/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
69466 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
69467 }
69468
69469 /* stop hardware - this must stop RX */
69470 - if (local->open_count)
69471 + if (local_read(&local->open_count))
69472 ieee80211_stop_device(local);
69473
69474 local->suspended = true;
69475 diff -urNp linux-2.6.32.43/net/mac80211/rate.c linux-2.6.32.43/net/mac80211/rate.c
69476 --- linux-2.6.32.43/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
69477 +++ linux-2.6.32.43/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
69478 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
69479 struct rate_control_ref *ref, *old;
69480
69481 ASSERT_RTNL();
69482 - if (local->open_count)
69483 + if (local_read(&local->open_count))
69484 return -EBUSY;
69485
69486 ref = rate_control_alloc(name, local);
69487 diff -urNp linux-2.6.32.43/net/mac80211/tx.c linux-2.6.32.43/net/mac80211/tx.c
69488 --- linux-2.6.32.43/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
69489 +++ linux-2.6.32.43/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
69490 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
69491 return cpu_to_le16(dur);
69492 }
69493
69494 -static int inline is_ieee80211_device(struct ieee80211_local *local,
69495 +static inline int is_ieee80211_device(struct ieee80211_local *local,
69496 struct net_device *dev)
69497 {
69498 return local == wdev_priv(dev->ieee80211_ptr);
69499 diff -urNp linux-2.6.32.43/net/mac80211/util.c linux-2.6.32.43/net/mac80211/util.c
69500 --- linux-2.6.32.43/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
69501 +++ linux-2.6.32.43/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
69502 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
69503 local->resuming = true;
69504
69505 /* restart hardware */
69506 - if (local->open_count) {
69507 + if (local_read(&local->open_count)) {
69508 /*
69509 * Upon resume hardware can sometimes be goofy due to
69510 * various platform / driver / bus issues, so restarting
69511 diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c
69512 --- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
69513 +++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
69514 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
69515 .open = ip_vs_app_open,
69516 .read = seq_read,
69517 .llseek = seq_lseek,
69518 - .release = seq_release,
69519 + .release = seq_release_net,
69520 };
69521 #endif
69522
69523 diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c
69524 --- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
69525 +++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
69526 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69527 /* if the connection is not template and is created
69528 * by sync, preserve the activity flag.
69529 */
69530 - cp->flags |= atomic_read(&dest->conn_flags) &
69531 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
69532 (~IP_VS_CONN_F_INACTIVE);
69533 else
69534 - cp->flags |= atomic_read(&dest->conn_flags);
69535 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
69536 cp->dest = dest;
69537
69538 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
69539 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
69540 atomic_set(&cp->refcnt, 1);
69541
69542 atomic_set(&cp->n_control, 0);
69543 - atomic_set(&cp->in_pkts, 0);
69544 + atomic_set_unchecked(&cp->in_pkts, 0);
69545
69546 atomic_inc(&ip_vs_conn_count);
69547 if (flags & IP_VS_CONN_F_NO_CPORT)
69548 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
69549 .open = ip_vs_conn_open,
69550 .read = seq_read,
69551 .llseek = seq_lseek,
69552 - .release = seq_release,
69553 + .release = seq_release_net,
69554 };
69555
69556 static const char *ip_vs_origin_name(unsigned flags)
69557 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
69558 .open = ip_vs_conn_sync_open,
69559 .read = seq_read,
69560 .llseek = seq_lseek,
69561 - .release = seq_release,
69562 + .release = seq_release_net,
69563 };
69564
69565 #endif
69566 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
69567
69568 /* Don't drop the entry if its number of incoming packets is not
69569 located in [0, 8] */
69570 - i = atomic_read(&cp->in_pkts);
69571 + i = atomic_read_unchecked(&cp->in_pkts);
69572 if (i > 8 || i < 0) return 0;
69573
69574 if (!todrop_rate[i]) return 0;
69575 diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c
69576 --- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
69577 +++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
69578 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69579 ret = cp->packet_xmit(skb, cp, pp);
69580 /* do not touch skb anymore */
69581
69582 - atomic_inc(&cp->in_pkts);
69583 + atomic_inc_unchecked(&cp->in_pkts);
69584 ip_vs_conn_put(cp);
69585 return ret;
69586 }
69587 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69588 * Sync connection if it is about to close to
69589 * encorage the standby servers to update the connections timeout
69590 */
69591 - pkts = atomic_add_return(1, &cp->in_pkts);
69592 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69593 if (af == AF_INET &&
69594 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
69595 (((cp->protocol != IPPROTO_TCP ||
69596 diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c
69597 --- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
69598 +++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
69599 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
69600 ip_vs_rs_hash(dest);
69601 write_unlock_bh(&__ip_vs_rs_lock);
69602 }
69603 - atomic_set(&dest->conn_flags, conn_flags);
69604 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
69605
69606 /* bind the service */
69607 if (!dest->svc) {
69608 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
69609 " %-7s %-6d %-10d %-10d\n",
69610 &dest->addr.in6,
69611 ntohs(dest->port),
69612 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69613 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69614 atomic_read(&dest->weight),
69615 atomic_read(&dest->activeconns),
69616 atomic_read(&dest->inactconns));
69617 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
69618 "%-7s %-6d %-10d %-10d\n",
69619 ntohl(dest->addr.ip),
69620 ntohs(dest->port),
69621 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69622 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69623 atomic_read(&dest->weight),
69624 atomic_read(&dest->activeconns),
69625 atomic_read(&dest->inactconns));
69626 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
69627 .open = ip_vs_info_open,
69628 .read = seq_read,
69629 .llseek = seq_lseek,
69630 - .release = seq_release_private,
69631 + .release = seq_release_net,
69632 };
69633
69634 #endif
69635 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
69636 .open = ip_vs_stats_seq_open,
69637 .read = seq_read,
69638 .llseek = seq_lseek,
69639 - .release = single_release,
69640 + .release = single_release_net,
69641 };
69642
69643 #endif
69644 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
69645
69646 entry.addr = dest->addr.ip;
69647 entry.port = dest->port;
69648 - entry.conn_flags = atomic_read(&dest->conn_flags);
69649 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69650 entry.weight = atomic_read(&dest->weight);
69651 entry.u_threshold = dest->u_threshold;
69652 entry.l_threshold = dest->l_threshold;
69653 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
69654 unsigned char arg[128];
69655 int ret = 0;
69656
69657 + pax_track_stack();
69658 +
69659 if (!capable(CAP_NET_ADMIN))
69660 return -EPERM;
69661
69662 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
69663 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69664
69665 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69666 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69667 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69668 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69669 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69670 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69671 diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c
69672 --- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
69673 +++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
69674 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
69675
69676 if (opt)
69677 memcpy(&cp->in_seq, opt, sizeof(*opt));
69678 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69679 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69680 cp->state = state;
69681 cp->old_state = cp->state;
69682 /*
69683 diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c
69684 --- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
69685 +++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
69686 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69687 else
69688 rc = NF_ACCEPT;
69689 /* do not touch skb anymore */
69690 - atomic_inc(&cp->in_pkts);
69691 + atomic_inc_unchecked(&cp->in_pkts);
69692 goto out;
69693 }
69694
69695 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69696 else
69697 rc = NF_ACCEPT;
69698 /* do not touch skb anymore */
69699 - atomic_inc(&cp->in_pkts);
69700 + atomic_inc_unchecked(&cp->in_pkts);
69701 goto out;
69702 }
69703
69704 diff -urNp linux-2.6.32.43/net/netfilter/Kconfig linux-2.6.32.43/net/netfilter/Kconfig
69705 --- linux-2.6.32.43/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
69706 +++ linux-2.6.32.43/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
69707 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
69708
69709 To compile it as a module, choose M here. If unsure, say N.
69710
69711 +config NETFILTER_XT_MATCH_GRADM
69712 + tristate '"gradm" match support'
69713 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69714 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69715 + ---help---
69716 +	  The gradm match allows matching on whether grsecurity RBAC is enabled.
69717 + It is useful when iptables rules are applied early on bootup to
69718 + prevent connections to the machine (except from a trusted host)
69719 + while the RBAC system is disabled.
69720 +
69721 config NETFILTER_XT_MATCH_HASHLIMIT
69722 tristate '"hashlimit" match support'
69723 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
69724 diff -urNp linux-2.6.32.43/net/netfilter/Makefile linux-2.6.32.43/net/netfilter/Makefile
69725 --- linux-2.6.32.43/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
69726 +++ linux-2.6.32.43/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
69727 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
69728 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
69729 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69730 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69731 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69732 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69733 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69734 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69735 diff -urNp linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c
69736 --- linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
69737 +++ linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
69738 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
69739 static int
69740 ctnetlink_parse_tuple(const struct nlattr * const cda[],
69741 struct nf_conntrack_tuple *tuple,
69742 - enum ctattr_tuple type, u_int8_t l3num)
69743 + enum ctattr_type type, u_int8_t l3num)
69744 {
69745 struct nlattr *tb[CTA_TUPLE_MAX+1];
69746 int err;
69747 diff -urNp linux-2.6.32.43/net/netfilter/nfnetlink_log.c linux-2.6.32.43/net/netfilter/nfnetlink_log.c
69748 --- linux-2.6.32.43/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
69749 +++ linux-2.6.32.43/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
69750 @@ -68,7 +68,7 @@ struct nfulnl_instance {
69751 };
69752
69753 static DEFINE_RWLOCK(instances_lock);
69754 -static atomic_t global_seq;
69755 +static atomic_unchecked_t global_seq;
69756
69757 #define INSTANCE_BUCKETS 16
69758 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69759 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
69760 /* global sequence number */
69761 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69762 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69763 - htonl(atomic_inc_return(&global_seq)));
69764 + htonl(atomic_inc_return_unchecked(&global_seq)));
69765
69766 if (data_len) {
69767 struct nlattr *nla;
69768 diff -urNp linux-2.6.32.43/net/netfilter/xt_gradm.c linux-2.6.32.43/net/netfilter/xt_gradm.c
69769 --- linux-2.6.32.43/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69770 +++ linux-2.6.32.43/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
69771 @@ -0,0 +1,51 @@
69772 +/*
69773 + * gradm match for netfilter
69774 + * Copyright © Zbigniew Krzystolik, 2010
69775 + *
69776 + * This program is free software; you can redistribute it and/or modify
69777 + * it under the terms of the GNU General Public License; either version
69778 + * 2 or 3 as published by the Free Software Foundation.
69779 + */
69780 +#include <linux/module.h>
69781 +#include <linux/moduleparam.h>
69782 +#include <linux/skbuff.h>
69783 +#include <linux/netfilter/x_tables.h>
69784 +#include <linux/grsecurity.h>
69785 +#include <linux/netfilter/xt_gradm.h>
69786 +
69787 +static bool
69788 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
69789 +{
69790 + const struct xt_gradm_mtinfo *info = par->matchinfo;
69791 + bool retval = false;
69792 + if (gr_acl_is_enabled())
69793 + retval = true;
69794 + return retval ^ info->invflags;
69795 +}
69796 +
69797 +static struct xt_match gradm_mt_reg __read_mostly = {
69798 + .name = "gradm",
69799 + .revision = 0,
69800 + .family = NFPROTO_UNSPEC,
69801 + .match = gradm_mt,
69802 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
69803 + .me = THIS_MODULE,
69804 +};
69805 +
69806 +static int __init gradm_mt_init(void)
69807 +{
69808 + return xt_register_match(&gradm_mt_reg);
69809 +}
69810 +
69811 +static void __exit gradm_mt_exit(void)
69812 +{
69813 + xt_unregister_match(&gradm_mt_reg);
69814 +}
69815 +
69816 +module_init(gradm_mt_init);
69817 +module_exit(gradm_mt_exit);
69818 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
69819 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
69820 +MODULE_LICENSE("GPL");
69821 +MODULE_ALIAS("ipt_gradm");
69822 +MODULE_ALIAS("ip6t_gradm");
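The Kconfig help text and the new xt_gradm.c above describe a netfilter match that fires on whether grsecurity RBAC is currently enabled. As a minimal sketch of the decision logic only (not kernel code: rbac_enabled and invert are userspace stand-ins for gr_acl_is_enabled() and info->invflags, and all xt_match registration plumbing is omitted), the match reduces to an XOR with the rule's invert flag:

#include <stdbool.h>
#include <stdio.h>

/* Same shape as gradm_mt() in the hunk above: report a match when RBAC is
 * enabled, with the result flipped for an inverted rule. */
static bool gradm_decision(bool rbac_enabled, bool invert)
{
	return rbac_enabled ^ invert;
}

int main(void)
{
	printf("RBAC on,  plain rule    -> %d\n", gradm_decision(true, false));
	printf("RBAC off, plain rule    -> %d\n", gradm_decision(false, false));
	printf("RBAC off, inverted rule -> %d\n", gradm_decision(false, true));
	return 0;
}

In the early-boot scenario the Kconfig help describes, a plain rule only starts matching once the RBAC system is enabled, while an inverted rule matches until then.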
69823 diff -urNp linux-2.6.32.43/net/netlink/af_netlink.c linux-2.6.32.43/net/netlink/af_netlink.c
69824 --- linux-2.6.32.43/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
69825 +++ linux-2.6.32.43/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
69826 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
69827 sk->sk_error_report(sk);
69828 }
69829 }
69830 - atomic_inc(&sk->sk_drops);
69831 + atomic_inc_unchecked(&sk->sk_drops);
69832 }
69833
69834 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
69835 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
69836 struct netlink_sock *nlk = nlk_sk(s);
69837
69838 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
69839 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69840 + NULL,
69841 +#else
69842 s,
69843 +#endif
69844 s->sk_protocol,
69845 nlk->pid,
69846 nlk->groups ? (u32)nlk->groups[0] : 0,
69847 sk_rmem_alloc_get(s),
69848 sk_wmem_alloc_get(s),
69849 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69850 + NULL,
69851 +#else
69852 nlk->cb,
69853 +#endif
69854 atomic_read(&s->sk_refcnt),
69855 - atomic_read(&s->sk_drops)
69856 + atomic_read_unchecked(&s->sk_drops)
69857 );
69858
69859 }
69860 diff -urNp linux-2.6.32.43/net/netrom/af_netrom.c linux-2.6.32.43/net/netrom/af_netrom.c
69861 --- linux-2.6.32.43/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
69862 +++ linux-2.6.32.43/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
69863 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
69864 struct sock *sk = sock->sk;
69865 struct nr_sock *nr = nr_sk(sk);
69866
69867 + memset(sax, 0, sizeof(*sax));
69868 lock_sock(sk);
69869 if (peer != 0) {
69870 if (sk->sk_state != TCP_ESTABLISHED) {
69871 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
69872 *uaddr_len = sizeof(struct full_sockaddr_ax25);
69873 } else {
69874 sax->fsa_ax25.sax25_family = AF_NETROM;
69875 - sax->fsa_ax25.sax25_ndigis = 0;
69876 sax->fsa_ax25.sax25_call = nr->source_addr;
69877 *uaddr_len = sizeof(struct sockaddr_ax25);
69878 }
69879 diff -urNp linux-2.6.32.43/net/packet/af_packet.c linux-2.6.32.43/net/packet/af_packet.c
69880 --- linux-2.6.32.43/net/packet/af_packet.c 2011-07-13 17:23:04.000000000 -0400
69881 +++ linux-2.6.32.43/net/packet/af_packet.c 2011-07-13 17:23:27.000000000 -0400
69882 @@ -2429,7 +2429,11 @@ static int packet_seq_show(struct seq_fi
69883
69884 seq_printf(seq,
69885 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
69886 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69887 + NULL,
69888 +#else
69889 s,
69890 +#endif
69891 atomic_read(&s->sk_refcnt),
69892 s->sk_type,
69893 ntohs(po->num),
69894 diff -urNp linux-2.6.32.43/net/phonet/af_phonet.c linux-2.6.32.43/net/phonet/af_phonet.c
69895 --- linux-2.6.32.43/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
69896 +++ linux-2.6.32.43/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
69897 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
69898 {
69899 struct phonet_protocol *pp;
69900
69901 - if (protocol >= PHONET_NPROTO)
69902 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69903 return NULL;
69904
69905 spin_lock(&proto_tab_lock);
69906 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
69907 {
69908 int err = 0;
69909
69910 - if (protocol >= PHONET_NPROTO)
69911 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69912 return -EINVAL;
69913
69914 err = proto_register(pp->prot, 1);
69915 diff -urNp linux-2.6.32.43/net/phonet/datagram.c linux-2.6.32.43/net/phonet/datagram.c
69916 --- linux-2.6.32.43/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
69917 +++ linux-2.6.32.43/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
69918 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
69919 if (err < 0) {
69920 kfree_skb(skb);
69921 if (err == -ENOMEM)
69922 - atomic_inc(&sk->sk_drops);
69923 + atomic_inc_unchecked(&sk->sk_drops);
69924 }
69925 return err ? NET_RX_DROP : NET_RX_SUCCESS;
69926 }
69927 diff -urNp linux-2.6.32.43/net/phonet/pep.c linux-2.6.32.43/net/phonet/pep.c
69928 --- linux-2.6.32.43/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
69929 +++ linux-2.6.32.43/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
69930 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
69931
69932 case PNS_PEP_CTRL_REQ:
69933 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
69934 - atomic_inc(&sk->sk_drops);
69935 + atomic_inc_unchecked(&sk->sk_drops);
69936 break;
69937 }
69938 __skb_pull(skb, 4);
69939 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
69940 if (!err)
69941 return 0;
69942 if (err == -ENOMEM)
69943 - atomic_inc(&sk->sk_drops);
69944 + atomic_inc_unchecked(&sk->sk_drops);
69945 break;
69946 }
69947
69948 if (pn->rx_credits == 0) {
69949 - atomic_inc(&sk->sk_drops);
69950 + atomic_inc_unchecked(&sk->sk_drops);
69951 err = -ENOBUFS;
69952 break;
69953 }
69954 diff -urNp linux-2.6.32.43/net/phonet/socket.c linux-2.6.32.43/net/phonet/socket.c
69955 --- linux-2.6.32.43/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
69956 +++ linux-2.6.32.43/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
69957 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
69958 sk->sk_state,
69959 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69960 sock_i_uid(sk), sock_i_ino(sk),
69961 - atomic_read(&sk->sk_refcnt), sk,
69962 - atomic_read(&sk->sk_drops), &len);
69963 + atomic_read(&sk->sk_refcnt),
69964 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69965 + NULL,
69966 +#else
69967 + sk,
69968 +#endif
69969 + atomic_read_unchecked(&sk->sk_drops), &len);
69970 }
69971 seq_printf(seq, "%*s\n", 127 - len, "");
69972 return 0;
69973 diff -urNp linux-2.6.32.43/net/rds/cong.c linux-2.6.32.43/net/rds/cong.c
69974 --- linux-2.6.32.43/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
69975 +++ linux-2.6.32.43/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
69976 @@ -77,7 +77,7 @@
69977 * finds that the saved generation number is smaller than the global generation
69978 * number, it wakes up the process.
69979 */
69980 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
69981 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
69982
69983 /*
69984 * Congestion monitoring
69985 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69986 rdsdebug("waking map %p for %pI4\n",
69987 map, &map->m_addr);
69988 rds_stats_inc(s_cong_update_received);
69989 - atomic_inc(&rds_cong_generation);
69990 + atomic_inc_unchecked(&rds_cong_generation);
69991 if (waitqueue_active(&map->m_waitq))
69992 wake_up(&map->m_waitq);
69993 if (waitqueue_active(&rds_poll_waitq))
69994 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69995
69996 int rds_cong_updated_since(unsigned long *recent)
69997 {
69998 - unsigned long gen = atomic_read(&rds_cong_generation);
69999 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
70000
70001 if (likely(*recent == gen))
70002 return 0;
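The comment block kept as context at the top of the net/rds/cong.c hunk describes a wakeup scheme built on a global generation counter: updaters bump the counter and wake any waiters, and a sleeper compares its saved generation against the global one to decide whether anything happened while it slept. A minimal userspace sketch of that pattern, assuming C11 atomics in place of the kernel's atomic_t/atomic_unchecked_t and leaving out the waitqueue side entirely:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong global_generation;	/* bumped on every map update */

/* Same contract as rds_cong_updated_since() in the hunk above: return true
 * (and refresh the caller's snapshot) only if an update happened since the
 * caller last looked. */
static bool updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_load(&global_generation);

	if (*recent == gen)
		return false;
	*recent = gen;
	return true;
}

/* Called by whoever changes the congestion map; the real code additionally
 * wakes the waitqueues it checks with waitqueue_active(). */
static void map_updated(void)
{
	atomic_fetch_add(&global_generation, 1);
}

int main(void)
{
	unsigned long seen = atomic_load(&global_generation);

	map_updated();
	return updated_since(&seen) ? 0 : 1;	/* exits 0: the update was noticed */
}

The grsecurity change in this hunk only swaps the counter to atomic_unchecked_t and the matching *_unchecked accessors, exempting it from the PaX reference-counter overflow checks; the generation logic itself is unchanged.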
70003 diff -urNp linux-2.6.32.43/net/rds/iw_rdma.c linux-2.6.32.43/net/rds/iw_rdma.c
70004 --- linux-2.6.32.43/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
70005 +++ linux-2.6.32.43/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
70006 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
70007 struct rdma_cm_id *pcm_id;
70008 int rc;
70009
70010 + pax_track_stack();
70011 +
70012 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
70013 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
70014
70015 diff -urNp linux-2.6.32.43/net/rds/Kconfig linux-2.6.32.43/net/rds/Kconfig
70016 --- linux-2.6.32.43/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
70017 +++ linux-2.6.32.43/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
70018 @@ -1,7 +1,7 @@
70019
70020 config RDS
70021 tristate "The RDS Protocol (EXPERIMENTAL)"
70022 - depends on INET && EXPERIMENTAL
70023 + depends on INET && EXPERIMENTAL && BROKEN
70024 ---help---
70025 The RDS (Reliable Datagram Sockets) protocol provides reliable,
70026 sequenced delivery of datagrams over Infiniband, iWARP,
70027 diff -urNp linux-2.6.32.43/net/rxrpc/af_rxrpc.c linux-2.6.32.43/net/rxrpc/af_rxrpc.c
70028 --- linux-2.6.32.43/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
70029 +++ linux-2.6.32.43/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
70030 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
70031 __be32 rxrpc_epoch;
70032
70033 /* current debugging ID */
70034 -atomic_t rxrpc_debug_id;
70035 +atomic_unchecked_t rxrpc_debug_id;
70036
70037 /* count of skbs currently in use */
70038 atomic_t rxrpc_n_skbs;
70039 diff -urNp linux-2.6.32.43/net/rxrpc/ar-ack.c linux-2.6.32.43/net/rxrpc/ar-ack.c
70040 --- linux-2.6.32.43/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
70041 +++ linux-2.6.32.43/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
70042 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
70043
70044 _enter("{%d,%d,%d,%d},",
70045 call->acks_hard, call->acks_unacked,
70046 - atomic_read(&call->sequence),
70047 + atomic_read_unchecked(&call->sequence),
70048 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
70049
70050 stop = 0;
70051 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
70052
70053 /* each Tx packet has a new serial number */
70054 sp->hdr.serial =
70055 - htonl(atomic_inc_return(&call->conn->serial));
70056 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
70057
70058 hdr = (struct rxrpc_header *) txb->head;
70059 hdr->serial = sp->hdr.serial;
70060 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
70061 */
70062 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
70063 {
70064 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
70065 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
70066 }
70067
70068 /*
70069 @@ -627,7 +627,7 @@ process_further:
70070
70071 latest = ntohl(sp->hdr.serial);
70072 hard = ntohl(ack.firstPacket);
70073 - tx = atomic_read(&call->sequence);
70074 + tx = atomic_read_unchecked(&call->sequence);
70075
70076 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
70077 latest,
70078 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
70079 u32 abort_code = RX_PROTOCOL_ERROR;
70080 u8 *acks = NULL;
70081
70082 + pax_track_stack();
70083 +
70084 //printk("\n--------------------\n");
70085 _enter("{%d,%s,%lx} [%lu]",
70086 call->debug_id, rxrpc_call_states[call->state], call->events,
70087 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
70088 goto maybe_reschedule;
70089
70090 send_ACK_with_skew:
70091 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
70092 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
70093 ntohl(ack.serial));
70094 send_ACK:
70095 mtu = call->conn->trans->peer->if_mtu;
70096 @@ -1171,7 +1173,7 @@ send_ACK:
70097 ackinfo.rxMTU = htonl(5692);
70098 ackinfo.jumbo_max = htonl(4);
70099
70100 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70101 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70102 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
70103 ntohl(hdr.serial),
70104 ntohs(ack.maxSkew),
70105 @@ -1189,7 +1191,7 @@ send_ACK:
70106 send_message:
70107 _debug("send message");
70108
70109 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
70110 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
70111 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
70112 send_message_2:
70113
70114 diff -urNp linux-2.6.32.43/net/rxrpc/ar-call.c linux-2.6.32.43/net/rxrpc/ar-call.c
70115 --- linux-2.6.32.43/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
70116 +++ linux-2.6.32.43/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
70117 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
70118 spin_lock_init(&call->lock);
70119 rwlock_init(&call->state_lock);
70120 atomic_set(&call->usage, 1);
70121 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
70122 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70123 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
70124
70125 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
70126 diff -urNp linux-2.6.32.43/net/rxrpc/ar-connection.c linux-2.6.32.43/net/rxrpc/ar-connection.c
70127 --- linux-2.6.32.43/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
70128 +++ linux-2.6.32.43/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
70129 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
70130 rwlock_init(&conn->lock);
70131 spin_lock_init(&conn->state_lock);
70132 atomic_set(&conn->usage, 1);
70133 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
70134 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70135 conn->avail_calls = RXRPC_MAXCALLS;
70136 conn->size_align = 4;
70137 conn->header_size = sizeof(struct rxrpc_header);
70138 diff -urNp linux-2.6.32.43/net/rxrpc/ar-connevent.c linux-2.6.32.43/net/rxrpc/ar-connevent.c
70139 --- linux-2.6.32.43/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
70140 +++ linux-2.6.32.43/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
70141 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
70142
70143 len = iov[0].iov_len + iov[1].iov_len;
70144
70145 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
70146 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70147 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
70148
70149 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70150 diff -urNp linux-2.6.32.43/net/rxrpc/ar-input.c linux-2.6.32.43/net/rxrpc/ar-input.c
70151 --- linux-2.6.32.43/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
70152 +++ linux-2.6.32.43/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
70153 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
70154 /* track the latest serial number on this connection for ACK packet
70155 * information */
70156 serial = ntohl(sp->hdr.serial);
70157 - hi_serial = atomic_read(&call->conn->hi_serial);
70158 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
70159 while (serial > hi_serial)
70160 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
70161 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
70162 serial);
70163
70164 /* request ACK generation for any ACK or DATA packet that requests
70165 diff -urNp linux-2.6.32.43/net/rxrpc/ar-internal.h linux-2.6.32.43/net/rxrpc/ar-internal.h
70166 --- linux-2.6.32.43/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
70167 +++ linux-2.6.32.43/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
70168 @@ -272,8 +272,8 @@ struct rxrpc_connection {
70169 int error; /* error code for local abort */
70170 int debug_id; /* debug ID for printks */
70171 unsigned call_counter; /* call ID counter */
70172 - atomic_t serial; /* packet serial number counter */
70173 - atomic_t hi_serial; /* highest serial number received */
70174 + atomic_unchecked_t serial; /* packet serial number counter */
70175 + atomic_unchecked_t hi_serial; /* highest serial number received */
70176 u8 avail_calls; /* number of calls available */
70177 u8 size_align; /* data size alignment (for security) */
70178 u8 header_size; /* rxrpc + security header size */
70179 @@ -346,7 +346,7 @@ struct rxrpc_call {
70180 spinlock_t lock;
70181 rwlock_t state_lock; /* lock for state transition */
70182 atomic_t usage;
70183 - atomic_t sequence; /* Tx data packet sequence counter */
70184 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
70185 u32 abort_code; /* local/remote abort code */
70186 enum { /* current state of call */
70187 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
70188 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
70189 */
70190 extern atomic_t rxrpc_n_skbs;
70191 extern __be32 rxrpc_epoch;
70192 -extern atomic_t rxrpc_debug_id;
70193 +extern atomic_unchecked_t rxrpc_debug_id;
70194 extern struct workqueue_struct *rxrpc_workqueue;
70195
70196 /*
70197 diff -urNp linux-2.6.32.43/net/rxrpc/ar-key.c linux-2.6.32.43/net/rxrpc/ar-key.c
70198 --- linux-2.6.32.43/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
70199 +++ linux-2.6.32.43/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
70200 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
70201 return ret;
70202
70203 plen -= sizeof(*token);
70204 - token = kmalloc(sizeof(*token), GFP_KERNEL);
70205 + token = kzalloc(sizeof(*token), GFP_KERNEL);
70206 if (!token)
70207 return -ENOMEM;
70208
70209 - token->kad = kmalloc(plen, GFP_KERNEL);
70210 + token->kad = kzalloc(plen, GFP_KERNEL);
70211 if (!token->kad) {
70212 kfree(token);
70213 return -ENOMEM;
70214 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
70215 goto error;
70216
70217 ret = -ENOMEM;
70218 - token = kmalloc(sizeof(*token), GFP_KERNEL);
70219 + token = kzalloc(sizeof(*token), GFP_KERNEL);
70220 if (!token)
70221 goto error;
70222 - token->kad = kmalloc(plen, GFP_KERNEL);
70223 + token->kad = kzalloc(plen, GFP_KERNEL);
70224 if (!token->kad)
70225 goto error_free;
70226
70227 diff -urNp linux-2.6.32.43/net/rxrpc/ar-local.c linux-2.6.32.43/net/rxrpc/ar-local.c
70228 --- linux-2.6.32.43/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
70229 +++ linux-2.6.32.43/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
70230 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
70231 spin_lock_init(&local->lock);
70232 rwlock_init(&local->services_lock);
70233 atomic_set(&local->usage, 1);
70234 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
70235 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70236 memcpy(&local->srx, srx, sizeof(*srx));
70237 }
70238
70239 diff -urNp linux-2.6.32.43/net/rxrpc/ar-output.c linux-2.6.32.43/net/rxrpc/ar-output.c
70240 --- linux-2.6.32.43/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
70241 +++ linux-2.6.32.43/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
70242 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
70243 sp->hdr.cid = call->cid;
70244 sp->hdr.callNumber = call->call_id;
70245 sp->hdr.seq =
70246 - htonl(atomic_inc_return(&call->sequence));
70247 + htonl(atomic_inc_return_unchecked(&call->sequence));
70248 sp->hdr.serial =
70249 - htonl(atomic_inc_return(&conn->serial));
70250 + htonl(atomic_inc_return_unchecked(&conn->serial));
70251 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
70252 sp->hdr.userStatus = 0;
70253 sp->hdr.securityIndex = conn->security_ix;
70254 diff -urNp linux-2.6.32.43/net/rxrpc/ar-peer.c linux-2.6.32.43/net/rxrpc/ar-peer.c
70255 --- linux-2.6.32.43/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
70256 +++ linux-2.6.32.43/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
70257 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
70258 INIT_LIST_HEAD(&peer->error_targets);
70259 spin_lock_init(&peer->lock);
70260 atomic_set(&peer->usage, 1);
70261 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
70262 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70263 memcpy(&peer->srx, srx, sizeof(*srx));
70264
70265 rxrpc_assess_MTU_size(peer);
70266 diff -urNp linux-2.6.32.43/net/rxrpc/ar-proc.c linux-2.6.32.43/net/rxrpc/ar-proc.c
70267 --- linux-2.6.32.43/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
70268 +++ linux-2.6.32.43/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
70269 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
70270 atomic_read(&conn->usage),
70271 rxrpc_conn_states[conn->state],
70272 key_serial(conn->key),
70273 - atomic_read(&conn->serial),
70274 - atomic_read(&conn->hi_serial));
70275 + atomic_read_unchecked(&conn->serial),
70276 + atomic_read_unchecked(&conn->hi_serial));
70277
70278 return 0;
70279 }
70280 diff -urNp linux-2.6.32.43/net/rxrpc/ar-transport.c linux-2.6.32.43/net/rxrpc/ar-transport.c
70281 --- linux-2.6.32.43/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
70282 +++ linux-2.6.32.43/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
70283 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
70284 spin_lock_init(&trans->client_lock);
70285 rwlock_init(&trans->conn_lock);
70286 atomic_set(&trans->usage, 1);
70287 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
70288 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
70289
70290 if (peer->srx.transport.family == AF_INET) {
70291 switch (peer->srx.transport_type) {
70292 diff -urNp linux-2.6.32.43/net/rxrpc/rxkad.c linux-2.6.32.43/net/rxrpc/rxkad.c
70293 --- linux-2.6.32.43/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
70294 +++ linux-2.6.32.43/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
70295 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
70296 u16 check;
70297 int nsg;
70298
70299 + pax_track_stack();
70300 +
70301 sp = rxrpc_skb(skb);
70302
70303 _enter("");
70304 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
70305 u16 check;
70306 int nsg;
70307
70308 + pax_track_stack();
70309 +
70310 _enter("");
70311
70312 sp = rxrpc_skb(skb);
70313 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
70314
70315 len = iov[0].iov_len + iov[1].iov_len;
70316
70317 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
70318 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70319 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
70320
70321 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
70322 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
70323
70324 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
70325
70326 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
70327 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
70328 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
70329
70330 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
70331 diff -urNp linux-2.6.32.43/net/sctp/proc.c linux-2.6.32.43/net/sctp/proc.c
70332 --- linux-2.6.32.43/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
70333 +++ linux-2.6.32.43/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
70334 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
70335 sctp_for_each_hentry(epb, node, &head->chain) {
70336 ep = sctp_ep(epb);
70337 sk = epb->sk;
70338 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
70339 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
70340 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70341 + NULL, NULL,
70342 +#else
70343 + ep, sk,
70344 +#endif
70345 sctp_sk(sk)->type, sk->sk_state, hash,
70346 epb->bind_addr.port,
70347 sock_i_uid(sk), sock_i_ino(sk));
70348 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
70349 seq_printf(seq,
70350 "%8p %8p %-3d %-3d %-2d %-4d "
70351 "%4d %8d %8d %7d %5lu %-5d %5d ",
70352 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
70353 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70354 + NULL, NULL,
70355 +#else
70356 + assoc, sk,
70357 +#endif
70358 + sctp_sk(sk)->type, sk->sk_state,
70359 assoc->state, hash,
70360 assoc->assoc_id,
70361 assoc->sndbuf_used,
70362 diff -urNp linux-2.6.32.43/net/sctp/socket.c linux-2.6.32.43/net/sctp/socket.c
70363 --- linux-2.6.32.43/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
70364 +++ linux-2.6.32.43/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
70365 @@ -5802,7 +5802,6 @@ pp_found:
70366 */
70367 int reuse = sk->sk_reuse;
70368 struct sock *sk2;
70369 - struct hlist_node *node;
70370
70371 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
70372 if (pp->fastreuse && sk->sk_reuse &&
70373 diff -urNp linux-2.6.32.43/net/socket.c linux-2.6.32.43/net/socket.c
70374 --- linux-2.6.32.43/net/socket.c 2011-03-27 14:31:47.000000000 -0400
70375 +++ linux-2.6.32.43/net/socket.c 2011-05-16 21:46:57.000000000 -0400
70376 @@ -87,6 +87,7 @@
70377 #include <linux/wireless.h>
70378 #include <linux/nsproxy.h>
70379 #include <linux/magic.h>
70380 +#include <linux/in.h>
70381
70382 #include <asm/uaccess.h>
70383 #include <asm/unistd.h>
70384 @@ -97,6 +98,21 @@
70385 #include <net/sock.h>
70386 #include <linux/netfilter.h>
70387
70388 +extern void gr_attach_curr_ip(const struct sock *sk);
70389 +extern int gr_handle_sock_all(const int family, const int type,
70390 + const int protocol);
70391 +extern int gr_handle_sock_server(const struct sockaddr *sck);
70392 +extern int gr_handle_sock_server_other(const struct sock *sck);
70393 +extern int gr_handle_sock_client(const struct sockaddr *sck);
70394 +extern int gr_search_connect(struct socket * sock,
70395 + struct sockaddr_in * addr);
70396 +extern int gr_search_bind(struct socket * sock,
70397 + struct sockaddr_in * addr);
70398 +extern int gr_search_listen(struct socket * sock);
70399 +extern int gr_search_accept(struct socket * sock);
70400 +extern int gr_search_socket(const int domain, const int type,
70401 + const int protocol);
70402 +
70403 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
70404 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
70405 unsigned long nr_segs, loff_t pos);
70406 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
70407 mnt);
70408 }
70409
70410 -static struct vfsmount *sock_mnt __read_mostly;
70411 +struct vfsmount *sock_mnt __read_mostly;
70412
70413 static struct file_system_type sock_fs_type = {
70414 .name = "sockfs",
70415 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
70416 return -EAFNOSUPPORT;
70417 if (type < 0 || type >= SOCK_MAX)
70418 return -EINVAL;
70419 + if (protocol < 0)
70420 + return -EINVAL;
70421
70422 /* Compatibility.
70423
70424 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
70425 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
70426 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
70427
70428 + if(!gr_search_socket(family, type, protocol)) {
70429 + retval = -EACCES;
70430 + goto out;
70431 + }
70432 +
70433 + if (gr_handle_sock_all(family, type, protocol)) {
70434 + retval = -EACCES;
70435 + goto out;
70436 + }
70437 +
70438 retval = sock_create(family, type, protocol, &sock);
70439 if (retval < 0)
70440 goto out;
70441 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70442 if (sock) {
70443 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
70444 if (err >= 0) {
70445 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
70446 + err = -EACCES;
70447 + goto error;
70448 + }
70449 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
70450 + if (err)
70451 + goto error;
70452 +
70453 err = security_socket_bind(sock,
70454 (struct sockaddr *)&address,
70455 addrlen);
70456 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
70457 (struct sockaddr *)
70458 &address, addrlen);
70459 }
70460 +error:
70461 fput_light(sock->file, fput_needed);
70462 }
70463 return err;
70464 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
70465 if ((unsigned)backlog > somaxconn)
70466 backlog = somaxconn;
70467
70468 + if (gr_handle_sock_server_other(sock->sk)) {
70469 + err = -EPERM;
70470 + goto error;
70471 + }
70472 +
70473 + err = gr_search_listen(sock);
70474 + if (err)
70475 + goto error;
70476 +
70477 err = security_socket_listen(sock, backlog);
70478 if (!err)
70479 err = sock->ops->listen(sock, backlog);
70480
70481 +error:
70482 fput_light(sock->file, fput_needed);
70483 }
70484 return err;
70485 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70486 newsock->type = sock->type;
70487 newsock->ops = sock->ops;
70488
70489 + if (gr_handle_sock_server_other(sock->sk)) {
70490 + err = -EPERM;
70491 + sock_release(newsock);
70492 + goto out_put;
70493 + }
70494 +
70495 + err = gr_search_accept(sock);
70496 + if (err) {
70497 + sock_release(newsock);
70498 + goto out_put;
70499 + }
70500 +
70501 /*
70502 * We don't need try_module_get here, as the listening socket (sock)
70503 * has the protocol module (sock->ops->owner) held.
70504 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
70505 fd_install(newfd, newfile);
70506 err = newfd;
70507
70508 + gr_attach_curr_ip(newsock->sk);
70509 +
70510 out_put:
70511 fput_light(sock->file, fput_needed);
70512 out:
70513 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70514 int, addrlen)
70515 {
70516 struct socket *sock;
70517 + struct sockaddr *sck;
70518 struct sockaddr_storage address;
70519 int err, fput_needed;
70520
70521 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
70522 if (err < 0)
70523 goto out_put;
70524
70525 + sck = (struct sockaddr *)&address;
70526 +
70527 + if (gr_handle_sock_client(sck)) {
70528 + err = -EACCES;
70529 + goto out_put;
70530 + }
70531 +
70532 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
70533 + if (err)
70534 + goto out_put;
70535 +
70536 err =
70537 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
70538 if (err)
70539 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
70540 int err, ctl_len, iov_size, total_len;
70541 int fput_needed;
70542
70543 + pax_track_stack();
70544 +
70545 err = -EFAULT;
70546 if (MSG_CMSG_COMPAT & flags) {
70547 if (get_compat_msghdr(&msg_sys, msg_compat))
70548 diff -urNp linux-2.6.32.43/net/sunrpc/sched.c linux-2.6.32.43/net/sunrpc/sched.c
70549 --- linux-2.6.32.43/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
70550 +++ linux-2.6.32.43/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
70551 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
70552 #ifdef RPC_DEBUG
70553 static void rpc_task_set_debuginfo(struct rpc_task *task)
70554 {
70555 - static atomic_t rpc_pid;
70556 + static atomic_unchecked_t rpc_pid;
70557
70558 task->tk_magic = RPC_TASK_MAGIC_ID;
70559 - task->tk_pid = atomic_inc_return(&rpc_pid);
70560 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
70561 }
70562 #else
70563 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
70564 diff -urNp linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma.c
70565 --- linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
70566 +++ linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
70567 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
70568 static unsigned int min_max_inline = 4096;
70569 static unsigned int max_max_inline = 65536;
70570
70571 -atomic_t rdma_stat_recv;
70572 -atomic_t rdma_stat_read;
70573 -atomic_t rdma_stat_write;
70574 -atomic_t rdma_stat_sq_starve;
70575 -atomic_t rdma_stat_rq_starve;
70576 -atomic_t rdma_stat_rq_poll;
70577 -atomic_t rdma_stat_rq_prod;
70578 -atomic_t rdma_stat_sq_poll;
70579 -atomic_t rdma_stat_sq_prod;
70580 +atomic_unchecked_t rdma_stat_recv;
70581 +atomic_unchecked_t rdma_stat_read;
70582 +atomic_unchecked_t rdma_stat_write;
70583 +atomic_unchecked_t rdma_stat_sq_starve;
70584 +atomic_unchecked_t rdma_stat_rq_starve;
70585 +atomic_unchecked_t rdma_stat_rq_poll;
70586 +atomic_unchecked_t rdma_stat_rq_prod;
70587 +atomic_unchecked_t rdma_stat_sq_poll;
70588 +atomic_unchecked_t rdma_stat_sq_prod;
70589
70590 /* Temporary NFS request map and context caches */
70591 struct kmem_cache *svc_rdma_map_cachep;
70592 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
70593 len -= *ppos;
70594 if (len > *lenp)
70595 len = *lenp;
70596 - if (len && copy_to_user(buffer, str_buf, len))
70597 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
70598 return -EFAULT;
70599 *lenp = len;
70600 *ppos += len;
70601 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
70602 {
70603 .procname = "rdma_stat_read",
70604 .data = &rdma_stat_read,
70605 - .maxlen = sizeof(atomic_t),
70606 + .maxlen = sizeof(atomic_unchecked_t),
70607 .mode = 0644,
70608 .proc_handler = &read_reset_stat,
70609 },
70610 {
70611 .procname = "rdma_stat_recv",
70612 .data = &rdma_stat_recv,
70613 - .maxlen = sizeof(atomic_t),
70614 + .maxlen = sizeof(atomic_unchecked_t),
70615 .mode = 0644,
70616 .proc_handler = &read_reset_stat,
70617 },
70618 {
70619 .procname = "rdma_stat_write",
70620 .data = &rdma_stat_write,
70621 - .maxlen = sizeof(atomic_t),
70622 + .maxlen = sizeof(atomic_unchecked_t),
70623 .mode = 0644,
70624 .proc_handler = &read_reset_stat,
70625 },
70626 {
70627 .procname = "rdma_stat_sq_starve",
70628 .data = &rdma_stat_sq_starve,
70629 - .maxlen = sizeof(atomic_t),
70630 + .maxlen = sizeof(atomic_unchecked_t),
70631 .mode = 0644,
70632 .proc_handler = &read_reset_stat,
70633 },
70634 {
70635 .procname = "rdma_stat_rq_starve",
70636 .data = &rdma_stat_rq_starve,
70637 - .maxlen = sizeof(atomic_t),
70638 + .maxlen = sizeof(atomic_unchecked_t),
70639 .mode = 0644,
70640 .proc_handler = &read_reset_stat,
70641 },
70642 {
70643 .procname = "rdma_stat_rq_poll",
70644 .data = &rdma_stat_rq_poll,
70645 - .maxlen = sizeof(atomic_t),
70646 + .maxlen = sizeof(atomic_unchecked_t),
70647 .mode = 0644,
70648 .proc_handler = &read_reset_stat,
70649 },
70650 {
70651 .procname = "rdma_stat_rq_prod",
70652 .data = &rdma_stat_rq_prod,
70653 - .maxlen = sizeof(atomic_t),
70654 + .maxlen = sizeof(atomic_unchecked_t),
70655 .mode = 0644,
70656 .proc_handler = &read_reset_stat,
70657 },
70658 {
70659 .procname = "rdma_stat_sq_poll",
70660 .data = &rdma_stat_sq_poll,
70661 - .maxlen = sizeof(atomic_t),
70662 + .maxlen = sizeof(atomic_unchecked_t),
70663 .mode = 0644,
70664 .proc_handler = &read_reset_stat,
70665 },
70666 {
70667 .procname = "rdma_stat_sq_prod",
70668 .data = &rdma_stat_sq_prod,
70669 - .maxlen = sizeof(atomic_t),
70670 + .maxlen = sizeof(atomic_unchecked_t),
70671 .mode = 0644,
70672 .proc_handler = &read_reset_stat,
70673 },
70674 diff -urNp linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
70675 --- linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
70676 +++ linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
70677 @@ -495,7 +495,7 @@ next_sge:
70678 svc_rdma_put_context(ctxt, 0);
70679 goto out;
70680 }
70681 - atomic_inc(&rdma_stat_read);
70682 + atomic_inc_unchecked(&rdma_stat_read);
70683
70684 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
70685 chl_map->ch[ch_no].count -= read_wr.num_sge;
70686 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70687 dto_q);
70688 list_del_init(&ctxt->dto_q);
70689 } else {
70690 - atomic_inc(&rdma_stat_rq_starve);
70691 + atomic_inc_unchecked(&rdma_stat_rq_starve);
70692 clear_bit(XPT_DATA, &xprt->xpt_flags);
70693 ctxt = NULL;
70694 }
70695 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
70696 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
70697 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
70698 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
70699 - atomic_inc(&rdma_stat_recv);
70700 + atomic_inc_unchecked(&rdma_stat_recv);
70701
70702 /* Build up the XDR from the receive buffers. */
70703 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
70704 diff -urNp linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_sendto.c
70705 --- linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
70706 +++ linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
70707 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
70708 write_wr.wr.rdma.remote_addr = to;
70709
70710 /* Post It */
70711 - atomic_inc(&rdma_stat_write);
70712 + atomic_inc_unchecked(&rdma_stat_write);
70713 if (svc_rdma_send(xprt, &write_wr))
70714 goto err;
70715 return 0;
70716 diff -urNp linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_transport.c
70717 --- linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
70718 +++ linux-2.6.32.43/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
70719 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
70720 return;
70721
70722 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
70723 - atomic_inc(&rdma_stat_rq_poll);
70724 + atomic_inc_unchecked(&rdma_stat_rq_poll);
70725
70726 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
70727 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
70728 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
70729 }
70730
70731 if (ctxt)
70732 - atomic_inc(&rdma_stat_rq_prod);
70733 + atomic_inc_unchecked(&rdma_stat_rq_prod);
70734
70735 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
70736 /*
70737 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
70738 return;
70739
70740 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
70741 - atomic_inc(&rdma_stat_sq_poll);
70742 + atomic_inc_unchecked(&rdma_stat_sq_poll);
70743 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
70744 if (wc.status != IB_WC_SUCCESS)
70745 /* Close the transport */
70746 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
70747 }
70748
70749 if (ctxt)
70750 - atomic_inc(&rdma_stat_sq_prod);
70751 + atomic_inc_unchecked(&rdma_stat_sq_prod);
70752 }
70753
70754 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
70755 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
70756 spin_lock_bh(&xprt->sc_lock);
70757 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
70758 spin_unlock_bh(&xprt->sc_lock);
70759 - atomic_inc(&rdma_stat_sq_starve);
70760 + atomic_inc_unchecked(&rdma_stat_sq_starve);
70761
70762 /* See if we can opportunistically reap SQ WR to make room */
70763 sq_cq_reap(xprt);
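The rdma_stat_* conversions above follow a pattern used throughout this patch: counters that are pure statistics and may legitimately wrap are switched from atomic_t to atomic_unchecked_t so that PAX_REFCOUNT's overflow detection does not fire on them. Below is a conceptual sketch of the idea; the _sketch names are placeholders rather than the PaX definitions, but the key property holds: the unchecked type has the same size and layout as atomic_t (which is why the sizeof() change in the sysctl table earlier is purely for consistency), and its operations simply omit the overflow check.

    typedef struct {
        int counter;
    } atomic_unchecked_t_sketch;

    /* wraps silently by design -- no PAX_REFCOUNT overflow trap */
    static inline void atomic_inc_unchecked_sketch(atomic_unchecked_t_sketch *v)
    {
        __sync_fetch_and_add(&v->counter, 1);
    }

    static inline int atomic_read_unchecked_sketch(const atomic_unchecked_t_sketch *v)
    {
        return *(volatile const int *)&v->counter;
    }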
70764 diff -urNp linux-2.6.32.43/net/sysctl_net.c linux-2.6.32.43/net/sysctl_net.c
70765 --- linux-2.6.32.43/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
70766 +++ linux-2.6.32.43/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
70767 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
70768 struct ctl_table *table)
70769 {
70770 /* Allow network administrator to have same access as root. */
70771 - if (capable(CAP_NET_ADMIN)) {
70772 + if (capable_nolog(CAP_NET_ADMIN)) {
70773 int mode = (table->mode >> 6) & 7;
70774 return (mode << 6) | (mode << 3) | mode;
70775 }
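Here capable() becomes capable_nolog(), a grsecurity helper that makes the same capability decision but without the audit/log side effects, since net_ctl_permissions() only probes for CAP_NET_ADMIN to compute sysctl mode bits rather than to gate a privileged action. The user-space mock below is illustrative only (the real helper is kernel code and does not simply wrap capable()); it shows the probe-versus-enforce pattern together with the mode arithmetic from the hunk above.

    #include <stdbool.h>
    #include <stdio.h>

    static bool has_cap(int cap)            /* stand-in for the real capability decision */
    {
        return cap == 0;                    /* pretend only capability 0 is held */
    }

    static bool cap_check(int cap)          /* like capable(): logs the denial */
    {
        bool ok = has_cap(cap);
        if (!ok)
            fprintf(stderr, "audit: capability %d denied\n", cap);
        return ok;
    }

    static bool cap_check_nolog(int cap)    /* like capable_nolog(): silent probe */
    {
        return has_cap(cap);
    }

    int main(void)
    {
        int mode = (0644 >> 6) & 7;         /* owner bits, as in net_ctl_permissions() */

        cap_check(12);                      /* 12 = CAP_NET_ADMIN; this one logs */
        if (cap_check_nolog(12))            /* this probe stays quiet on denial */
            mode = (mode << 6) | (mode << 3) | mode;
        printf("effective mode %o\n", mode);
        return 0;
    }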
70776 diff -urNp linux-2.6.32.43/net/unix/af_unix.c linux-2.6.32.43/net/unix/af_unix.c
70777 --- linux-2.6.32.43/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
70778 +++ linux-2.6.32.43/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
70779 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
70780 err = -ECONNREFUSED;
70781 if (!S_ISSOCK(inode->i_mode))
70782 goto put_fail;
70783 +
70784 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
70785 + err = -EACCES;
70786 + goto put_fail;
70787 + }
70788 +
70789 u = unix_find_socket_byinode(net, inode);
70790 if (!u)
70791 goto put_fail;
70792 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
70793 if (u) {
70794 struct dentry *dentry;
70795 dentry = unix_sk(u)->dentry;
70796 +
70797 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
70798 + err = -EPERM;
70799 + sock_put(u);
70800 + goto fail;
70801 + }
70802 +
70803 if (dentry)
70804 touch_atime(unix_sk(u)->mnt, dentry);
70805 } else
70806 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
70807 err = security_path_mknod(&nd.path, dentry, mode, 0);
70808 if (err)
70809 goto out_mknod_drop_write;
70810 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70811 + err = -EACCES;
70812 + goto out_mknod_drop_write;
70813 + }
70814 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
70815 out_mknod_drop_write:
70816 mnt_drop_write(nd.path.mnt);
70817 if (err)
70818 goto out_mknod_dput;
70819 +
70820 + gr_handle_create(dentry, nd.path.mnt);
70821 +
70822 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
70823 dput(nd.path.dentry);
70824 nd.path.dentry = dentry;
70825 @@ -872,6 +892,10 @@ out_mknod_drop_write:
70826 goto out_unlock;
70827 }
70828
70829 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70830 + sk->sk_peercred.pid = current->pid;
70831 +#endif
70832 +
70833 list = &unix_socket_table[addr->hash];
70834 } else {
70835 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
70836 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
70837 unix_state_lock(s);
70838
70839 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
70840 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70841 + NULL,
70842 +#else
70843 s,
70844 +#endif
70845 atomic_read(&s->sk_refcnt),
70846 0,
70847 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
70848 diff -urNp linux-2.6.32.43/net/wireless/wext.c linux-2.6.32.43/net/wireless/wext.c
70849 --- linux-2.6.32.43/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
70850 +++ linux-2.6.32.43/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
70851 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
70852 */
70853
70854 /* Support for very large requests */
70855 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
70856 - (user_length > descr->max_tokens)) {
70857 + if (user_length > descr->max_tokens) {
70858 /* Allow userspace to GET more than max so
70859 * we can support any size GET requests.
70860 * There is still a limit : -ENOMEM.
70861 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
70862 }
70863 }
70864
70865 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
70866 - /*
70867 - * If this is a GET, but not NOMAX, it means that the extra
70868 - * data is not bounded by userspace, but by max_tokens. Thus
70869 - * set the length to max_tokens. This matches the extra data
70870 - * allocation.
70871 - * The driver should fill it with the number of tokens it
70872 - * provided, and it may check iwp->length rather than having
70873 - * knowledge of max_tokens. If the driver doesn't change the
70874 - * iwp->length, this ioctl just copies back max_token tokens
70875 - * filled with zeroes. Hopefully the driver isn't claiming
70876 - * them to be valid data.
70877 - */
70878 - iwp->length = descr->max_tokens;
70879 - }
70880 -
70881 err = handler(dev, info, (union iwreq_data *) iwp, extra);
70882
70883 iwp->length += essid_compat;
70884 diff -urNp linux-2.6.32.43/net/xfrm/xfrm_policy.c linux-2.6.32.43/net/xfrm/xfrm_policy.c
70885 --- linux-2.6.32.43/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
70886 +++ linux-2.6.32.43/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
70887 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
70888 hlist_add_head(&policy->bydst, chain);
70889 xfrm_pol_hold(policy);
70890 net->xfrm.policy_count[dir]++;
70891 - atomic_inc(&flow_cache_genid);
70892 + atomic_inc_unchecked(&flow_cache_genid);
70893 if (delpol)
70894 __xfrm_policy_unlink(delpol, dir);
70895 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
70896 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
70897 write_unlock_bh(&xfrm_policy_lock);
70898
70899 if (ret && delete) {
70900 - atomic_inc(&flow_cache_genid);
70901 + atomic_inc_unchecked(&flow_cache_genid);
70902 xfrm_policy_kill(ret);
70903 }
70904 return ret;
70905 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
70906 write_unlock_bh(&xfrm_policy_lock);
70907
70908 if (ret && delete) {
70909 - atomic_inc(&flow_cache_genid);
70910 + atomic_inc_unchecked(&flow_cache_genid);
70911 xfrm_policy_kill(ret);
70912 }
70913 return ret;
70914 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
70915 }
70916
70917 }
70918 - atomic_inc(&flow_cache_genid);
70919 + atomic_inc_unchecked(&flow_cache_genid);
70920 out:
70921 write_unlock_bh(&xfrm_policy_lock);
70922 return err;
70923 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
70924 write_unlock_bh(&xfrm_policy_lock);
70925 if (pol) {
70926 if (dir < XFRM_POLICY_MAX)
70927 - atomic_inc(&flow_cache_genid);
70928 + atomic_inc_unchecked(&flow_cache_genid);
70929 xfrm_policy_kill(pol);
70930 return 0;
70931 }
70932 @@ -1477,7 +1477,7 @@ free_dst:
70933 goto out;
70934 }
70935
70936 -static int inline
70937 +static inline int
70938 xfrm_dst_alloc_copy(void **target, void *src, int size)
70939 {
70940 if (!*target) {
70941 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
70942 return 0;
70943 }
70944
70945 -static int inline
70946 +static inline int
70947 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
70948 {
70949 #ifdef CONFIG_XFRM_SUB_POLICY
70950 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
70951 #endif
70952 }
70953
70954 -static int inline
70955 +static inline int
70956 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
70957 {
70958 #ifdef CONFIG_XFRM_SUB_POLICY
70959 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
70960 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
70961
70962 restart:
70963 - genid = atomic_read(&flow_cache_genid);
70964 + genid = atomic_read_unchecked(&flow_cache_genid);
70965 policy = NULL;
70966 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
70967 pols[pi] = NULL;
70968 @@ -1680,7 +1680,7 @@ restart:
70969 goto error;
70970 }
70971 if (nx == -EAGAIN ||
70972 - genid != atomic_read(&flow_cache_genid)) {
70973 + genid != atomic_read_unchecked(&flow_cache_genid)) {
70974 xfrm_pols_put(pols, npols);
70975 goto restart;
70976 }
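Besides giving flow_cache_genid the same unchecked-counter treatment as earlier, this diff reorders "static int inline" into "static inline int". Both spellings are legal C, but gcc with -Wextra (-Wold-style-declaration) complains when inline does not precede the return type, so the reordering is a warning cleanup with no behavioural change. A minimal illustration:

    #include <stdio.h>

    static int inline old_style(int x)   /* gcc -Wextra: 'inline' is not at beginning of declaration */
    {
        return x + 1;
    }

    static inline int new_style(int x)   /* conventional ordering, no warning */
    {
        return x + 1;
    }

    int main(void)
    {
        printf("%d %d\n", old_style(1), new_style(1));
        return 0;
    }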
70977 diff -urNp linux-2.6.32.43/net/xfrm/xfrm_user.c linux-2.6.32.43/net/xfrm/xfrm_user.c
70978 --- linux-2.6.32.43/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
70979 +++ linux-2.6.32.43/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
70980 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
70981 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70982 int i;
70983
70984 + pax_track_stack();
70985 +
70986 if (xp->xfrm_nr == 0)
70987 return 0;
70988
70989 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
70990 int err;
70991 int n = 0;
70992
70993 + pax_track_stack();
70994 +
70995 if (attrs[XFRMA_MIGRATE] == NULL)
70996 return -EINVAL;
70997
70998 diff -urNp linux-2.6.32.43/samples/kobject/kset-example.c linux-2.6.32.43/samples/kobject/kset-example.c
70999 --- linux-2.6.32.43/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
71000 +++ linux-2.6.32.43/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
71001 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
71002 }
71003
71004 /* Our custom sysfs_ops that we will associate with our ktype later on */
71005 -static struct sysfs_ops foo_sysfs_ops = {
71006 +static const struct sysfs_ops foo_sysfs_ops = {
71007 .show = foo_attr_show,
71008 .store = foo_attr_store,
71009 };
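Making foo_sysfs_ops const is part of the patch's wider constification effort: a structure of function pointers declared const is emitted into a read-only section, so its callbacks cannot be overwritten at runtime and no longer appear as writable-data-to-text references (see the modpost change later in this patch). A small user-space sketch of the effect, with made-up names:

    #include <stdio.h>

    struct ops_sketch {
        int (*show)(void);
        int (*store)(int);
    };

    static int my_show(void)   { return 42; }
    static int my_store(int v) { return v; }

    static const struct ops_sketch foo_ops = {  /* placed in .rodata */
        .show  = my_show,
        .store = my_store,
    };

    int main(void)
    {
        printf("%d %d\n", foo_ops.show(), foo_ops.store(7));
        /* foo_ops.show = NULL;  -- rejected: assignment of member in read-only object */
        return 0;
    }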
71010 diff -urNp linux-2.6.32.43/scripts/basic/fixdep.c linux-2.6.32.43/scripts/basic/fixdep.c
71011 --- linux-2.6.32.43/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
71012 +++ linux-2.6.32.43/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
71013 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
71014
71015 static void parse_config_file(char *map, size_t len)
71016 {
71017 - int *end = (int *) (map + len);
71018 + unsigned int *end = (unsigned int *) (map + len);
71019 /* start at +1, so that p can never be < map */
71020 - int *m = (int *) map + 1;
71021 + unsigned int *m = (unsigned int *) map + 1;
71022 char *p, *q;
71023
71024 for (; m < end; m++) {
71025 @@ -371,7 +371,7 @@ static void print_deps(void)
71026 static void traps(void)
71027 {
71028 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
71029 - int *p = (int *)test;
71030 + unsigned int *p = (unsigned int *)test;
71031
71032 if (*p != INT_CONF) {
71033 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
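The fixdep changes swap signed int for unsigned int where the mmap'd config file is scanned four bytes at a time. Once raw bytes are reinterpreted as a machine word, a word with its top bit set becomes negative as int, so relational comparisons and shifts can behave unexpectedly; unsigned arithmetic keeps them well defined. A hedged illustration (conversion of an out-of-range value to int is implementation-defined, though on the usual two's-complement targets it yields a negative number):

    #include <stdio.h>

    int main(void)
    {
        unsigned int u = 0xC0DE0000u;   /* top bit set */
        int s = (int)u;                 /* implementation-defined; typically negative */

        printf("unsigned: %#x > 0x1000 -> %d\n", u, u > 0x1000u);  /* 1 */
        printf("signed  : %d > 0x1000 -> %d\n", s, s > 0x1000);    /* typically 0 */
        return 0;
    }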
71034 diff -urNp linux-2.6.32.43/scripts/Makefile.build linux-2.6.32.43/scripts/Makefile.build
71035 --- linux-2.6.32.43/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
71036 +++ linux-2.6.32.43/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
71037 @@ -59,7 +59,7 @@ endif
71038 endif
71039
71040 # Do not include host rules unless needed
71041 -ifneq ($(hostprogs-y)$(hostprogs-m),)
71042 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
71043 include scripts/Makefile.host
71044 endif
71045
71046 diff -urNp linux-2.6.32.43/scripts/Makefile.clean linux-2.6.32.43/scripts/Makefile.clean
71047 --- linux-2.6.32.43/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
71048 +++ linux-2.6.32.43/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
71049 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
71050 __clean-files := $(extra-y) $(always) \
71051 $(targets) $(clean-files) \
71052 $(host-progs) \
71053 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
71054 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
71055 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
71056
71057 # as clean-files is given relative to the current directory, this adds
71058 # a $(obj) prefix, except for absolute paths
71059 diff -urNp linux-2.6.32.43/scripts/Makefile.host linux-2.6.32.43/scripts/Makefile.host
71060 --- linux-2.6.32.43/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
71061 +++ linux-2.6.32.43/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
71062 @@ -31,6 +31,7 @@
71063 # Note: Shared libraries consisting of C++ files are not supported
71064
71065 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
71066 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
71067
71068 # C code
71069 # Executables compiled from a single .c file
71070 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
71071 # Shared libaries (only .c supported)
71072 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
71073 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
71074 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
71075 # Remove .so files from "xxx-objs"
71076 host-cobjs := $(filter-out %.so,$(host-cobjs))
71077
71078 diff -urNp linux-2.6.32.43/scripts/mod/file2alias.c linux-2.6.32.43/scripts/mod/file2alias.c
71079 --- linux-2.6.32.43/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
71080 +++ linux-2.6.32.43/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
71081 @@ -72,7 +72,7 @@ static void device_id_check(const char *
71082 unsigned long size, unsigned long id_size,
71083 void *symval)
71084 {
71085 - int i;
71086 + unsigned int i;
71087
71088 if (size % id_size || size < id_size) {
71089 if (cross_build != 0)
71090 @@ -102,7 +102,7 @@ static void device_id_check(const char *
71091 /* USB is special because the bcdDevice can be matched against a numeric range */
71092 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
71093 static void do_usb_entry(struct usb_device_id *id,
71094 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
71095 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
71096 unsigned char range_lo, unsigned char range_hi,
71097 struct module *mod)
71098 {
71099 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
71100 for (i = 0; i < count; i++) {
71101 const char *id = (char *)devs[i].id;
71102 char acpi_id[sizeof(devs[0].id)];
71103 - int j;
71104 + unsigned int j;
71105
71106 buf_printf(&mod->dev_table_buf,
71107 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
71108 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
71109
71110 for (j = 0; j < PNP_MAX_DEVICES; j++) {
71111 const char *id = (char *)card->devs[j].id;
71112 - int i2, j2;
71113 + unsigned int i2, j2;
71114 int dup = 0;
71115
71116 if (!id[0])
71117 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
71118 /* add an individual alias for every device entry */
71119 if (!dup) {
71120 char acpi_id[sizeof(card->devs[0].id)];
71121 - int k;
71122 + unsigned int k;
71123
71124 buf_printf(&mod->dev_table_buf,
71125 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
71126 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
71127 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
71128 char *alias)
71129 {
71130 - int i, j;
71131 + unsigned int i, j;
71132
71133 sprintf(alias, "dmi*");
71134
71135 diff -urNp linux-2.6.32.43/scripts/mod/modpost.c linux-2.6.32.43/scripts/mod/modpost.c
71136 --- linux-2.6.32.43/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
71137 +++ linux-2.6.32.43/scripts/mod/modpost.c 2011-07-06 19:53:33.000000000 -0400
71138 @@ -835,6 +835,7 @@ enum mismatch {
71139 INIT_TO_EXIT,
71140 EXIT_TO_INIT,
71141 EXPORT_TO_INIT_EXIT,
71142 + DATA_TO_TEXT
71143 };
71144
71145 struct sectioncheck {
71146 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
71147 .fromsec = { "__ksymtab*", NULL },
71148 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
71149 .mismatch = EXPORT_TO_INIT_EXIT
71150 +},
71151 +/* Do not reference code from writable data */
71152 +{
71153 + .fromsec = { DATA_SECTIONS, NULL },
71154 + .tosec = { TEXT_SECTIONS, NULL },
71155 + .mismatch = DATA_TO_TEXT
71156 }
71157 };
71158
71159 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
71160 continue;
71161 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
71162 continue;
71163 - if (sym->st_value == addr)
71164 - return sym;
71165 /* Find a symbol nearby - addr are maybe negative */
71166 d = sym->st_value - addr;
71167 + if (d == 0)
71168 + return sym;
71169 if (d < 0)
71170 d = addr - sym->st_value;
71171 if (d < distance) {
71172 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
71173 "Fix this by removing the %sannotation of %s "
71174 "or drop the export.\n",
71175 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
71176 + case DATA_TO_TEXT:
71177 +/*
71178 + fprintf(stderr,
71179 + "The variable %s references\n"
71180 + "the %s %s%s%s\n",
71181 + fromsym, to, sec2annotation(tosec), tosym, to_p);
71182 +*/
71183 + break;
71184 case NO_MISMATCH:
71185 /* To get warnings on missing members */
71186 break;
71187 @@ -1495,7 +1510,7 @@ static void section_rel(const char *modn
71188 static void check_sec_ref(struct module *mod, const char *modname,
71189 struct elf_info *elf)
71190 {
71191 - int i;
71192 + unsigned int i;
71193 Elf_Shdr *sechdrs = elf->sechdrs;
71194
71195 /* Walk through all sections */
71196 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
71197 va_end(ap);
71198 }
71199
71200 -void buf_write(struct buffer *buf, const char *s, int len)
71201 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
71202 {
71203 if (buf->size - buf->pos < len) {
71204 buf->size += len + SZ;
71205 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
71206 if (fstat(fileno(file), &st) < 0)
71207 goto close_write;
71208
71209 - if (st.st_size != b->pos)
71210 + if (st.st_size != (off_t)b->pos)
71211 goto close_write;
71212
71213 tmp = NOFAIL(malloc(b->pos));
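The new sectioncheck entry teaches modpost to flag references from writable data sections into kernel text. At the C level that is exactly a non-const object holding function pointers, which is why so many ops structures are constified elsewhere in this patch; note the report itself is commented out in the hunk above, so the DATA_TO_TEXT case stays silent for now. A compilable sketch of the two cases:

    #include <stdio.h>

    static void helper(void) { }

    /* ends up in .data: writable data referencing text -- what DATA_TO_TEXT targets */
    static void (*writable_hook)(void) = helper;

    /* ends up in .rodata: same reference, but no longer from a writable section */
    static void (*const readonly_hook)(void) = helper;

    int main(void)
    {
        writable_hook();
        readonly_hook();
        printf("hooks called\n");
        return 0;
    }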
71214 diff -urNp linux-2.6.32.43/scripts/mod/modpost.h linux-2.6.32.43/scripts/mod/modpost.h
71215 --- linux-2.6.32.43/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
71216 +++ linux-2.6.32.43/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
71217 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
71218
71219 struct buffer {
71220 char *p;
71221 - int pos;
71222 - int size;
71223 + unsigned int pos;
71224 + unsigned int size;
71225 };
71226
71227 void __attribute__((format(printf, 2, 3)))
71228 buf_printf(struct buffer *buf, const char *fmt, ...);
71229
71230 void
71231 -buf_write(struct buffer *buf, const char *s, int len);
71232 +buf_write(struct buffer *buf, const char *s, unsigned int len);
71233
71234 struct module {
71235 struct module *next;
71236 diff -urNp linux-2.6.32.43/scripts/mod/sumversion.c linux-2.6.32.43/scripts/mod/sumversion.c
71237 --- linux-2.6.32.43/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
71238 +++ linux-2.6.32.43/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
71239 @@ -455,7 +455,7 @@ static void write_version(const char *fi
71240 goto out;
71241 }
71242
71243 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
71244 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
71245 warn("writing sum in %s failed: %s\n",
71246 filename, strerror(errno));
71247 goto out;
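The (ssize_t) cast fixes a signed/unsigned mismatch: write() returns ssize_t while strlen() returns size_t, and comparing the two converts the signed return value to unsigned (-Wsign-compare). Comparing in the signed domain keeps the -1 error return behaving as intended. A small self-contained version of the same pattern:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *sum = "deadbeef";
        ssize_t ret = write(STDOUT_FILENO, sum, strlen(sum) + 1);

        if (ret != (ssize_t)(strlen(sum) + 1))   /* signed comparison, no warning */
            fprintf(stderr, "short or failed write: %zd\n", ret);
        return 0;
    }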
71248 diff -urNp linux-2.6.32.43/scripts/pnmtologo.c linux-2.6.32.43/scripts/pnmtologo.c
71249 --- linux-2.6.32.43/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
71250 +++ linux-2.6.32.43/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
71251 @@ -237,14 +237,14 @@ static void write_header(void)
71252 fprintf(out, " * Linux logo %s\n", logoname);
71253 fputs(" */\n\n", out);
71254 fputs("#include <linux/linux_logo.h>\n\n", out);
71255 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
71256 + fprintf(out, "static unsigned char %s_data[] = {\n",
71257 logoname);
71258 }
71259
71260 static void write_footer(void)
71261 {
71262 fputs("\n};\n\n", out);
71263 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
71264 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
71265 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
71266 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
71267 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
71268 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
71269 fputs("\n};\n\n", out);
71270
71271 /* write logo clut */
71272 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
71273 + fprintf(out, "static unsigned char %s_clut[] = {\n",
71274 logoname);
71275 write_hex_cnt = 0;
71276 for (i = 0; i < logo_clutsize; i++) {
71277 diff -urNp linux-2.6.32.43/scripts/tags.sh linux-2.6.32.43/scripts/tags.sh
71278 --- linux-2.6.32.43/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
71279 +++ linux-2.6.32.43/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
71280 @@ -93,6 +93,11 @@ docscope()
71281 cscope -b -f cscope.out
71282 }
71283
71284 +dogtags()
71285 +{
71286 + all_sources | gtags -f -
71287 +}
71288 +
71289 exuberant()
71290 {
71291 all_sources | xargs $1 -a \
71292 @@ -164,6 +169,10 @@ case "$1" in
71293 docscope
71294 ;;
71295
71296 + "gtags")
71297 + dogtags
71298 + ;;
71299 +
71300 "tags")
71301 rm -f tags
71302 xtags ctags
71303 diff -urNp linux-2.6.32.43/security/capability.c linux-2.6.32.43/security/capability.c
71304 --- linux-2.6.32.43/security/capability.c 2011-03-27 14:31:47.000000000 -0400
71305 +++ linux-2.6.32.43/security/capability.c 2011-04-17 15:56:46.000000000 -0400
71306 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
71307 }
71308 #endif /* CONFIG_AUDIT */
71309
71310 -struct security_operations default_security_ops = {
71311 +struct security_operations default_security_ops __read_only = {
71312 .name = "default",
71313 };
71314
71315 diff -urNp linux-2.6.32.43/security/commoncap.c linux-2.6.32.43/security/commoncap.c
71316 --- linux-2.6.32.43/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
71317 +++ linux-2.6.32.43/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
71318 @@ -27,7 +27,7 @@
71319 #include <linux/sched.h>
71320 #include <linux/prctl.h>
71321 #include <linux/securebits.h>
71322 -
71323 +#include <net/sock.h>
71324 /*
71325 * If a non-root user executes a setuid-root binary in
71326 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
71327 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
71328 }
71329 }
71330
71331 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
71332 +
71333 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
71334 {
71335 - NETLINK_CB(skb).eff_cap = current_cap();
71336 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
71337 return 0;
71338 }
71339
71340 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
71341 {
71342 const struct cred *cred = current_cred();
71343
71344 + if (gr_acl_enable_at_secure())
71345 + return 1;
71346 +
71347 if (cred->uid != 0) {
71348 if (bprm->cap_effective)
71349 return 1;
71350 diff -urNp linux-2.6.32.43/security/integrity/ima/ima_api.c linux-2.6.32.43/security/integrity/ima/ima_api.c
71351 --- linux-2.6.32.43/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
71352 +++ linux-2.6.32.43/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
71353 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
71354 int result;
71355
71356 /* can overflow, only indicator */
71357 - atomic_long_inc(&ima_htable.violations);
71358 + atomic_long_inc_unchecked(&ima_htable.violations);
71359
71360 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
71361 if (!entry) {
71362 diff -urNp linux-2.6.32.43/security/integrity/ima/ima_fs.c linux-2.6.32.43/security/integrity/ima/ima_fs.c
71363 --- linux-2.6.32.43/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
71364 +++ linux-2.6.32.43/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
71365 @@ -27,12 +27,12 @@
71366 static int valid_policy = 1;
71367 #define TMPBUFLEN 12
71368 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
71369 - loff_t *ppos, atomic_long_t *val)
71370 + loff_t *ppos, atomic_long_unchecked_t *val)
71371 {
71372 char tmpbuf[TMPBUFLEN];
71373 ssize_t len;
71374
71375 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
71376 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
71377 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
71378 }
71379
71380 diff -urNp linux-2.6.32.43/security/integrity/ima/ima.h linux-2.6.32.43/security/integrity/ima/ima.h
71381 --- linux-2.6.32.43/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
71382 +++ linux-2.6.32.43/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
71383 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
71384 extern spinlock_t ima_queue_lock;
71385
71386 struct ima_h_table {
71387 - atomic_long_t len; /* number of stored measurements in the list */
71388 - atomic_long_t violations;
71389 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
71390 + atomic_long_unchecked_t violations;
71391 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
71392 };
71393 extern struct ima_h_table ima_htable;
71394 diff -urNp linux-2.6.32.43/security/integrity/ima/ima_queue.c linux-2.6.32.43/security/integrity/ima/ima_queue.c
71395 --- linux-2.6.32.43/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
71396 +++ linux-2.6.32.43/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
71397 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
71398 INIT_LIST_HEAD(&qe->later);
71399 list_add_tail_rcu(&qe->later, &ima_measurements);
71400
71401 - atomic_long_inc(&ima_htable.len);
71402 + atomic_long_inc_unchecked(&ima_htable.len);
71403 key = ima_hash_key(entry->digest);
71404 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
71405 return 0;
71406 diff -urNp linux-2.6.32.43/security/Kconfig linux-2.6.32.43/security/Kconfig
71407 --- linux-2.6.32.43/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
71408 +++ linux-2.6.32.43/security/Kconfig 2011-07-06 19:58:11.000000000 -0400
71409 @@ -4,6 +4,555 @@
71410
71411 menu "Security options"
71412
71413 +source grsecurity/Kconfig
71414 +
71415 +menu "PaX"
71416 +
71417 + config ARCH_TRACK_EXEC_LIMIT
71418 + bool
71419 +
71420 + config PAX_PER_CPU_PGD
71421 + bool
71422 +
71423 + config TASK_SIZE_MAX_SHIFT
71424 + int
71425 + depends on X86_64
71426 + default 47 if !PAX_PER_CPU_PGD
71427 + default 42 if PAX_PER_CPU_PGD
71428 +
71429 + config PAX_ENABLE_PAE
71430 + bool
71431 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
71432 +
71433 +config PAX
71434 + bool "Enable various PaX features"
71435 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
71436 + help
71437 + This allows you to enable various PaX features. PaX adds
71438 + intrusion prevention mechanisms to the kernel that reduce
71439 + the risks posed by exploitable memory corruption bugs.
71440 +
71441 +menu "PaX Control"
71442 + depends on PAX
71443 +
71444 +config PAX_SOFTMODE
71445 + bool 'Support soft mode'
71446 + select PAX_PT_PAX_FLAGS
71447 + help
71448 + Enabling this option will allow you to run PaX in soft mode, that
71449 + is, PaX features will not be enforced by default, only on executables
71450 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
71451 + is the only way to mark executables for soft mode use.
71452 +
71453 + Soft mode can be activated by using the "pax_softmode=1" kernel command
71454 + line option on boot. Furthermore you can control various PaX features
71455 + at runtime via the entries in /proc/sys/kernel/pax.
71456 +
71457 +config PAX_EI_PAX
71458 + bool 'Use legacy ELF header marking'
71459 + help
71460 + Enabling this option will allow you to control PaX features on
71461 + a per executable basis via the 'chpax' utility available at
71462 + http://pax.grsecurity.net/. The control flags will be read from
71463 + an otherwise reserved part of the ELF header. This marking has
71464 + numerous drawbacks (no support for soft-mode, toolchain does not
71465 + know about the non-standard use of the ELF header) therefore it
71466 + has been deprecated in favour of PT_PAX_FLAGS support.
71467 +
71468 + Note that if you enable PT_PAX_FLAGS marking support as well,
71469 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
71470 +
71471 +config PAX_PT_PAX_FLAGS
71472 + bool 'Use ELF program header marking'
71473 + help
71474 + Enabling this option will allow you to control PaX features on
71475 + a per executable basis via the 'paxctl' utility available at
71476 + http://pax.grsecurity.net/. The control flags will be read from
71477 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
71478 + has the benefits of supporting both soft mode and being fully
71479 + integrated into the toolchain (the binutils patch is available
71480 + from http://pax.grsecurity.net).
71481 +
71482 + If your toolchain does not support PT_PAX_FLAGS markings,
71483 + you can create one in most cases with 'paxctl -C'.
71484 +
71485 + Note that if you enable the legacy EI_PAX marking support as well,
71486 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
71487 +
71488 +choice
71489 + prompt 'MAC system integration'
71490 + default PAX_HAVE_ACL_FLAGS
71491 + help
71492 + Mandatory Access Control systems have the option of controlling
71493 + PaX flags on a per executable basis, choose the method supported
71494 + by your particular system.
71495 +
71496 + - "none": if your MAC system does not interact with PaX,
71497 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
71498 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
71499 +
71500 + NOTE: this option is for developers/integrators only.
71501 +
71502 + config PAX_NO_ACL_FLAGS
71503 + bool 'none'
71504 +
71505 + config PAX_HAVE_ACL_FLAGS
71506 + bool 'direct'
71507 +
71508 + config PAX_HOOK_ACL_FLAGS
71509 + bool 'hook'
71510 +endchoice
71511 +
71512 +endmenu
71513 +
71514 +menu "Non-executable pages"
71515 + depends on PAX
71516 +
71517 +config PAX_NOEXEC
71518 + bool "Enforce non-executable pages"
71519 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
71520 + help
71521 + By design some architectures do not allow for protecting memory
71522 + pages against execution or even if they do, Linux does not make
71523 + use of this feature. In practice this means that if a page is
71524 + readable (such as the stack or heap) it is also executable.
71525 +
71526 + There is a well known exploit technique that makes use of this
71527 + fact and a common programming mistake where an attacker can
71528 + introduce code of his choice somewhere in the attacked program's
71529 + memory (typically the stack or the heap) and then execute it.
71530 +
71531 + If the attacked program was running with different (typically
71532 + higher) privileges than that of the attacker, then he can elevate
71533 + his own privilege level (e.g. get a root shell, write to files for
71534 + which he does not have write access to, etc).
71535 +
71536 + Enabling this option will let you choose from various features
71537 + that prevent the injection and execution of 'foreign' code in
71538 + a program.
71539 +
71540 + This will also break programs that rely on the old behaviour and
71541 + expect that dynamically allocated memory via the malloc() family
71542 + of functions is executable (which it is not). Notable examples
71543 + are the XFree86 4.x server, the java runtime and wine.
71544 +
71545 +config PAX_PAGEEXEC
71546 + bool "Paging based non-executable pages"
71547 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
71548 + select S390_SWITCH_AMODE if S390
71549 + select S390_EXEC_PROTECT if S390
71550 + select ARCH_TRACK_EXEC_LIMIT if X86_32
71551 + help
71552 + This implementation is based on the paging feature of the CPU.
71553 + On i386 without hardware non-executable bit support there is a
71554 + variable but usually low performance impact, however on Intel's
71555 + P4 core based CPUs it is very high so you should not enable this
71556 + for kernels meant to be used on such CPUs.
71557 +
71558 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
71559 + with hardware non-executable bit support there is no performance
71560 + impact, on ppc the impact is negligible.
71561 +
71562 + Note that several architectures require various emulations due to
71563 + badly designed userland ABIs, this will cause a performance impact
71564 + but will disappear as soon as userland is fixed. For example, ppc
71565 + userland MUST have been built with secure-plt by a recent toolchain.
71566 +
71567 +config PAX_SEGMEXEC
71568 + bool "Segmentation based non-executable pages"
71569 + depends on PAX_NOEXEC && X86_32
71570 + help
71571 + This implementation is based on the segmentation feature of the
71572 + CPU and has a very small performance impact, however applications
71573 + will be limited to a 1.5 GB address space instead of the normal
71574 + 3 GB.
71575 +
71576 +config PAX_EMUTRAMP
71577 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
71578 + default y if PARISC
71579 + help
71580 + There are some programs and libraries that for one reason or
71581 + another attempt to execute special small code snippets from
71582 + non-executable memory pages. Most notable examples are the
71583 + signal handler return code generated by the kernel itself and
71584 + the GCC trampolines.
71585 +
71586 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
71587 + such programs will no longer work under your kernel.
71588 +
71589 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
71590 + utilities to enable trampoline emulation for the affected programs
71591 + yet still have the protection provided by the non-executable pages.
71592 +
71593 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
71594 + your system will not even boot.
71595 +
71596 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
71597 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
71598 + for the affected files.
71599 +
71600 + NOTE: enabling this feature *may* open up a loophole in the
71601 + protection provided by non-executable pages that an attacker
71602 + could abuse. Therefore the best solution is to not have any
71603 + files on your system that would require this option. This can
71604 + be achieved by not using libc5 (which relies on the kernel
71605 + signal handler return code) and not using or rewriting programs
71606 + that make use of the nested function implementation of GCC.
71607 + Skilled users can just fix GCC itself so that it implements
71608 + nested function calls in a way that does not interfere with PaX.
71609 +
71610 +config PAX_EMUSIGRT
71611 + bool "Automatically emulate sigreturn trampolines"
71612 + depends on PAX_EMUTRAMP && PARISC
71613 + default y
71614 + help
71615 + Enabling this option will have the kernel automatically detect
71616 + and emulate signal return trampolines executing on the stack
71617 + that would otherwise lead to task termination.
71618 +
71619 + This solution is intended as a temporary one for users with
71620 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
71621 + Modula-3 runtime, etc) or executables linked to such, basically
71622 + everything that does not specify its own SA_RESTORER function in
71623 + normal executable memory like glibc 2.1+ does.
71624 +
71625 + On parisc you MUST enable this option, otherwise your system will
71626 + not even boot.
71627 +
71628 + NOTE: this feature cannot be disabled on a per executable basis
71629 + and since it *does* open up a loophole in the protection provided
71630 + by non-executable pages, the best solution is to not have any
71631 + files on your system that would require this option.
71632 +
71633 +config PAX_MPROTECT
71634 + bool "Restrict mprotect()"
71635 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
71636 + help
71637 + Enabling this option will prevent programs from
71638 + - changing the executable status of memory pages that were
71639 + not originally created as executable,
71640 + - making read-only executable pages writable again,
71641 + - creating executable pages from anonymous memory,
71642 + - making read-only-after-relocations (RELRO) data pages writable again.
71643 +
71644 + You should say Y here to complete the protection provided by
71645 + the enforcement of non-executable pages.
71646 +
71647 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71648 + this feature on a per file basis.
71649 +
71650 +config PAX_MPROTECT_COMPAT
71651 + bool "Use legacy/compat protection demoting (read help)"
71652 + depends on PAX_MPROTECT
71653 + default n
71654 + help
71655 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
71656 + by sending the proper error code to the application. For some broken
71657 + userland, this can cause problems with Python or other applications. The
71658 + current implementation however allows for applications like clamav to
71659 + detect if JIT compilation/execution is allowed and to fall back gracefully
71660 + to an interpreter-based mode if it does not. While we encourage everyone
71661 + to use the current implementation as-is and push upstream to fix broken
71662 + userland (note that the RWX logging option can assist with this), in some
71663 + environments this may not be possible. Having to disable MPROTECT
71664 + completely on certain binaries reduces the security benefit of PaX,
71665 + so this option is provided for those environments to revert to the old
71666 + behavior.
71667 +
71668 +config PAX_ELFRELOCS
71669 + bool "Allow ELF text relocations (read help)"
71670 + depends on PAX_MPROTECT
71671 + default n
71672 + help
71673 + Non-executable pages and mprotect() restrictions are effective
71674 + in preventing the introduction of new executable code into an
71675 + attacked task's address space. There remain only two venues
71676 + for this kind of attack: if the attacker can execute already
71677 + existing code in the attacked task then he can either have it
71678 + create and mmap() a file containing his code or have it mmap()
71679 + an already existing ELF library that does not have position
71680 + independent code in it and use mprotect() on it to make it
71681 + writable and copy his code there. While protecting against
71682 + the former approach is beyond PaX, the latter can be prevented
71683 + by having only PIC ELF libraries on one's system (which do not
71684 + need to relocate their code). If you are sure this is your case,
71685 + as is the case with all modern Linux distributions, then leave
71686 + this option disabled. You should say 'n' here.
71687 +
71688 +config PAX_ETEXECRELOCS
71689 + bool "Allow ELF ET_EXEC text relocations"
71690 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
71691 + select PAX_ELFRELOCS
71692 + default y
71693 + help
71694 + On some architectures there are incorrectly created applications
71695 + that require text relocations and would not work without enabling
71696 + this option. If you are an alpha, ia64 or parisc user, you should
71697 + enable this option and disable it once you have made sure that
71698 + none of your applications need it.
71699 +
71700 +config PAX_EMUPLT
71701 + bool "Automatically emulate ELF PLT"
71702 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
71703 + default y
71704 + help
71705 + Enabling this option will have the kernel automatically detect
71706 + and emulate the Procedure Linkage Table entries in ELF files.
71707 + On some architectures such entries are in writable memory, and
71708 + become non-executable leading to task termination. Therefore
71709 + it is mandatory that you enable this option on alpha, parisc,
71710 + sparc and sparc64, otherwise your system would not even boot.
71711 +
71712 + NOTE: this feature *does* open up a loophole in the protection
71713 + provided by the non-executable pages, therefore the proper
71714 + solution is to modify the toolchain to produce a PLT that does
71715 + not need to be writable.
71716 +
71717 +config PAX_DLRESOLVE
71718 + bool 'Emulate old glibc resolver stub'
71719 + depends on PAX_EMUPLT && SPARC
71720 + default n
71721 + help
71722 + This option is needed if userland has an old glibc (before 2.4)
71723 + that puts a 'save' instruction into the runtime generated resolver
71724 + stub that needs special emulation.
71725 +
71726 +config PAX_KERNEXEC
71727 + bool "Enforce non-executable kernel pages"
71728 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
71729 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
71730 + help
71731 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
71732 + that is, enabling this option will make it harder to inject
71733 + and execute 'foreign' code in kernel memory itself.
71734 +
71735 + Note that on x86_64 kernels there is a known regression when
71736 + this feature and KVM/VMX are both enabled in the host kernel.
71737 +
71738 +config PAX_KERNEXEC_MODULE_TEXT
71739 + int "Minimum amount of memory reserved for module code"
71740 + default "4"
71741 + depends on PAX_KERNEXEC && X86_32 && MODULES
71742 + help
71743 + Due to implementation details the kernel must reserve a fixed
71744 + amount of memory for module code at compile time that cannot be
71745 + changed at runtime. Here you can specify the minimum amount
71746 + in MB that will be reserved. Due to the same implementation
71747 + details this size will always be rounded up to the next 2/4 MB
71748 + boundary (depends on PAE) so the actually available memory for
71749 + module code will usually be more than this minimum.
71750 +
71751 + The default 4 MB should be enough for most users but if you have
71752 + an excessive number of modules (e.g., most distribution configs
71753 + compile many drivers as modules) or use huge modules such as
71754 + nvidia's kernel driver, you will need to adjust this amount.
71755 + A good rule of thumb is to look at your currently loaded kernel
71756 + modules and add up their sizes.
71757 +
71758 +endmenu
71759 +
71760 +menu "Address Space Layout Randomization"
71761 + depends on PAX
71762 +
71763 +config PAX_ASLR
71764 + bool "Address Space Layout Randomization"
71765 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
71766 + help
71767 + Many if not most exploit techniques rely on the knowledge of
71768 + certain addresses in the attacked program. The following options
71769 + will allow the kernel to apply a certain amount of randomization
71770 + to specific parts of the program thereby forcing an attacker to
71771 + guess them in most cases. Any failed guess will most likely crash
71772 + the attacked program which allows the kernel to detect such attempts
71773 + and react on them. PaX itself provides no reaction mechanisms,
71774 + instead it is strongly encouraged that you make use of Nergal's
71775 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
71776 + (http://www.grsecurity.net/) built-in crash detection features or
71777 + develop one yourself.
71778 +
71779 + By saying Y here you can choose to randomize the following areas:
71780 + - top of the task's kernel stack
71781 + - top of the task's userland stack
71782 + - base address for mmap() requests that do not specify one
71783 + (this includes all libraries)
71784 + - base address of the main executable
71785 +
71786 + It is strongly recommended to say Y here as address space layout
71787 + randomization has negligible impact on performance yet it provides
71788 + a very effective protection.
71789 +
71790 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71791 + this feature on a per file basis.
71792 +
71793 +config PAX_RANDKSTACK
71794 + bool "Randomize kernel stack base"
71795 + depends on PAX_ASLR && X86_TSC && X86
71796 + help
71797 + By saying Y here the kernel will randomize every task's kernel
71798 + stack on every system call. This will not only force an attacker
71799 + to guess it but also prevent him from making use of possible
71800 + leaked information about it.
71801 +
71802 + Since the kernel stack is a rather scarce resource, randomization
71803 + may cause unexpected stack overflows, therefore you should very
71804 + carefully test your system. Note that once enabled in the kernel
71805 + configuration, this feature cannot be disabled on a per file basis.
71806 +
71807 +config PAX_RANDUSTACK
71808 + bool "Randomize user stack base"
71809 + depends on PAX_ASLR
71810 + help
71811 + By saying Y here the kernel will randomize every task's userland
71812 + stack. The randomization is done in two steps where the second
71813 + one may apply a big amount of shift to the top of the stack and
71814 + cause problems for programs that want to use lots of memory (more
71815 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
71816 + For this reason the second step can be controlled by 'chpax' or
71817 + 'paxctl' on a per file basis.
71818 +
71819 +config PAX_RANDMMAP
71820 + bool "Randomize mmap() base"
71821 + depends on PAX_ASLR
71822 + help
71823 + By saying Y here the kernel will use a randomized base address for
71824 + mmap() requests that do not specify one themselves. As a result
71825 + all dynamically loaded libraries will appear at random addresses
71826 + and therefore be harder to exploit by a technique where an attacker
71827 + attempts to execute library code for his purposes (e.g. spawn a
71828 + shell from an exploited program that is running at an elevated
71829 + privilege level).
71830 +
71831 + Furthermore, if a program is relinked as a dynamic ELF file, its
71832 + base address will be randomized as well, completing the full
71833 + randomization of the address space layout. Attacking such programs
71834 + becomes a guess game. You can find an example of doing this at
71835 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
71836 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
71837 +
71838 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
71839 + feature on a per file basis.
71840 +
71841 +endmenu
71842 +
71843 +menu "Miscellaneous hardening features"
71844 +
71845 +config PAX_MEMORY_SANITIZE
71846 + bool "Sanitize all freed memory"
71847 + help
71848 + By saying Y here the kernel will erase memory pages as soon as they
71849 + are freed. This in turn reduces the lifetime of data stored in the
71850 + pages, making it less likely that sensitive information such as
71851 + passwords, cryptographic secrets, etc stay in memory for too long.
71852 +
71853 + This is especially useful for programs whose runtime is short, long
71854 + lived processes and the kernel itself benefit from this as long as
71855 + they operate on whole memory pages and ensure timely freeing of pages
71856 + that may hold sensitive information.
71857 +
71858 + The tradeoff is performance impact, on a single CPU system kernel
71859 + compilation sees a 3% slowdown, other systems and workloads may vary
71860 + and you are advised to test this feature on your expected workload
71861 + before deploying it.
71862 +
71863 + Note that this feature does not protect data stored in live pages,
71864 + e.g., process memory swapped to disk may stay there for a long time.
71865 +
71866 +config PAX_MEMORY_STACKLEAK
71867 + bool "Sanitize kernel stack"
71868 + depends on X86
71869 + help
71870 + By saying Y here the kernel will erase the kernel stack before it
71871 + returns from a system call. This in turn reduces the information
71872 + that a kernel stack leak bug can reveal.
71873 +
71874 + Note that such a bug can still leak information that was put on
71875 + the stack by the current system call (the one eventually triggering
71876 + the bug) but traces of earlier system calls on the kernel stack
71877 + cannot leak anymore.
71878 +
71879 + The tradeoff is performance impact, on a single CPU system kernel
71880 + compilation sees a 1% slowdown, other systems and workloads may vary
71881 + and you are advised to test this feature on your expected workload
71882 + before deploying it.
71883 +
71884 + Note: full support for this feature requires gcc with plugin support
71885 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
71886 + is not supported). Using older gcc versions means that functions
71887 + with large enough stack frames may leave uninitialized memory behind
71888 + that may be exposed to a later syscall leaking the stack.
71889 +
71890 +config PAX_MEMORY_UDEREF
71891 + bool "Prevent invalid userland pointer dereference"
71892 + depends on X86 && !UML_X86 && !XEN
71893 + select PAX_PER_CPU_PGD if X86_64
71894 + help
71895 + By saying Y here the kernel will be prevented from dereferencing
71896 + userland pointers in contexts where the kernel expects only kernel
71897 + pointers. This is both a useful runtime debugging feature and a
71898 + security measure that prevents exploiting a class of kernel bugs.
71899 +
71900 + The tradeoff is that some virtualization solutions may experience
71901 + a huge slowdown and therefore you should not enable this feature
71902 + for kernels meant to run in such environments. Whether a given VM
71903 + solution is affected or not is best determined by simply trying it
71904 + out, the performance impact will be obvious right on boot as this
71905 + mechanism engages from very early on. A good rule of thumb is that
71906 + VMs running on CPUs without hardware virtualization support (i.e.,
71907 + the majority of IA-32 CPUs) will likely experience the slowdown.
71908 +
71909 +config PAX_REFCOUNT
71910 + bool "Prevent various kernel object reference counter overflows"
71911 + depends on GRKERNSEC && (X86 || SPARC64)
71912 + help
71913 + By saying Y here the kernel will detect and prevent overflowing
71914 + various (but not all) kinds of object reference counters. Such
71915 + overflows can normally occur due to bugs only and are often, if
71916 + not always, exploitable.
71917 +
71918 + The tradeoff is that data structures protected by an overflowed
71919 + refcount will never be freed and therefore will leak memory. Note
71920 + that this leak also happens even without this protection but in
71921 + that case the overflow can eventually trigger the freeing of the
71922 + data structure while it is still being used elsewhere, resulting
71923 + in the exploitable situation that this feature prevents.
71924 +
71925 + Since this has a negligible performance impact, you should enable
71926 + this feature.
71927 +
71928 +config PAX_USERCOPY
71929 + bool "Harden heap object copies between kernel and userland"
71930 + depends on X86 || PPC || SPARC || ARM
71931 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
71932 + help
71933 + By saying Y here the kernel will enforce the size of heap objects
71934 + when they are copied in either direction between the kernel and
71935 + userland, even if only a part of the heap object is copied.
71936 +
71937 + Specifically, this checking prevents information leaking from the
71938 + kernel heap during kernel to userland copies (if the kernel heap
71939 + object is otherwise fully initialized) and prevents kernel heap
71940 + overflows during userland to kernel copies.
71941 +
71942 + Note that the current implementation provides the strictest bounds
71943 + checks for the SLUB allocator.
71944 +
71945 + Enabling this option also enables per-slab cache protection against
71946 + data in a given cache being copied into/out of via userland
71947 + accessors. Though the whitelist of regions will be reduced over
71948 + time, it notably protects important data structures like task structs.
71949 +
71950 +
71951 + If frame pointers are enabled on x86, this option will also
71952 + restrict copies into and out of the kernel stack to local variables
71953 + within a single frame.
71954 +
71955 + Since this has a negligible performance impact, you should enable
71956 + this feature.
71957 +
71958 +endmenu
71959 +
71960 +endmenu
71961 +
71962 config KEYS
71963 bool "Enable access key retention support"
71964 help
71965 @@ -146,7 +695,7 @@ config INTEL_TXT
71966 config LSM_MMAP_MIN_ADDR
71967 int "Low address space for LSM to protect from user allocation"
71968 depends on SECURITY && SECURITY_SELINUX
71969 - default 65536
71970 + default 32768
71971 help
71972 This is the portion of low virtual memory which should be protected
71973 from userspace allocation. Keeping a user from writing to low pages
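Of the PaX options added to security/Kconfig above, PAX_MPROTECT has the most directly observable effect from userland: a writable mapping cannot later be made executable. The probe below is a sketch only; whether the mprotect() call is refused, and with which errno, depends on the kernel configuration and on the PaX flags of the executable running it.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
            printf("mprotect(PROT_EXEC) refused: %s\n", strerror(errno));
        else
            printf("mprotect(PROT_EXEC) allowed (MPROTECT not enforced here)\n");

        munmap(p, 4096);
        return 0;
    }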
71974 diff -urNp linux-2.6.32.43/security/keys/keyring.c linux-2.6.32.43/security/keys/keyring.c
71975 --- linux-2.6.32.43/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
71976 +++ linux-2.6.32.43/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
71977 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
71978 ret = -EFAULT;
71979
71980 for (loop = 0; loop < klist->nkeys; loop++) {
71981 + key_serial_t serial;
71982 key = klist->keys[loop];
71983 + serial = key->serial;
71984
71985 tmp = sizeof(key_serial_t);
71986 if (tmp > buflen)
71987 tmp = buflen;
71988
71989 - if (copy_to_user(buffer,
71990 - &key->serial,
71991 - tmp) != 0)
71992 + if (copy_to_user(buffer, &serial, tmp))
71993 goto error;
71994
71995 buflen -= tmp;
71996 diff -urNp linux-2.6.32.43/security/min_addr.c linux-2.6.32.43/security/min_addr.c
71997 --- linux-2.6.32.43/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
71998 +++ linux-2.6.32.43/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
71999 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
72000 */
72001 static void update_mmap_min_addr(void)
72002 {
72003 +#ifndef SPARC
72004 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
72005 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
72006 mmap_min_addr = dac_mmap_min_addr;
72007 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
72008 #else
72009 mmap_min_addr = dac_mmap_min_addr;
72010 #endif
72011 +#endif
72012 }
72013
72014 /*
72015 diff -urNp linux-2.6.32.43/security/root_plug.c linux-2.6.32.43/security/root_plug.c
72016 --- linux-2.6.32.43/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
72017 +++ linux-2.6.32.43/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
72018 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
72019 return 0;
72020 }
72021
72022 -static struct security_operations rootplug_security_ops = {
72023 +static struct security_operations rootplug_security_ops __read_only = {
72024 .bprm_check_security = rootplug_bprm_check_security,
72025 };
72026
72027 diff -urNp linux-2.6.32.43/security/security.c linux-2.6.32.43/security/security.c
72028 --- linux-2.6.32.43/security/security.c 2011-03-27 14:31:47.000000000 -0400
72029 +++ linux-2.6.32.43/security/security.c 2011-04-17 15:56:46.000000000 -0400
72030 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
72031 extern struct security_operations default_security_ops;
72032 extern void security_fixup_ops(struct security_operations *ops);
72033
72034 -struct security_operations *security_ops; /* Initialized to NULL */
72035 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
72036
72037 static inline int verify(struct security_operations *ops)
72038 {
72039 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
72040 * If there is already a security module registered with the kernel,
72041 * an error will be returned. Otherwise %0 is returned on success.
72042 */
72043 -int register_security(struct security_operations *ops)
72044 +int __init register_security(struct security_operations *ops)
72045 {
72046 if (verify(ops)) {
72047 printk(KERN_DEBUG "%s could not verify "
72048 diff -urNp linux-2.6.32.43/security/selinux/hooks.c linux-2.6.32.43/security/selinux/hooks.c
72049 --- linux-2.6.32.43/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
72050 +++ linux-2.6.32.43/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
72051 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
72052 * Minimal support for a secondary security module,
72053 * just to allow the use of the capability module.
72054 */
72055 -static struct security_operations *secondary_ops;
72056 +static struct security_operations *secondary_ops __read_only;
72057
72058 /* Lists of inode and superblock security structures initialized
72059 before the policy was loaded. */
72060 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
72061
72062 #endif
72063
72064 -static struct security_operations selinux_ops = {
72065 +static struct security_operations selinux_ops __read_only = {
72066 .name = "selinux",
72067
72068 .ptrace_access_check = selinux_ptrace_access_check,
72069 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
72070 avc_disable();
72071
72072 /* Reset security_ops to the secondary module, dummy or capability. */
72073 + pax_open_kernel();
72074 security_ops = secondary_ops;
72075 + pax_close_kernel();
72076
72077 /* Unregister netfilter hooks. */
72078 selinux_nf_ip_exit();
72079 diff -urNp linux-2.6.32.43/security/selinux/include/xfrm.h linux-2.6.32.43/security/selinux/include/xfrm.h
72080 --- linux-2.6.32.43/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
72081 +++ linux-2.6.32.43/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
72082 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
72083
72084 static inline void selinux_xfrm_notify_policyload(void)
72085 {
72086 - atomic_inc(&flow_cache_genid);
72087 + atomic_inc_unchecked(&flow_cache_genid);
72088 }
72089 #else
72090 static inline int selinux_xfrm_enabled(void)
72091 diff -urNp linux-2.6.32.43/security/selinux/ss/services.c linux-2.6.32.43/security/selinux/ss/services.c
72092 --- linux-2.6.32.43/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
72093 +++ linux-2.6.32.43/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
72094 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
72095 int rc = 0;
72096 struct policy_file file = { data, len }, *fp = &file;
72097
72098 + pax_track_stack();
72099 +
72100 if (!ss_initialized) {
72101 avtab_cache_init();
72102 if (policydb_read(&policydb, fp)) {
72103 diff -urNp linux-2.6.32.43/security/smack/smack_lsm.c linux-2.6.32.43/security/smack/smack_lsm.c
72104 --- linux-2.6.32.43/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
72105 +++ linux-2.6.32.43/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
72106 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
72107 return 0;
72108 }
72109
72110 -struct security_operations smack_ops = {
72111 +struct security_operations smack_ops __read_only = {
72112 .name = "smack",
72113
72114 .ptrace_access_check = smack_ptrace_access_check,
72115 diff -urNp linux-2.6.32.43/security/tomoyo/tomoyo.c linux-2.6.32.43/security/tomoyo/tomoyo.c
72116 --- linux-2.6.32.43/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
72117 +++ linux-2.6.32.43/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
72118 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
72119 * tomoyo_security_ops is a "struct security_operations" which is used for
72120 * registering TOMOYO.
72121 */
72122 -static struct security_operations tomoyo_security_ops = {
72123 +static struct security_operations tomoyo_security_ops __read_only = {
72124 .name = "tomoyo",
72125 .cred_alloc_blank = tomoyo_cred_alloc_blank,
72126 .cred_prepare = tomoyo_cred_prepare,
72127 diff -urNp linux-2.6.32.43/sound/aoa/codecs/onyx.c linux-2.6.32.43/sound/aoa/codecs/onyx.c
72128 --- linux-2.6.32.43/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
72129 +++ linux-2.6.32.43/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
72130 @@ -53,7 +53,7 @@ struct onyx {
72131 spdif_locked:1,
72132 analog_locked:1,
72133 original_mute:2;
72134 - int open_count;
72135 + local_t open_count;
72136 struct codec_info *codec_info;
72137
72138 /* mutex serializes concurrent access to the device
72139 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
72140 struct onyx *onyx = cii->codec_data;
72141
72142 mutex_lock(&onyx->mutex);
72143 - onyx->open_count++;
72144 + local_inc(&onyx->open_count);
72145 mutex_unlock(&onyx->mutex);
72146
72147 return 0;
72148 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
72149 struct onyx *onyx = cii->codec_data;
72150
72151 mutex_lock(&onyx->mutex);
72152 - onyx->open_count--;
72153 - if (!onyx->open_count)
72154 + if (local_dec_and_test(&onyx->open_count))
72155 onyx->spdif_locked = onyx->analog_locked = 0;
72156 mutex_unlock(&onyx->mutex);
72157
72158 diff -urNp linux-2.6.32.43/sound/aoa/codecs/onyx.h linux-2.6.32.43/sound/aoa/codecs/onyx.h
72159 --- linux-2.6.32.43/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
72160 +++ linux-2.6.32.43/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
72161 @@ -11,6 +11,7 @@
72162 #include <linux/i2c.h>
72163 #include <asm/pmac_low_i2c.h>
72164 #include <asm/prom.h>
72165 +#include <asm/local.h>
72166
72167 /* PCM3052 register definitions */
72168
72169 diff -urNp linux-2.6.32.43/sound/drivers/mts64.c linux-2.6.32.43/sound/drivers/mts64.c
72170 --- linux-2.6.32.43/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
72171 +++ linux-2.6.32.43/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
72172 @@ -27,6 +27,7 @@
72173 #include <sound/initval.h>
72174 #include <sound/rawmidi.h>
72175 #include <sound/control.h>
72176 +#include <asm/local.h>
72177
72178 #define CARD_NAME "Miditerminal 4140"
72179 #define DRIVER_NAME "MTS64"
72180 @@ -65,7 +66,7 @@ struct mts64 {
72181 struct pardevice *pardev;
72182 int pardev_claimed;
72183
72184 - int open_count;
72185 + local_t open_count;
72186 int current_midi_output_port;
72187 int current_midi_input_port;
72188 u8 mode[MTS64_NUM_INPUT_PORTS];
72189 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
72190 {
72191 struct mts64 *mts = substream->rmidi->private_data;
72192
72193 - if (mts->open_count == 0) {
72194 + if (local_read(&mts->open_count) == 0) {
72195 /* We don't need a spinlock here, because this is just called
72196 if the device has not been opened before.
72197 So there aren't any IRQs from the device */
72198 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
72199
72200 msleep(50);
72201 }
72202 - ++(mts->open_count);
72203 + local_inc(&mts->open_count);
72204
72205 return 0;
72206 }
72207 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
72208 struct mts64 *mts = substream->rmidi->private_data;
72209 unsigned long flags;
72210
72211 - --(mts->open_count);
72212 - if (mts->open_count == 0) {
72213 + if (local_dec_return(&mts->open_count) == 0) {
72214 /* We need the spinlock_irqsave here because we can still
72215 have IRQs at this point */
72216 spin_lock_irqsave(&mts->lock, flags);
72217 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
72218
72219 msleep(500);
72220
72221 - } else if (mts->open_count < 0)
72222 - mts->open_count = 0;
72223 + } else if (local_read(&mts->open_count) < 0)
72224 + local_set(&mts->open_count, 0);
72225
72226 return 0;
72227 }
72228 diff -urNp linux-2.6.32.43/sound/drivers/portman2x4.c linux-2.6.32.43/sound/drivers/portman2x4.c
72229 --- linux-2.6.32.43/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
72230 +++ linux-2.6.32.43/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
72231 @@ -46,6 +46,7 @@
72232 #include <sound/initval.h>
72233 #include <sound/rawmidi.h>
72234 #include <sound/control.h>
72235 +#include <asm/local.h>
72236
72237 #define CARD_NAME "Portman 2x4"
72238 #define DRIVER_NAME "portman"
72239 @@ -83,7 +84,7 @@ struct portman {
72240 struct pardevice *pardev;
72241 int pardev_claimed;
72242
72243 - int open_count;
72244 + local_t open_count;
72245 int mode[PORTMAN_NUM_INPUT_PORTS];
72246 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
72247 };
72248 diff -urNp linux-2.6.32.43/sound/oss/sb_audio.c linux-2.6.32.43/sound/oss/sb_audio.c
72249 --- linux-2.6.32.43/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
72250 +++ linux-2.6.32.43/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
72251 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
72252 buf16 = (signed short *)(localbuf + localoffs);
72253 while (c)
72254 {
72255 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72256 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
72257 if (copy_from_user(lbuf8,
72258 userbuf+useroffs + p,
72259 locallen))
72260 diff -urNp linux-2.6.32.43/sound/oss/swarm_cs4297a.c linux-2.6.32.43/sound/oss/swarm_cs4297a.c
72261 --- linux-2.6.32.43/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
72262 +++ linux-2.6.32.43/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
72263 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
72264 {
72265 struct cs4297a_state *s;
72266 u32 pwr, id;
72267 - mm_segment_t fs;
72268 int rval;
72269 #ifndef CONFIG_BCM_CS4297A_CSWARM
72270 u64 cfg;
72271 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
72272 if (!rval) {
72273 char *sb1250_duart_present;
72274
72275 +#if 0
72276 + mm_segment_t fs;
72277 fs = get_fs();
72278 set_fs(KERNEL_DS);
72279 -#if 0
72280 val = SOUND_MASK_LINE;
72281 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
72282 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
72283 val = initvol[i].vol;
72284 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
72285 }
72286 + set_fs(fs);
72287 // cs4297a_write_ac97(s, 0x18, 0x0808);
72288 #else
72289 // cs4297a_write_ac97(s, 0x5e, 0x180);
72290 cs4297a_write_ac97(s, 0x02, 0x0808);
72291 cs4297a_write_ac97(s, 0x18, 0x0808);
72292 #endif
72293 - set_fs(fs);
72294
72295 list_add(&s->list, &cs4297a_devs);
72296
72297 diff -urNp linux-2.6.32.43/sound/pci/ac97/ac97_codec.c linux-2.6.32.43/sound/pci/ac97/ac97_codec.c
72298 --- linux-2.6.32.43/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
72299 +++ linux-2.6.32.43/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
72300 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
72301 }
72302
72303 /* build_ops to do nothing */
72304 -static struct snd_ac97_build_ops null_build_ops;
72305 +static const struct snd_ac97_build_ops null_build_ops;
72306
72307 #ifdef CONFIG_SND_AC97_POWER_SAVE
72308 static void do_update_power(struct work_struct *work)
72309 diff -urNp linux-2.6.32.43/sound/pci/ac97/ac97_patch.c linux-2.6.32.43/sound/pci/ac97/ac97_patch.c
72310 --- linux-2.6.32.43/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
72311 +++ linux-2.6.32.43/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
72312 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
72313 return 0;
72314 }
72315
72316 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
72317 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
72318 .build_spdif = patch_yamaha_ymf743_build_spdif,
72319 .build_3d = patch_yamaha_ymf7x3_3d,
72320 };
72321 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
72322 return 0;
72323 }
72324
72325 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
72326 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
72327 .build_3d = patch_yamaha_ymf7x3_3d,
72328 .build_post_spdif = patch_yamaha_ymf753_post_spdif
72329 };
72330 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
72331 return 0;
72332 }
72333
72334 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
72335 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
72336 .build_specific = patch_wolfson_wm9703_specific,
72337 };
72338
72339 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
72340 return 0;
72341 }
72342
72343 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
72344 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
72345 .build_specific = patch_wolfson_wm9704_specific,
72346 };
72347
72348 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
72349 return 0;
72350 }
72351
72352 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
72353 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
72354 .build_specific = patch_wolfson_wm9705_specific,
72355 };
72356
72357 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
72358 return 0;
72359 }
72360
72361 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
72362 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
72363 .build_specific = patch_wolfson_wm9711_specific,
72364 };
72365
72366 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
72367 }
72368 #endif
72369
72370 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
72371 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
72372 .build_specific = patch_wolfson_wm9713_specific,
72373 .build_3d = patch_wolfson_wm9713_3d,
72374 #ifdef CONFIG_PM
72375 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
72376 return 0;
72377 }
72378
72379 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
72380 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
72381 .build_3d = patch_sigmatel_stac9700_3d,
72382 .build_specific = patch_sigmatel_stac97xx_specific
72383 };
72384 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
72385 return patch_sigmatel_stac97xx_specific(ac97);
72386 }
72387
72388 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
72389 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
72390 .build_3d = patch_sigmatel_stac9708_3d,
72391 .build_specific = patch_sigmatel_stac9708_specific
72392 };
72393 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
72394 return 0;
72395 }
72396
72397 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
72398 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
72399 .build_3d = patch_sigmatel_stac9700_3d,
72400 .build_specific = patch_sigmatel_stac9758_specific
72401 };
72402 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
72403 return 0;
72404 }
72405
72406 -static struct snd_ac97_build_ops patch_cirrus_ops = {
72407 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
72408 .build_spdif = patch_cirrus_build_spdif
72409 };
72410
72411 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
72412 return 0;
72413 }
72414
72415 -static struct snd_ac97_build_ops patch_conexant_ops = {
72416 +static const struct snd_ac97_build_ops patch_conexant_ops = {
72417 .build_spdif = patch_conexant_build_spdif
72418 };
72419
72420 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
72421 }
72422 }
72423
72424 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
72425 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
72426 #ifdef CONFIG_PM
72427 .resume = ad18xx_resume
72428 #endif
72429 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
72430 return 0;
72431 }
72432
72433 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
72434 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
72435 .build_specific = &patch_ad1885_specific,
72436 #ifdef CONFIG_PM
72437 .resume = ad18xx_resume
72438 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
72439 return 0;
72440 }
72441
72442 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
72443 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
72444 .build_specific = &patch_ad1886_specific,
72445 #ifdef CONFIG_PM
72446 .resume = ad18xx_resume
72447 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
72448 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
72449 }
72450
72451 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
72452 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
72453 .build_post_spdif = patch_ad198x_post_spdif,
72454 .build_specific = patch_ad1981a_specific,
72455 #ifdef CONFIG_PM
72456 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
72457 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
72458 }
72459
72460 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
72461 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
72462 .build_post_spdif = patch_ad198x_post_spdif,
72463 .build_specific = patch_ad1981b_specific,
72464 #ifdef CONFIG_PM
72465 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
72466 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
72467 }
72468
72469 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
72470 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
72471 .build_post_spdif = patch_ad198x_post_spdif,
72472 .build_specific = patch_ad1888_specific,
72473 #ifdef CONFIG_PM
72474 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
72475 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
72476 }
72477
72478 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
72479 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
72480 .build_post_spdif = patch_ad198x_post_spdif,
72481 .build_specific = patch_ad1980_specific,
72482 #ifdef CONFIG_PM
72483 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
72484 ARRAY_SIZE(snd_ac97_ad1985_controls));
72485 }
72486
72487 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
72488 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
72489 .build_post_spdif = patch_ad198x_post_spdif,
72490 .build_specific = patch_ad1985_specific,
72491 #ifdef CONFIG_PM
72492 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
72493 ARRAY_SIZE(snd_ac97_ad1985_controls));
72494 }
72495
72496 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
72497 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
72498 .build_post_spdif = patch_ad198x_post_spdif,
72499 .build_specific = patch_ad1986_specific,
72500 #ifdef CONFIG_PM
72501 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
72502 return 0;
72503 }
72504
72505 -static struct snd_ac97_build_ops patch_alc650_ops = {
72506 +static const struct snd_ac97_build_ops patch_alc650_ops = {
72507 .build_specific = patch_alc650_specific,
72508 .update_jacks = alc650_update_jacks
72509 };
72510 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
72511 return 0;
72512 }
72513
72514 -static struct snd_ac97_build_ops patch_alc655_ops = {
72515 +static const struct snd_ac97_build_ops patch_alc655_ops = {
72516 .build_specific = patch_alc655_specific,
72517 .update_jacks = alc655_update_jacks
72518 };
72519 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
72520 return 0;
72521 }
72522
72523 -static struct snd_ac97_build_ops patch_alc850_ops = {
72524 +static const struct snd_ac97_build_ops patch_alc850_ops = {
72525 .build_specific = patch_alc850_specific,
72526 .update_jacks = alc850_update_jacks
72527 };
72528 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
72529 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
72530 }
72531
72532 -static struct snd_ac97_build_ops patch_cm9738_ops = {
72533 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
72534 .build_specific = patch_cm9738_specific,
72535 .update_jacks = cm9738_update_jacks
72536 };
72537 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
72538 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
72539 }
72540
72541 -static struct snd_ac97_build_ops patch_cm9739_ops = {
72542 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
72543 .build_specific = patch_cm9739_specific,
72544 .build_post_spdif = patch_cm9739_post_spdif,
72545 .update_jacks = cm9739_update_jacks
72546 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
72547 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
72548 }
72549
72550 -static struct snd_ac97_build_ops patch_cm9761_ops = {
72551 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
72552 .build_specific = patch_cm9761_specific,
72553 .build_post_spdif = patch_cm9761_post_spdif,
72554 .update_jacks = cm9761_update_jacks
72555 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
72556 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
72557 }
72558
72559 -static struct snd_ac97_build_ops patch_cm9780_ops = {
72560 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
72561 .build_specific = patch_cm9780_specific,
72562 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
72563 };
72564 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
72565 return 0;
72566 }
72567
72568 -static struct snd_ac97_build_ops patch_vt1616_ops = {
72569 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
72570 .build_specific = patch_vt1616_specific
72571 };
72572
72573 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
72574 return 0;
72575 }
72576
72577 -static struct snd_ac97_build_ops patch_it2646_ops = {
72578 +static const struct snd_ac97_build_ops patch_it2646_ops = {
72579 .build_specific = patch_it2646_specific,
72580 .update_jacks = it2646_update_jacks
72581 };
72582 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
72583 return 0;
72584 }
72585
72586 -static struct snd_ac97_build_ops patch_si3036_ops = {
72587 +static const struct snd_ac97_build_ops patch_si3036_ops = {
72588 .build_specific = patch_si3036_specific,
72589 };
72590
72591 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
72592 return 0;
72593 }
72594
72595 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
72596 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
72597 .build_specific = patch_ucb1400_specific,
72598 };
72599
72600 diff -urNp linux-2.6.32.43/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.43/sound/pci/hda/patch_intelhdmi.c
72601 --- linux-2.6.32.43/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
72602 +++ linux-2.6.32.43/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
72603 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
72604 cp_ready);
72605
72606 /* TODO */
72607 - if (cp_state)
72608 - ;
72609 - if (cp_ready)
72610 - ;
72611 + if (cp_state) {
72612 + }
72613 + if (cp_ready) {
72614 + }
72615 }
72616
72617
72618 diff -urNp linux-2.6.32.43/sound/pci/intel8x0m.c linux-2.6.32.43/sound/pci/intel8x0m.c
72619 --- linux-2.6.32.43/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
72620 +++ linux-2.6.32.43/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
72621 @@ -1264,7 +1264,7 @@ static struct shortname_table {
72622 { 0x5455, "ALi M5455" },
72623 { 0x746d, "AMD AMD8111" },
72624 #endif
72625 - { 0 },
72626 + { 0, },
72627 };
72628
72629 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
72630 diff -urNp linux-2.6.32.43/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.43/sound/pci/ymfpci/ymfpci_main.c
72631 --- linux-2.6.32.43/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
72632 +++ linux-2.6.32.43/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
72633 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
72634 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
72635 break;
72636 }
72637 - if (atomic_read(&chip->interrupt_sleep_count)) {
72638 - atomic_set(&chip->interrupt_sleep_count, 0);
72639 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72640 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72641 wake_up(&chip->interrupt_sleep);
72642 }
72643 __end:
72644 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
72645 continue;
72646 init_waitqueue_entry(&wait, current);
72647 add_wait_queue(&chip->interrupt_sleep, &wait);
72648 - atomic_inc(&chip->interrupt_sleep_count);
72649 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
72650 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
72651 remove_wait_queue(&chip->interrupt_sleep, &wait);
72652 }
72653 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
72654 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
72655 spin_unlock(&chip->reg_lock);
72656
72657 - if (atomic_read(&chip->interrupt_sleep_count)) {
72658 - atomic_set(&chip->interrupt_sleep_count, 0);
72659 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
72660 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72661 wake_up(&chip->interrupt_sleep);
72662 }
72663 }
72664 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
72665 spin_lock_init(&chip->reg_lock);
72666 spin_lock_init(&chip->voice_lock);
72667 init_waitqueue_head(&chip->interrupt_sleep);
72668 - atomic_set(&chip->interrupt_sleep_count, 0);
72669 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
72670 chip->card = card;
72671 chip->pci = pci;
72672 chip->irq = -1;
72673 diff -urNp linux-2.6.32.43/tools/gcc/Makefile linux-2.6.32.43/tools/gcc/Makefile
72674 --- linux-2.6.32.43/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
72675 +++ linux-2.6.32.43/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
72676 @@ -0,0 +1,11 @@
72677 +#CC := gcc
72678 +#PLUGIN_SOURCE_FILES := pax_plugin.c
72679 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
72680 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
72681 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
72682 +
72683 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
72684 +
72685 +hostlibs-y := pax_plugin.so
72686 +always := $(hostlibs-y)
72687 +pax_plugin-objs := pax_plugin.o
72688 diff -urNp linux-2.6.32.43/tools/gcc/pax_plugin.c linux-2.6.32.43/tools/gcc/pax_plugin.c
72689 --- linux-2.6.32.43/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
72690 +++ linux-2.6.32.43/tools/gcc/pax_plugin.c 2011-07-06 19:53:33.000000000 -0400
72691 @@ -0,0 +1,243 @@
72692 +/*
72693 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
72694 + * Licensed under the GPL v2
72695 + *
72696 + * Note: the choice of the license means that the compilation process is
72697 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
72698 + * but for the kernel it doesn't matter since it doesn't link against
72699 + * any of the gcc libraries
72700 + *
72701 + * gcc plugin to help implement various PaX features
72702 + *
72703 + * - track lowest stack pointer
72704 + *
72705 + * TODO:
72706 + * - initialize all local variables
72707 + *
72708 + * BUGS:
72709 + * - cloned functions are instrumented twice
72710 + */
72711 +#include "gcc-plugin.h"
72712 +#include "plugin-version.h"
72713 +#include "config.h"
72714 +#include "system.h"
72715 +#include "coretypes.h"
72716 +#include "tm.h"
72717 +#include "toplev.h"
72718 +#include "basic-block.h"
72719 +#include "gimple.h"
72720 +//#include "expr.h" where are you...
72721 +#include "diagnostic.h"
72722 +#include "rtl.h"
72723 +#include "emit-rtl.h"
72724 +#include "function.h"
72725 +#include "tree.h"
72726 +#include "tree-pass.h"
72727 +#include "intl.h"
72728 +
72729 +int plugin_is_GPL_compatible;
72730 +
72731 +static int track_frame_size = -1;
72732 +static const char track_function[] = "pax_track_stack";
72733 +static bool init_locals;
72734 +
72735 +static struct plugin_info pax_plugin_info = {
72736 + .version = "201106030000",
72737 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
72738 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
72739 +};
72740 +
72741 +static bool gate_pax_track_stack(void);
72742 +static unsigned int execute_pax_tree_instrument(void);
72743 +static unsigned int execute_pax_final(void);
72744 +
72745 +static struct gimple_opt_pass pax_tree_instrument_pass = {
72746 + .pass = {
72747 + .type = GIMPLE_PASS,
72748 + .name = "pax_tree_instrument",
72749 + .gate = gate_pax_track_stack,
72750 + .execute = execute_pax_tree_instrument,
72751 + .sub = NULL,
72752 + .next = NULL,
72753 + .static_pass_number = 0,
72754 + .tv_id = TV_NONE,
72755 + .properties_required = PROP_gimple_leh | PROP_cfg,
72756 + .properties_provided = 0,
72757 + .properties_destroyed = 0,
72758 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
72759 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
72760 + }
72761 +};
72762 +
72763 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
72764 + .pass = {
72765 + .type = RTL_PASS,
72766 + .name = "pax_final",
72767 + .gate = gate_pax_track_stack,
72768 + .execute = execute_pax_final,
72769 + .sub = NULL,
72770 + .next = NULL,
72771 + .static_pass_number = 0,
72772 + .tv_id = TV_NONE,
72773 + .properties_required = 0,
72774 + .properties_provided = 0,
72775 + .properties_destroyed = 0,
72776 + .todo_flags_start = 0,
72777 + .todo_flags_finish = 0
72778 + }
72779 +};
72780 +
72781 +static bool gate_pax_track_stack(void)
72782 +{
72783 + return track_frame_size >= 0;
72784 +}
72785 +
72786 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
72787 +{
72788 + gimple call;
72789 + tree decl, type;
72790 +
72791 + // insert call to void pax_track_stack(void)
72792 + type = build_function_type_list(void_type_node, NULL_TREE);
72793 + decl = build_fn_decl(track_function, type);
72794 + DECL_ASSEMBLER_NAME(decl); // for LTO
72795 + call = gimple_build_call(decl, 0);
72796 + if (before)
72797 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
72798 + else
72799 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
72800 +}
72801 +
72802 +static unsigned int execute_pax_tree_instrument(void)
72803 +{
72804 + basic_block bb;
72805 + gimple_stmt_iterator gsi;
72806 +
72807 + // 1. loop through BBs and GIMPLE statements
72808 + FOR_EACH_BB(bb) {
72809 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
72810 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
72811 + tree decl;
72812 + gimple stmt = gsi_stmt(gsi);
72813 +
72814 + if (!is_gimple_call(stmt))
72815 + continue;
72816 + decl = gimple_call_fndecl(stmt);
72817 + if (!decl)
72818 + continue;
72819 + if (TREE_CODE(decl) != FUNCTION_DECL)
72820 + continue;
72821 + if (!DECL_BUILT_IN(decl))
72822 + continue;
72823 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
72824 + continue;
72825 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
72826 + continue;
72827 +
72828 + // 2. insert track call after each __builtin_alloca call
72829 + pax_add_instrumentation(&gsi, false);
72830 +// print_node(stderr, "pax", decl, 4);
72831 + }
72832 + }
72833 +
72834 + // 3. insert track call at the beginning
72835 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
72836 + gsi = gsi_start_bb(bb);
72837 + pax_add_instrumentation(&gsi, true);
72838 +
72839 + return 0;
72840 +}
72841 +
72842 +static unsigned int execute_pax_final(void)
72843 +{
72844 + rtx insn;
72845 +
72846 + if (cfun->calls_alloca)
72847 + return 0;
72848 +
72849 + // 1. find pax_track_stack calls
72850 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
72851 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
72852 + rtx body;
72853 +
72854 + if (!CALL_P(insn))
72855 + continue;
72856 + body = PATTERN(insn);
72857 + if (GET_CODE(body) != CALL)
72858 + continue;
72859 + body = XEXP(body, 0);
72860 + if (GET_CODE(body) != MEM)
72861 + continue;
72862 + body = XEXP(body, 0);
72863 + if (GET_CODE(body) != SYMBOL_REF)
72864 + continue;
72865 + if (strcmp(XSTR(body, 0), track_function))
72866 + continue;
72867 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72868 + // 2. delete call if function frame is not big enough
72869 + if (get_frame_size() >= track_frame_size)
72870 + continue;
72871 + delete_insn_and_edges(insn);
72872 + }
72873 +
72874 +// print_simple_rtl(stderr, get_insns());
72875 +// print_rtl(stderr, get_insns());
72876 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72877 +
72878 + return 0;
72879 +}
72880 +
72881 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72882 +{
72883 + const char * const plugin_name = plugin_info->base_name;
72884 + const int argc = plugin_info->argc;
72885 + const struct plugin_argument * const argv = plugin_info->argv;
72886 + int i;
72887 + struct register_pass_info pax_tree_instrument_pass_info = {
72888 + .pass = &pax_tree_instrument_pass.pass,
72889 +// .reference_pass_name = "tree_profile",
72890 + .reference_pass_name = "optimized",
72891 + .ref_pass_instance_number = 0,
72892 + .pos_op = PASS_POS_INSERT_AFTER
72893 + };
72894 + struct register_pass_info pax_final_pass_info = {
72895 + .pass = &pax_final_rtl_opt_pass.pass,
72896 + .reference_pass_name = "final",
72897 + .ref_pass_instance_number = 0,
72898 + .pos_op = PASS_POS_INSERT_BEFORE
72899 + };
72900 +
72901 + if (!plugin_default_version_check(version, &gcc_version)) {
72902 + error(G_("incompatible gcc/plugin versions"));
72903 + return 1;
72904 + }
72905 +
72906 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
72907 +
72908 + for (i = 0; i < argc; ++i) {
72909 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
72910 + if (!argv[i].value) {
72911 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72912 + continue;
72913 + }
72914 + track_frame_size = atoi(argv[i].value);
72915 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
72916 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72917 + continue;
72918 + }
72919 + if (!strcmp(argv[i].key, "initialize-locals")) {
72920 + if (argv[i].value) {
72921 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72922 + continue;
72923 + }
72924 + init_locals = true;
72925 + continue;
72926 + }
72927 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72928 + }
72929 +
72930 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
72931 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
72932 +
72933 + return 0;
72934 +}
72935 Binary files linux-2.6.32.43/tools/gcc/pax_plugin.so and linux-2.6.32.43/tools/gcc/pax_plugin.so differ
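The pax_plugin.c added above works in two stages: the GIMPLE pass inserts a call to pax_track_stack() at the start of each function and after every __builtin_alloca() call, and the RTL pass registered before "final" deletes those calls again in functions that do not call alloca and whose frame is smaller than the track-lowest-sp threshold. Written out by hand as source (the plugin really operates on GIMPLE and RTL, so this is only an approximation), an instrumented function behaves roughly like the sketch below, with a stub standing in for the kernel's pax_track_stack() helper.

#include <stdio.h>

/* Stub for the kernel helper the plugin inserts calls to; the real
 * pax_track_stack() records the lowest stack pointer reached. */
static void pax_track_stack(void) { }

/* What a function effectively looks like after the GIMPLE pass: one call at
 * the start of the first basic block and one after each __builtin_alloca(). */
static int demo(unsigned int n)
{
	char *big;

	pax_track_stack();
	big = __builtin_alloca(n);
	pax_track_stack();
	big[0] = 42;
	return big[0];
}

int main(void)
{
	printf("%d\n", demo(8));
	return 0;
}

/* Because demo() calls alloca, the RTL pass keeps both calls; in a function
 * with no alloca and a frame below the track-lowest-sp threshold they would
 * be deleted again before final code generation. */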
72936 diff -urNp linux-2.6.32.43/usr/gen_init_cpio.c linux-2.6.32.43/usr/gen_init_cpio.c
72937 --- linux-2.6.32.43/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
72938 +++ linux-2.6.32.43/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
72939 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
72940 int retval;
72941 int rc = -1;
72942 int namesize;
72943 - int i;
72944 + unsigned int i;
72945
72946 mode |= S_IFREG;
72947
72948 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
72949 *env_var = *expanded = '\0';
72950 strncat(env_var, start + 2, end - start - 2);
72951 strncat(expanded, new_location, start - new_location);
72952 - strncat(expanded, getenv(env_var), PATH_MAX);
72953 - strncat(expanded, end + 1, PATH_MAX);
72954 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
72955 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
72956 strncpy(new_location, expanded, PATH_MAX);
72957 + new_location[PATH_MAX] = 0;
72958 } else
72959 break;
72960 }
72961 diff -urNp linux-2.6.32.43/virt/kvm/kvm_main.c linux-2.6.32.43/virt/kvm/kvm_main.c
72962 --- linux-2.6.32.43/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
72963 +++ linux-2.6.32.43/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
72964 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
72965 return 0;
72966 }
72967
72968 +/* cannot be const */
72969 static struct file_operations kvm_vcpu_fops = {
72970 .release = kvm_vcpu_release,
72971 .unlocked_ioctl = kvm_vcpu_ioctl,
72972 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
72973 return 0;
72974 }
72975
72976 +/* cannot be const */
72977 static struct file_operations kvm_vm_fops = {
72978 .release = kvm_vm_release,
72979 .unlocked_ioctl = kvm_vm_ioctl,
72980 @@ -2431,6 +2433,7 @@ out:
72981 return r;
72982 }
72983
72984 +/* cannot be const */
72985 static struct file_operations kvm_chardev_ops = {
72986 .unlocked_ioctl = kvm_dev_ioctl,
72987 .compat_ioctl = kvm_dev_ioctl,
72988 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
72989 if (kvm_rebooting)
72990 /* spin while reset goes on */
72991 while (true)
72992 - ;
72993 + cpu_relax();
72994 /* Fault while not rebooting. We want the trace. */
72995 BUG();
72996 }
72997 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
72998 kvm_arch_vcpu_put(vcpu);
72999 }
73000
73001 -int kvm_init(void *opaque, unsigned int vcpu_size,
73002 +int kvm_init(const void *opaque, unsigned int vcpu_size,
73003 struct module *module)
73004 {
73005 int r;
73006 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
73007 /* A kmem cache lets us meet the alignment requirements of fx_save. */
73008 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
73009 __alignof__(struct kvm_vcpu),
73010 - 0, NULL);
73011 + SLAB_USERCOPY, NULL);
73012 if (!kvm_vcpu_cache) {
73013 r = -ENOMEM;
73014 goto out_free_5;